Merge commit 'v2.6.35' into perf/core

Conflicts:
	tools/perf/Makefile
	tools/perf/util/hist.c

Merge reason: Resolve the conflicts and update to latest upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 3772b73472
Ingo Molnar, 2010-08-02 08:29:56 +02:00

209 changed files with 1509 additions and 649 deletions

@@ -417,6 +417,9 @@ reference on them using:
 This does all the RCU magic inside of it. The caller must call put_cred() on
 the credentials so obtained when they're finished with.

+[*] Note: The result of __task_cred() should not be passed directly to
+    get_cred() as this may race with commit_cred().
+
 There are a couple of convenience functions to access bits of another task's
 credentials, hiding the RCU magic from the caller:
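As an illustrative aside (not part of this patch), the race-free way to take a reference on another task's credentials is the get_task_cred() helper, rather than combining __task_cred() with get_cred() as the note above warns against:

	/* Hypothetical snippet: inspect another task's credentials safely. */
	const struct cred *cred;

	cred = get_task_cred(task);	/* takes the reference under proper locking */
	/* ... examine cred->uid, cred->gid, cred->cap_effective ... */
	put_cred(cred);			/* drop the reference when finished */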

@@ -647,3 +647,10 @@ Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
 ----------------------------
+
+What:	The acpi_sleep=s4_nonvs command line option
+When:	2.6.37
+Files:	arch/x86/kernel/acpi/sleep.c
+Why:	superseded by acpi_sleep=nonvs
+Who:	Rafael J. Wysocki <rjw@sisk.pl>
+----------------------------

@@ -254,8 +254,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			control method, with respect to putting devices into
 			low power states, to be enforced (the ACPI 2.0 ordering
 			of _PTS is used by default).
-		s4_nonvs	prevents the kernel from saving/restoring the
-			ACPI NVS memory during hibernation.
+		nonvs	prevents the kernel from saving/restoring the
+			ACPI NVS memory during suspend/hibernation and resume.
 		sci_force_enable causes the kernel to set SCI_EN directly
 			on resume from S1/S3 (which is against the ACPI spec,
 			but some broken systems don't work without it).
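For illustration only, the option is passed on the kernel command line; the old and new spellings would look like:

	# deprecated spelling (hibernation only)
	acpi_sleep=s4_nonvs
	# current spelling (applies to suspend and hibernation)
	acpi_sleep=nonvs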

@@ -6236,6 +6236,8 @@ F: drivers/mmc/host/wbsd.*
 WATCHDOG DEVICE DRIVERS
 M:	Wim Van Sebroeck <wim@iguana.be>
+L:	linux-watchdog@vger.kernel.org
+W:	http://www.linux-watchdog.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git
 S:	Maintained
 F:	Documentation/watchdog/

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 35
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Sheep on Meth

 # *DOCUMENTATION*

@@ -71,6 +71,9 @@ targets := vmlinux vmlinux.lds \
 		piggy.$(suffix_y) piggy.$(suffix_y).o \
 		font.o font.c head.o misc.o $(OBJS)

+# Make sure files are removed during clean
+extra-y	+= piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))

@@ -1028,13 +1028,12 @@ static int sa1111_remove(struct platform_device *pdev)
 	struct sa1111 *sachip = platform_get_drvdata(pdev);

 	if (sachip) {
-		__sa1111_remove(sachip);
-		platform_set_drvdata(pdev, NULL);
-
 #ifdef CONFIG_PM
 		kfree(sachip->saved_state);
 		sachip->saved_state = NULL;
 #endif
+		__sa1111_remove(sachip);
+		platform_set_drvdata(pdev, NULL);
 	}

 	return 0;

@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>

 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * IO port primitives for more information.
  */
 #ifdef __mem_pci
-#define readb(c) ({ __u8  __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
			__raw_readw(__mem_pci(c))); __v; })
-#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
			__raw_readl(__mem_pci(c))); __v; })
-#define readb_relaxed(addr)	readb(addr)
-#define readw_relaxed(addr)	readw(addr)
-#define readl_relaxed(addr)	readl(addr)
+#define readb_relaxed(c) ({ u8  __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
					__raw_readw(__mem_pci(c))); __v; })
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
					__raw_readl(__mem_pci(c))); __v; })
+#define writeb_relaxed(v,c)	((void)__raw_writeb(v,__mem_pci(c)))
+#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
					cpu_to_le16(v),__mem_pci(c)))
+#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
					cpu_to_le32(v),__mem_pci(c)))
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 #define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)
 #define readsl(p,d,l)		__raw_readsl(__mem_pci(p),d,l)
-#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
-#define writew(v,c)		__raw_writew((__force __u16) \
					cpu_to_le16(v),__mem_pci(c))
-#define writel(v,c)		__raw_writel((__force __u32) \
					cpu_to_le32(v),__mem_pci(c))
 #define writesb(p,d,l)		__raw_writesb(__mem_pci(p),d,l)
 #define writesw(p,d,l)		__raw_writesw(__mem_pci(p),d,l)
 #define writesl(p,d,l)		__raw_writesl(__mem_pci(p),d,l)
@@ -244,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * io{read,write}{8,16,32} macros
  */
 #ifndef ioread8
-#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __v; })
-#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
-#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
-#define iowrite8(v,p)	__raw_writeb(v, p)
-#define iowrite16(v,p)	__raw_writew((__force __u16)cpu_to_le16(v), p)
-#define iowrite32(v,p)	__raw_writel((__force __u32)cpu_to_le32(v), p)
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define iowrite8(v,p)	({ __iowmb(); (void)__raw_writeb(v, p); })
+#define iowrite16(v,p)	({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p)	({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
 #define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
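A short, hypothetical driver fragment (FIFO_DATA and FIFO_DOORBELL are made-up register offsets) showing where the new _relaxed() accessors fit relative to the fully ordered readl()/writel() introduced above:

	/* Sketch only: bulk-read a FIFO, then ring a doorbell. */
	static void drain_fifo(void __iomem *base, u32 *buf, int n)
	{
		int i;

		/* No per-word ordering against DMA is needed here, so the
		 * cheaper relaxed accessor (no __iormb()) is sufficient. */
		for (i = 0; i < n; i++)
			buf[i] = readl_relaxed(base + FIFO_DATA);

		/* The doorbell write must not overtake earlier memory writes;
		 * writel() issues __iowmb() before the access. */
		writel(1, base + FIFO_DOORBELL);
	}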

@@ -71,7 +71,7 @@
 		.pushsection .fixup,"ax"
 		.align	4
 9001:		mov	r4, #-EFAULT
-		ldr	r5, [fp, #4]		@ *err_ptr
+		ldr	r5, [sp, #8*4]		@ *err_ptr
 		str	r4, [r5]
 		ldmia	sp, {r1, r2}		@ retrieve dst, len
 		add	r2, r2, r1

@@ -11,6 +11,7 @@
 *
 */

+#include <mach/hardware.h>
 #include <asm/hardware/clps7111.h>

 		.macro	addruart, rx, tmp

@@ -25,6 +25,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/tps6507x.h>
 #include <linux/mfd/tps6507x.h>
 #include <linux/input/tps6507x-ts.h>
@@ -469,6 +470,11 @@ struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
 	},
 };

+/* We take advantage of the fact that both defdcdc{2,3} are tied high */
+static struct tps6507x_reg_platform_data tps6507x_platform_data = {
+	.defdcdc_default = true,
+};
+
 struct regulator_init_data tps65070_regulator_data[] = {
 	/* dcdc1 */
 	{
@@ -494,6 +500,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
 		},
 		.num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers),
 		.consumer_supplies = tps65070_dcdc2_consumers,
+		.driver_data = &tps6507x_platform_data,
 	},

 	/* dcdc3 */
@@ -507,6 +514,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
 		},
 		.num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers),
 		.consumer_supplies = tps65070_dcdc3_consumers,
+		.driver_data = &tps6507x_platform_data,
 	},

 	/* ldo1 */

@@ -232,7 +232,7 @@ EXPORT_SYMBOL(__bus_to_virt);

 unsigned long __pfn_to_bus(unsigned long pfn)
 {
-	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET));
+	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
 }
 EXPORT_SYMBOL(__pfn_to_bus);

@@ -11,8 +11,10 @@
 *
 */

-		.equ	io_virt, IO_BASE
-		.equ	io_phys, IO_START
+#include <mach/hardware.h>
+
+		.equ	io_virt, IO_VIRT
+		.equ	io_phys, IO_PHYS

 		.macro	addruart, rx, tmp
 		mrc	p15, 0, \rx, c1, c0

@@ -77,7 +77,7 @@ struct spi_board_info __initdata qnap_tsx1x_spi_slave_info[] = {
 	},
 };

-void qnap_tsx1x_register_flash(void)
+void __init qnap_tsx1x_register_flash(void)
 {
 	spi_register_board_info(qnap_tsx1x_spi_slave_info,
 				ARRAY_SIZE(qnap_tsx1x_spi_slave_info));

@@ -1,7 +1,7 @@
 #ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H
 #define __ARCH_KIRKWOOD_TSX1X_COMMON_H

-extern void qnap_tsx1x_register_flash(void);
+extern void __init qnap_tsx1x_register_flash(void);
 extern void qnap_tsx1x_power_off(void);

 #endif

@@ -8,6 +8,7 @@
 * the Free Software Foundation.
 */
 #include <mach/hardware.h>
+#include <asm/memory.h>

 #include <mach/regs-board-a9m9750dev.h>

@@ -20,50 +20,49 @@ static void putc_dummy(char c, void __iomem *base)
 	/* nothing */
 }

+static int timeout;
+
 static void putc_ns9360(char c, void __iomem *base)
 {
-	static int t = 0x10000;
 	do {
-		if (t)
-			--t;
+		if (timeout)
+			--timeout;

 		if (__raw_readl(base + 8) & (1 << 3)) {
 			__raw_writeb(c, base + 16);
-			t = 0x10000;
+			timeout = 0x10000;
 			break;
 		}
-	} while (t);
+	} while (timeout);
 }

 static void putc_a9m9750dev(char c, void __iomem *base)
 {
-	static int t = 0x10000;
 	do {
-		if (t)
-			--t;
+		if (timeout)
+			--timeout;

 		if (__raw_readb(base + 5) & (1 << 5)) {
 			__raw_writeb(c, base);
-			t = 0x10000;
+			timeout = 0x10000;
 			break;
 		}
-	} while (t);
+	} while (timeout);
 }

 static void putc_ns921x(char c, void __iomem *base)
 {
-	static int t = 0x10000;
 	do {
-		if (t)
-			--t;
+		if (timeout)
+			--timeout;

 		if (!(__raw_readl(base) & (1 << 11))) {
 			__raw_writeb(c, base + 0x0028);
-			t = 0x10000;
+			timeout = 0x10000;
 			break;
 		}
-	} while (t);
+	} while (timeout);
 }

 #define MSCS __REG(0xA0900184)
@@ -89,6 +88,7 @@ static void putc_ns921x(char c, void __iomem *base)

 static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base)
 {
+	timeout = 0x10000;
 	if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) {
 		/* ns9360 or ns9750 */
 		if (NS9360_UART_ENABLED(NS9360_UARTA)) {


@ -175,6 +175,10 @@ static void __init rx51_add_gpio_keys(void)
#endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */ #endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */
static int board_keymap[] = { static int board_keymap[] = {
/*
* Note that KEY(x, 8, KEY_XXX) entries represent "entrire row
* connected to the ground" matrix state.
*/
KEY(0, 0, KEY_Q), KEY(0, 0, KEY_Q),
KEY(0, 1, KEY_O), KEY(0, 1, KEY_O),
KEY(0, 2, KEY_P), KEY(0, 2, KEY_P),
@ -182,6 +186,7 @@ static int board_keymap[] = {
KEY(0, 4, KEY_BACKSPACE), KEY(0, 4, KEY_BACKSPACE),
KEY(0, 6, KEY_A), KEY(0, 6, KEY_A),
KEY(0, 7, KEY_S), KEY(0, 7, KEY_S),
KEY(1, 0, KEY_W), KEY(1, 0, KEY_W),
KEY(1, 1, KEY_D), KEY(1, 1, KEY_D),
KEY(1, 2, KEY_F), KEY(1, 2, KEY_F),
@ -190,6 +195,7 @@ static int board_keymap[] = {
KEY(1, 5, KEY_J), KEY(1, 5, KEY_J),
KEY(1, 6, KEY_K), KEY(1, 6, KEY_K),
KEY(1, 7, KEY_L), KEY(1, 7, KEY_L),
KEY(2, 0, KEY_E), KEY(2, 0, KEY_E),
KEY(2, 1, KEY_DOT), KEY(2, 1, KEY_DOT),
KEY(2, 2, KEY_UP), KEY(2, 2, KEY_UP),
@ -197,6 +203,8 @@ static int board_keymap[] = {
KEY(2, 5, KEY_Z), KEY(2, 5, KEY_Z),
KEY(2, 6, KEY_X), KEY(2, 6, KEY_X),
KEY(2, 7, KEY_C), KEY(2, 7, KEY_C),
KEY(2, 8, KEY_F9),
KEY(3, 0, KEY_R), KEY(3, 0, KEY_R),
KEY(3, 1, KEY_V), KEY(3, 1, KEY_V),
KEY(3, 2, KEY_B), KEY(3, 2, KEY_B),
@ -205,20 +213,23 @@ static int board_keymap[] = {
KEY(3, 5, KEY_SPACE), KEY(3, 5, KEY_SPACE),
KEY(3, 6, KEY_SPACE), KEY(3, 6, KEY_SPACE),
KEY(3, 7, KEY_LEFT), KEY(3, 7, KEY_LEFT),
KEY(4, 0, KEY_T), KEY(4, 0, KEY_T),
KEY(4, 1, KEY_DOWN), KEY(4, 1, KEY_DOWN),
KEY(4, 2, KEY_RIGHT), KEY(4, 2, KEY_RIGHT),
KEY(4, 4, KEY_LEFTCTRL), KEY(4, 4, KEY_LEFTCTRL),
KEY(4, 5, KEY_RIGHTALT), KEY(4, 5, KEY_RIGHTALT),
KEY(4, 6, KEY_LEFTSHIFT), KEY(4, 6, KEY_LEFTSHIFT),
KEY(4, 8, KEY_F10),
KEY(5, 0, KEY_Y), KEY(5, 0, KEY_Y),
KEY(5, 8, KEY_F11),
KEY(6, 0, KEY_U), KEY(6, 0, KEY_U),
KEY(7, 0, KEY_I), KEY(7, 0, KEY_I),
KEY(7, 1, KEY_F7), KEY(7, 1, KEY_F7),
KEY(7, 2, KEY_F8), KEY(7, 2, KEY_F8),
KEY(0xff, 2, KEY_F9),
KEY(0xff, 4, KEY_F10),
KEY(0xff, 5, KEY_F11),
}; };
static struct matrix_keymap_data board_map_data = { static struct matrix_keymap_data board_map_data = {

@@ -26,6 +26,7 @@
 #include <mach/colibri.h>
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
+#include <mach/audio.h>

 #include "generic.h"
 #include "devices.h"
@@ -145,7 +146,7 @@ static void __init colibri_pxa300_init_lcd(void)
 static inline void colibri_pxa300_init_lcd(void) {}
 #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */

-#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE)
+#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
 static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
 	GPIO24_AC97_SYSCLK,
 	GPIO23_AC97_nACRESET,

@@ -446,7 +446,7 @@ static struct platform_device corgiled_device = {
 static struct pxamci_platform_data corgi_mci_platform_data = {
 	.detect_delay_ms	= 250,
 	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
-	.gpio_card_detect	= -1,
+	.gpio_card_detect	= CORGI_GPIO_nSD_DETECT,
 	.gpio_card_ro		= CORGI_GPIO_nSD_WP,
 	.gpio_power		= CORGI_GPIO_SD_PWR,
 };

@@ -256,13 +256,9 @@ static void init_sdram_rows(void)

 static u32 mdrefr_dri(unsigned int freq)
 {
-	u32 dri = 0;
+	u32 interval = freq * SDRAM_TREF / sdram_rows;

-	if (cpu_is_pxa25x())
-		dri = ((freq * SDRAM_TREF) / (sdram_rows * 32));
-	if (cpu_is_pxa27x())
-		dri = ((freq * SDRAM_TREF) / (sdram_rows - 31)) / 32;
-	return dri;
+	return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }

 /* find a valid frequency point */

@@ -41,10 +41,10 @@ void pxa27x_clear_otgph(void)
 EXPORT_SYMBOL(pxa27x_clear_otgph);

 static unsigned long ac97_reset_config[] = {
+	GPIO95_AC97_nRESET,
+	GPIO95_GPIO,
+	GPIO113_AC97_nRESET,
 	GPIO113_GPIO,
-	GPIO113_AC97_nRESET,
-	GPIO95_GPIO,
-	GPIO95_AC97_nRESET,
 };

 void pxa27x_assert_ac97reset(int reset_gpio, int on)

@@ -237,7 +237,7 @@ static unsigned int realview_mmc_status(struct device *dev)
 	else
 		mask = 2;

-	return !(readl(REALVIEW_SYSMCI) & mask);
+	return readl(REALVIEW_SYSMCI) & mask;
 }

 struct mmci_platform_data realview_mmc0_plat_data = {

@@ -20,6 +20,9 @@
 		strb	\rd, [\rx]
 	.endm

+	.macro	waituart,rd,rx
+	.endm
+
 	.macro	busyuart,rd,rx
 		mov	\rd, #0
 1001:		add	\rd, \rd, #1

@@ -30,22 +30,22 @@
 static void putc(const char c)
 {
 	/* Do nothing if the UART is not enabled. */
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;

 	if (c == '\n')
 		putc('\r');

-	while (readb(U8500_UART_FR) & (1 << 5))
+	while (__raw_readb(U8500_UART_FR) & (1 << 5))
 		barrier();
-	writeb(c, U8500_UART_DR);
+	__raw_writeb(c, U8500_UART_DR);
 }

 static void flush(void)
 {
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;
-	while (readb(U8500_UART_FR) & (1 << 3))
+	while (__raw_readb(U8500_UART_FR) & (1 << 3))
 		barrier();
 }

@@ -241,7 +241,7 @@ static struct platform_device v2m_flash_device = {

 static unsigned int v2m_mmci_status(struct device *dev)
 {
-	return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+	return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
 }

 static struct mmci_platform_data v2m_mmci_data = {

@@ -93,7 +93,7 @@ static struct clk_lookup nuc900_clkregs[] = {
 	DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL),
 	DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL),
 	DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL),
-	DEF_CLKLOOK(&clk_adc, "nuc900-adc", NULL),
+	DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL),
 	DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL),
 	DEF_CLKLOOK(&clk_ext, NULL, "ext"),
 	DEF_CLKLOOK(&clk_timer0, NULL, "timer0"),


@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static inline void cache_wait(void __iomem *reg, unsigned long mask) static inline void cache_wait(void __iomem *reg, unsigned long mask)
{ {
/* wait for the operation to complete */ /* wait for the operation to complete */
while (readl(reg) & mask) while (readl_relaxed(reg) & mask)
; ;
} }
static inline void cache_sync(void) static inline void cache_sync(void)
{ {
void __iomem *base = l2x0_base; void __iomem *base = l2x0_base;
writel(0, base + L2X0_CACHE_SYNC); writel_relaxed(0, base + L2X0_CACHE_SYNC);
cache_wait(base + L2X0_CACHE_SYNC, 1); cache_wait(base + L2X0_CACHE_SYNC, 1);
} }
@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
{ {
void __iomem *base = l2x0_base; void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_LINE_PA, 1); cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel(addr, base + L2X0_CLEAN_LINE_PA); writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
} }
static inline void l2x0_inv_line(unsigned long addr) static inline void l2x0_inv_line(unsigned long addr)
{ {
void __iomem *base = l2x0_base; void __iomem *base = l2x0_base;
cache_wait(base + L2X0_INV_LINE_PA, 1); cache_wait(base + L2X0_INV_LINE_PA, 1);
writel(addr, base + L2X0_INV_LINE_PA); writel_relaxed(addr, base + L2X0_INV_LINE_PA);
} }
#ifdef CONFIG_PL310_ERRATA_588369 #ifdef CONFIG_PL310_ERRATA_588369
@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
/* Clean by PA followed by Invalidate by PA */ /* Clean by PA followed by Invalidate by PA */
cache_wait(base + L2X0_CLEAN_LINE_PA, 1); cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel(addr, base + L2X0_CLEAN_LINE_PA); writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
cache_wait(base + L2X0_INV_LINE_PA, 1); cache_wait(base + L2X0_INV_LINE_PA, 1);
writel(addr, base + L2X0_INV_LINE_PA); writel_relaxed(addr, base + L2X0_INV_LINE_PA);
} }
#else #else
@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
{ {
void __iomem *base = l2x0_base; void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel(addr, base + L2X0_CLEAN_INV_LINE_PA); writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
} }
#endif #endif
@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
/* invalidate all ways */ /* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags); spin_lock_irqsave(&l2x0_lock, flags);
writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync(); cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags); spin_unlock_irqrestore(&l2x0_lock, flags);
@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
l2x0_base = base; l2x0_base = base;
cache_id = readl(l2x0_base + L2X0_CACHE_ID); cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl(l2x0_base + L2X0_AUX_CTRL); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask; aux &= aux_mask;
aux |= aux_val; aux |= aux_val;
@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
* If you are booting from non-secure mode * If you are booting from non-secure mode
* accessing the below registers will fault. * accessing the below registers will fault.
*/ */
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) { if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */ /* l2x0 controller is disabled */
writel(aux, l2x0_base + L2X0_AUX_CTRL); writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_inv_all(); l2x0_inv_all();
/* enable L2X0 */ /* enable L2X0 */
writel(1, l2x0_base + L2X0_CTRL); writel_relaxed(1, l2x0_base + L2X0_CTRL);
} }
outer_cache.inv_range = l2x0_inv_range; outer_cache.inv_range = l2x0_inv_range;

@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned int idx, cpu;
+	int *depth;
 	unsigned long vaddr, flags;
 	pte_t pte, *ptep;

+	if (!in_interrupt())
+		preempt_disable();
+
+	cpu = smp_processor_id();
+	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
 	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	ptep = TOP_PTE(vaddr);
 	pte = mk_pte(page, kmap_prot);

-	if (!in_interrupt())
-		preempt_disable();
-
 	raw_local_irq_save(flags);
 	(*depth)++;
 	if (pte_val(*ptep) == pte_val(pte)) {

@@ -17,8 +17,8 @@
 		.macro	addruart, rx
 		mrc	p15, 0, \rx, c1, c0
 		tst	\rx, #1				@ MMU enabled?
-		moveq	\rx, =SPEAR_DBG_UART_BASE	@ Physical base
-		movne	\rx, =VA_SPEAR_DBG_UART_BASE	@ Virtual base
+		moveq	\rx, #SPEAR_DBG_UART_BASE	@ Physical base
+		movne	\rx, #VA_SPEAR_DBG_UART_BASE	@ Virtual base
 		.endm

 		.macro	senduart, rd, rx

@@ -54,6 +54,9 @@
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */

+#define TIOCGRS485	0x542E
+#define TIOCSRS485	0x542F
+
 #define FIONCLEX	0x5450
 #define FIOCLEX		0x5451
 #define FIOASYNC	0x5452

@@ -5,6 +5,7 @@
 #define __ASM_ARCH_BOARD_H

 #include <linux/types.h>
+#include <linux/serial.h>

 #define GPIO_PIN_NONE	(-1)
@@ -35,6 +36,7 @@ struct atmel_uart_data {
 	short		use_dma_tx;	/* use transmit DMA? */
 	short		use_dma_rx;	/* use receive DMA? */
 	void __iomem	*regs;		/* virtual base address, if any */
+	struct serial_rs485	rs485;	/* rs485 settings */
 };
 void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
 struct platform_device *at32_add_device_usart(unsigned int id);
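As a hedged sketch of how board code might use the new field (the board name and the pin timing value are invented), struct serial_rs485 from <linux/serial.h> is filled in and handed to the USART through atmel_uart_data:

	/* Hypothetical board file fragment. */
	static struct atmel_uart_data usart1_data = {
		.use_dma_tx	= 1,
		.use_dma_rx	= 1,
		.rs485		= {
			.flags			= SER_RS485_ENABLED,
			.delay_rts_before_send	= 1,
		},
	};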

@@ -435,20 +435,21 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
 static int __init au1xxx_platform_init(void)
 {
 	unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
-	int i;
+	int err, i;

 	/* Fill up uartclk. */
 	for (i = 0; au1x00_uart_data[i].flags; i++)
 		au1x00_uart_data[i].uartclk = uartclk;

+	err = platform_add_devices(au1xxx_platform_devices,
+				   ARRAY_SIZE(au1xxx_platform_devices));
+
 #ifndef CONFIG_SOC_AU1100
 	/* Register second MAC if enabled in pinfunc */
-	if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
+	if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
 		platform_device_register(&au1xxx_eth1_device);
 #endif

-	return platform_add_devices(au1xxx_platform_devices,
-				    ARRAY_SIZE(au1xxx_platform_devices));
+	return err;
 }

 arch_initcall(au1xxx_platform_init);

@@ -67,8 +67,6 @@ static void mtx1_power_off(void)

 void __init board_setup(void)
 {
-	alchemy_gpio2_enable();
-
 #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 	/* Enable USB power switch */
 	alchemy_gpio_direction_output(204, 0);
@@ -117,11 +115,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)

 	if (assert && devsel != 0)
 		/* Suppress signal to Cardbus */
-		gpio_set_value(1, 0);	/* set EXT_IO3 OFF */
+		alchemy_gpio_set_value(1, 0);	/* set EXT_IO3 OFF */
 	else
-		gpio_set_value(1, 1);	/* set EXT_IO3 ON */
-	au_sync_udelay(1);
+		alchemy_gpio_set_value(1, 1);	/* set EXT_IO3 ON */
+	udelay(1);
 	return 1;
 }

@@ -104,6 +104,9 @@ int __init bcm63xx_enet_register(int unit,
 	if (unit > 1)
 		return -ENODEV;

+	if (unit == 1 && BCMCPU_IS_6338())
+		return -ENODEV;
+
 	if (!shared_device_registered) {
 		shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
 		shared_res[0].end = shared_res[0].start;


@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %0, %1 # atomic64_add \n" "1: lld %0, %1 # atomic64_add \n"
" addu %0, %2 \n" " daddu %0, %2 \n"
" scd %0, %1 \n" " scd %0, %1 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" .set mips0 \n" " .set mips0 \n"
@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %0, %1 # atomic64_add \n" "1: lld %0, %1 # atomic64_add \n"
" addu %0, %2 \n" " daddu %0, %2 \n"
" scd %0, %1 \n" " scd %0, %1 \n"
" beqz %0, 2f \n" " beqz %0, 2f \n"
" .subsection 2 \n" " .subsection 2 \n"
@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %0, %1 # atomic64_sub \n" "1: lld %0, %1 # atomic64_sub \n"
" subu %0, %2 \n" " dsubu %0, %2 \n"
" scd %0, %1 \n" " scd %0, %1 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" .set mips0 \n" " .set mips0 \n"
@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %0, %1 # atomic64_sub \n" "1: lld %0, %1 # atomic64_sub \n"
" subu %0, %2 \n" " dsubu %0, %2 \n"
" scd %0, %1 \n" " scd %0, %1 \n"
" beqz %0, 2f \n" " beqz %0, 2f \n"
" .subsection 2 \n" " .subsection 2 \n"
@ -524,10 +524,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %1, %2 # atomic64_add_return \n" "1: lld %1, %2 # atomic64_add_return \n"
" addu %0, %1, %3 \n" " daddu %0, %1, %3 \n"
" scd %0, %2 \n" " scd %0, %2 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" addu %0, %1, %3 \n" " daddu %0, %1, %3 \n"
" .set mips0 \n" " .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter) : "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter) : "Ir" (i), "m" (v->counter)
@ -538,10 +538,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %1, %2 # atomic64_add_return \n" "1: lld %1, %2 # atomic64_add_return \n"
" addu %0, %1, %3 \n" " daddu %0, %1, %3 \n"
" scd %0, %2 \n" " scd %0, %2 \n"
" beqz %0, 2f \n" " beqz %0, 2f \n"
" addu %0, %1, %3 \n" " daddu %0, %1, %3 \n"
" .subsection 2 \n" " .subsection 2 \n"
"2: b 1b \n" "2: b 1b \n"
" .previous \n" " .previous \n"
@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %1, %2 # atomic64_sub_return \n" "1: lld %1, %2 # atomic64_sub_return \n"
" subu %0, %1, %3 \n" " dsubu %0, %1, %3 \n"
" scd %0, %2 \n" " scd %0, %2 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" subu %0, %1, %3 \n" " dsubu %0, %1, %3 \n"
" .set mips0 \n" " .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter) : "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter) : "Ir" (i), "m" (v->counter)
@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
__asm__ __volatile__( __asm__ __volatile__(
" .set mips3 \n" " .set mips3 \n"
"1: lld %1, %2 # atomic64_sub_return \n" "1: lld %1, %2 # atomic64_sub_return \n"
" subu %0, %1, %3 \n" " dsubu %0, %1, %3 \n"
" scd %0, %2 \n" " scd %0, %2 \n"
" beqz %0, 2f \n" " beqz %0, 2f \n"
" subu %0, %1, %3 \n" " dsubu %0, %1, %3 \n"
" .subsection 2 \n" " .subsection 2 \n"
"2: b 1b \n" "2: b 1b \n"
" .previous \n" " .previous \n"

@@ -984,16 +984,17 @@
 #define __NR_perf_event_open		(__NR_Linux + 296)
 #define __NR_accept4			(__NR_Linux + 297)
 #define __NR_recvmmsg			(__NR_Linux + 298)
+#define __NR_getdents64			(__NR_Linux + 299)

 /*
 * Offset of the last N32 flavoured syscall
 */
-#define __NR_Linux_syscalls		298
+#define __NR_Linux_syscalls		299

 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */

 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		298
+#define __NR_N32_Linux_syscalls		299

 #ifdef __KERNEL__

@@ -419,4 +419,5 @@ EXPORT(sysn32_call_table)
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	PTR	compat_sys_recvmmsg
+	PTR	sys_getdents
 	.size	sysn32_call_table,.-sysn32_call_table

@@ -61,11 +61,9 @@ static int __init init_vdso(void)

 	vunmap(vdso);

-	pr_notice("init_vdso successfull\n");
-
 	return 0;
 }
-device_initcall(init_vdso);
+subsys_initcall(init_vdso);

 static unsigned long vdso_addr(unsigned long start)
 {

@@ -247,6 +247,8 @@ void __init mips_pcibios_init(void)
 	iomem_resource.end &= 0xfffffffffULL;	/* 64 GB */
 	ioport_resource.end = controller->io_resource->end;

+	controller->io_map_base = mips_io_port_base;
+
 	register_pci_controller(controller);
 }

@@ -44,6 +44,7 @@ extern struct pci_ops pnx8550_pci_ops;

 static struct pci_controller pnx8550_controller = {
 	.pci_ops	= &pnx8550_pci_ops,
+	.io_map_base	= PNX8550_PORT_BASE,
 	.io_resource	= &pci_io_resource,
 	.mem_resource	= &pci_mem_resource,
 };

@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
 	PNX8550_GLB2_ENAB_INTA_O = 0;

 	/* IO/MEM resources. */
-	set_io_port_base(KSEG1);
+	set_io_port_base(PNX8550_PORT_BASE);
 	ioport_resource.start = 0;
 	ioport_resource.end = ~0;
 	iomem_resource.start = 0;

@@ -944,6 +944,7 @@ static struct pci_controller msp_pci_controller = {
 	.pci_ops	= &msp_pci_ops,
 	.mem_resource	= &pci_mem_resource,
 	.mem_offset	= 0,
+	.io_map_base	= MSP_PCI_IOSPACE_BASE,
 	.io_resource	= &pci_io_resource,
 	.io_offset	= 0
 };

@@ -54,6 +54,7 @@ static int __init pmc_yosemite_setup(void)
 		panic(ioremap_failed);

 	set_io_port_base(io_v_base);
+	py_controller.io_map_base = io_v_base;
 	TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);

 	ioport_resource.end = TITAN_IO_SIZE - 1;

@@ -472,6 +472,9 @@ void __init configure_platform(void)
 		 * it*/
 		platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;

+		/* Cronus and Cronus Lite have the same register map */
+		set_register_map(CRONUS_IO_BASE, &cronus_register_map);
+
 		/* ASIC version will determine if this is a real CronusLite or
 		 * Castrati(Cronus) */
 		chipversion = asic_read(chipver3) << 24;
@@ -484,8 +487,6 @@ void __init configure_platform(void)
 		else
 			asic = ASIC_CRONUSLITE;

-		/* Cronus and Cronus Lite have the same register map */
-		set_register_map(CRONUS_IO_BASE, &cronus_register_map);
 		gp_resources = non_dvr_cronuslite_resources;
 		pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
 			"chipversion=0x%08X\n",

@@ -8,9 +8,9 @@
 * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
 * and therefore we can only deal with memory within this range
 */
-#define KEXEC_SOURCE_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL)
-#define KEXEC_DESTINATION_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL)
-#define KEXEC_CONTROL_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL)
+#define KEXEC_SOURCE_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)

 #else

@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize);
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+			       unsigned long vsid, unsigned long trap,
+			       int ssize, int psize, unsigned long pte);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			     unsigned long pstart, unsigned long prot,
 			     int psize, int ssize);

@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		struct perf_sample_data data = {
-			.period	= event->hw.last_period,
-		};
+		struct perf_sample_data data;
+
+		perf_sample_data_init(&data, 0);

 		if (perf_event_overflow(event, nmi, &data, regs)) {
 			/*

@@ -414,7 +414,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;

-	ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
 	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
 	std	r8,STK_PARM(r8)(r1)
 	std	r9,STK_PARM(r9)(r1)

-	/* Add _PAGE_PRESENT to access */
-	ori	r4,r4,_PAGE_PRESENT
-
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
 	std	r8,STK_PARM(r8)(r1)
 	std	r9,STK_PARM(r9)(r1)

-	/* Add _PAGE_PRESENT to access */
-	ori	r4,r4,_PAGE_PRESENT
-
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
 	std	r8,STK_PARM(r8)(r1)
 	std	r9,STK_PARM(r9)(r1)

-	/* Add _PAGE_PRESENT to access */
-	ori	r4,r4,_PAGE_PRESENT
-
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"


@ -871,6 +871,18 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
} }
#endif #endif
void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
int ssize, int psize, unsigned long pte)
{
if (!printk_ratelimit())
return;
pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
ea, access, current->comm);
pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
trap, vsid, ssize, psize, pte);
}
/* Result code is: /* Result code is:
* 0 - handled * 0 - handled
* 1 - normal page fault * 1 - normal page fault
@ -955,6 +967,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
return 1; return 1;
} }
/* Add _PAGE_PRESENT to the required access perm */
access |= _PAGE_PRESENT;
/* Pre-check access permissions (will be re-checked atomically
* in __hash_page_XX but this pre-check is a fast path
*/
if (access & ~pte_val(*ptep)) {
DBG_LOW(" no access !\n");
return 1;
}
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
if (hugeshift) if (hugeshift)
return __hash_page_huge(ea, access, vsid, ptep, trap, local, return __hash_page_huge(ea, access, vsid, ptep, trap, local,
@ -967,14 +990,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
pte_val(*(ptep + PTRS_PER_PTE))); pte_val(*(ptep + PTRS_PER_PTE)));
#endif #endif
/* Pre-check access permissions (will be re-checked atomically
* in __hash_page_XX but this pre-check is a fast path
*/
if (access & ~pte_val(*ptep)) {
DBG_LOW(" no access !\n");
return 1;
}
/* Do actual hashing */ /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES #ifdef CONFIG_PPC_64K_PAGES
/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */ /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
@ -1033,6 +1048,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
local, ssize, spp); local, ssize, spp);
} }
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize, psize,
pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES #ifndef CONFIG_PPC_64K_PAGES
DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else #else
@ -1051,8 +1072,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
void *pgdir; void *pgdir;
pte_t *ptep; pte_t *ptep;
unsigned long flags; unsigned long flags;
int local = 0; int rc, ssize, local = 0;
int ssize;
BUG_ON(REGION_ID(ea) != USER_REGION_ID); BUG_ON(REGION_ID(ea) != USER_REGION_ID);
@ -1098,11 +1118,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Hash it in */ /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K #ifdef CONFIG_PPC_HAS_HASH_64K
if (mm->context.user_psize == MMU_PAGE_64K) if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
else else
#endif /* CONFIG_PPC_HAS_HASH_64K */ #endif /* CONFIG_PPC_HAS_HASH_64K */
__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
subpage_protection(pgdir, ea)); subpage_protection(pgdir, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize,
mm->context.user_psize, pte_val(*ptep));
local_irq_restore(flags); local_irq_restore(flags);
} }


@ -21,21 +21,13 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
unsigned long old_pte, new_pte; unsigned long old_pte, new_pte;
unsigned long va, rflags, pa, sz; unsigned long va, rflags, pa, sz;
long slot; long slot;
int err = 1;
BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
/* Search the Linux page table for a match with va */ /* Search the Linux page table for a match with va */
va = hpt_va(ea, vsid, ssize); va = hpt_va(ea, vsid, ssize);
/* /* At this point, we have a pte (old_pte) which can be used to build
* Check the user's access rights to the page. If access should be
* prevented then send the problem up to do_page_fault.
*/
if (unlikely(access & ~pte_val(*ptep)))
goto out;
/*
* At this point, we have a pte (old_pte) which can be used to build
* or update an HPTE. There are 2 cases: * or update an HPTE. There are 2 cases:
* *
* 1. There is a valid (present) pte with no associated HPTE (this is * 1. There is a valid (present) pte with no associated HPTE (this is
@ -49,9 +41,17 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
do { do {
old_pte = pte_val(*ptep); old_pte = pte_val(*ptep);
if (old_pte & _PAGE_BUSY) /* If PTE busy, retry the access */
goto out; if (unlikely(old_pte & _PAGE_BUSY))
return 0;
/* If PTE permissions don't match, take page fault */
if (unlikely(access & ~old_pte))
return 1;
/* Try to lock the PTE, add ACCESSED and DIRTY if it was
* a write access */
new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED; new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
if (access & _PAGE_RW)
new_pte |= _PAGE_DIRTY;
} while(old_pte != __cmpxchg_u64((unsigned long *)ptep, } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte)); old_pte, new_pte));
@ -121,8 +121,16 @@ repeat:
} }
} }
if (unlikely(slot == -2)) /*
panic("hash_huge_page: pte_insert failed\n"); * Hypervisor failure. Restore old pte and return -1
* similar to __hash_page_*
*/
if (unlikely(slot == -2)) {
*ptep = __pte(old_pte);
hash_failure_debug(ea, access, vsid, trap, ssize,
mmu_psize, old_pte);
return -1;
}
new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX); new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
} }
@ -131,9 +139,5 @@ repeat:
* No need to use ldarx/stdcx here * No need to use ldarx/stdcx here
*/ */
*ptep = __pte(new_pte & ~_PAGE_BUSY); *ptep = __pte(new_pte & ~_PAGE_BUSY);
return 0;
err = 0;
out:
return err;
} }


@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
} }
/* /*
* Retreive and validate the ibm,memblock-size property for drconf memory * Retreive and validate the ibm,lmb-size property for drconf memory
* from the device tree. * from the device tree.
*/ */
static u64 of_get_memblock_size(struct device_node *memory) static u64 of_get_lmb_size(struct device_node *memory)
{ {
const u32 *prop; const u32 *prop;
u32 len; u32 len;
prop = of_get_property(memory, "ibm,memblock-size", &len); prop = of_get_property(memory, "ibm,lmb-size", &len);
if (!prop || len < sizeof(unsigned int)) if (!prop || len < sizeof(unsigned int))
return 0; return 0;
@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
static inline int __init read_usm_ranges(const u32 **usm) static inline int __init read_usm_ranges(const u32 **usm)
{ {
/* /*
* For each memblock in ibm,dynamic-memory a corresponding * For each lmb in ibm,dynamic-memory a corresponding
* entry in linux,drconf-usable-memory property contains * entry in linux,drconf-usable-memory property contains
* a counter followed by that many (base, size) duple. * a counter followed by that many (base, size) duple.
* read the counter from linux,drconf-usable-memory * read the counter from linux,drconf-usable-memory
@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
{ {
const u32 *dm, *usm; const u32 *dm, *usm;
unsigned int n, rc, ranges, is_kexec_kdump = 0; unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long memblock_size, base, size, sz; unsigned long lmb_size, base, size, sz;
int nid; int nid;
struct assoc_arrays aa; struct assoc_arrays aa;
@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
if (!n) if (!n)
return; return;
memblock_size = of_get_memblock_size(memory); lmb_size = of_get_lmb_size(memory);
if (!memblock_size) if (!lmb_size)
return; return;
rc = of_get_assoc_arrays(memory, &aa); rc = of_get_assoc_arrays(memory, &aa);
@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
continue; continue;
base = drmem.base_addr; base = drmem.base_addr;
size = memblock_size; size = lmb_size;
ranges = 1; ranges = 1;
if (is_kexec_kdump) { if (is_kexec_kdump) {
@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
{ {
const u32 *dm; const u32 *dm;
unsigned int drconf_cell_cnt, rc; unsigned int drconf_cell_cnt, rc;
unsigned long memblock_size; unsigned long lmb_size;
struct assoc_arrays aa; struct assoc_arrays aa;
int nid = -1; int nid = -1;
@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
if (!drconf_cell_cnt) if (!drconf_cell_cnt)
return -1; return -1;
memblock_size = of_get_memblock_size(memory); lmb_size = of_get_lmb_size(memory);
if (!memblock_size) if (!lmb_size)
return -1; return -1;
rc = of_get_assoc_arrays(memory, &aa); rc = of_get_assoc_arrays(memory, &aa);
@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
continue; continue;
if ((scn_addr < drmem.base_addr) if ((scn_addr < drmem.base_addr)
|| (scn_addr >= (drmem.base_addr + memblock_size))) || (scn_addr >= (drmem.base_addr + lmb_size)))
continue; continue;
nid = of_drconf_to_nid_single(&drmem, &aa); nid = of_drconf_to_nid_single(&drmem, &aa);


@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
const char *type; const char *type;
const unsigned int *regs; const unsigned int *regs;
unsigned long base; unsigned long base;
unsigned int memblock_size; unsigned int lmb_size;
int ret = -EINVAL; int ret = -EINVAL;
/* /*
@ -87,9 +87,9 @@ static int pseries_remove_memory(struct device_node *np)
return ret; return ret;
base = *(unsigned long *)regs; base = *(unsigned long *)regs;
memblock_size = regs[3]; lmb_size = regs[3];
ret = pseries_remove_memblock(base, memblock_size); ret = pseries_remove_memblock(base, lmb_size);
return ret; return ret;
} }
@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
const char *type; const char *type;
const unsigned int *regs; const unsigned int *regs;
unsigned long base; unsigned long base;
unsigned int memblock_size; unsigned int lmb_size;
int ret = -EINVAL; int ret = -EINVAL;
/* /*
@ -116,36 +116,36 @@ static int pseries_add_memory(struct device_node *np)
return ret; return ret;
base = *(unsigned long *)regs; base = *(unsigned long *)regs;
memblock_size = regs[3]; lmb_size = regs[3];
/* /*
* Update memory region to represent the memory add * Update memory region to represent the memory add
*/ */
ret = memblock_add(base, memblock_size); ret = memblock_add(base, lmb_size);
return (ret < 0) ? -EINVAL : 0; return (ret < 0) ? -EINVAL : 0;
} }
static int pseries_drconf_memory(unsigned long *base, unsigned int action) static int pseries_drconf_memory(unsigned long *base, unsigned int action)
{ {
struct device_node *np; struct device_node *np;
const unsigned long *memblock_size; const unsigned long *lmb_size;
int rc; int rc;
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (!np) if (!np)
return -EINVAL; return -EINVAL;
memblock_size = of_get_property(np, "ibm,memblock-size", NULL); lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
if (!memblock_size) { if (!lmb_size) {
of_node_put(np); of_node_put(np);
return -EINVAL; return -EINVAL;
} }
if (action == PSERIES_DRCONF_MEM_ADD) { if (action == PSERIES_DRCONF_MEM_ADD) {
rc = memblock_add(*base, *memblock_size); rc = memblock_add(*base, *lmb_size);
rc = (rc < 0) ? -EINVAL : 0; rc = (rc < 0) ? -EINVAL : 0;
} else if (action == PSERIES_DRCONF_MEM_REMOVE) { } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
rc = pseries_remove_memblock(*base, *memblock_size); rc = pseries_remove_memblock(*base, *lmb_size);
} else { } else {
rc = -EINVAL; rc = -EINVAL;
} }

@@ -535,8 +535,16 @@ pgm_no_vtime2:
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(pgm_exit)		# only per or per+check ?
-	b	BASED(pgm_do_call)
+	be	BASED(pgm_exit2)	# only per or per+check ?
+	l	%r7,BASED(.Ljump_table)
+	sll	%r8,2
+	l	%r7,0(%r8,%r7)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_return)

 #
 # it was a single stepped SVC that is causing all the trouble

@@ -544,8 +544,16 @@ pgm_no_vtime2:
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
 	ngr	%r8,%r3			# clear per-event-bit and ilc
-	je	pgm_exit
-	j	pgm_do_call
+	je	pgm_exit2
+	sll	%r8,3
+	larl	%r1,pgm_check_table
+	lg	%r1,0(%r8,%r1)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r1		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	j	sysc_return

 #
 # it was a single stepped SVC that is causing all the trouble


@ -524,8 +524,11 @@ void etr_switch_to_local(void)
if (!etr_eacr.sl) if (!etr_eacr.sl)
return; return;
disable_sync_clock(NULL); disable_sync_clock(NULL);
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
queue_work(time_sync_wq, &etr_work); etr_eacr.es = etr_eacr.sl = 0;
etr_setr(&etr_eacr);
queue_work(time_sync_wq, &etr_work);
}
} }
/* /*
@ -539,8 +542,11 @@ void etr_sync_check(void)
if (!etr_eacr.es) if (!etr_eacr.es)
return; return;
disable_sync_clock(NULL); disable_sync_clock(NULL);
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
queue_work(time_sync_wq, &etr_work); etr_eacr.es = 0;
etr_setr(&etr_eacr);
queue_work(time_sync_wq, &etr_work);
}
} }
/* /*
@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
* Do not try to get the alternate port aib if the clock * Do not try to get the alternate port aib if the clock
* is not in sync yet. * is not in sync yet.
*/ */
if (!check_sync_clock()) if (!eacr.es || !check_sync_clock())
return eacr; return eacr;
/* /*
@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
* If the clock is in sync just update the eacr and return. * If the clock is in sync just update the eacr and return.
* If there is no valid sync port wait for a port update. * If there is no valid sync port wait for a port update.
*/ */
if (check_sync_clock() || sync_port < 0) { if ((eacr.es && check_sync_clock()) || sync_port < 0) {
etr_update_eacr(eacr); etr_update_eacr(eacr);
etr_set_tolec_timeout(now); etr_set_tolec_timeout(now);
goto out_unlock; goto out_unlock;
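The s390 ETR hunks above guard the queue_work() call with test_and_set_bit(), so that a switch-to-local or sync-check event is queued at most once, and they clear the affected eacr bits before queueing. Below is a minimal sketch of that "queue only if not already pending" pattern in plain C11, with an atomic_flag standing in for test_and_set_bit(); every name in it is hypothetical, not taken from the kernel.

#include <stdatomic.h>
#include <stdio.h>

/* One flag per event type, playing the role of the ETR_EVENT_* bits. */
static atomic_flag switch_local_pending = ATOMIC_FLAG_INIT;

/* Hypothetical stand-in for queue_work(time_sync_wq, &etr_work). */
static void queue_sync_work(void)
{
        puts("sync work queued");
}

/* Queue the handler only if this event is not already pending. */
static void on_switch_to_local(void)
{
        /* atomic_flag_test_and_set() returns the previous value, like
         * test_and_set_bit(): true means the work was already queued. */
        if (!atomic_flag_test_and_set(&switch_local_pending))
                queue_sync_work();
}

int main(void)
{
        on_switch_to_local();   /* queues the work */
        on_switch_to_local();   /* duplicate event, queues nothing */
        return 0;
}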


@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
percpu_entry->states[cx->index].eax = cx->address; percpu_entry->states[cx->index].eax = cx->address;
percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
} }
/*
* For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
* then we should skip checking BM_STS for this C-state.
* ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
*/
if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
cx->bm_sts_skip = 1;
return retval; return retval;
} }
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);


@ -157,9 +157,14 @@ static int __init acpi_sleep_setup(char *str)
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
if (strncmp(str, "s4_nohwsig", 10) == 0) if (strncmp(str, "s4_nohwsig", 10) == 0)
acpi_no_s4_hw_signature(); acpi_no_s4_hw_signature();
if (strncmp(str, "s4_nonvs", 8) == 0) if (strncmp(str, "s4_nonvs", 8) == 0) {
acpi_s4_no_nvs(); pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
"please use acpi_sleep=nonvs instead");
acpi_nvs_nosave();
}
#endif #endif
if (strncmp(str, "nonvs", 5) == 0)
acpi_nvs_nosave();
if (strncmp(str, "old_ordering", 12) == 0) if (strncmp(str, "old_ordering", 12) == 0)
acpi_old_suspend_ordering(); acpi_old_suspend_ordering();
str = strchr(str, ','); str = strchr(str, ',');
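acpi_sleep= takes a comma-separated list, and the setup hunk above handles each token with strncmp() before stepping past the next comma. The fragment below is an illustrative userspace sketch of that parsing loop only; handle() is a hypothetical placeholder for acpi_nvs_nosave() and the other setters.

#include <stdio.h>
#include <string.h>

/* Hypothetical handler standing in for acpi_nvs_nosave() and friends. */
static void handle(const char *opt)
{
        printf("option recognised: %s\n", opt);
}

/* Walk a comma-separated option string the way the setup routine does:
 * prefix-match the current token, then jump past the next comma. */
static void parse_options(const char *str)
{
        while (str != NULL && *str != '\0') {
                if (strncmp(str, "nonvs", 5) == 0)
                        handle("nonvs");
                if (strncmp(str, "s4_nohwsig", 10) == 0)
                        handle("s4_nohwsig");
                str = strchr(str, ',');
                if (str != NULL)
                        str++;
        }
}

int main(void)
{
        parse_options("s4_nohwsig,nonvs");
        return 0;
}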


@ -368,22 +368,16 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
return -ENODEV; return -ENODEV;
out_obj = output.pointer; out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) { if (out_obj->type != ACPI_TYPE_BUFFER)
ret = -ENODEV; return -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) { if (errors)
ret = -ENODEV; return -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4)); supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1)) { if (!(supported & 0x1))
ret = -ENODEV; return -ENODEV;
goto out_free;
}
out_free: out_free:
kfree(output.pointer); kfree(output.pointer);
@ -397,13 +391,17 @@ static int __init pcc_cpufreq_probe(void)
struct pcc_memory_resource *mem_resource; struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource; struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member; union acpi_object *out_obj, *member;
acpi_handle handle, osc_handle; acpi_handle handle, osc_handle, pcch_handle;
int ret = 0; int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle); status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return -ENODEV; return -ENODEV;
status = acpi_get_handle(handle, "PCCH", &pcch_handle);
if (ACPI_FAILURE(status))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle); status = acpi_get_handle(handle, "_OSC", &osc_handle);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
ret = pcc_cpufreq_do_osc(&osc_handle); ret = pcc_cpufreq_do_osc(&osc_handle);
@ -543,13 +541,13 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!pcch_virt_addr) { if (!pcch_virt_addr) {
result = -1; result = -1;
goto pcch_null; goto out;
} }
result = pcc_get_offset(cpu); result = pcc_get_offset(cpu);
if (result) { if (result) {
dprintk("init: PCCP evaluation failed\n"); dprintk("init: PCCP evaluation failed\n");
goto free; goto out;
} }
policy->max = policy->cpuinfo.max_freq = policy->max = policy->cpuinfo.max_freq =
@ -558,14 +556,15 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
ioread32(&pcch_hdr->minimum_frequency) * 1000; ioread32(&pcch_hdr->minimum_frequency) * 1000;
policy->cur = pcc_get_freq(cpu); policy->cur = pcc_get_freq(cpu);
if (!policy->cur) {
dprintk("init: Unable to get current CPU frequency\n");
result = -EINVAL;
goto out;
}
dprintk("init: policy->max is %d, policy->min is %d\n", dprintk("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min); policy->max, policy->min);
out:
return 0;
free:
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
pcch_null:
return result; return result;
} }


@ -1023,13 +1023,12 @@ static int get_transition_latency(struct powernow_k8_data *data)
} }
if (max_latency == 0) { if (max_latency == 0) {
/* /*
* Fam 11h always returns 0 as transition latency. * Fam 11h and later may return 0 as transition latency. This
* This is intended and means "very fast". While cpufreq core * is intended and means "very fast". While cpufreq core and
* and governors currently can handle that gracefully, better * governors currently can handle that gracefully, better set it
* set it to 1 to avoid problems in the future. * to 1 to avoid problems in the future.
* For all others it's a BIOS bug.
*/ */
if (boot_cpu_data.x86 != 0x11) if (boot_cpu_data.x86 < 0x11)
printk(KERN_ERR FW_WARN PFX "Invalid zero transition " printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
"latency\n"); "latency\n");
max_latency = 1; max_latency = 1;


@ -571,8 +571,8 @@ auditsys:
* masked off. * masked off.
*/ */
sysret_audit: sysret_audit:
movq %rax,%rsi /* second arg, syscall return value */ movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
cmpq $0,%rax /* is it < 0? */ cmpq $0,%rsi /* is it < 0? */
setl %al /* 1 if so, 0 if not */ setl %al /* 1 if so, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */ movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */


@ -964,7 +964,7 @@ fs_initcall(hpet_late_init);
void hpet_disable(void) void hpet_disable(void)
{ {
if (is_hpet_capable()) { if (is_hpet_capable() && hpet_virt_address) {
unsigned int cfg = hpet_readl(HPET_CFG); unsigned int cfg = hpet_readl(HPET_CFG);
if (hpet_legacy_int_enabled) { if (hpet_legacy_int_enabled) {


@ -276,16 +276,6 @@ static struct sys_device device_i8259A = {
.cls = &i8259_sysdev_class, .cls = &i8259_sysdev_class,
}; };
static int __init i8259A_init_sysfs(void)
{
int error = sysdev_class_register(&i8259_sysdev_class);
if (!error)
error = sysdev_register(&device_i8259A);
return error;
}
device_initcall(i8259A_init_sysfs);
static void mask_8259A(void) static void mask_8259A(void)
{ {
unsigned long flags; unsigned long flags;
@ -407,3 +397,18 @@ struct legacy_pic default_legacy_pic = {
}; };
struct legacy_pic *legacy_pic = &default_legacy_pic; struct legacy_pic *legacy_pic = &default_legacy_pic;
static int __init i8259A_init_sysfs(void)
{
int error;
if (legacy_pic != &default_legacy_pic)
return 0;
error = sysdev_class_register(&i8259_sysdev_class);
if (!error)
error = sysdev_register(&device_i8259A);
return error;
}
device_initcall(i8259A_init_sysfs);


@ -572,7 +572,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_STOP; return NOTIFY_STOP;
} }
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str, int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig) struct pt_regs *regs, long err, int trap, int sig)
{ {
@ -590,7 +589,6 @@ int kgdb_ll_trap(int cmd, const char *str,
return __kgdb_notify(&args, cmd); return __kgdb_notify(&args, cmd);
} }
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
static int static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
@ -625,6 +623,12 @@ int kgdb_arch_init(void)
return register_die_notifier(&kgdb_notifier); return register_die_notifier(&kgdb_notifier);
} }
static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
}
void kgdb_arch_late(void) void kgdb_arch_late(void)
{ {
int i, cpu; int i, cpu;
@ -655,6 +659,7 @@ void kgdb_arch_late(void)
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu); pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
pevent[0]->hw.sample_period = 1; pevent[0]->hw.sample_period = 1;
pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
if (pevent[0]->destroy != NULL) { if (pevent[0]->destroy != NULL) {
pevent[0]->destroy = NULL; pevent[0]->destroy = NULL;
release_bp_slot(*pevent); release_bp_slot(*pevent);


@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
/* advance table_gfn when emulating 1gb pages with 4k */ /* advance table_gfn when emulating 1gb pages with 4k */
if (delta == 0) if (delta == 0)
table_gfn += PT_INDEX(addr, level); table_gfn += PT_INDEX(addr, level);
access &= gw->pte_access;
} else { } else {
direct = 0; direct = 0;
table_gfn = gw->table_gfn[level - 2]; table_gfn = gw->table_gfn[level - 2];


@ -1562,7 +1562,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
r = -ENOMEM; r = -ENOMEM;
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
entries = vmalloc(size); entries = kmalloc(size, GFP_KERNEL);
if (!entries) if (!entries)
goto out; goto out;
@ -1581,7 +1581,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
r = n; r = n;
out_free: out_free:
vfree(entries); kfree(entries);
out: out:
return r; return r;
} }


@ -70,6 +70,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void) acpi_status acpi_enable(void)
{ {
acpi_status status; acpi_status status;
int retry;
ACPI_FUNCTION_TRACE(acpi_enable); ACPI_FUNCTION_TRACE(acpi_enable);
@ -98,16 +99,18 @@ acpi_status acpi_enable(void)
/* Sanity check that transition succeeded */ /* Sanity check that transition succeeded */
if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) { for (retry = 0; retry < 30000; ++retry) {
ACPI_ERROR((AE_INFO, if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
"Hardware did not enter ACPI mode")); if (retry != 0)
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); ACPI_WARNING((AE_INFO,
"Platform took > %d00 usec to enter ACPI mode", retry));
return_ACPI_STATUS(AE_OK);
}
acpi_os_stall(100); /* 100 usec */
} }
ACPI_DEBUG_PRINT((ACPI_DB_INIT, ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
"Transition to ACPI mode successful\n")); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
return_ACPI_STATUS(AE_OK);
} }
ACPI_EXPORT_SYMBOL(acpi_enable) ACPI_EXPORT_SYMBOL(acpi_enable)
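The acpi_enable() change above turns a one-shot mode check into a bounded poll: up to 30000 passes of acpi_os_stall(100), roughly three seconds, before the hardware is declared unresponsive. A hedged sketch of the same bounded-poll shape follows; check_ready() is a made-up probe standing in for acpi_hw_get_mode().

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical readiness probe standing in for acpi_hw_get_mode(). */
static bool check_ready(void)
{
        static int calls;
        return ++calls > 5;     /* pretend the platform settles after ~500 usec */
}

/* Poll for readiness, stalling 100 usec per try, for at most max_tries tries. */
static int wait_until_ready(int max_tries)
{
        for (int retry = 0; retry < max_tries; ++retry) {
                if (check_ready()) {
                        if (retry != 0)
                                fprintf(stderr, "took > %d00 usec to become ready\n", retry);
                        return 0;
                }
                usleep(100);    /* plays the role of acpi_os_stall(100) */
        }
        return -1;              /* never responded */
}

int main(void)
{
        return wait_until_ready(30000) ? 1 : 0;
}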


@ -868,9 +868,15 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
static void acpi_battery_notify(struct acpi_device *device, u32 event) static void acpi_battery_notify(struct acpi_device *device, u32 event)
{ {
struct acpi_battery *battery = acpi_driver_data(device); struct acpi_battery *battery = acpi_driver_data(device);
#ifdef CONFIG_ACPI_SYSFS_POWER
struct device *old;
#endif
if (!battery) if (!battery)
return; return;
#ifdef CONFIG_ACPI_SYSFS_POWER
old = battery->bat.dev;
#endif
acpi_battery_update(battery); acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event, acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery)); acpi_battery_present(battery));
@ -879,7 +885,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
acpi_battery_present(battery)); acpi_battery_present(battery));
#ifdef CONFIG_ACPI_SYSFS_POWER #ifdef CONFIG_ACPI_SYSFS_POWER
/* acpi_battery_update could remove power_supply object */ /* acpi_battery_update could remove power_supply object */
if (battery->bat.dev) if (old && battery->bat.dev)
power_supply_changed(&battery->bat); power_supply_changed(&battery->bat);
#endif #endif
} }


@ -214,7 +214,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
.ident = "Sony VGN-SR290J", .ident = "Sony VGN-SR290J",
.matches = { .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
}, },
}, },
{ {


@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id); cpuid = acpi_get_cpuid(handle, type, acpi_id);
if (cpuid == -1) if ((cpuid == -1) && (num_possible_cpus() > 1))
return false; return false;
return true; return true;


@ -76,14 +76,19 @@ static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000); module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly; static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000); module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);
static unsigned int latency_factor __read_mostly = 2; static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644); module_param(latency_factor, uint, 0644);
#ifdef CONFIG_ACPI_PROCFS
static u64 us_to_pm_timer_ticks(s64 t) static u64 us_to_pm_timer_ticks(s64 t)
{ {
return div64_u64(t * PM_TIMER_FREQUENCY, 1000000); return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
} }
#endif
/* /*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else. * For now disable this. Probably a bug somewhere else.
@ -763,6 +768,9 @@ static int acpi_idle_bm_check(void)
{ {
u32 bm_status = 0; u32 bm_status = 0;
if (bm_check_disable)
return 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
if (bm_status) if (bm_status)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
@ -947,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (acpi_idle_suspend) if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state)); return(acpi_idle_enter_c1(dev, state));
if (acpi_idle_bm_check()) { if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (dev->safe_state) { if (dev->safe_state) {
dev->last_state = dev->safe_state; dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state); return dev->safe_state->enter(dev, dev->safe_state);


@ -81,6 +81,20 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP #ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0; static u32 acpi_target_sleep_state = ACPI_STATE_S0;
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* and to restore them during the subsequent resume. Windows does that also for
* suspend to RAM. However, it is known that this mechanism does not work on
* all machines, so we allow the user to disable it with the help of the
* 'acpi_sleep=nonvs' kernel command line option.
*/
static bool nvs_nosave;
void __init acpi_nvs_nosave(void)
{
nvs_nosave = true;
}
/* /*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering' * user to request that behavior by using the 'acpi_old_suspend_ordering'
@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
u32 acpi_state = acpi_suspend_states[pm_state]; u32 acpi_state = acpi_suspend_states[pm_state];
int error = 0; int error = 0;
error = suspend_nvs_alloc(); error = nvs_nosave ? 0 : suspend_nvs_alloc();
if (error) if (error)
return error; return error;
@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
#endif /* CONFIG_SUSPEND */ #endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* and to restore them during the subsequent resume. However, it is not certain
* if this mechanism is going to work on all machines, so we allow the user to
* disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
* option.
*/
static bool s4_no_nvs;
void __init acpi_s4_no_nvs(void)
{
s4_no_nvs = true;
}
static unsigned long s4_hardware_signature; static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs; static struct acpi_table_facs *facs;
static bool nosigcheck; static bool nosigcheck;
@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void)
{ {
int error; int error;
error = s4_no_nvs ? 0 : suspend_nvs_alloc(); error = nvs_nosave ? 0 : suspend_nvs_alloc();
if (!error) { if (!error) {
acpi_target_sleep_state = ACPI_STATE_S4; acpi_target_sleep_state = ACPI_STATE_S4;
acpi_sleep_tts_switch(acpi_target_sleep_state); acpi_sleep_tts_switch(acpi_target_sleep_state);
@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void)
error = acpi_sleep_prepare(ACPI_STATE_S4); error = acpi_sleep_prepare(ACPI_STATE_S4);
if (!error) { if (!error) {
if (!s4_no_nvs) if (!nvs_nosave)
error = suspend_nvs_alloc(); error = suspend_nvs_alloc();
if (!error) if (!error)
acpi_target_sleep_state = ACPI_STATE_S4; acpi_target_sleep_state = ACPI_STATE_S4;


@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
*/ */
if (parent == NULL) if (parent == NULL)
parent_kobj = virtual_device_parent(dev); parent_kobj = virtual_device_parent(dev);
else if (parent->class) else if (parent->class && !dev->class->ns_type)
return &parent->kobj; return &parent->kobj;
else else
parent_kobj = &parent->kobj; parent_kobj = &parent->kobj;


@ -1216,17 +1216,20 @@ static int intel_i915_get_gtt_size(void)
/* G33's GTT size defined in gmch_ctrl */ /* G33's GTT size defined in gmch_ctrl */
pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case G33_PGETBL_SIZE_1M: case I830_GMCH_GMS_STOLEN_512:
size = 512;
break;
case I830_GMCH_GMS_STOLEN_1024:
size = 1024; size = 1024;
break; break;
case G33_PGETBL_SIZE_2M: case I830_GMCH_GMS_STOLEN_8192:
size = 2048; size = 8*1024;
break; break;
default: default:
dev_info(&agp_bridge->dev->dev, dev_info(&agp_bridge->dev->dev,
"unknown page table size 0x%x, assuming 512KB\n", "unknown page table size 0x%x, assuming 512KB\n",
(gmch_ctrl & G33_PGETBL_SIZE_MASK)); (gmch_ctrl & I830_GMCH_GMS_MASK));
size = 512; size = 512;
} }
} else { } else {


@ -493,7 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p; sysrq_key_table[i] = op_p;
} }
static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{ {
struct sysrq_key_op *op_p; struct sysrq_key_op *op_p;
int orig_log_level; int orig_log_level;


@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
static int tpm_tis_pnp_resume(struct pnp_dev *dev) static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{ {
return tpm_pm_resume(&dev->dev); struct tpm_chip *chip = pnp_get_drvdata(dev);
int ret;
ret = tpm_pm_resume(&dev->dev);
if (!ret)
tpm_continue_selftest(chip);
return ret;
} }
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = { static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {


@ -1080,6 +1080,7 @@ err_out_unregister:
err_unlock_policy: err_unlock_policy:
unlock_policy_rwsem_write(cpu); unlock_policy_rwsem_write(cpu);
free_cpumask_var(policy->related_cpus);
err_free_cpumask: err_free_cpumask:
free_cpumask_var(policy->cpus); free_cpumask_var(policy->cpus);
err_free_policy: err_free_policy:
@ -1765,17 +1766,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
dprintk("governor switch\n"); dprintk("governor switch\n");
/* end old governor */ /* end old governor */
if (data->governor) { if (data->governor)
/*
* Need to release the rwsem around governor
* stop due to lock dependency between
* cancel_delayed_work_sync and the read lock
* taken in the delayed work handler.
*/
unlock_policy_rwsem_write(data->cpu);
__cpufreq_governor(data, CPUFREQ_GOV_STOP); __cpufreq_governor(data, CPUFREQ_GOV_STOP);
lock_policy_rwsem_write(data->cpu);
}
/* start new governor */ /* start new governor */
data->governor = policy->governor; data->governor = policy->governor;


@ -1183,10 +1183,14 @@ static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
/* Copy part of this segment */ /* Copy part of this segment */
ignore = skip - offset; ignore = skip - offset;
len = miter.length - ignore; len = miter.length - ignore;
if (boffset + len > buflen)
len = buflen - boffset;
memcpy(buf + boffset, miter.addr + ignore, len); memcpy(buf + boffset, miter.addr + ignore, len);
} else { } else {
/* Copy all of this segment */ /* Copy all of this segment (up to buflen) */
len = miter.length; len = miter.length;
if (boffset + len > buflen)
len = buflen - boffset;
memcpy(buf + boffset, miter.addr, len); memcpy(buf + boffset, miter.addr, len);
} }
boffset += len; boffset += len;
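The scatterlist copy fix above clamps each segment length so that boffset plus len can never run past buflen. The same bounds check is shown in isolation below as a small sketch with plain memcpy(); the function name is invented for illustration.

#include <stdio.h>
#include <string.h>

/* Copy up to seg_len bytes from seg into buf at offset boffset,
 * never writing past buflen; returns the number of bytes copied. */
static size_t copy_clamped(char *buf, size_t buflen, size_t boffset,
                           const char *seg, size_t seg_len)
{
        size_t len = seg_len;

        if (boffset >= buflen)
                return 0;
        if (boffset + len > buflen)
                len = buflen - boffset;         /* the clamp added above */
        memcpy(buf + boffset, seg, len);
        return len;
}

int main(void)
{
        char buf[8];
        size_t off = 0;

        off += copy_clamped(buf, sizeof(buf), off, "hello ", 6);
        off += copy_clamped(buf, sizeof(buf), off, "world!", 6);  /* clamped to 2 bytes */
        printf("%zu bytes copied\n", off);      /* prints 8 */
        return 0;
}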


@ -1300,7 +1300,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
if (devno == 0) if (devno == 0)
return -ENODEV; return -ENODEV;
i7core_printk(KERN_ERR, i7core_printk(KERN_INFO,
"Device not found: dev %02x.%d PCI ID %04x:%04x\n", "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
dev_descr->dev, dev_descr->func, dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id); PCI_VENDOR_ID_INTEL, dev_descr->dev_id);


@ -336,6 +336,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
}, },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
static struct of_platform_driver mpc85xx_pci_err_driver = { static struct of_platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe, .probe = mpc85xx_pci_err_probe,
@ -650,6 +651,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,p2020-l2-cache-controller", }, { .compatible = "fsl,p2020-l2-cache-controller", },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct of_platform_driver mpc85xx_l2_err_driver = { static struct of_platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe, .probe = mpc85xx_l2_err_probe,
@ -1126,6 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,p2020-memory-controller", }, { .compatible = "fsl,p2020-memory-controller", },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct of_platform_driver mpc85xx_mc_err_driver = { static struct of_platform_driver mpc85xx_mc_err_driver = {
.probe = mpc85xx_mc_err_probe, .probe = mpc85xx_mc_err_probe,


@ -893,10 +893,12 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
void gpio_unexport(unsigned gpio) void gpio_unexport(unsigned gpio)
{ {
struct gpio_desc *desc; struct gpio_desc *desc;
int status = -EINVAL; int status = 0;
if (!gpio_is_valid(gpio)) if (!gpio_is_valid(gpio)) {
status = -EINVAL;
goto done; goto done;
}
mutex_lock(&sysfs_lock); mutex_lock(&sysfs_lock);
@ -911,7 +913,6 @@ void gpio_unexport(unsigned gpio)
clear_bit(FLAG_EXPORT, &desc->flags); clear_bit(FLAG_EXPORT, &desc->flags);
put_device(dev); put_device(dev);
device_unregister(dev); device_unregister(dev);
status = 0;
} else } else
status = -ENODEV; status = -ENODEV;
} }


@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false); false);
mode->hdisplay = 1366; mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1; mode->hsync_start = mode->hsync_start - 1;
mode->vsync_end = mode->vsync_end - 1; mode->hsync_end = mode->hsync_end - 1;
return mode; return mode;
} }


@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
case FBC_NOT_TILED: case FBC_NOT_TILED:
seq_printf(m, "scanout buffer not tiled"); seq_printf(m, "scanout buffer not tiled");
break; break;
case FBC_MULTIPLE_PIPES:
seq_printf(m, "multiple pipes are enabled");
break;
default: default:
seq_printf(m, "unknown reason"); seq_printf(m, "unknown reason");
} }


@ -1300,7 +1300,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb); drm_mm_put_block(dev_priv->compressed_fb);
if (!IS_GM45(dev)) if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb); drm_mm_put_block(dev_priv->compressed_llb);
} }


@ -215,6 +215,7 @@ enum no_fbc_reason {
FBC_MODE_TOO_LARGE, /* mode too large for compression */ FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */ FBC_BAD_PLANE, /* fbc not supported on plane */
FBC_NOT_TILED, /* buffer not tiled */ FBC_NOT_TILED, /* buffer not tiled */
FBC_MULTIPLE_PIPES, /* more than one pipe active */
}; };
enum intel_pch { enum intel_pch {
@ -222,6 +223,8 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */ PCH_CPT, /* Cougarpoint PCH */
}; };
#define QUIRK_PIPEA_FORCE (1<<0)
struct intel_fbdev; struct intel_fbdev;
typedef struct drm_i915_private { typedef struct drm_i915_private {
@ -337,6 +340,8 @@ typedef struct drm_i915_private {
/* PCH chipset type */ /* PCH chipset type */
enum intel_pch pch_type; enum intel_pch pch_type;
unsigned long quirks;
/* Register state */ /* Register state */
bool modeset_on_lid; bool modeset_on_lid;
u8 saveLBB; u8 saveLBB;


@ -3647,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
return ret; return ret;
} }
int int
i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv, struct drm_file *file_priv,
@ -3794,7 +3795,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
unsigned long long total_size = 0; unsigned long long total_size = 0;
int num_fences = 0; int num_fences = 0;
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
obj_priv = object_list[i]->driver_private; obj_priv = to_intel_bo(object_list[i]);
total_size += object_list[i]->size; total_size += object_list[i]->size;
num_fences += num_fences +=


@ -2869,6 +2869,7 @@
#define PCH_PP_STATUS 0xc7200 #define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204 #define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define EDP_FORCE_VDD (1 << 3) #define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2) #define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1) #define PANEL_POWER_RESET (1 << 1)


@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t clock; intel_clock_t clock;
int max_n; int max_n;
bool found; bool found;
/* approximately equals target * 0.00488 */ /* approximately equals target * 0.00585 */
int err_most = (target >> 8) + (target >> 10); int err_most = (target >> 8) + (target >> 9);
found = false; found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@ -1180,8 +1180,12 @@ static void intel_update_fbc(struct drm_crtc *crtc,
struct drm_framebuffer *fb = crtc->fb; struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj_priv;
struct drm_crtc *tmp_crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane; int plane = intel_crtc->plane;
int crtcs_enabled = 0;
DRM_DEBUG_KMS("\n");
if (!i915_powersave) if (!i915_powersave)
return; return;
@ -1199,10 +1203,21 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* If FBC is already on, we just have to verify that we can * If FBC is already on, we just have to verify that we can
* keep it that way... * keep it that way...
* Need to disable if: * Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode) * - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer * - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.) * - going to an unsupported config (interlace, pixel multiply, etc.)
*/ */
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (tmp_crtc->enabled)
crtcs_enabled++;
}
DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
if (crtcs_enabled > 1) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
goto out_disable;
}
if (intel_fb->obj->size > dev_priv->cfb_size) { if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling " DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n"); "compression\n");
@ -1255,7 +1270,7 @@ out_disable:
} }
} }
static int int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@ -2255,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_wait_for_vblank(dev); intel_wait_for_vblank(dev);
} }
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
(dev_priv->quirks & QUIRK_PIPEA_FORCE))
goto skip_pipe_off;
/* Next, disable display pipes */ /* Next, disable display pipes */
temp = I915_READ(pipeconf_reg); temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) { if ((temp & PIPEACONF_ENABLE) != 0) {
@ -2270,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg); I915_READ(dpll_reg);
} }
skip_pipe_off:
/* Wait for the clocks to turn off. */ /* Wait for the clocks to turn off. */
udelay(150); udelay(150);
break; break;
@ -2356,8 +2376,6 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (mode->clock * 3 > 27000 * 4) if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH; return MODE_CLOCK_HIGH;
} }
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true; return true;
} }
@ -3736,6 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (dev_priv->lvds_dither) { if (dev_priv->lvds_dither) {
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER; pipeconf |= PIPE_ENABLE_DITHER;
pipeconf &= ~PIPE_DITHER_TYPE_MASK;
pipeconf |= PIPE_DITHER_TYPE_ST01; pipeconf |= PIPE_DITHER_TYPE_ST01;
} else } else
lvds |= LVDS_ENABLE_DITHER; lvds |= LVDS_ENABLE_DITHER;
@ -4412,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
DRM_DEBUG_DRIVER("upclocking LVDS\n"); DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */ /* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1; dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll); I915_WRITE(dpll_reg, dpll);
@ -4455,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("downclocking LVDS\n"); DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */ /* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
PANEL_UNLOCK_REGS);
dpll |= DISPLAY_RATE_SELECT_FPA1; dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll); I915_WRITE(dpll_reg, dpll);
@ -4695,7 +4716,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work; struct intel_unpin_work *work;
unsigned long flags; unsigned long flags, offset;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc; int ret, pipesrc;
u32 flip_mask; u32 flip_mask;
@ -4762,19 +4783,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
while (I915_READ(ISR) & flip_mask) while (I915_READ(ISR) & flip_mask)
; ;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = obj_priv->gtt_offset;
offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
BEGIN_LP_RING(4); BEGIN_LP_RING(4);
if (IS_I965G(dev)) { if (IS_I965G(dev)) {
OUT_RING(MI_DISPLAY_FLIP | OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch); OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); OUT_RING(offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg); pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff); OUT_RING(pipesrc & 0x0fff0fff);
} else { } else {
OUT_RING(MI_DISPLAY_FLIP_I915 | OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch); OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset); OUT_RING(offset);
OUT_RING(MI_NOOP); OUT_RING(MI_NOOP);
} }
ADVANCE_LP_RING(); ADVANCE_LP_RING();
@ -5506,6 +5531,66 @@ static void intel_init_display(struct drm_device *dev)
} }
} }
/*
* Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
static void quirk_pipea_force (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
};
struct intel_quirk intel_quirks[] = {
/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae,0x103c, 0x361a, quirk_pipea_force },
/* Thinkpad R31 needs pipe A force quirk */
{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
/* ThinkPad X40 needs pipe A force quirk */
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
};
static void intel_init_quirks(struct drm_device *dev)
{
struct pci_dev *d = dev->pdev;
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
struct intel_quirk *q = &intel_quirks[i];
if (d->device == q->device &&
(d->subsystem_vendor == q->subsystem_vendor ||
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
}
void intel_modeset_init(struct drm_device *dev) void intel_modeset_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -5518,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = (void *)&intel_mode_funcs; dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
intel_init_display(dev); intel_init_display(dev);
if (IS_I965G(dev)) { if (IS_I965G(dev)) {
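intel_init_quirks() above walks a static table and fires a hook when the PCI device ID matches exactly and the subsystem IDs match either exactly or via the PCI_ANY_ID wildcard. The standalone sketch below shows only that matching rule; the struct, IDs and ANY_ID constant are made up for illustration.

#include <stdio.h>

#define ANY_ID 0xffff                   /* plays the role of PCI_ANY_ID */

struct quirk {
        unsigned device;
        unsigned sub_vendor;
        unsigned sub_device;
        const char *note;
};

static const struct quirk quirks[] = {
        { 0x2a42, 0x103c, 0x30eb, "force pipe A" },
        { 0x3582, ANY_ID, ANY_ID, "force pipe A (whole family)" },
};

/* A subsystem field matches when it is equal or the table entry says "any". */
static int ids_match(unsigned want, unsigned have)
{
        return want == ANY_ID || want == have;
}

static void apply_quirks(unsigned device, unsigned sub_vendor, unsigned sub_device)
{
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                const struct quirk *q = &quirks[i];

                if (device == q->device &&
                    ids_match(q->sub_vendor, sub_vendor) &&
                    ids_match(q->sub_device, sub_device))
                        printf("quirk applied: %s\n", q->note);
        }
}

int main(void)
{
        apply_quirks(0x3582, 0x1234, 0x5678);   /* matches the wildcard entry */
        return 0;
}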


@ -717,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
} }
} }
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
pp_status = I915_READ(PCH_PP_STATUS);
if (pp_status & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel off wait timed out\n");
/* Make sure VDD is enabled so DP AUX will work */
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_backlight_on (struct drm_device *dev) static void ironlake_edp_backlight_on (struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -751,14 +796,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) { if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) { if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_encoder, dp_priv->DP); intel_dp_link_down(intel_encoder, dp_priv->DP);
if (IS_eDP(intel_encoder)) if (IS_eDP(intel_encoder)) {
ironlake_edp_backlight_off(dev); ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
}
} }
} else { } else {
if (!(dp_reg & DP_PORT_EN)) { if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_encoder)) if (IS_eDP(intel_encoder)) {
ironlake_edp_panel_on(dev);
ironlake_edp_backlight_on(dev); ironlake_edp_backlight_on(dev);
}
} }
} }
dp_priv->dpms_mode = mode; dp_priv->dpms_mode = mode;
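ironlake_edp_panel_on() and ironlake_edp_panel_off() above poll PCH_PP_STATUS until the PP_ON bit changes, giving up once a five second jiffies deadline passes (time_after()). Below is a time-bounded polling sketch in ordinary C; read_status() is a hypothetical probe standing in for the register read.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical probe standing in for I915_READ(PCH_PP_STATUS) & PP_ON. */
static bool read_status(void)
{
        static int calls;
        return ++calls > 3;             /* pretend the panel powers up quickly */
}

/* Poll until read_status() reports success or timeout_ms elapses;
 * mirrors the jiffies + msecs_to_jiffies(5000) / time_after() shape. */
static int wait_for_status(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (read_status())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                  (now.tv_nsec - start.tv_nsec) / 1000000;
                if (elapsed_ms > timeout_ms) {
                        fprintf(stderr, "wait timed out\n");
                        return -1;
                }
                usleep(1000);
        }
}

int main(void)
{
        return wait_for_status(5000) ? 1 : 0;
}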


@ -215,6 +215,9 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj);
extern int intel_framebuffer_init(struct drm_device *dev, extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb, struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd, struct drm_mode_fb_cmd *mode_cmd,


@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(fbo, 64*1024); ret = intel_pin_and_fence_fb_obj(dev, fbo);
if (ret) { if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret); DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref; goto out_unref;
@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base); drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) if (ifb->obj)
drm_gem_object_unreference_unlocked(ifb->obj); drm_gem_object_unreference(ifb->obj);
return 0; return 0;
} }


@ -599,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0; return 0;
} }
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
return 1;
}
/* The GPU hangs up on these systems if modeset is performed on LID open */
static const struct dmi_system_id intel_no_modeset_on_lid[] = {
{
.callback = intel_no_modeset_on_lid_dmi_callback,
.ident = "Toshiba Tecra A11",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
},
},
{ } /* terminating entry */
};
/* /*
* Lid events. Note the use of 'modeset_on_lid': * Lid events. Note the use of 'modeset_on_lid':
* - we set it on lid close, and reset it on open * - we set it on lid close, and reset it on open
@ -622,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/ */
if (connector) if (connector)
connector->status = connector->funcs->detect(connector); connector->status = connector->funcs->detect(connector);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
return NOTIFY_OK;
if (!acpi_lid_open()) { if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1; dev_priv->modeset_on_lid = 1;
return NOTIFY_OK; return NOTIFY_OK;


@ -333,7 +333,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx); header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = CP_PACKET0_GET_REG(header); reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) { if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id); DRM_ERROR("cannot find crtc %d\n", crtc_id);
@ -368,7 +367,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
} }
} }
out: out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r; return r;
} }


@ -1230,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx); header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5); crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = CP_PACKET0_GET_REG(header); reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) { if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id); DRM_ERROR("cannot find crtc %d\n", crtc_id);
@ -1264,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
} }
out: out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r; return r;
} }


@ -585,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx); header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = CP_PACKET0_GET_REG(header); reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) { if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id); DRM_ERROR("cannot find crtc %d\n", crtc_id);
@ -620,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
} }
out: out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r; return r;
} }


@ -3050,6 +3050,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x308b) rdev->pdev->subsystem_device == 0x308b)
return; return;
/* quirk for rs4xx HP dv5000 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x30a4)
return;
/* DYN CLK 1 */ /* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table) if (table)

View file

@ -128,7 +128,8 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
for (i = 0, found = 0; i < rdev->num_crtc; i++) { for (i = 0, found = 0; i < rdev->num_crtc; i++) {
crtc = (struct drm_crtc *)minfo->crtcs[i]; crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == value) { if (crtc && crtc->base.id == value) {
value = i; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
value = radeon_crtc->crtc_id;
found = 1; found = 1;
break; break;
} }


@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
udelay(panel_pwr_delay * 1000); udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
udelay(panel_pwr_delay * 1000);
break; break;
} }


@ -333,6 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", return snprintf(buf, PAGE_SIZE, "%s\n",
(cp == PM_PROFILE_AUTO) ? "auto" : (cp == PM_PROFILE_AUTO) ? "auto" :
(cp == PM_PROFILE_LOW) ? "low" : (cp == PM_PROFILE_LOW) ? "low" :
(cp == PM_PROFILE_MID) ? "mid" :
(cp == PM_PROFILE_HIGH) ? "high" : "default"); (cp == PM_PROFILE_HIGH) ? "high" : "default");
} }
