Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

Conflict resolution:
	keep 'KBUILD_CFLAGS += -fno-pic'
	in arch/arm64/Makefile
Alex Shi 2016-03-14 15:32:21 +08:00
commit fa6e6c7406
424 changed files with 4620 additions and 1992 deletions


@@ -14,3 +14,10 @@ filesystem.
 efivarfs is typically mounted like this,
 mount -t efivarfs none /sys/firmware/efi/efivars
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files. This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from being accomplished
+accidentally.
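As an illustration of the behaviour described in the documentation hunk above (not part of the original commit), the following minimal userspace sketch shows what "chattr -i" does before an efivarfs file can be removed: it clears FS_IMMUTABLE_FL through the FS_IOC_SETFLAGS ioctl and only then unlinks the file. The variable name and GUID in the path are made up for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	/* Hypothetical efivarfs entry; real names are Name-GUID pairs. */
	const char *path =
		"/sys/firmware/efi/efivars/Example-12345678-1234-1234-1234-123456789abc";
	int flags = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Clear the immutable flag, which is what "chattr -i" does. */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags &= ~FS_IMMUTABLE_FL;
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
			perror("FS_IOC_SETFLAGS");
	}
	close(fd);

	/* With the flag cleared, removal is allowed again. */
	if (unlink(path) != 0)
		perror("unlink");
	return 0;
}

From a shell the equivalent is simply "chattr -i" on the variable file followed by "rm", as the documentation text notes.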


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 3
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Blurry Fish Butt


@@ -22,6 +22,7 @@
 #define AUX_IRQ_CTRL 0x00E
 #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
 #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
 #define AUX_IRQ_PRIORITY 0x206
 #define ICAUSE 0x40a
 #define AUX_IRQ_SELECT 0x40b
@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
 return arch_irqs_disabled_flags(arch_local_save_flags());
 }
+static inline void arc_softirq_trigger(int irq)
+{
+write_aux_reg(AUX_IRQ_HINT, irq);
+}
+static inline void arc_softirq_clear(int irq)
+{
+write_aux_reg(AUX_IRQ_HINT, 0);
+}
 #else
 .macro IRQ_DISABLE scratch


@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
 VECTOR handle_interrupt ; (16) Timer0
 VECTOR handle_interrupt ; unused (Timer1)
 VECTOR handle_interrupt ; unused (WDT)
-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt ; (23) End of fixed IRQs
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
 .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
 VECTOR handle_interrupt
@@ -211,7 +212,11 @@ debug_marker_syscall:
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
+; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
@@ -222,18 +227,23 @@ debug_marker_ds:
 ld r2, [sp, PT_ret]
 ld r3, [sp, PT_status32]
+; STAT32 for Int return created from scratch
+; (No delay dlot, disable Further intr in trampoline)
 bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
 st r0, [sp, PT_status32]
 mov r1, .Lintr_ret_to_delay_slot_2
 st r1, [sp, PT_ret]
+; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
 st r2, [sp, 0]
 st r3, [sp, 4]
 b .Lisr_ret_fast_path
 .Lintr_ret_to_delay_slot_2:
+; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
 sub sp, sp, SZ_PT_REGS
 st r9, [sp, -4]
@@ -243,11 +253,19 @@ debug_marker_ds:
 ld r9, [sp, 4]
 sr r9, [erstatus]
+; restore AUX_USER_SP if returning to U mode
+bbit0 r9, STATUS_U_BIT, 1f
+ld r9, [sp, PT_sp]
+sr r9, [AUX_USER_SP]
+1:
 ld r9, [sp, 8]
 sr r9, [erbta]
 ld r9, [sp, -4]
 add sp, sp, SZ_PT_REGS
+; return from pure kernel mode to delay slot
 rtie
 END(ret_from_exception)


@@ -11,9 +11,12 @@
 #include <linux/smp.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
 #include <asm/mcip.h>
 #include <asm/setup.h>
+#define SOFTIRQ_IRQ 21
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 static void mcip_setup_per_cpu(int cpu)
 {
 smp_ipi_irq_setup(cpu, IPI_IRQ);
+smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
 }
 static void mcip_ipi_send(int cpu)
@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
 unsigned long flags;
 int ipi_was_pending;
+/* ARConnect can only send IPI to others */
+if (unlikely(cpu == raw_smp_processor_id())) {
+arc_softirq_trigger(SOFTIRQ_IRQ);
+return;
+}
 /*
 * NOTE: We must spin here if the other cpu hasn't yet
 * serviced a previous message. This can burn lots
@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
 unsigned long flags;
 unsigned int __maybe_unused copy;
+if (unlikely(irq == SOFTIRQ_IRQ)) {
+arc_softirq_clear(irq);
+return;
+}
 raw_spin_lock_irqsave(&mcip_lock, flags);
 /* Who sent the IPI */


@@ -162,10 +162,9 @@ choice
 mobile SoCs in the Kona family of chips (e.g. bcm28155,
 bcm11351, etc...)
-config DEBUG_BCM63XX
+config DEBUG_BCM63XX_UART
 bool "Kernel low-level debugging on BCM63XX UART"
 depends on ARCH_BCM_63XX
-select DEBUG_UART_BCM63XX
 config DEBUG_BERLIN_UART
 bool "Marvell Berlin SoC Debug UART"
@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
 default "debug/vf.S" if DEBUG_VF_UART
 default "debug/vt8500.S" if DEBUG_VT8500_UART0
 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
-default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
+default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
 default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
 default "mach/debug-macro.S"
@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
 ARCH_IOP33X || ARCH_IXP4XX || \
 ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
-# Compatibility options for BCM63xx
-config DEBUG_UART_BCM63XX
-def_bool ARCH_BCM_63XX
 config DEBUG_UART_PHYS
 hex "Physical base address of debug UART"
 default 0x00100a00 if DEBUG_NETX_UART
@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
 default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
 default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
 default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
-default 0xfffe8600 if DEBUG_UART_BCM63XX
+default 0xfffe8600 if DEBUG_BCM63XX_UART
 default 0xfffff700 if ARCH_IOP33X
 depends on ARCH_EP93XX || \
 DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
 DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
 DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
 DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
-DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
 DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
 DEBUG_AT91_UART
@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
 default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
 default 0xfc40ab00 if DEBUG_BRCMSTB_UART
 default 0xfc705000 if DEBUG_ZTE_ZX
-default 0xfcfe8600 if DEBUG_UART_BCM63XX
+default 0xfcfe8600 if DEBUG_BCM63XX_UART
 default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
 default 0xfd000000 if ARCH_SPEAR13XX
 default 0xfd012000 if ARCH_MV78XX0
@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
 DEBUG_NETX_UART || \
 DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
-DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
 DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
 config DEBUG_UART_8250_SHIFT


@@ -90,7 +90,7 @@
 #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
 #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
 #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15 14
+#define PIN_PA15 15
 #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
 #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
 #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)


@@ -16,7 +16,7 @@
 extern struct smp_operations psci_smp_ops;
-#ifdef CONFIG_ARM_PSCI
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
 bool psci_smp_available(void);
 #else
 static inline bool psci_smp_available(void) { return false; }


@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 dma_addr_t dev_addr, unsigned long offset, size_t size,
 enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+unsigned long page_pfn = page_to_xen_pfn(page);
+unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+unsigned long compound_pages =
+(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+bool local = (page_pfn <= dev_pfn) &&
+(dev_pfn - page_pfn < compound_pages);
 /*
-* Dom0 is mapped 1:1, while the Linux page can be spanned accross
-* multiple Xen page, it's not possible to have a mix of local and
-* foreign Xen page. So if the first xen_pfn == mfn the page is local
-* otherwise it's a foreign page grant-mapped in dom0. If the page is
-* local we can safely call the native dma_ops function, otherwise we
-* call the xen specific function.
+* Dom0 is mapped 1:1, while the Linux page can span across
+* multiple Xen pages, it's not possible for it to contain a
+* mix of local and foreign Xen pages. So if the first xen_pfn
+* == mfn the page is local otherwise it's a foreign page
+* grant-mapped in dom0. If the page is local we can safely
+* call the native dma_ops function, otherwise we call the xen
+* specific function.
 */
 if (local)
 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);


@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 u64 val;
 val = kvm_arm_timer_get_reg(vcpu, reg->id);
-return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 static unsigned long num_core_regs(void)


@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
 static void set_onenand_cfg(void __iomem *onenand_base)
 {
-u32 reg;
-reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
-reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
+u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
 ONENAND_SYS_CFG1_BL_16;
 if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
 reg |= ONENAND_SYS_CFG1_VHF;
 else
 reg &= ~ONENAND_SYS_CFG1_VHF;
 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
 }
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
 }
 }
+onenand_async.sync_write = true;
 omap2_onenand_calc_async_timings(&t);
 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);


@@ -28,6 +28,7 @@ endif
 KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
 KBUILD_CFLAGS += -fno-pic
+KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS += $(lseinstr)
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)


@@ -34,13 +34,13 @@
 /*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
-* VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+* VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
 * (rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
 * fixed mappings and modules
 */
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
 #ifndef CONFIG_KASAN
 #define VMALLOC_START (VA_START)
@@ -51,7 +51,8 @@
 #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START (VMALLOC_END + SZ_64K)
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 #define FIRST_USER_ADDRESS 0UL


@@ -186,7 +186,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 u64 val;
 val = kvm_arm_timer_get_reg(vcpu, reg->id);
-return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 /**


@@ -321,8 +321,8 @@ void __init mem_init(void)
 #endif
 MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-MLG((unsigned long)vmemmap,
-(unsigned long)vmemmap + VMEMMAP_SIZE),
+MLG(VMEMMAP_START,
+VMEMMAP_START + VMEMMAP_SIZE),
 MLM((unsigned long)virt_to_page(PAGE_OFFSET),
 (unsigned long)virt_to_page(high_memory)),
 #endif


@@ -33,7 +33,7 @@
 #define PAGE_SHIFT 16
 #endif
 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
 /*
 * This is used for calculating the real page sizes


@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 pte_val(pte) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 if (!(pte_val(pte) & _PAGE_NO_READ))
 pte_val(pte) |= _PAGE_SILENT_READ;
 else
@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
 pmd_val(pmd) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 if (!(pmd_val(pmd) & _PAGE_NO_READ))
 pmd_val(pmd) |= _PAGE_SILENT_READ;
 else


@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
 if ((config_enabled(CONFIG_32BIT) ||
 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-(regs->regs[2] == __NR_syscall)) {
+(regs->regs[2] == __NR_syscall))
 i++;
-n++;
-}
 while (n--)
 ret |= mips_get_syscall_arg(args++, task, regs, i++);


@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 asmlinkage void do_ov(struct pt_regs *regs)
 {
 enum ctx_state prev_state;
-siginfo_t info;
+siginfo_t info = {
+.si_signo = SIGFPE,
+.si_code = FPE_INTOVF,
+.si_addr = (void __user *)regs->cp0_epc,
+};
 prev_state = exception_enter();
 die_if_kernel("Integer overflow", regs);
-info.si_code = FPE_INTOVF;
-info.si_signo = SIGFPE;
-info.si_errno = 0;
-info.si_addr = (void __user *) regs->cp0_epc;
 force_sig_info(SIGFPE, &info, current);
 exception_exit(prev_state);
 }
@@ -874,7 +874,7 @@ out:
 void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 const char *str)
 {
-siginfo_t info;
+siginfo_t info = { 0 };
 char b[40];
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 else
 info.si_code = FPE_INTOVF;
 info.si_signo = SIGFPE;
-info.si_errno = 0;
 info.si_addr = (void __user *) regs->cp0_epc;
 force_sig_info(SIGFPE, &info, current);
 break;


@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 void __user *uaddr = (void __user *)(long)reg->addr;
-return copy_to_user(uaddr, vs, 16);
+return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
 } else {
 return -EINVAL;
 }
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 void __user *uaddr = (void __user *)(long)reg->addr;
-return copy_from_user(vs, uaddr, 16);
+return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
 } else {
 return -EINVAL;
 }


@@ -13,6 +13,9 @@
 #define SMBUS_PCI_REG64 0x64
 #define SMBUS_PCI_REGB4 0xb4
+#define HPET_MIN_CYCLES 64
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
 static DEFINE_SPINLOCK(hpet_lock);
 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
 cnt += delta;
 hpet_write(HPET_T0_CMP, cnt);
-res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
-return res;
+res = (int)(cnt - hpet_read(HPET_COUNTER));
+return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 static irqreturn_t hpet_irq_handler(int irq, void *data)
@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
 cd->cpumask = cpumask_of(cpu);
 clockevent_set_clock(cd, HPET_FREQ);
 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
-cd->min_delta_ns = 5000;
+cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
 clockevents_register_device(cd);
 setup_irq(HPET_T0_IRQ, &hpet_irq);


@@ -30,13 +30,13 @@
 #include "smp.h"
 DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU(uint32_t, core0_c0count);
 static void *ipi_set0_regs[16];
 static void *ipi_clear0_regs[16];
 static void *ipi_status0_regs[16];
 static void *ipi_en0_regs[16];
 static void *ipi_mailbox_buf[16];
+static uint32_t core0_c0count[NR_CPUS];
 /* read a 32bit value from ipi register */
 #define loongson3_ipi_read32(addr) readl(addr)
@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
 if (action & SMP_ASK_C0COUNT) {
 BUG_ON(cpu != 0);
 c0count = read_c0_count();
-for (i = 1; i < num_possible_cpus(); i++)
-per_cpu(core0_c0count, i) = c0count;
+c0count = c0count ? c0count : 1;
+for (i = 1; i < nr_cpu_ids; i++)
+core0_c0count[i] = c0count;
+__wbflush(); /* Let others see the result ASAP */
 }
 }
-#define MAX_LOOPS 1111
+#define MAX_LOOPS 800
 /*
 * SMP init and finish on secondary CPUs
 */
@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
 cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 i = 0;
-__this_cpu_write(core0_c0count, 0);
+core0_c0count[cpu] = 0;
 loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
-while (!__this_cpu_read(core0_c0count)) {
+while (!core0_c0count[cpu]) {
 i++;
 cpu_relax();
 }
 if (i > MAX_LOOPS)
 i = MAX_LOOPS;
-initcount = __this_cpu_read(core0_c0count) + i;
+if (cpu_data[cpu].package)
+initcount = core0_c0count[cpu] + i;
+else /* Local access is faster for loops */
+initcount = core0_c0count[cpu] + i/2;
 write_c0_count(initcount);
 }


@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
 sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
 sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
-c->scache.sets = 64 << sets;
+if (sets)
+c->scache.sets = 64 << sets;
 line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
 line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
-c->scache.linesz = 2 << line_sz;
+if (line_sz)
+c->scache.linesz = 2 << line_sz;
 assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
 assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
 c->scache.waysize = c->scache.sets * c->scache.linesz;
 c->scache.waybit = __ffs(c->scache.waysize);
-c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-return 1;
+if (c->scache.linesz) {
+c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+return 1;
+}
+return 0;
 }
 void __weak platform_early_l2_init(void)


@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 #endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);


@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-long ret = 0;
 /* Do the secure computing check first. */
 secure_computing_strict(regs->gr[20]);
 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-tracehook_report_syscall_entry(regs))
-ret = -1L;
+tracehook_report_syscall_entry(regs)) {
+/*
+* Tracing decided this syscall should not happen or the
+* debugger stored an invalid system call number. Skip
+* the system call and the system call restart handling.
+*/
+regs->gr[20] = -1UL;
+goto out;
+}
 #ifdef CONFIG_64BIT
 if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 regs->gr[24] & 0xffffffff,
 regs->gr[23] & 0xffffffff);
-return ret ? : regs->gr[20];
+out:
+return regs->gr[20];
 }
 void do_syscall_trace_exit(struct pt_regs *regs)


@@ -343,7 +343,7 @@ tracesys_next:
 #endif
 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
-b,n .Lsyscall_nosys
+b,n .Ltracesys_nosys
 LDREGX %r20(%r19), %r19
@@ -359,6 +359,9 @@ tracesys_next:
 be 0(%sr7,%r19)
 ldo R%tracesys_exit(%r2),%r2
+.Ltracesys_nosys:
+ldo -ENOSYS(%r0),%r28 /* set errno */
 /* Do *not* call this function on the gateway page, because it
 makes a direct call to syscall_trace. */


@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
 eeh_pcid_put(dev);
 if (driver->err_handler &&
 driver->err_handler->error_detected &&
-driver->err_handler->slot_reset &&
-driver->err_handler->resume)
+driver->err_handler->slot_reset)
 return NULL;
 }


@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
 static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
 fpregs->pad = 0;
+fpregs->fpc = fpu->fpc;
 if (MACHINE_HAS_VX)
 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
 else
@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
+fpu->fpc = fpregs->fpc;
 if (MACHINE_HAS_VX)
 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
 else


@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
 struct kvm_s390_sie_block *sie_block;
 unsigned int host_acrs[NUM_ACRS];
 struct fpu host_fpregs;
-struct fpu guest_fpregs;
 struct kvm_s390_local_interrupt local_int;
 struct hrtimer ckc_timer;
 struct kvm_s390_pgm_info pgm;


@@ -181,6 +181,7 @@ int main(void)
 OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
 OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
 OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
+OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
 OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
 OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
 OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);


@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 /* Restore high gprs from signal stack */
 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
-sizeof(&sregs_ext->gprs_high)))
+sizeof(sregs_ext->gprs_high)))
 return -EFAULT;
 for (i = 0; i < NUM_GPRS; i++)
 *(__u32 *)&regs->gprs[i] = gprs_high[i];


@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 return 0;
 }
-/*
-* Backs up the current FP/VX register save area on a particular
-* destination. Used to switch between different register save
-* areas.
-*/
-static inline void save_fpu_to(struct fpu *dst)
-{
-dst->fpc = current->thread.fpu.fpc;
-dst->regs = current->thread.fpu.regs;
-}
-/*
-* Switches the FP/VX register save area from which to lazy
-* restore register contents.
-*/
-static inline void load_fpu_from(struct fpu *from)
-{
-current->thread.fpu.fpc = from->fpc;
-current->thread.fpu.regs = from->regs;
-}
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 /* Save host register state */
 save_fpu_regs();
-save_fpu_to(&vcpu->arch.host_fpregs);
-if (test_kvm_facility(vcpu->kvm, 129)) {
-current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-/*
-* Use the register save area in the SIE-control block
-* for register restore and save in kvm_arch_vcpu_put()
-*/
-current->thread.fpu.vxrs =
-(__vector128 *)&vcpu->run->s.regs.vrs;
-} else
-load_fpu_from(&vcpu->arch.guest_fpregs);
+vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+/* Depending on MACHINE_HAS_VX, data stored to vrs either
+* has vector register or floating point register format.
+*/
+current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
 if (test_fp_ctl(current->thread.fpu.fpc))
 /* User space provided an invalid FPC, let's clear it */
 current->thread.fpu.fpc = 0;
@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 gmap_disable(vcpu->arch.gmap);
+/* Save guest register state */
 save_fpu_regs();
-if (test_kvm_facility(vcpu->kvm, 129))
-/*
-* kvm_arch_vcpu_load() set up the register save area to
-* the &vcpu->run->s.regs.vrs and, thus, the vector registers
-* are already saved. Only the floating-point control must be
-* copied.
-*/
-vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-else
-save_fpu_to(&vcpu->arch.guest_fpregs);
-load_fpu_from(&vcpu->arch.host_fpregs);
+vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+/* Restore host register state */
+current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
 save_access_regs(vcpu->run->s.regs.acrs);
 restore_access_regs(vcpu->arch.host_acrs);
@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-vcpu->arch.guest_fpregs.fpc = 0;
-asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+/* make sure the new fpc will be lazily loaded */
+save_fpu_regs();
+current->thread.fpu.fpc = 0;
 vcpu->arch.sie_block->gbea = 1;
 vcpu->arch.sie_block->pp = 0;
 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 vcpu->arch.local_int.wq = &vcpu->wq;
 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-/*
-* Allocate a save area for floating-point registers. If the vector
-* extension is available, register contents are saved in the SIE
-* control block. The allocated save area is still required in
-* particular places, for example, in kvm_s390_vcpu_store_status().
-*/
-vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-GFP_KERNEL);
-if (!vcpu->arch.guest_fpregs.fprs) {
-rc = -ENOMEM;
-goto out_free_sie_block;
-}
 rc = kvm_vcpu_init(vcpu, kvm, id);
 if (rc)
 goto out_free_sie_block;
@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+/* make sure the new values will be lazily loaded */
+save_fpu_regs();
 if (test_fp_ctl(fpu->fpc))
 return -EINVAL;
-memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-save_fpu_regs();
-load_fpu_from(&vcpu->arch.guest_fpregs);
+current->thread.fpu.fpc = fpu->fpc;
+if (MACHINE_HAS_VX)
+convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+else
+memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
 return 0;
 }
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+/* make sure we have the latest values */
+save_fpu_regs();
+if (MACHINE_HAS_VX)
+convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+else
+memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+fpu->fpc = current->thread.fpu.fpc;
 return 0;
 }
@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
 unsigned char archmode = 1;
+freg_t fprs[NUM_FPRS];
 unsigned int px;
 u64 clkcomp;
 int rc;
+px = kvm_s390_get_prefix(vcpu);
 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
 if (write_guest_abs(vcpu, 163, &archmode, 1))
 return -EFAULT;
-gpa = SAVE_AREA_BASE;
+gpa = 0;
 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
 if (write_guest_real(vcpu, 163, &archmode, 1))
 return -EFAULT;
-gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
+gpa = px;
+} else
+gpa -= __LC_FPREGS_SAVE_AREA;
+/* manually convert vector registers if necessary */
+if (MACHINE_HAS_VX) {
+convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+fprs, 128);
+} else {
+rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+vcpu->run->s.regs.vrs, 128);
 }
-rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
-vcpu->arch.guest_fpregs.fprs, 128);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
+rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
 vcpu->run->s.regs.gprs, 128);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
+rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
 &vcpu->arch.sie_block->gpsw, 16);
-px = kvm_s390_get_prefix(vcpu);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
+rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
 &px, 4);
-rc |= write_guest_abs(vcpu,
-gpa + offsetof(struct save_area, fp_ctrl_reg),
-&vcpu->arch.guest_fpregs.fpc, 4);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
+rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
+&vcpu->run->s.regs.fpc, 4);
+rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 &vcpu->arch.sie_block->todpr, 4);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
+rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
 &vcpu->arch.sie_block->cputm, 8);
 clkcomp = vcpu->arch.sie_block->ckc >> 8;
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
+rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
 &clkcomp, 8);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
+rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
 &vcpu->run->s.regs.acrs, 64);
-rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
+rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
 &vcpu->arch.sie_block->gcr, 128);
 return rc ? -EFAULT : 0;
 }
@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 * it into the save area
 */
 save_fpu_regs();
-if (test_kvm_facility(vcpu->kvm, 129)) {
-/*
-* If the vector extension is available, the vector registers
-* which overlaps with floating-point registers are saved in
-* the SIE-control block. Hence, extract the floating-point
-* registers and the FPC value and store them in the
-* guest_fpregs structure.
-*/
-vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-current->thread.fpu.vxrs);
-} else
-save_fpu_to(&vcpu->arch.guest_fpregs);
+vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 save_access_regs(vcpu->run->s.regs.acrs);
 return kvm_s390_store_status_unloaded(vcpu, addr);

@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
 int i;
 /* Normalize entries to being relative to the start of the section */
-for (p = start, i = 0; p < finish; p++, i += 8)
+for (p = start, i = 0; p < finish; p++, i += 8) {
 p->insn += i;
+p->fixup += i + 4;
+}
 sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
 /* Denormalize all entries */
-for (p = start, i = 0; p < finish; p++, i += 8)
+for (p = start, i = 0; p < finish; p++, i += 8) {
 p->insn -= i;
+p->fixup -= i + 4;
+}
 }
 #ifdef CONFIG_MODULES


@@ -413,7 +413,7 @@ out:
 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
 {
-int ret;
+long ret;
 if (personality(current->personality) == PER_LINUX32 &&
 personality(personality) == PER_LINUX)


@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
 {
 int pid, n, status;
+fflush(stdout);
 pid = fork();
 if (pid == 0)
 ptrace_child();


@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
 * Interrupts are off on entry.
 */
 PARAVIRT_ADJUST_EXCEPTION_FRAME
+ASM_CLAC /* Do this early to minimize exposure */
 SWAPGS
 /*


@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 #define __ARCH_HAS_DO_SOFTIRQ
+struct irq_desc;
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);


@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
+#include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
 saved_magic = 0x123456789abcdef0L;
 #endif /* CONFIG_64BIT */
+/*
+* Pause/unpause graph tracing around do_suspend_lowlevel as it has
+* inconsistent call/return info after it jumps to the wakeup vector.
+*/
+pause_graph_tracing();
 do_suspend_lowlevel();
+unpause_graph_tracing();
 return 0;
 }


@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
 int pin, ioapic, irq, irq_entry;
 const struct cpumask *mask;
+struct irq_desc *desc;
 struct irq_data *idata;
 struct irq_chip *chip;
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
 if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
 continue;
-idata = irq_get_irq_data(irq);
+desc = irq_to_desc(irq);
+raw_spin_lock_irq(&desc->lock);
+idata = irq_desc_get_irq_data(desc);
 /*
 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
 /* Might be lapic_chip for irq 0 */
 if (chip->irq_set_affinity)
 chip->irq_set_affinity(idata, mask, false);
+raw_spin_unlock_irq(&desc->lock);
 }
 }
 #endif


@ -30,7 +30,7 @@ struct apic_chip_data {
struct irq_domain *x86_vector_domain; struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock); static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask; static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller; static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
*/ */
static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
static int current_offset = VECTOR_OFFSET_START % 16; static int current_offset = VECTOR_OFFSET_START % 16;
int cpu, err; int cpu, vector;
if (d->move_in_progress) /*
* If there is still a move in progress or the previous move has not
* been cleaned up completely, tell the caller to come back later.
*/
if (d->move_in_progress ||
cpumask_intersects(d->old_domain, cpu_online_mask))
return -EBUSY; return -EBUSY;
/* Only try and allocate irqs on cpus that are present */ /* Only try and allocate irqs on cpus that are present */
err = -ENOSPC;
cpumask_clear(d->old_domain); cpumask_clear(d->old_domain);
cpumask_clear(searched_cpumask);
cpu = cpumask_first_and(mask, cpu_online_mask); cpu = cpumask_first_and(mask, cpu_online_mask);
while (cpu < nr_cpu_ids) { while (cpu < nr_cpu_ids) {
int new_cpu, vector, offset; int new_cpu, offset;
/* Get the possible target cpus for @mask/@cpu from the apic */
apic->vector_allocation_domain(cpu, vector_cpumask, mask); apic->vector_allocation_domain(cpu, vector_cpumask, mask);
/*
* Clear the offline cpus from @vector_cpumask for searching
* and verify whether the result overlaps with @mask. If true,
* then the call to apic->cpu_mask_to_apicid_and() will
* succeed as well. If not, no point in trying to find a
* vector in this mask.
*/
cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
if (!cpumask_intersects(vector_searchmask, mask))
goto next_cpu;
if (cpumask_subset(vector_cpumask, d->domain)) { if (cpumask_subset(vector_cpumask, d->domain)) {
err = 0;
if (cpumask_equal(vector_cpumask, d->domain)) if (cpumask_equal(vector_cpumask, d->domain))
break; goto success;
/* /*
* New cpumask using the vector is a proper subset of * Mark the cpus which are not longer in the mask for
* the current in use mask. So cleanup the vector * cleanup.
* allocation for the members that are not used anymore.
*/ */
cpumask_andnot(d->old_domain, d->domain, cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
vector_cpumask); vector = d->cfg.vector;
d->move_in_progress = goto update;
cpumask_intersects(d->old_domain, cpu_online_mask);
cpumask_and(d->domain, d->domain, vector_cpumask);
break;
} }
vector = current_vector; vector = current_vector;
@ -156,45 +168,60 @@ next:
vector = FIRST_EXTERNAL_VECTOR + offset; vector = FIRST_EXTERNAL_VECTOR + offset;
} }
if (unlikely(current_vector == vector)) { /* If the search wrapped around, try the next cpu */
cpumask_or(d->old_domain, d->old_domain, if (unlikely(current_vector == vector))
vector_cpumask); goto next_cpu;
cpumask_andnot(vector_cpumask, mask, d->old_domain);
cpu = cpumask_first_and(vector_cpumask,
cpu_online_mask);
continue;
}
if (test_bit(vector, used_vectors)) if (test_bit(vector, used_vectors))
goto next; goto next;
for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { for_each_cpu(new_cpu, vector_searchmask) {
if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
goto next; goto next;
} }
/* Found one! */ /* Found one! */
current_vector = vector; current_vector = vector;
current_offset = offset; current_offset = offset;
if (d->cfg.vector) { /* Schedule the old vector for cleanup on all cpus */
if (d->cfg.vector)
cpumask_copy(d->old_domain, d->domain); cpumask_copy(d->old_domain, d->domain);
d->move_in_progress = for_each_cpu(new_cpu, vector_searchmask)
cpumask_intersects(d->old_domain, cpu_online_mask);
}
for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
d->cfg.vector = vector; goto update;
cpumask_copy(d->domain, vector_cpumask);
err = 0;
break;
}
if (!err) { next_cpu:
/* cache destination APIC IDs into cfg->dest_apicid */ /*
err = apic->cpu_mask_to_apicid_and(mask, d->domain, * We exclude the current @vector_cpumask from the requested
&d->cfg.dest_apicid); * @mask and try again with the next online cpu in the
* result. We cannot modify @mask, so we use @vector_cpumask
* as a temporary buffer here as it will be reassigned when
* calling apic->vector_allocation_domain() above.
*/
cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
cpumask_andnot(vector_cpumask, mask, searched_cpumask);
cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
continue;
} }
return -ENOSPC;
return err; update:
/*
* Exclude offline cpus from the cleanup mask and set the
* move_in_progress flag when the result is not empty.
*/
cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
d->move_in_progress = !cpumask_empty(d->old_domain);
d->cfg.vector = vector;
cpumask_copy(d->domain, vector_cpumask);
success:
/*
* Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
* as we already established, that mask & d->domain & cpu_online_mask
* is not empty.
*/
BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
&d->cfg.dest_apicid));
return 0;
} }
static int assign_irq_vector(int irq, struct apic_chip_data *data, static int assign_irq_vector(int irq, struct apic_chip_data *data,
@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
static void clear_irq_vector(int irq, struct apic_chip_data *data) static void clear_irq_vector(int irq, struct apic_chip_data *data)
{ {
struct irq_desc *desc; struct irq_desc *desc;
unsigned long flags;
int cpu, vector; int cpu, vector;
raw_spin_lock_irqsave(&vector_lock, flags);
BUG_ON(!data->cfg.vector); BUG_ON(!data->cfg.vector);
vector = data->cfg.vector; vector = data->cfg.vector;
@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
data->cfg.vector = 0; data->cfg.vector = 0;
cpumask_clear(data->domain); cpumask_clear(data->domain);
if (likely(!data->move_in_progress)) { /*
raw_spin_unlock_irqrestore(&vector_lock, flags); * If move is in progress or the old_domain mask is not empty,
* i.e. the cleanup IPI has not been processed yet, we need to remove
* the old references to desc from all cpus vector tables.
*/
if (!data->move_in_progress && cpumask_empty(data->old_domain))
return; return;
}
desc = irq_to_desc(irq); desc = irq_to_desc(irq);
for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
} }
} }
data->move_in_progress = 0; data->move_in_progress = 0;
raw_spin_unlock_irqrestore(&vector_lock, flags);
} }
void init_irq_alloc_info(struct irq_alloc_info *info, void init_irq_alloc_info(struct irq_alloc_info *info,
@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
static void x86_vector_free_irqs(struct irq_domain *domain, static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs) unsigned int virq, unsigned int nr_irqs)
{ {
struct apic_chip_data *apic_data;
struct irq_data *irq_data; struct irq_data *irq_data;
unsigned long flags;
int i; int i;
for (i = 0; i < nr_irqs; i++) { for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i); irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
if (irq_data && irq_data->chip_data) { if (irq_data && irq_data->chip_data) {
raw_spin_lock_irqsave(&vector_lock, flags);
clear_irq_vector(virq + i, irq_data->chip_data); clear_irq_vector(virq + i, irq_data->chip_data);
free_apic_chip_data(irq_data->chip_data); apic_data = irq_data->chip_data;
irq_domain_reset_irq_data(irq_data);
raw_spin_unlock_irqrestore(&vector_lock, flags);
free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
if (virq + i < nr_legacy_irqs()) if (virq + i < nr_legacy_irqs())
legacy_irq_data[virq + i] = NULL; legacy_irq_data[virq + i] = NULL;
#endif #endif
irq_domain_reset_irq_data(irq_data);
} }
} }
} }
@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
arch_init_htirq_domain(x86_vector_domain); arch_init_htirq_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
return arch_early_ioapic_init(); return arch_early_ioapic_init();
} }
@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
return -EINVAL; return -EINVAL;
err = assign_irq_vector(irq, data, dest); err = assign_irq_vector(irq, data, dest);
if (err) { return err ? err : IRQ_SET_MASK_OK;
if (assign_irq_vector(irq, data,
irq_data_get_affinity_mask(irq_data)))
pr_err("Failed to recover vector for irq %d\n", irq);
return err;
}
return IRQ_SET_MASK_OK;
} }
static struct irq_chip lapic_controller = { static struct irq_chip lapic_controller = {
@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data) static void __send_cleanup_vector(struct apic_chip_data *data)
{ {
cpumask_var_t cleanup_mask; raw_spin_lock(&vector_lock);
cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
for_each_cpu_and(i, data->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i),
IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
data->move_in_progress = 0; data->move_in_progress = 0;
if (!cpumask_empty(data->old_domain))
apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
raw_spin_unlock(&vector_lock);
} }
void send_cleanup_vector(struct irq_cfg *cfg) void send_cleanup_vector(struct irq_cfg *cfg)
@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
goto unlock; goto unlock;
/* /*
* Check if the irq migration is in progress. If so, we * Nothing to cleanup if irq migration is in progress
* haven't received the cleanup request yet for this irq. * or this cpu is not set in the cleanup mask.
*/ */
if (data->move_in_progress) if (data->move_in_progress ||
!cpumask_test_cpu(me, data->old_domain))
goto unlock; goto unlock;
/*
* We have two cases to handle here:
* 1) vector is unchanged but the target mask got reduced
* 2) vector and the target mask have changed
*
* #1 is obvious, but in #2 we have two vectors with the same
* irq descriptor: the old and the new vector. So we need to
* make sure that we only clean up the old vector. The new
* vector has the current @vector number in the config and
* this cpu is part of the target mask. We better leave that
* one alone.
*/
if (vector == data->cfg.vector && if (vector == data->cfg.vector &&
cpumask_test_cpu(me, data->domain)) cpumask_test_cpu(me, data->domain))
goto unlock; goto unlock;
@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
goto unlock; goto unlock;
} }
__this_cpu_write(vector_irq[vector], VECTOR_UNUSED); __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
cpumask_clear_cpu(me, data->old_domain);
unlock: unlock:
raw_spin_unlock(&desc->lock); raw_spin_unlock(&desc->lock);
} }
@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
__irq_complete_move(cfg, ~get_irq_regs()->orig_ax); __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
} }
void irq_force_complete_move(int irq) /*
* Called with @desc->lock held and interrupts disabled.
*/
void irq_force_complete_move(struct irq_desc *desc)
{ {
struct irq_cfg *cfg = irq_cfg(irq); struct irq_data *irqdata = irq_desc_get_irq_data(desc);
struct apic_chip_data *data = apic_chip_data(irqdata);
struct irq_cfg *cfg = data ? &data->cfg : NULL;
if (cfg) if (!cfg)
__irq_complete_move(cfg, cfg->vector); return;
__irq_complete_move(cfg, cfg->vector);
/*
* This is tricky. If the cleanup of @data->old_domain has not been
* done yet, then the following setaffinity call will fail with
* -EBUSY. This can leave the interrupt in a stale state.
*
* The cleanup cannot make progress because we hold @desc->lock. So in
* case @data->old_domain is not yet cleaned up, we need to drop the
* lock and acquire it again. @desc cannot go away, because the
* hotplug code holds the sparse irq lock.
*/
raw_spin_lock(&vector_lock);
/* Clean out all offline cpus (including ourself) first. */
cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
while (!cpumask_empty(data->old_domain)) {
raw_spin_unlock(&vector_lock);
raw_spin_unlock(&desc->lock);
cpu_relax();
raw_spin_lock(&desc->lock);
/*
* Reevaluate apic_chip_data. It might have been cleared after
* we dropped @desc->lock.
*/
data = apic_chip_data(irqdata);
if (!data)
return;
raw_spin_lock(&vector_lock);
}
raw_spin_unlock(&vector_lock);
} }
#endif #endif

View file

@ -462,7 +462,7 @@ void fixup_irqs(void)
* non intr-remapping case, we can't wait till this interrupt * non intr-remapping case, we can't wait till this interrupt
* arrives at this cpu before completing the irq move. * arrives at this cpu before completing the irq move.
*/ */
irq_force_complete_move(irq); irq_force_complete_move(desc);
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1; break_affinity = 1;
@ -470,6 +470,15 @@ void fixup_irqs(void)
} }
chip = irq_data_get_irq_chip(data); chip = irq_data_get_irq_chip(data);
/*
* The interrupt descriptor might have been cleaned up
* already, but it is not yet removed from the radix tree
*/
if (!chip) {
raw_spin_unlock(&desc->lock);
continue;
}
if (!irqd_can_move_in_process_context(data) && chip->irq_mask) if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
chip->irq_mask(data); chip->irq_mask(data);

View file

@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
u16 sel; u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea; la = seg_base(ctxt, addr.seg) + addr.ea;
*linear = la;
*max_size = 0; *max_size = 0;
switch (mode) { switch (mode) {
case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT64:
*linear = la;
if (is_noncanonical_address(la)) if (is_noncanonical_address(la))
goto bad; goto bad;
@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
goto bad; goto bad;
break; break;
default: default:
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg); addr.seg);
if (!usable) if (!usable)
@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
if (size > *max_size) if (size > *max_size)
goto bad; goto bad;
} }
la &= (u32)-1;
break; break;
} }
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
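Outside 64-bit mode a linear address is only 32 bits wide, so the sum of segment base and effective address has to wrap; storing *linear before that truncation (as the old code did) hands the caller an address the guest could never generate. A minimal userspace sketch of the wraparound, with made-up base and offset values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t seg_base = 0xffff0000ull;  /* hypothetical segment base */
        uint64_t ea       = 0x00020000ull;  /* hypothetical effective address */
        uint64_t la       = seg_base + ea;  /* 0x100010000: needs 33 bits */

        /* In protected/compat mode the linear address is 32 bits, so it wraps. */
        uint32_t linear = (uint32_t)la;     /* 0x00010000 */

        printf("la=%#llx linear=%#x\n", (unsigned long long)la, linear);
        return 0;
}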

View file

@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
return ret; return ret;
kvm_vcpu_mark_page_dirty(vcpu, table_gfn); kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
walker->ptes[level] = pte; walker->ptes[level - 1] = pte;
} }
return 0; return 0;
} }
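The level counter here is 1-based while the ptes[] array is 0-indexed, which is what the level - 1 fix restores: indexing with the raw level skips slot 0 and can write one slot past the end at the top level. A tiny sketch of the off-by-one, assuming a 1-based level like the walker uses:

#include <stdio.h>

#define MAX_LEVELS 4

int main(void)
{
        unsigned long ptes[MAX_LEVELS];
        int level;

        for (level = MAX_LEVELS; level >= 1; level--)
                ptes[level - 1] = level;  /* ptes[level] would be out of range at level 4 */

        printf("%lu %lu\n", ptes[0], ptes[MAX_LEVELS - 1]);
        return 0;
}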

View file

@ -595,6 +595,8 @@ struct vcpu_vmx {
/* Support for PML */ /* Support for PML */
#define PML_ENTITY_NUM 512 #define PML_ENTITY_NUM 512
struct page *pml_pg; struct page *pml_pg;
u64 current_tsc_ratio;
}; };
enum segment_cache_field { enum segment_cache_field {
@ -2062,14 +2064,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
/* Setup TSC multiplier */
if (cpu_has_vmx_tsc_scaling())
vmcs_write64(TSC_MULTIPLIER,
vcpu->arch.tsc_scaling_ratio);
vmx->loaded_vmcs->cpu = cpu; vmx->loaded_vmcs->cpu = cpu;
} }
/* Setup TSC multiplier */
if (kvm_has_tsc_control &&
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}
vmx_vcpu_pi_load(vcpu, cpu); vmx_vcpu_pi_load(vcpu, cpu);
} }
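The hunk above makes the TSC multiplier update conditional on a cached copy, so the VMCS field is rewritten only when the scaling ratio actually changes rather than on every vcpu load. A generic write-if-changed sketch of that pattern (all names here are illustrative, not KVM API):

#include <stdint.h>
#include <stdio.h>

struct shadowed_reg {
        uint64_t shadow;                   /* last value programmed into "hardware" */
};

static void hw_write(uint64_t val)         /* stand-in for the expensive VMCS write */
{
        printf("hw write %llu\n", (unsigned long long)val);
}

static void write_if_changed(struct shadowed_reg *r, uint64_t val)
{
        if (r->shadow == val)
                return;                    /* unchanged, skip the costly write */
        r->shadow = val;
        hw_write(val);
}

int main(void)
{
        struct shadowed_reg tsc_ratio = { 0 };

        write_if_changed(&tsc_ratio, 1000);    /* writes */
        write_if_changed(&tsc_ratio, 1000);    /* skipped */
        return 0;
}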

View file

@ -6544,12 +6544,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* KVM_DEBUGREG_WONT_EXIT again. * KVM_DEBUGREG_WONT_EXIT again.
*/ */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
int i;
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu); kvm_x86_ops->sync_dirty_debug_regs(vcpu);
for (i = 0; i < KVM_NR_DB_REGS; i++) kvm_update_dr0123(vcpu);
vcpu->arch.eff_db[i] = vcpu->arch.db[i]; kvm_update_dr6(vcpu);
kvm_update_dr7(vcpu);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
} }
/* /*

View file

@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
break; break;
} }
if (regno > nr_registers) { if (regno >= nr_registers) {
WARN_ONCE(1, "decoded an instruction with an invalid register"); WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL; return -EINVAL;
} }
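An array with nr_registers entries has valid indexes 0 through nr_registers - 1, so the old '>' test still accepted regno == nr_registers, one past the end; '>=' rejects it. A trivial sketch of the bounds check:

#include <stdbool.h>

#define NR_REGS 16

static bool reg_index_valid(int regno)
{
        return regno >= 0 && regno < NR_REGS;  /* i.e. reject with 'regno >= NR_REGS' */
}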

View file

@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
if (!bio_flagged(bio, BIO_NULL_MAPPED)) { if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
/* /*
* if we're in a workqueue, the request is orphaned, so * if we're in a workqueue, the request is orphaned, so
* don't copy into a random user address space, just free. * don't copy into a random user address space, just free
* and return -EINTR so user space doesn't expect any data.
*/ */
if (current->mm && bio_data_dir(bio) == READ) if (!current->mm)
ret = -EINTR;
else if (bio_data_dir(bio) == READ)
ret = bio_copy_to_iter(bio, bmd->iter); ret = bio_copy_to_iter(bio, bmd->iter);
if (bmd->is_our_pages) if (bmd->is_our_pages)
bio_free_pages(bio); bio_free_pages(bio);

View file

@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0; lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
BLK_SAFE_MAX_SECTORS; lim->max_dev_sectors = 0;
lim->chunk_sectors = 0; lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0; lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0; lim->max_discard_sectors = 0;

View file

@ -464,6 +464,15 @@ static struct dmi_system_id video_dmi_table[] = {
* control on these systems, but do not register a backlight sysfs * control on these systems, but do not register a backlight sysfs
* as brightness control does not work. * as brightness control does not work.
*/ */
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege R700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
},
{ {
/* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
.callback = video_disable_backlight_sysfs_if, .callback = video_disable_backlight_sysfs_if,
@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"), DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
}, },
}, },
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Satellite R830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
/* /*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme. * but the IDs actually follow the Device ID Scheme.

View file

@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
nfit_mem->bdw = NULL; nfit_mem->bdw = NULL;
} }
static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{ {
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev; struct nfit_memdev *nfit_memdev;
struct nfit_flush *nfit_flush; struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw; struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt; struct nfit_idt *nfit_idt;
u16 idt_idx, range_index; u16 idt_idx, range_index;
list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
if (nfit_dcr->dcr->region_index != dcr)
continue;
nfit_mem->dcr = nfit_dcr->dcr;
break;
}
if (!nfit_mem->dcr) {
dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
spa->range_index, __to_nfit_memdev(nfit_mem)
? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
return -ENODEV;
}
/*
* We've found enough to create an nvdimm, optionally
* find an associated BDW
*/
list_add(&nfit_mem->list, &acpi_desc->dimms);
list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
if (nfit_bdw->bdw->region_index != dcr) if (nfit_bdw->bdw->region_index != dcr)
continue; continue;
@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
} }
if (!nfit_mem->bdw) if (!nfit_mem->bdw)
return 0; return;
nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
if (!nfit_mem->spa_bdw) if (!nfit_mem->spa_bdw)
return 0; return;
range_index = nfit_mem->spa_bdw->range_index; range_index = nfit_mem->spa_bdw->range_index;
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
} }
break; break;
} }
return 0;
} }
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, *found; struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev; struct nfit_memdev *nfit_memdev;
int type = nfit_spa_type(spa); int type = nfit_spa_type(spa);
u16 dcr;
switch (type) { switch (type) {
case NFIT_SPA_DCR: case NFIT_SPA_DCR:
@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
} }
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
int rc; struct nfit_dcr *nfit_dcr;
u32 device_handle;
u16 dcr;
if (nfit_memdev->memdev->range_index != spa->range_index) if (nfit_memdev->memdev->range_index != spa->range_index)
continue; continue;
found = NULL; found = NULL;
dcr = nfit_memdev->memdev->region_index; dcr = nfit_memdev->memdev->region_index;
device_handle = nfit_memdev->memdev->device_handle;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { if (__to_nfit_memdev(nfit_mem)->device_handle
== device_handle) {
found = nfit_mem; found = nfit_mem;
break; break;
} }
@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
if (!nfit_mem) if (!nfit_mem)
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list); INIT_LIST_HEAD(&nfit_mem->list);
list_add(&nfit_mem->list, &acpi_desc->dimms);
}
list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
if (nfit_dcr->dcr->region_index != dcr)
continue;
/*
* Record the control region for the dimm. For
* the ACPI 6.1 case, where there are separate
* control regions for the pmem vs blk
* interfaces, be sure to record the extended
* blk details.
*/
if (!nfit_mem->dcr)
nfit_mem->dcr = nfit_dcr->dcr;
else if (nfit_mem->dcr->windows == 0
&& nfit_dcr->dcr->windows)
nfit_mem->dcr = nfit_dcr->dcr;
break;
}
if (dcr && !nfit_mem->dcr) {
dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
spa->range_index, dcr);
return -ENODEV;
} }
if (type == NFIT_SPA_DCR) { if (type == NFIT_SPA_DCR) {
@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_dcr = nfit_idt->idt; nfit_mem->idt_dcr = nfit_idt->idt;
break; break;
} }
nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
} else { } else {
/* /*
* A single dimm may belong to multiple SPA-PM * A single dimm may belong to multiple SPA-PM
@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
*/ */
nfit_mem->memdev_pmem = nfit_memdev->memdev; nfit_mem->memdev_pmem = nfit_memdev->memdev;
} }
if (found)
continue;
rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
if (rc)
return rc;
} }
return 0; return 0;

View file

@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
}, },
}, },
{
.callback = video_detect_force_vendor,
.ident = "Dell Inspiron 5737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
},
},
/* /*
* These models have a working acpi_video backlight control, and using * These models have a working acpi_video backlight control, and using

View file

@ -2081,7 +2081,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr)) if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT; return -EFAULT;
ptr += sizeof(void *); ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) { list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
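What is read from user space here is a 64-bit binder_uintptr_t cookie, but the old code advanced the command pointer by sizeof(void *), which is only 4 bytes on a 32-bit kernel and desynchronizes the parser. A small userspace sketch of the size mismatch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t cookie;                   /* what binder actually reads */

        printf("sizeof(void *)=%zu sizeof(cookie)=%zu\n",
               sizeof(void *), sizeof(cookie));
        /* On a 32-bit build this prints 4 vs 8: advancing by sizeof(void *)
         * would leave half of the cookie behind in the command stream. */
        return 0;
}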

View file

@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,

View file

@ -1142,8 +1142,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */ /* mark esata ports */
tmp = readl(port_mmio + PORT_CMD); tmp = readl(port_mmio + PORT_CMD);
if ((tmp & PORT_CMD_HPCP) || if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
ap->pflags |= ATA_PFLAG_EXTERNAL; ap->pflags |= ATA_PFLAG_EXTERNAL;
} }

View file

@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg) int cmd, void __user *arg)
{ {
int val = -EINVAL, rc = -EINVAL; unsigned long val;
int rc = -EINVAL;
unsigned long flags; unsigned long flags;
switch (cmd) { switch (cmd) {
case ATA_IOC_GET_IO32: case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags); spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap); val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags); spin_unlock_irqrestore(ap->lock, flags);
if (copy_to_user(arg, &val, 1)) return put_user(val, (unsigned long __user *)arg);
return -EFAULT;
return 0;
case ATA_IOC_SET_IO32: case HDIO_SET_32BIT:
val = (unsigned long) arg; val = (unsigned long) arg;
rc = 0; rc = 0;
spin_lock_irqsave(ap->lock, flags); spin_lock_irqsave(ap->lock, flags);
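Copying a single byte of a multi-byte integer returns the low byte on little-endian but the high byte on big-endian machines, which is one reason the ioctl now moves the whole value with put_user(). A quick illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        int val = 1;
        unsigned char first;

        memcpy(&first, &val, 1);               /* mimics copy_to_user(arg, &val, 1) */
        printf("first byte = %u\n", first);    /* 1 on little-endian, 0 on big-endian */
        return 0;
}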

View file

@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{ {
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
unsigned long flags;
if (ap->ops->error_handler) { if (ap->ops->error_handler) {
if (in_wq) { if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
/* EH might have kicked in while host lock is /* EH might have kicked in while host lock is
* released. * released.
*/ */
@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} else } else
ata_port_freeze(ap); ata_port_freeze(ap);
} }
spin_unlock_irqrestore(ap->lock, flags);
} else { } else {
if (likely(!(qc->err_mask & AC_ERR_HSM))) if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc); ata_qc_complete(qc);
@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} }
} else { } else {
if (in_wq) { if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
ata_sff_irq_on(ap); ata_sff_irq_on(ap);
ata_qc_complete(qc); ata_qc_complete(qc);
spin_unlock_irqrestore(ap->lock, flags);
} else } else
ata_qc_complete(qc); ata_qc_complete(qc);
} }
@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
{ {
struct ata_link *link = qc->dev->link; struct ata_link *link = qc->dev->link;
struct ata_eh_info *ehi = &link->eh_info; struct ata_eh_info *ehi = &link->eh_info;
unsigned long flags = 0;
int poll_next; int poll_next;
lockdep_assert_held(ap->lock);
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
/* Make sure ata_sff_qc_issue() does not throw things /* Make sure ata_sff_qc_issue() does not throw things
@ -1112,14 +1106,6 @@ fsm_start:
} }
} }
/* Send the CDB (atapi) or the first data block (ata pio out).
* During the state transition, interrupt handler shouldn't
* be invoked before the data transfer is complete and
* hsm_task_state is changed. Hence, the following locking.
*/
if (in_wq)
spin_lock_irqsave(ap->lock, flags);
if (qc->tf.protocol == ATA_PROT_PIO) { if (qc->tf.protocol == ATA_PROT_PIO) {
/* PIO data out protocol. /* PIO data out protocol.
* send first data block. * send first data block.
@ -1135,9 +1121,6 @@ fsm_start:
/* send CDB */ /* send CDB */
atapi_send_cdb(ap, qc); atapi_send_cdb(ap, qc);
if (in_wq)
spin_unlock_irqrestore(ap->lock, flags);
/* if polling, ata_sff_pio_task() handles the rest. /* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here. * otherwise, interrupt handler takes over from here.
*/ */
@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
u8 status; u8 status;
int poll_next; int poll_next;
spin_lock_irq(ap->lock);
BUG_ON(ap->sff_pio_task_link == NULL); BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */ /* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag); qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) { if (!qc) {
ap->sff_pio_task_link = NULL; ap->sff_pio_task_link = NULL;
return; goto out_unlock;
} }
fsm_start: fsm_start:
@ -1381,11 +1366,14 @@ fsm_start:
*/ */
status = ata_sff_busy_wait(ap, ATA_BUSY, 5); status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) { if (status & ATA_BUSY) {
spin_unlock_irq(ap->lock);
ata_msleep(ap, 2); ata_msleep(ap, 2);
spin_lock_irq(ap->lock);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10); status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) { if (status & ATA_BUSY) {
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
return; goto out_unlock;
} }
} }
@ -1402,6 +1390,8 @@ fsm_start:
*/ */
if (poll_next) if (poll_next)
goto fsm_start; goto fsm_start;
out_unlock:
spin_unlock_irq(ap->lock);
} }
/** /**

View file

@ -32,6 +32,8 @@
#include <linux/libata.h> #include <linux/libata.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <asm/mach-rc32434/rb.h>
#define DRV_NAME "pata-rb532-cf" #define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0" #define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio; int gpio;
struct resource *res; struct resource *res;
struct ata_host *ah; struct ata_host *ah;
struct cf_device *pdata;
struct rb532_cf_info *info; struct rb532_cf_info *info;
int ret; int ret;
@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT; return -ENOENT;
} }
gpio = irq_to_gpio(irq); pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
gpio = pdata->gpio_pin;
if (gpio < 0) { if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT; return -ENOENT;

View file

@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM }, .driver_info = BTUSB_BCM_PATCHRAM },
/* Toshiba Corp - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Intel Bluetooth USB Bootloader (RAM module) */ /* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a), { USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },

View file

@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg; unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
/* find out the divider values to use for clock data */ /* find out the divider values to use for clock data */
while ((cfg_data->prate * 1000) != ndata->new_rate) { while ((cfg_data->prate * 1000) != ndata->new_rate) {
@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
cfg_data++; cfg_data++;
} }
spin_lock(cpuclk->lock); spin_lock_irqsave(cpuclk->lock, flags);
/* /*
* For the selected PLL clock frequency, get the pre-defined divider * For the selected PLL clock frequency, get the pre-defined divider
@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
DIV_MASK_ALL); DIV_MASK_ALL);
} }
spin_unlock(cpuclk->lock); spin_unlock_irqrestore(cpuclk->lock, flags);
return 0; return 0;
} }
@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
unsigned long div = 0, div_mask = DIV_MASK; unsigned long div = 0, div_mask = DIV_MASK;
unsigned long mux_reg; unsigned long mux_reg;
unsigned long flags;
/* find out the divider values to use for clock data */ /* find out the divider values to use for clock data */
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
} }
} }
spin_lock(cpuclk->lock); spin_lock_irqsave(cpuclk->lock, flags);
/* select mout_apll as the alternate parent */ /* select mout_apll as the alternate parent */
mux_reg = readl(base + E4210_SRC_CPU); mux_reg = readl(base + E4210_SRC_CPU);
@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
} }
exynos_set_safe_div(base, div, div_mask); exynos_set_safe_div(base, div, div_mask);
spin_unlock(cpuclk->lock); spin_unlock_irqrestore(cpuclk->lock, flags);
return 0; return 0;
} }

View file

@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
clk_disable(tcd->clk); if (!clockevent_state_detached(d))
clk_disable(tcd->clk);
return 0; return 0;
} }

View file

@ -50,6 +50,8 @@
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define MIN_OSCR_DELTA 16
static void __iomem *regbase; static void __iomem *regbase;
static cycle_t vt8500_timer_read(struct clocksource *cs) static cycle_t vt8500_timer_read(struct clocksource *cs)
@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
cpu_relax(); cpu_relax();
writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
return -ETIME; return -ETIME;
writel(1, regbase + TIMER_IER_VAL); writel(1, regbase + TIMER_IER_VAL);
@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
pr_err("%s: setup_irq failed for %s\n", __func__, pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name); clockevent.name);
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
4, 0xf0000000); MIN_OSCR_DELTA * 2, 0xf0000000);
} }
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);

View file

@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
if (!have_governor_per_policy()) if (!have_governor_per_policy())
cdata->gdbs_data = dbs_data; cdata->gdbs_data = dbs_data;
policy->governor_data = dbs_data;
ret = sysfs_create_group(get_governor_parent_kobj(policy), ret = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data)); get_sysfs_attr(dbs_data));
if (ret) if (ret)
goto reset_gdbs_data; goto reset_gdbs_data;
policy->governor_data = dbs_data;
return 0; return 0;
reset_gdbs_data: reset_gdbs_data:
policy->governor_data = NULL;
if (!have_governor_per_policy()) if (!have_governor_per_policy())
cdata->gdbs_data = NULL; cdata->gdbs_data = NULL;
cdata->exit(dbs_data, !policy->governor->initialized); cdata->exit(dbs_data, !policy->governor->initialized);
@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
if (!cdbs->shared || cdbs->shared->policy) if (!cdbs->shared || cdbs->shared->policy)
return -EBUSY; return -EBUSY;
policy->governor_data = NULL;
if (!--dbs_data->usage_count) { if (!--dbs_data->usage_count) {
sysfs_remove_group(get_governor_parent_kobj(policy), sysfs_remove_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data)); get_sysfs_attr(dbs_data));
policy->governor_data = NULL;
if (!have_governor_per_policy()) if (!have_governor_per_policy())
cdata->gdbs_data = NULL; cdata->gdbs_data = NULL;
cdata->exit(dbs_data, policy->governor->initialized == 1); cdata->exit(dbs_data, policy->governor->initialized == 1);
kfree(dbs_data); kfree(dbs_data);
} else {
policy->governor_data = NULL;
} }
free_common_dbs_info(policy, cdata); free_common_dbs_info(policy, cdata);

View file

@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
} }
} }
#else #else
static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq) static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{ {
return 0; return 0;
} }

View file

@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
at_xdmac_remove_xfer(atchan, desc); at_xdmac_remove_xfer(atchan, desc);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags); spin_unlock_irqrestore(&atchan->lock, flags);
@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
atchan = to_at_xdmac_chan(chan); atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) { if (at_xdmac_chan_is_cyclic(atchan)) {
if (at_xdmac_chan_is_paused(atchan))
at_xdmac_device_resume(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);

View file

@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */ /* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_err, u32 status_xfer) u32 status_block, u32 status_err, u32 status_xfer)
{ {
unsigned long flags; unsigned long flags;
if (dwc->mask) { if (status_block & dwc->mask) {
void (*callback)(void *param); void (*callback)(void *param);
void *callback_param; void *callback_param;
dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
channel_readl(dwc, LLP)); channel_readl(dwc, LLP));
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
callback = dwc->cdesc->period_callback; callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param; callback_param = dwc->cdesc->period_callback_param;
@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, 0); channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0); channel_writel(dwc, CTL_HI, 0);
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask);
@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
} }
/* Re-enable interrupts */
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
} }
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
{ {
struct dw_dma *dw = (struct dw_dma *)data; struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc; struct dw_dma_chan *dwc;
u32 status_block;
u32 status_xfer; u32 status_xfer;
u32 status_err; u32 status_err;
int i; int i;
status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER); status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR); status_err = dma_readl(dw, RAW.ERROR);
@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) { for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i]; dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
dwc_handle_cyclic(dw, dwc, status_err, status_xfer); dwc_handle_cyclic(dw, dwc, status_block, status_err,
status_xfer);
else if (status_err & (1 << i)) else if (status_err & (1 << i))
dwc_handle_error(dw, dwc); dwc_handle_error(dw, dwc);
else if (status_xfer & (1 << i)) else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc); dwc_scan_descriptors(dw, dwc);
} }
/* /* Re-enable interrupts */
* Re-enable interrupts.
*/
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
} }
@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
* softirq handler. * softirq handler.
*/ */
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
status = dma_readl(dw, STATUS_INT); status = dma_readl(dw, STATUS_INT);
@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
/* Try to recover */ /* Try to recover */
channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
dma_writel(dw, CFG, 0); dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable interrupts */ /* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.XFER, dwc->mask);
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask); channel_clear_bit(dw, MASK.ERROR, dwc->mask);
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan) int dw_dma_cyclic_start(struct dma_chan *chan)
{ {
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags; unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags); spin_lock_irqsave(&dwc->lock, flags);
/* Assert channel is idle */ /* Enable interrupts to perform cyclic transfer */
if (dma_readl(dw, CH_EN) & dwc->mask) { channel_set_bit(dw, MASK.BLOCK, dwc->mask);
dev_err(chan2dev(&dwc->chan),
"%s: BUG: Attempted to start non-idle channel\n",
__func__);
dwc_dump_chan_regs(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
dma_writel(dw, CLEAR.ERROR, dwc->mask); dwc_dostart(dwc, dwc->cdesc->desc[0]);
dma_writel(dw, CLEAR.XFER, dwc->mask);
/* Setup DMAC channel registers */
channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
channel_writel(dwc, CTL_HI, 0);
channel_set_bit(dw, CH_EN, dwc->mask);
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dwc_chan_disable(dw, dwc); dwc_chan_disable(dw, dwc);
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask);
@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Force dma off, just in case */ /* Force dma off, just in case */
dw_dma_off(dw); dw_dma_off(dw);
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
/* Create a pool of consistent memory blocks for hardware descriptors */ /* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0); sizeof(struct dw_desc), 4, 0);

View file

@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32)); (PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT) if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN; updater->dcmd |= PXA_DCMD_ENDIRQEN;
if (sw_desc->cyclic)
sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
} }
static bool is_desc_completed(struct virt_dma_desc *vd) static bool is_desc_completed(struct virt_dma_desc *vd)
@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device, dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n", "%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd)); __func__, vd, vd->tx.cookie, is_desc_completed(vd));
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
}
if (is_desc_completed(vd)) { if (is_desc_completed(vd)) {
list_del(&vd->node); list_del(&vd->node);
vchan_cookie_complete(vd); vchan_cookie_complete(vd);
@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL; return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device, dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags); __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
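PXA_DCMD_LENGTH is the mask of the descriptor's length field, so OR-ing it with the period length sets every length bit instead of storing the length; AND-ing keeps only the bits that belong to the field. A standalone sketch with a made-up field layout:

#include <stdint.h>
#include <stdio.h>

#define LEN_MASK 0x1fffu       /* hypothetical 13-bit length field */
#define IRQ_EN   (1u << 21)    /* hypothetical interrupt-enable bit */

int main(void)
{
        uint32_t period_len = 0x100;
        uint32_t wrong = IRQ_EN | (LEN_MASK | period_len);  /* all length bits set */
        uint32_t right = IRQ_EN | (LEN_MASK & period_len);  /* just the length */

        printf("wrong=%#x right=%#x\n", wrong, right);
        return 0;
}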

View file

@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
*/ */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{ {
int status;
if (!edac_dev->edac_check) if (!edac_dev->edac_check)
return; return;
status = cancel_delayed_work(&edac_dev->work); edac_dev->op_state = OP_OFFLINE;
if (status == 0) {
/* workq instance might be running, wait for it */ cancel_delayed_work_sync(&edac_dev->work);
flush_workqueue(edac_workqueue); flush_workqueue(edac_workqueue);
}
} }
/* /*

View file

@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
*/ */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci) static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{ {
int status; mci->op_state = OP_OFFLINE;
if (mci->op_state != OP_RUNNING_POLL) cancel_delayed_work_sync(&mci->work);
return; flush_workqueue(edac_workqueue);
status = cancel_delayed_work(&mci->work);
if (status == 0) {
edac_dbg(0, "not canceled, flush the queue\n");
/* workq instance might be running, wait for it */
flush_workqueue(edac_workqueue);
}
} }
/* /*
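cancel_delayed_work() only removes a pending work item; if the callback is already running it returns 0 and the caller had to flush the whole workqueue and hope the callback did not re-arm itself. Marking the instance offline first and then using cancel_delayed_work_sync(), which waits for a running callback, closes that race. A minimal kernel-style sketch of the pattern (the struct and state value are illustrative, not the edac API):

#include <linux/workqueue.h>

struct example_poller {
        int op_state;                          /* e.g. OP_RUNNING_POLL / OP_OFFLINE */
        struct delayed_work work;
};

static void example_poller_teardown(struct example_poller *p)
{
        p->op_state = 0;                       /* "offline": callback must not re-arm */
        cancel_delayed_work_sync(&p->work);    /* waits for a running instance */
}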

View file

@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups) const struct attribute_group **groups)
{ {
char *name;
int i, err; int i, err;
/* /*
* The memory controller needs its own bus, in order to avoid * The memory controller needs its own bus, in order to avoid
* namespace conflicts at /sys/bus/edac. * namespace conflicts at /sys/bus/edac.
*/ */
mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
if (!mci->bus->name) if (!name)
return -ENOMEM; return -ENOMEM;
mci->bus->name = name;
edac_dbg(0, "creating bus %s\n", mci->bus->name); edac_dbg(0, "creating bus %s\n", mci->bus->name);
err = bus_register(mci->bus); err = bus_register(mci->bus);
if (err < 0) if (err < 0) {
goto fail_free_name; kfree(name);
return err;
}
/* get the /sys/devices/system/edac subsys reference */ /* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type; mci->dev.type = &mci_attr_type;
@ -961,8 +966,8 @@ fail_unregister_dimm:
device_unregister(&mci->dev); device_unregister(&mci->dev);
fail_unregister_bus: fail_unregister_bus:
bus_unregister(mci->bus); bus_unregister(mci->bus);
fail_free_name: kfree(name);
kfree(mci->bus->name);
return err; return err;
} }
@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
void edac_unregister_sysfs(struct mem_ctl_info *mci) void edac_unregister_sysfs(struct mem_ctl_info *mci)
{ {
const char *name = mci->bus->name;
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev); device_unregister(&mci->dev);
bus_unregister(mci->bus); bus_unregister(mci->bus);
kfree(mci->bus->name); kfree(name);
} }
static void mc_attr_release(struct device *dev) static void mc_attr_release(struct device *dev)

View file

@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
*/ */
static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
{ {
int status;
edac_dbg(0, "\n"); edac_dbg(0, "\n");
status = cancel_delayed_work(&pci->work); pci->op_state = OP_OFFLINE;
if (status == 0)
flush_workqueue(edac_workqueue); cancel_delayed_work_sync(&pci->work);
flush_workqueue(edac_workqueue);
} }
/* /*

View file

@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
} }
if ((attributes & ~EFI_VARIABLE_MASK) != 0 || if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
efivar_validate(name, data, size) == false) { efivar_validate(vendor, name, data, size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n"); printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL; return -EINVAL;
} }
@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
} }
if ((attributes & ~EFI_VARIABLE_MASK) != 0 || if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
efivar_validate(name, data, size) == false) { efivar_validate(new_var->VendorGuid, name, data,
size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n"); printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL; return -EINVAL;
} }
@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
static int static int
efivar_create_sysfs_entry(struct efivar_entry *new_var) efivar_create_sysfs_entry(struct efivar_entry *new_var)
{ {
int i, short_name_size; int short_name_size;
char *short_name; char *short_name;
unsigned long variable_name_size; unsigned long utf8_name_size;
efi_char16_t *variable_name; efi_char16_t *variable_name = new_var->var.VariableName;
int ret; int ret;
variable_name = new_var->var.VariableName;
variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
/* /*
* Length of the variable bytes in ASCII, plus the '-' separator, * Length of the variable bytes in UTF8, plus the '-' separator,
* plus the GUID, plus trailing NUL * plus the GUID, plus trailing NUL
*/ */
short_name_size = variable_name_size / sizeof(efi_char16_t) utf8_name_size = ucs2_utf8size(variable_name);
+ 1 + EFI_VARIABLE_GUID_LEN + 1; short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
short_name = kzalloc(short_name_size, GFP_KERNEL);
short_name = kmalloc(short_name_size, GFP_KERNEL);
if (!short_name) if (!short_name)
return -ENOMEM; return -ENOMEM;
/* Convert Unicode to normal chars (assume top bits are 0), ucs2_as_utf8(short_name, variable_name, short_name_size);
ala UTF-8 */
for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
short_name[i] = variable_name[i] & 0xFF;
}
/* This is ugly, but necessary to separate one vendor's /* This is ugly, but necessary to separate one vendor's
private variables from another's. */ private variables from another's. */
short_name[utf8_name_size] = '-';
*(short_name + strlen(short_name)) = '-';
efi_guid_to_str(&new_var->var.VendorGuid, efi_guid_to_str(&new_var->var.VendorGuid,
short_name + strlen(short_name)); short_name + utf8_name_size + 1);
new_var->kobj.kset = efivars_kset; new_var->kobj.kset = efivars_kset;
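The resulting sysfs entry name is the UTF-8 variable name, a '-', and the vendor GUID, e.g. Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c for a boot entry under the EFI global variable GUID. A sketch of the layout only (the kernel builds it with ucs2_as_utf8() and efi_guid_to_str(), not snprintf):

#include <stdio.h>

int main(void)
{
        char short_name[64];

        snprintf(short_name, sizeof(short_name), "%s-%s",
                 "Boot0000", "8be4df61-93ca-11d2-aa0d-00e098032b8c");
        printf("%s\n", short_name);
        return 0;
}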

View file

@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
} }
struct variable_validate { struct variable_validate {
efi_guid_t vendor;
char *name; char *name;
bool (*validate)(efi_char16_t *var_name, int match, u8 *data, bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
unsigned long len); unsigned long len);
}; };
/*
* This is the list of variables we need to validate, as well as the
* whitelist for what we think is safe not to default to immutable.
*
* If it has a validate() method that's not NULL, it'll go into the
* validation routine. If not, it is assumed valid, but still used for
* whitelisting.
*
* Note that it's sorted by {vendor,name}, but globbed names must come after
* any other name with the same prefix.
*/
static const struct variable_validate variable_validate[] = { static const struct variable_validate variable_validate[] = {
{ "BootNext", validate_uint16 }, { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
{ "BootOrder", validate_boot_order }, { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
{ "DriverOrder", validate_boot_order }, { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
{ "Boot*", validate_load_option }, { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
{ "Driver*", validate_load_option }, { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
{ "ConIn", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
{ "ConInDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
{ "ConOut", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
{ "ConOutDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
{ "ErrOut", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
{ "ErrOutDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
{ "Timeout", validate_uint16 }, { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
{ "Lang", validate_ascii_string }, { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
{ "PlatformLang", validate_ascii_string }, { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
{ "", NULL }, { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
{ LINUX_EFI_CRASH_GUID, "*", NULL },
{ NULL_GUID, "", NULL },
}; };
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
char u = var_name[*match];
/* Wildcard in the matching name means we've matched */
if (c == '*')
return true;
/* Case sensitive match */
if (!c && *match == len)
return true;
if (c != u)
return false;
if (!c)
return true;
}
return true;
}
bool bool
efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size)
{ {
int i; int i;
u16 *unicode_name = var_name; unsigned long utf8_size;
u8 *utf8_name;
for (i = 0; variable_validate[i].validate != NULL; i++) { utf8_size = ucs2_utf8size(var_name);
utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
if (!utf8_name)
return false;
ucs2_as_utf8(utf8_name, var_name, utf8_size);
utf8_name[utf8_size] = '\0';
for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
const char *name = variable_validate[i].name; const char *name = variable_validate[i].name;
int match; int match = 0;
for (match = 0; ; match++) { if (efi_guidcmp(vendor, variable_validate[i].vendor))
char c = name[match]; continue;
u16 u = unicode_name[match];
/* All special variables are plain ascii */ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
if (u > 127) if (variable_validate[i].validate == NULL)
return true;
/* Wildcard in the matching name means we've matched */
if (c == '*')
return variable_validate[i].validate(var_name,
match, data, len);
/* Case sensitive match */
if (c != u)
break; break;
kfree(utf8_name);
/* Reached the end of the string while matching */ return variable_validate[i].validate(var_name, match,
if (!c) data, data_size);
return variable_validate[i].validate(var_name,
match, data, len);
} }
} }
kfree(utf8_name);
return true; return true;
} }
EXPORT_SYMBOL_GPL(efivar_validate); EXPORT_SYMBOL_GPL(efivar_validate);
bool
efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
size_t len)
{
int i;
bool found = false;
int match = 0;
/*
* Check if our variable is in the validated variables list
*/
for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
if (efi_guidcmp(variable_validate[i].vendor, vendor))
continue;
if (variable_matches(var_name, len,
variable_validate[i].name, &match)) {
found = true;
break;
}
}
/*
* If it's in our list, it is removable.
*/
return found;
}
EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
static efi_status_t static efi_status_t
check_var_size(u32 attributes, unsigned long size) check_var_size(u32 attributes, unsigned long size)
{ {
@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
*set = false; *set = false;
if (efivar_validate(name, data, *size) == false) if (efivar_validate(*vendor, name, data, *size) == false)
return -EINVAL; return -EINVAL;
/* /*
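variable_matches() does glob-style matching against the table above: a '*' in the table entry matches any suffix, otherwise the names must be identical. A standalone sketch of the same idea with a short usage example (simplified: it takes a NUL-terminated name rather than a length that includes the terminator):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool matches(const char *var_name, const char *pattern)
{
        size_t i, len = strlen(var_name);

        for (i = 0; ; i++) {
                char c = pattern[i];

                if (c == '*')                          /* wildcard: prefix already matched */
                        return true;
                if (c == '\0')
                        return i == len;               /* exact match only if both ended */
                if (i >= len || c != var_name[i])
                        return false;
        }
}

int main(void)
{
        printf("%d %d\n",
               matches("Boot0001", "Boot*"),       /* 1 */
               matches("Boot0001", "BootOrder"));  /* 0 */
        return 0;
}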

View file

@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
# add asic specific block # add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o amdgpu_amdkfd_gfx_v7.o
@ -31,6 +31,7 @@ amdgpu-y += \
# add GMC block # add GMC block
amdgpu-y += \ amdgpu-y += \
gmc_v7_0.o \
gmc_v8_0.o gmc_v8_0.o
# add IH block # add IH block

View file

@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
uint32_t align; uint32_t align;
}; };
struct amdgpu_sa_bo;
/* sub-allocation buffer */ /* sub-allocation buffer */
struct amdgpu_sa_bo { struct amdgpu_sa_bo {
struct list_head olist; struct list_head olist;
@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags); uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);

View file

@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we /* Don't try to start link training before we
* have the dpcd */ * have the dpcd */
if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return; return;
/* set it to OFF so that drm_helper_connector_dpms() /* set it to OFF so that drm_helper_connector_dpms()

View file

@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
} }
/* post card */ /* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context); if (!amdgpu_card_posted(adev))
amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_resume(adev); r = amdgpu_resume(adev);
if (r)
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
amdgpu_fence_driver_resume(adev); amdgpu_fence_driver_resume(adev);
r = amdgpu_ib_ring_tests(adev); if (resume) {
if (r) r = amdgpu_ib_ring_tests(adev);
DRM_ERROR("ib ring test failed (%d).\n", r); if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
r = amdgpu_late_init(adev); r = amdgpu_late_init(adev);
if (r) if (r)
@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
} }
drm_kms_helper_poll_enable(dev); drm_kms_helper_poll_enable(dev);
drm_helper_hpd_irq_event(dev);
if (fbcon) { if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0); amdgpu_fbdev_set_suspend(adev, 0);

View file

@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base; struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags; unsigned long flags;
unsigned i; unsigned i, repcnt = 4;
int vpos, hpos, stat, min_udelay; int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl); amdgpu_flip_wait_fence(adev, &work->excl);
@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast * In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small. * machines because the time window for this to happen is very small.
*/ */
for (;;) { while (amdgpuCrtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in * start in hpos, and to the "fudged earlier" vblank start in
* vpos. * vpos.
@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Sleep at least until estimated real start of hw vblank */ /* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags); spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */
repcnt = 0;
break;
}
usleep_range(min_udelay, 2 * min_udelay); usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags); spin_lock_irqsave(&crtc->dev->event_lock, flags);
}; };
if (!repcnt)
DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
"framedur %d, linedur %d, stat %d, vpos %d, "
"hpos %d\n", work->crtc_id, min_udelay,
vblank->framedur_ns / 1000,
vblank->linedur_ns / 1000, stat, vpos, hpos);
/* do the flip (mmio) */ /* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */ /* set the flip status */
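
To see what the new repcnt/min_udelay clamp above does, plug in some illustrative numbers (assumed values, not from the patch):

/* linedur_ns = 20000 (20 us/line), framedur_ns = 16666667 (60 Hz), hpos = -50:
 *   min_udelay = (50 + 1) * max(20, 5) = 1020 us  <  framedur_ns / 2000 = 8333 us
 *   -> usleep_range() and retry, at most 4 times (repcnt)
 * a bogus hpos of -10000 would give roughly 200000 us > 8333 us
 *   -> repcnt is zeroed, the loop exits and the DRM_DEBUG_DRIVER line reports it
 */
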

View file

@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif #endif
/* topaz */ /* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
/* tonga */ /* tonga */
{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},

View file

@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) { list_for_each_entry(bo, &node->bos, mn_list) {
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
end))
continue; continue;
r = amdgpu_bo_reserve(bo, true); r = amdgpu_bo_reserve(bo, true);

View file

@ -33,6 +33,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_trace.h" #include "amdgpu_trace.h"
@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_OA); AMDGPU_GEM_DOMAIN_OA);
bo->flags = flags; bo->flags = flags;
/* For architectures that don't support WC memory,
* mask out the WC flag from the BO
*/
if (!drm_arch_can_wc_memory())
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
amdgpu_fill_placement_to_bo(bo, placement); amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */ /* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
} }
if (fpfn > bo->placements[i].fpfn) if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn; bo->placements[i].fpfn = fpfn;
if (lpfn && lpfn < bo->placements[i].lpfn) if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn; bo->placements[i].lpfn = lpfn;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
} }

View file

@ -595,11 +595,6 @@ force:
/* update display watermarks based on new power state */ /* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev); amdgpu_display_bandwidth_update(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */ /* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) { for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@ -616,6 +611,12 @@ force:
amdgpu_dpm_post_set_power_state(adev); amdgpu_dpm_post_set_power_state(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
if (adev->pm.funcs->force_performance_level) { if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) { if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;

View file

@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i]) if (fences[i])
fences[count++] = fences[i]; fences[count++] = fence_get(fences[i]);
if (count) { if (count) {
spin_unlock(&sa_manager->wq.lock); spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false, t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
for (i = 0; i < count; ++i)
fence_put(fences[i]);
r = (t > 0) ? 0 : t; r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock); spin_lock(&sa_manager->wq.lock);
} else { } else {

View file

@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
fence = to_amdgpu_fence(sync->sync_to[i]); fence = to_amdgpu_fence(sync->sync_to[i]);
/* check if we really need to sync */ /* check if we really need to sync */
if (!amdgpu_fence_need_sync(fence, ring)) if (!amdgpu_enable_scheduler &&
!amdgpu_fence_need_sync(fence, ring))
continue; continue;
/* prevent GPU deadlocks */ /* prevent GPU deadlocks */
@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
} }
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
r = fence_wait(&fence->base, true); r = fence_wait(sync->sync_to[i], true);
if (r) if (r)
return r; return r;
continue; continue;

View file

@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
while (--i) { while (i--) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0; gtt->ttm.dma_address[i] = 0;
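
The one-character change in the error path above matters for the partial-unmap cleanup: while (--i) never touches index 0 (and, with an unsigned counter, a failure on the very first page would make the pre-decrement wrap), whereas while (i--) walks every index that was actually mapped. A standalone illustration, assuming the failure happened at i == 3:

	unsigned int i = 3, j = 3, freed_old = 0, freed_new = 0;

	while (--i)	/* old form: visits 2, 1     -> index 0 is leaked  */
		freed_old++;
	while (j--)	/* new form: visits 2, 1, 0  -> everything freed   */
		freed_new++;
	/* freed_old == 2, freed_new == 3 */
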
@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr; return !!gtt->userptr;
} }
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long size;
if (gtt == NULL)
return false;
if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
return false;
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
return true;
}
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{ {
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
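
The new amdgpu_ttm_tt_affect_userptr() helper lets the MMU-notifier path shown earlier skip buffer objects whose user pages fall entirely outside the invalidated range; the test is a plain interval-overlap check. Worked example (numbers are illustrative only):

/* gtt->userptr = 0x10000, num_pages = 4, PAGE_SIZE = 4096
 *   -> the object spans [0x10000, 0x14000)
 * invalidation of [0x13000, 0x20000]: neither 0x10000 > 0x20000 nor
 *   0x14000 <= 0x13000, so it returns true and the BO is reserved
 * invalidation of [0x14000, 0x20000]: 0x14000 <= 0x14000, so it returns
 *   false and the BO is skipped
 */
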
@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
flags |= AMDGPU_PTE_SNOOPED; flags |= AMDGPU_PTE_SNOOPED;
} }
if (adev->asic_type >= CHIP_TOPAZ) if (adev->asic_type >= CHIP_TONGA)
flags |= AMDGPU_PTE_EXECUTABLE; flags |= AMDGPU_PTE_EXECUTABLE;
flags |= AMDGPU_PTE_READABLE; flags |= AMDGPU_PTE_READABLE;

View file

@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL; return -EINVAL;
/* make sure object fit at this offset */ /* make sure object fit at this offset */
eaddr = saddr + size; eaddr = saddr + size - 1;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL; return -EINVAL;
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) { if (last_pfn >= adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn); last_pfn, adev->vm_manager.max_pfn);
return -EINVAL; return -EINVAL;
} }
@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
eaddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE;
spin_lock(&vm->it_lock); spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock); spin_unlock(&vm->it_lock);
if (it) { if (it) {
struct amdgpu_bo_va_mapping *tmp; struct amdgpu_bo_va_mapping *tmp;
@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
INIT_LIST_HEAD(&mapping->list); INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr; mapping->it.start = saddr;
mapping->it.last = eaddr - 1; mapping->it.last = eaddr;
mapping->offset = offset; mapping->offset = offset;
mapping->flags = flags; mapping->flags = flags;
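
These amdgpu_vm_bo_map() hunks move to the inclusive end-address convention used by the kernel's interval trees (it.start and it.last are both inclusive). A quick check of the arithmetic with 4 KiB GPU pages (example values only):

/* saddr = 0x100000, size = 0x1000 (one GPU page)
 *   eaddr  = saddr + size - 1 = 0x100fff
 *   saddr /= AMDGPU_GPU_PAGE_SIZE -> 0x100
 *   eaddr /= AMDGPU_GPU_PAGE_SIZE -> 0x100
 * so it.start == it.last == 0x100: the mapping occupies exactly one page,
 * and interval_tree_iter_first(&vm->va, saddr, eaddr) is queried over the
 * same inclusive range.
 */
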
@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{ {
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8); AMDGPU_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size; unsigned pd_size, pd_entries;
int i, r; int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
pd_entries = amdgpu_vm_num_pdes(adev); pd_entries = amdgpu_vm_num_pdes(adev);
/* allocate page table array */ /* allocate page table array */
pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
if (vm->page_tables == NULL) { if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n"); DRM_ERROR("Cannot allocate memory for page table array\n");
return -ENOMEM; return -ENOMEM;
@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].bo); amdgpu_bo_unref(&vm->page_tables[i].bo);
kfree(vm->page_tables); drm_free_large(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory); amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence); fence_put(vm->page_directory_fence);

View file

@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr) unsigned vm_id, uint64_t pd_addr)
{ {
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) { if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */ /* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));

View file

@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin"); MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type != CHIP_STONEY) { if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) { if (!err) {
@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3))); /* equal */ WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, seq);

View file

@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] =
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 iceland_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_program_register_sequence(adev,
golden_settings_iceland_a11,
(const u32)ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
}
}
/** /**
* gmc8_mc_wait_for_idle - wait for MC idle callback. * gmc7_mc_wait_for_idle - wait for MC idle callback.
* *
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
* *
@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAWAII: case CHIP_HAWAII:
chip_name = "hawaii"; chip_name = "hawaii";
break; break;
case CHIP_TOPAZ:
chip_name = "topaz";
break;
case CHIP_KAVERI: case CHIP_KAVERI:
case CHIP_KABINI: case CHIP_KABINI:
return 0; return 0;
default: BUG(); default: BUG();
} }
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); if (adev->asic_type == CHIP_TOPAZ)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev); err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err) if (err)
goto out; goto out;
@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
int r; int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v7_0_init_golden_registers(adev);
gmc_v7_0_mc_program(adev); gmc_v7_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) { if (!(adev->flags & AMD_IS_APU)) {

View file

@ -42,9 +42,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
static const u32 golden_settings_tonga_a11[] = static const u32 golden_settings_tonga_a11[] =
{ {
@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
}; };
static const u32 golden_settings_iceland_a11[] =
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 iceland_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 cz_mgcg_cgcg_init[] = static const u32 cz_mgcg_cgcg_init[] =
{ {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_program_register_sequence(adev,
golden_settings_iceland_a11,
(const u32)ARRAY_SIZE(golden_settings_iceland_a11));
break;
case CHIP_FIJI: case CHIP_FIJI:
amdgpu_program_register_sequence(adev, amdgpu_program_register_sequence(adev,
fiji_mgcg_cgcg_init, fiji_mgcg_cgcg_init,
@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n"); DRM_DEBUG("\n");
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_TOPAZ:
chip_name = "topaz";
break;
case CHIP_TONGA: case CHIP_TONGA:
chip_name = "tonga"; chip_name = "tonga";
break; break;
case CHIP_FIJI: case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_CARRIZO: case CHIP_CARRIZO:
case CHIP_STONEY: case CHIP_STONEY:
return 0; return 0;
@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
gmc_v8_0_mc_program(adev); gmc_v8_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) { if (adev->asic_type == CHIP_TONGA) {
r = gmc_v8_0_mc_load_microcode(adev); r = gmc_v8_0_mc_load_microcode(adev);
if (r) { if (r) {
DRM_ERROR("Failed to load MC firmware!\n"); DRM_ERROR("Failed to load MC firmware!\n");

View file

@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
case AMDGPU_UCODE_ID_CP_ME: case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK; return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1: case AMDGPU_UCODE_ID_CP_MEC1:
return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
case AMDGPU_UCODE_ID_CP_MEC2: case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK; return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G: case AMDGPU_UCODE_ID_RLC_G:
@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
return -EINVAL; return -EINVAL;
} }
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) { &toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n"); DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
UCODE_ID_CP_ME_MASK | UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK | UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_MASK |
UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT1_MASK;
UCODE_ID_CP_MEC_JT2_MASK;
if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n"); DRM_ERROR("Fail to request SMU load ucode\n");

View file

@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
static int tonga_dpm_suspend(void *handle) static int tonga_dpm_suspend(void *handle)
{ {
return 0; return tonga_dpm_hw_fini(handle);
} }
static int tonga_dpm_resume(void *handle) static int tonga_dpm_resume(void *handle)
{ {
int ret; return tonga_dpm_hw_init(handle);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
ret = tonga_smu_start(adev);
if (ret) {
DRM_ERROR("SMU start failed\n");
goto fail;
}
fail:
mutex_unlock(&adev->pm.mutex);
return ret;
} }
static int tonga_dpm_set_clockgating_state(void *handle, static int tonga_dpm_set_clockgating_state(void *handle,

View file

@ -60,6 +60,7 @@
#include "vi.h" #include "vi.h"
#include "vi_dpm.h" #include "vi_dpm.h"
#include "gmc_v8_0.h" #include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h" #include "gfx_v8_0.h"
#include "sdma_v2_4.h" #include "sdma_v2_4.h"
#include "sdma_v3_0.h" #include "sdma_v3_0.h"
@ -1081,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_GMC, .type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8, .major = 7,
.minor = 0, .minor = 4,
.rev = 0, .rev = 0,
.funcs = &gmc_v8_0_ip_funcs, .funcs = &gmc_v7_0_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_IH, .type = AMD_IP_BLOCK_TYPE_IH,

View file

@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01); } while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004); data = ast_read32(ast, 0x10004);
if (data & 0x400) if (data & 0x40)
ast->dram_bus_width = 16; ast->dram_bus_width = 16;
else else
ast->dram_bus_width = 32; ast->dram_bus_width = 32;

View file

@ -798,12 +798,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
return mstb; return mstb;
} }
static void drm_dp_free_mst_port(struct kref *kref);
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
if (mstb->port_parent) {
if (list_empty(&mstb->port_parent->next))
kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
}
kfree(mstb);
}
static void drm_dp_destroy_mst_branch_device(struct kref *kref) static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{ {
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
struct drm_dp_mst_port *port, *tmp; struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false; bool wake_tx = false;
/*
* init kref again to be used by ports to remove mst branch when it is
* not needed anymore
*/
kref_init(kref);
if (mstb->port_parent && list_empty(&mstb->port_parent->next))
kref_get(&mstb->port_parent->kref);
/* /*
* destroy all ports - don't need lock * destroy all ports - don't need lock
* as there are no more references to the mst branch * as there are no more references to the mst branch
@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx) if (wake_tx)
wake_up(&mstb->mgr->tx_waitq); wake_up(&mstb->mgr->tx_waitq);
kfree(mstb);
kref_put(kref, drm_dp_free_mst_branch_device);
} }
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */ * from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock); mutex_lock(&mgr->destroy_connector_lock);
kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list); list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock); mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work); schedule_work(&mgr->destroy_connector_work);
@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad) u8 *rad)
{ {
int lct = port->parent->lct; int parent_lct = port->parent->lct;
int shift = 4; int shift = 4;
int idx = lct / 2; int idx = (parent_lct - 1) / 2;
if (lct > 1) { if (parent_lct > 1) {
memcpy(rad, port->parent->rad, idx); memcpy(rad, port->parent->rad, idx + 1);
shift = (lct % 2) ? 4 : 0; shift = (parent_lct % 2) ? 4 : 0;
} else } else
rad[0] = 0; rad[0] = 0;
rad[idx] |= port->port_num << shift; rad[idx] |= port->port_num << shift;
return lct + 1; return parent_lct + 1;
} }
/* /*
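
drm_dp_calculate_rad() packs one 4-bit port number per hop into the relative-address bytes, high nibble first. A small worked case (topology values assumed for illustration):

/* parent branch: lct = 2, rad[0] = 0x10   (first hop through port 1)
 * child on port 3 of that parent:
 *   idx   = (parent_lct - 1) / 2 = 0
 *   shift = (parent_lct % 2) ? 4 : 0 = 0
 *   rad[0] = 0x10 | (3 << 0) = 0x13, returned lct = 3
 * With the old idx = lct / 2, this nibble would have landed in rad[1]
 * instead for this case.
 */
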
@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link; return send_link;
} }
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
struct drm_dp_mst_port *port)
{ {
int ret; int ret;
if (port->dpcd_rev >= 0x12) {
port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); memcpy(mstb->guid, guid, 16);
if (!port->guid_valid) {
ret = drm_dp_send_dpcd_write(mstb->mgr, if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
port, if (mstb->port_parent) {
DP_GUID, ret = drm_dp_send_dpcd_write(
16, port->guid); mstb->mgr,
port->guid_valid = true; mstb->port_parent,
DP_GUID,
16,
mstb->guid);
} else {
ret = drm_dp_dpcd_write(
mstb->mgr->aux,
DP_GUID,
mstb->guid,
16);
} }
} }
} }
@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) { for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4; int shift = (i % 2) ? 0 : 4;
int port_num = mstb->rad[i / 2] >> shift; int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
snprintf(temp, sizeof(temp), "-%d", port_num); snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size); strlcat(proppath, temp, proppath_size);
} }
@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision; port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams; port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference /* manage mstb port lists with mgr lock - take a reference
for this list */ for this list */
@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) { if (old_ddps != port->ddps) {
if (port->ddps) { if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
if (!port->input) if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else { } else {
port->guid_valid = false;
port->available_pbn = 0; port->available_pbn = 0;
} }
} }
@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) { if (old_ddps != port->ddps) {
if (port->ddps) { if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
dowork = true; dowork = true;
} else { } else {
port->guid_valid = false;
port->available_pbn = 0; port->available_pbn = 0;
} }
} }
@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
for (i = 0; i < lct - 1; i++) { for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4; int shift = (i % 2) ? 0 : 4;
int port_num = rad[i / 2] >> shift; int port_num = (rad[i / 2] >> shift) & 0xf;
list_for_each_entry(port, &mstb->ports, next) { list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) { if (port->port_num == port_num) {
@ -1210,6 +1237,48 @@ out:
return mstb; return mstb;
} }
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
return found_mstb;
}
return NULL;
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
struct drm_dp_mst_topology_mgr *mgr,
uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
if (mstb)
kref_get(&mstb->kref);
mutex_unlock(&mgr->lock);
return mstb;
}
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb) struct drm_dp_mst_branch *mstb)
{ {
@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg) struct drm_dp_sideband_msg_tx *txmsg)
{ {
struct drm_dp_mst_branch *mstb = txmsg->dst; struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
/* both msg slots are full */ /* both msg slots are full */
if (txmsg->seqno == -1) { if (txmsg->seqno == -1) {
@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
txmsg->seqno = 1; txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg; mstb->tx_slots[txmsg->seqno] = txmsg;
} }
hdr->broadcast = 0;
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
hdr->broadcast = 1;
else
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg; hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct; hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1; hdr->lcr = mstb->lct - 1;
@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
} }
/* called holding qlock */ /* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{ {
struct drm_dp_sideband_msg_tx *txmsg;
int ret; int ret;
/* construct a chunk from the first msg in the tx_msg queue */ /* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_upq)) {
mgr->tx_up_in_progress = false;
return;
}
txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, true); ret = process_single_tx_qlock(mgr, txmsg, true);
if (ret == 1) {
/* up txmsgs aren't put in slots - so free after we send it */ if (ret != 1)
list_del(&txmsg->next);
kfree(txmsg);
} else if (ret)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
mgr->tx_up_in_progress = true;
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
} }
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams, txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
} }
drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
} }
@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return 0; return 0;
} }
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
if (!mstb->port_parent)
return NULL;
if (mstb->port_parent->mstb != mstb)
return mstb->port_parent;
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
int *port_num)
{
struct drm_dp_mst_branch *rmstb = NULL;
struct drm_dp_mst_port *found_port;
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
if (found_port) {
rmstb = found_port->parent;
kref_get(&rmstb->kref);
*port_num = found_port->port_num;
}
}
mutex_unlock(&mgr->lock);
return rmstb;
}
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, struct drm_dp_mst_port *port,
int id, int id,
@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{ {
struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb; struct drm_dp_mst_branch *mstb;
int len, ret; int len, ret, port_num;
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) if (!mstb) {
return -EINVAL; mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
if (!mstb)
return -EINVAL;
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) { if (!txmsg) {
@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
} }
txmsg->dst = mstb; txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port->port_num, len = build_allocate_payload(txmsg, port_num,
id, id,
pbn); pbn);
@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_encode_up_ack_reply(txmsg, req_type); drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock); mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
if (!mgr->tx_up_in_progress) { process_single_up_tx_qlock(mgr, txmsg);
process_single_up_tx_qlock(mgr);
}
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
kfree(txmsg);
return 0; return 0;
} }
@ -1927,6 +2035,12 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb; mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref); kref_get(&mgr->mst_primary->kref);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
{ {
struct drm_dp_payload reset_pay; struct drm_dp_payload reset_pay;
reset_pay.start_slot = 0; reset_pay.start_slot = 0;
@ -1934,26 +2048,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
} }
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
/* sort out guid */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
if (ret != 16) {
DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
goto out_unlock;
}
mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
if (!mgr->guid_valid) {
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
mgr->guid_valid = true;
}
queue_work(system_long_wq, &mgr->work); queue_work(system_long_wq, &mgr->work);
ret = 0; ret = 0;
@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->up_req_recv.have_eomt) { if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_mst_branch *mstb; struct drm_dp_mst_branch *mstb = NULL;
bool seqno; bool seqno;
mstb = drm_dp_get_mst_branch_device(mgr,
mgr->up_req_recv.initial_hdr.lct, if (!mgr->up_req_recv.initial_hdr.broadcast) {
mgr->up_req_recv.initial_hdr.rad); mstb = drm_dp_get_mst_branch_device(mgr,
if (!mstb) { mgr->up_req_recv.initial_hdr.lct,
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); mgr->up_req_recv.initial_hdr.rad);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); if (!mstb) {
return 0; DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
} }
seqno = mgr->up_req_recv.initial_hdr.seqno; seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
if (!mstb)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
drm_dp_update_port(mstb, &msg.u.conn_stat); drm_dp_update_port(mstb, &msg.u.conn_stat);
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr); (*mgr->cbs->hotplug)(mgr);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
if (!mstb)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
} }
@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) { if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots; *slots = port->vcpi.num_slots;
drm_dp_put_port(port);
return true; return true;
} }
} }
@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/ */
int drm_dp_calc_pbn_mode(int clock, int bpp) int drm_dp_calc_pbn_mode(int clock, int bpp)
{ {
fixed20_12 pix_bw; u64 kbps;
fixed20_12 fbpp; s64 peak_kbps;
fixed20_12 result; u32 numerator;
fixed20_12 margin, tmp; u32 denominator;
u32 res;
pix_bw.full = dfixed_const(clock); kbps = clock * bpp;
fbpp.full = dfixed_const(bpp);
tmp.full = dfixed_const(8);
fbpp.full = dfixed_div(fbpp, tmp);
result.full = dfixed_mul(pix_bw, fbpp); /*
margin.full = dfixed_const(54); * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
tmp.full = dfixed_const(64); * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
margin.full = dfixed_div(margin, tmp); * common multiplier to render an integer PBN for all link rate/lane
result.full = dfixed_div(result, margin); * counts combinations
* calculate
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*/
margin.full = dfixed_const(1006); numerator = 64 * 1006;
tmp.full = dfixed_const(1000); denominator = 54 * 8 * 1000 * 1000;
margin.full = dfixed_div(margin, tmp);
result.full = dfixed_mul(result, margin);
result.full = dfixed_div(result, tmp); kbps *= numerator;
result.full = dfixed_ceil(result); peak_kbps = drm_fixp_from_fraction(kbps, denominator);
res = dfixed_trunc(result);
return res; return drm_fixp2int_ceil(peak_kbps);
} }
EXPORT_SYMBOL(drm_dp_calc_pbn_mode); EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
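
The rewritten drm_dp_calc_pbn_mode() above drops the radeon fixed-point helpers in favour of plain 64-bit arithmetic: PBN = ceil(clock_khz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)). A small user-space sketch for checking the constants on a host machine (an assumption for illustration, not part of the patch; clock in kHz and bpp in bits per pixel, as in the self-test that follows):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as above: 0.6% margin, 64/54 framing overhead,
 * result in PBN units of 54/64 MBytes/sec, rounded up */
static int pbn(uint64_t clock_khz, uint64_t bpp)
{
	uint64_t num = clock_khz * bpp * 64 * 1006;
	uint64_t den = 54 * 8 * 1000 * 1000;

	return (num + den - 1) / den;	/* ceiling, like drm_fixp2int_ceil() */
}

int main(void)
{
	/* expected: 689 1047 1063, matching test_calc_pbn_mode() */
	printf("%d %d %d\n", pbn(154000, 30), pbn(234000, 30), pbn(297000, 24));
	return 0;
}
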
@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
{ {
int ret; int ret;
ret = drm_dp_calc_pbn_mode(154000, 30); ret = drm_dp_calc_pbn_mode(154000, 30);
if (ret != 689) if (ret != 689) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
154000, 30, 689, ret);
return -EINVAL; return -EINVAL;
}
ret = drm_dp_calc_pbn_mode(234000, 30); ret = drm_dp_calc_pbn_mode(234000, 30);
if (ret != 1047) if (ret != 1047) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
234000, 30, 1047, ret);
return -EINVAL; return -EINVAL;
}
ret = drm_dp_calc_pbn_mode(297000, 24);
if (ret != 1063) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
297000, 24, 1063, ret);
return -EINVAL;
}
return 0; return 0;
} }
@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
} }
static void drm_dp_free_mst_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
kfree(port);
}
static void drm_dp_destroy_connector_work(struct work_struct *work) static void drm_dp_destroy_connector_work(struct work_struct *work)
{ {
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next); list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock); mutex_unlock(&mgr->destroy_connector_lock);
kref_init(&port->kref);
INIT_LIST_HEAD(&port->next);
mgr->cbs->destroy_connector(mgr, port->connector); mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt); drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0) if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); if (mgr->mst_state) {
kfree(port); drm_dp_mst_reset_vcpi_slots(mgr, port);
drm_dp_update_payload_part1(mgr);
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
}
kref_put(&port->kref, drm_dp_free_mst_port);
send_hotplug = true; send_hotplug = true;
} }
if (send_hotplug) if (send_hotplug)
@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->qlock); mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock); mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock); mutex_init(&mgr->destroy_connector_lock);
INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq); INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list); INIT_LIST_HEAD(&mgr->destroy_connector_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);

View file

@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
} }
/*
* Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
* interval? If so then vblank irqs keep running and it will likely
* happen that the hardware vblank counter is not trustworthy as it
* might reset at some point in that interval and vblank timestamps
* are not trustworthy either in that interval. Iow. this can result
* in a bogus diff >> 1 which must be avoided as it would cause
* random large forward jumps of the software vblank counter.
*/
if (diff > 1 && (vblank->inmodeset & 0x2)) {
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
" due to pre-modeset.\n", pipe, diff);
diff = 1;
}
/*
* FIMXE: Need to replace this hack with proper seqlocks.
*
* Restrict the bump of the software vblank counter to a safe maximum
* value of +1 whenever there is the possibility that concurrent readers
* of vblank timestamps could be active at the moment, as the current
* implementation of the timestamp caching and updating is not safe
* against concurrent readers for calls to store_vblank() with a bump
* of anything but +1. A bump != 1 would very likely return corrupted
* timestamps to userspace, because the same slot in the cache could
* be concurrently written by store_vblank() and read by one of those
* readers without the read-retry logic detecting the collision.
*
* Concurrent readers can exist when we are called from the
* drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
* irq callers. However, all those calls to us are happening with the
* vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
* can't increase while we are executing. Therefore a zero refcount at
* this point is safe for arbitrary counter bumps if we are called
* outside vblank irq, a non-zero count is not 100% safe. Unfortunately
* we must also accept a refcount of 1, as whenever we are called from
* drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
* we must let that one pass through in order to not lose vblank counts
* during vblank irq off - which would completely defeat the whole
* point of this routine.
*
* Whenever we are called from vblank irq, we have to assume concurrent
* readers exist or can show up any time during our execution, even if
* the refcount is currently zero, as vblank irqs are usually only
* enabled due to the presence of readers, and because when we are called
* from vblank irq we can't hold the vbl_lock to protect us from sudden
* bumps in vblank refcount. Therefore also restrict bumps to +1 when
* called from vblank irq.
*/
if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
(flags & DRM_CALLED_FROM_VBLIRQ))) {
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
"refcount %u, vblirq %u\n", pipe, diff,
atomic_read(&vblank->refcount),
(flags & DRM_CALLED_FROM_VBLIRQ) != 0);
diff = 1;
}
DRM_DEBUG_VBL("updating vblank count on crtc %u:" DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n", " current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last); pipe, vblank->count, diff, cur_vblank, vblank->last);
@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
spin_lock_irqsave(&dev->event_lock, irqflags); spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock); spin_lock(&dev->vbl_lock);
vblank_disable_and_save(dev, pipe); DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
/* Avoid redundant vblank disables without previous drm_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
vblank_disable_and_save(dev, pipe);
wake_up(&vblank->queue); wake_up(&vblank->queue);
/* /*
@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
return; return;
spin_lock_irqsave(&dev->vbl_lock, irqflags); spin_lock_irqsave(&dev->vbl_lock, irqflags);
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
/* Drop our private "prevent drm_vblank_get" refcount */ /* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) { if (vblank->inmodeset) {
atomic_dec(&vblank->refcount); atomic_dec(&vblank->refcount);
@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
* re-enable interrupts if there are users left, or the * re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time. * user wishes vblank interrupts to be enabled all the time.
*/ */
if (atomic_read(&vblank->refcount) != 0 || if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
WARN_ON(drm_vblank_enable(dev, pipe)); WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags); spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
} }
@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
if (vblank->inmodeset) { if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags); spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true; dev->vblank_disable_allowed = true;
drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags); spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2) if (vblank->inmodeset & 0x2)

View file

@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret; return ret;
} }
/* We have the initial and handle reference but need only one now */ /* We have the initial and handle reference but need only one now */
drm_gem_object_unreference(&r->gem); drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle; *handlep = handle;
return 0; return 0;
} }

View file

@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_gem_stolen; goto cleanup_gem_stolen;
intel_setup_gmbus(dev);
/* Important: The output setup functions called by modeset_init need /* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev); intel_modeset_init(dev);
@ -451,6 +453,7 @@ cleanup_gem:
cleanup_irq: cleanup_irq:
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev); drm_irq_uninstall(dev);
intel_teardown_gmbus(dev);
cleanup_gem_stolen: cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo: cleanup_vga_switcheroo:
@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Try to make sure MCHBAR is enabled before poking at it */ /* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev); intel_setup_mchbar(dev);
intel_setup_gmbus(dev);
intel_opregion_setup(dev); intel_opregion_setup(dev);
i915_gem_load(dev); i915_gem_load(dev);
@ -1099,7 +1101,6 @@ out_gem_unload:
if (dev->pdev->msi_enabled) if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev); pci_disable_msi(dev->pdev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_csr_ucode_fini(dev); intel_csr_ucode_fini(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->hotplug.dp_wq); destroy_workqueue(dev_priv->hotplug.dp_wq);

View file

@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev)); WARN_ON(!IS_SKYLAKE(dev));
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
pch->subsystem_device == 0x1100)) {
dev_priv->pch_type = intel_virt_detect_pch(dev); dev_priv->pch_type = intel_virt_detect_pch(dev);
} else } else
continue; continue;

View file

@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)

View file

@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
i915_gem_context_unreference(lctx); i915_gem_context_unreference(lctx);
ring->last_context = NULL; ring->last_context = NULL;
} }
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
} }
} }
@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret) if (ret)
goto unpin_out; goto unpin_out;
if (!to->legacy_hw_ctx.initialized) { if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
hw_flags |= MI_RESTORE_INHIBIT; hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to /* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address * die because future work may end up depending on valid address

Some files were not shown because too many files have changed in this diff.