Merge tag 'v4.4.6' into linux-linaro-lsk-v4.4

This is the 4.4.6 stable release
Alex Shi 2016-03-17 12:51:14 +08:00
commit 6d0b88c88b
65 changed files with 466 additions and 164 deletions


@@ -23,6 +23,7 @@ Optional properties:
   during suspend.
 - ti,no-reset-on-init: When present, the module should not be reset at init
 - ti,no-idle-on-init: When present, the module should not be idled at init
+- ti,no-idle: When present, the module is never allowed to idle.
 
 Example:


@@ -358,7 +358,8 @@ In the first case there are two additional complications:
 - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
   the kernel may now execute it.  We handle this by also setting spte.nx.
   If we get a user fetch or read fault, we'll change spte.u=1 and
-  spte.nx=gpte.nx back.
+  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
+  shadow paging is in use.
 - if CR4.SMAP is disabled: since the page has been changed to a kernel
   page, it can not be reused when CR4.SMAP is enabled. We set
   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
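
[Editorial note: the CR0.WP/SMEP adjustment above is easier to follow as a toy model. A minimal userspace sketch, illustrative only and not KVM code; the struct and helpers are invented for the example:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative shadow-PTE bits for a guest gpte with gpte.u=1, gpte.w=0 */
struct spte { bool u, w, nx; };

/* Supervisor write fault with CR0.WP=0: remap as a writable kernel page.
 * With SMEP on, the kernel must not execute it, so nx is set too; nx only
 * takes effect when EFER.NX=1, hence KVM forcing it under shadow paging. */
static struct spte su_write_fixup(struct spte s)
{
	s.u = false;
	s.w = true;
	s.nx = true;
	return s;
}

/* Later user fetch/read fault: restore spte.u=1 and spte.nx=gpte.nx */
static struct spte user_fault_fixup(struct spte s, bool gpte_nx)
{
	s.u = true;
	s.nx = gpte_nx;
	return s;
}

int main(void)
{
	struct spte s = { .u = true, .w = false, .nx = false };

	s = su_write_fixup(s);
	printf("after su write:   u=%d w=%d nx=%d\n", s.u, s.w, s.nx);
	s = user_fault_fixup(s, false);
	printf("after user fault: u=%d w=%d nx=%d\n", s.u, s.w, s.nx);
	return 0;
}
]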


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 5
+SUBLEVEL = 6
 EXTRAVERSION =
 NAME = Blurry Fish Butt


@@ -70,8 +70,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		pcie-controller {
 			status = "okay";


@@ -76,8 +76,8 @@
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
 			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		devbus-bootcs {
 			status = "okay";


@@ -95,8 +95,8 @@
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
 			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		devbus-bootcs {
 			status = "okay";


@@ -65,8 +65,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		pcie-controller {
 			status = "okay";


@@ -70,8 +70,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		pcie-controller {
 			status = "okay";


@@ -68,8 +68,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		internal-regs {
 			serial@12000 {


@@ -64,8 +64,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		pcie-controller {
 			status = "okay";


@@ -65,9 +65,9 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		devbus-bootcs {
 			status = "okay";


@@ -78,8 +78,8 @@
 	soc {
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
 		pcie-controller {
 			status = "okay";


@@ -1497,6 +1497,16 @@
 			       0x48485200 0x2E00>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+
+			/*
+			 * Do not allow gating of cpsw clock as workaround
+			 * for errata i877. Keeping internal clock disabled
+			 * causes the device switching characteristics
+			 * to degrade over time and eventually fail to meet
+			 * the data manual delay time/skew specs.
+			 */
+			ti,no-idle;
+
 			/*
 			 * rx_thresh_pend
 			 * rx_pend


@@ -2200,6 +2200,11 @@ static int _enable(struct omap_hwmod *oh)
  */
 static int _idle(struct omap_hwmod *oh)
 {
+	if (oh->flags & HWMOD_NO_IDLE) {
+		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+		return 0;
+	}
+
 	pr_debug("omap_hwmod: %s: idling\n", oh->name);
 
 	if (oh->_state != _HWMOD_STATE_ENABLED) {
@@ -2504,6 +2509,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 			oh->flags |= HWMOD_INIT_NO_RESET;
 		if (of_find_property(np, "ti,no-idle-on-init", NULL))
 			oh->flags |= HWMOD_INIT_NO_IDLE;
+		if (of_find_property(np, "ti,no-idle", NULL))
+			oh->flags |= HWMOD_NO_IDLE;
 	}
 
 	oh->_state = _HWMOD_STATE_INITIALIZED;
@@ -2630,7 +2637,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
 	 * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
 	 * it should be set by the core code as a runtime flag during startup
 	 */
-	if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+	if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
 	    (postsetup_state == _HWMOD_STATE_IDLE)) {
 		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
 		postsetup_state = _HWMOD_STATE_ENABLED;


@@ -525,6 +525,8 @@ struct omap_hwmod_omap4_prcm {
  *     or idled.
  * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
  *     operate and they need to be handled at the same time as the main_clk.
+ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
+ *     IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
  */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -541,6 +543,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
 #define HWMOD_RECONFIG_IO_CHAIN		(1 << 13)
 #define HWMOD_OPT_CLKS_NEEDED			(1 << 14)
+#define HWMOD_NO_IDLE				(1 << 15)
 
 /*
  * omap_hwmod._int_flags definitions


@@ -40,7 +40,7 @@
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *	fixed mappings and modules
  */
-#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START	(VA_START)
@@ -52,7 +52,8 @@
 #define VMALLOC_END	(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START	(VMALLOC_END + SZ_64K)
-#define vmemmap		((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap		((struct page *)VMEMMAP_START - \
+			 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
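
[Editorial note: rough arithmetic behind the VMEMMAP_SIZE change, as a standalone sketch. VA_BITS=48, PAGE_SHIFT=12 and a 64-byte struct page are assumed example values; the real ones depend on the kernel configuration. Dropping the "- 1" doubles the reserved window so the struct-page array can span the full VA_BITS range once its base is only section-aligned:

#include <stdio.h>

/* Assumed example values; the kernel derives these from its config. */
#define VA_BITS		48
#define PAGE_SHIFT	12
#define STRUCT_PAGE_SZ	64UL

int main(void)
{
	unsigned long old_sz = (1UL << (VA_BITS - PAGE_SHIFT - 1)) * STRUCT_PAGE_SZ;
	unsigned long new_sz = (1UL << (VA_BITS - PAGE_SHIFT)) * STRUCT_PAGE_SZ;

	/* prints 2048 GiB before the fix, 4096 GiB after (pre-ALIGN) */
	printf("old: %lu GiB, new: %lu GiB\n", old_sz >> 30, new_sz >> 30);
	return 0;
}
]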


@@ -2155,7 +2155,7 @@ config MIPS_MT_SMP
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
-	select MIPS_GIC_IPI
+	select MIPS_GIC_IPI if MIPS_GIC
 	select MIPS_MT
 	select SMP
 	select SMP_UP
@@ -2253,7 +2253,7 @@ config MIPS_VPE_APSP_API_MT
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
 	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
-	select MIPS_GIC_IPI
+	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K
 	select SYS_SUPPORTS_SMP
@@ -2273,7 +2273,7 @@ config MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
-	select MIPS_GIC_IPI
+	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
 	select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2292,6 +2292,7 @@ config MIPS_CPS_PM
 	bool
 
 config MIPS_GIC_IPI
+	depends on MIPS_GIC
 	bool
 
 config MIPS_CM


@@ -121,6 +121,7 @@ static inline void calculate_cpu_foreign_map(void)
 	cpumask_t temp_foreign_map;
 
 	/* Re-calculate the mask */
+	cpumask_clear(&temp_foreign_map);
 	for_each_online_cpu(i) {
 		core_present = 0;
 		for_each_cpu(k, &temp_foreign_map)


@@ -157,7 +157,8 @@
 #define OPAL_LEDS_GET_INDICATOR			114
 #define OPAL_LEDS_SET_INDICATOR			115
 #define OPAL_CEC_REBOOT2			116
-#define OPAL_LAST				116
+#define OPAL_CONSOLE_FLUSH			117
+#define OPAL_LAST				117
 
 /* Device tree flags */


@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
 			  uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
 					__be64 *length);
+int64_t opal_console_flush(int64_t term_number);
 int64_t opal_rtc_read(__be32 *year_month_day,
 		      __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+extern void opal_kmsg_init(void);
+
 extern int opal_event_request(unsigned int opal_event_nr);
 
 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,


@@ -335,7 +335,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 		if (syms[i].st_shndx == SHN_UNDEF) {
 			char *name = strtab + syms[i].st_name;
 			if (name[0] == '.')
-				memmove(name, name+1, strlen(name));
+				syms[i].st_name++;
 		}
 	}
 }


@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r6, VCPU_ACOP(r9)
 	stw	r7, VCPU_GUEST_PID(r9)
 	std	r8, VCPU_WORT(r9)
+	/*
+	 * Restore various registers to 0, where non-zero values
+	 * set by the guest could disrupt the host.
+	 */
+	li	r0, 0
+	mtspr	SPRN_IAMR, r0
+	mtspr	SPRN_CIABR, r0
+	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_TCSCR, r0
+	mtspr	SPRN_WORT, r0
+	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+	li	r0, 1
+	sldi	r0, r0, 31
+	mtspr	SPRN_MMCRS, r0
 8:
 	/* Save and reset AMR and UAMOR before turning on the MMU */


@@ -2,6 +2,7 @@ obj-y			+= setup.o opal-wrappers.o opal.o opal-async.o idle.o
 obj-y			+= opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y			+= rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y			+= opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
+obj-y			+= opal-kmsg.o
 
 obj-$(CONFIG_SMP)	+= smp.o subcore.o subcore-asm.o
 obj-$(CONFIG_PCI)	+= pci.o pci-p5ioc2.o pci-ioda.o


@@ -0,0 +1,75 @@
+/*
+ * kmsg dumper that ensures the OPAL console fully flushes panic messages
+ *
+ * Author: Russell Currey <ruscur@russell.cc>
+ *
+ * Copyright 2015 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kmsg_dump.h>
+
+#include <asm/opal.h>
+#include <asm/opal-api.h>
+
+/*
+ * Console output is controlled by OPAL firmware.  The kernel regularly calls
+ * OPAL_POLL_EVENTS, which flushes some console output.  In a panic state,
+ * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message
+ * may not be completely printed.  This function does not actually dump the
+ * message, it just ensures that OPAL completely flushes the console buffer.
+ */
+static void force_opal_console_flush(struct kmsg_dumper *dumper,
+				     enum kmsg_dump_reason reason)
+{
+	int i;
+	int64_t ret;
+
+	/*
+	 * Outside of a panic context the pollers will continue to run,
+	 * so we don't need to do any special flushing.
+	 */
+	if (reason != KMSG_DUMP_PANIC)
+		return;
+
+	if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
+		ret = opal_console_flush(0);
+
+		if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
+			return;
+
+		/* Incrementally flush until there's nothing left */
+		while (opal_console_flush(0) != OPAL_SUCCESS);
+	} else {
+		/*
+		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+		 * the console can still be flushed by calling the polling
+		 * function enough times to flush the buffer.  We don't know
+		 * how much output still needs to be flushed, but we can be
+		 * generous since the kernel is in panic and doesn't need
+		 * to do much else.
+		 */
+		printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
+		for (i = 0; i < 1024; i++) {
+			opal_poll_events(NULL);
+		}
+	}
+}
+
+static struct kmsg_dumper opal_kmsg_dumper = {
+	.dump = force_opal_console_flush
+};
+
+void __init opal_kmsg_init(void)
+{
+	int rc;
+
+	/* Add our dumper to the list */
+	rc = kmsg_dump_register(&opal_kmsg_dumper);
+	if (rc != 0)
+		pr_err("opal: kmsg_dump_register failed; returned %d\n", rc);
+}


@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase,			OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,				OPAL_PRD_MSG);
 OPAL_CALL(opal_leds_get_ind,			OPAL_LEDS_GET_INDICATOR);
 OPAL_CALL(opal_leds_set_ind,			OPAL_LEDS_SET_INDICATOR);
+OPAL_CALL(opal_console_flush,			OPAL_CONSOLE_FLUSH);


@@ -758,6 +758,9 @@ static int __init opal_init(void)
 	opal_pdev_init(opal_node, "ibm,opal-flash");
 	opal_pdev_init(opal_node, "ibm,opal-prd");
 
+	/* Initialise OPAL kmsg dumper for flushing console on panic */
+	opal_kmsg_init();
+
 	return 0;
 }
 machine_subsys_initcall(powernv, opal_init);


@@ -15,17 +15,25 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.list_lock);
+	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
 	mm->context.flush_mm = 0;
-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 #endif
-	mm->context.asce_limit = STACK_TOP_MAX;
+	if (mm->context.asce_limit == 0) {
+		/* context created by exec, set asce limit to 4TB */
+		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+		mm->context.asce_limit = STACK_TOP_MAX;
+	} else if (mm->context.asce_limit == (1UL << 31)) {
+		mm_inc_nr_pmds(mm);
+	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
 }
@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
 				 struct mm_struct *mm)
 {
-	if (oldmm->context.asce_limit < mm->context.asce_limit)
-		crst_table_downgrade(mm, oldmm->context.asce_limit);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
static inline void arch_exit_mmap(struct mm_struct *mm) static inline void arch_exit_mmap(struct mm_struct *mm)


@@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	spin_lock_init(&mm->context.list_lock);
-	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	INIT_LIST_HEAD(&mm->context.gmap_list);
-	return (pgd_t *) crst_table_alloc(mm);
+	unsigned long *table = crst_table_alloc(mm);
+
+	if (!table)
+		return NULL;
+	if (mm->context.asce_limit == (1UL << 31)) {
+		/* Forking a compat process with 2 page table levels */
+		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+			crst_table_free(mm, table);
+			return NULL;
+		}
+	}
+	return (pgd_t *) table;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	if (mm->context.asce_limit == (1UL << 31))
+		pgtable_pmd_page_dtor(virt_to_page(pgd));
+	crst_table_free(mm, (unsigned long *) pgd);
 }
-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
 static inline void pmd_populate(struct mm_struct *mm,
 				pmd_t *pmd, pgtable_t pte)


@@ -2249,7 +2249,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 
 	/* manually convert vector registers if necessary */
 	if (MACHINE_HAS_VX) {
-		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 				     fprs, 128);
 	} else {


@@ -3754,13 +3754,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
 	/*
 	 * Passing "true" to the last argument is okay; it adds a check
 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
 	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
-				context->shadow_root_level, context->nx,
+				context->shadow_root_level, uses_nx,
 				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
 				true);
 }


@@ -1748,6 +1748,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			return;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		/* PEBS needs a quiescent period after being disabled (to write
+		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
+		 * provide that period, so a CPU could write host's record into
+		 * guest's memory.
+		 */
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
 	for (i = 0; i < m->nr; ++i)
@@ -1785,26 +1792,31 @@ static void reload_tss(void)
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-	u64 guest_efer;
-	u64 ignore_bits;
+	u64 guest_efer = vmx->vcpu.arch.efer;
+	u64 ignore_bits = 0;
 
-	guest_efer = vmx->vcpu.arch.efer;
+	if (!enable_ept) {
+		/*
+		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
+		 * host CPUID is more efficient than testing guest CPUID
+		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
+		 */
+		if (boot_cpu_has(X86_FEATURE_SMEP))
+			guest_efer |= EFER_NX;
+		else if (!(guest_efer & EFER_NX))
+			ignore_bits |= EFER_NX;
+	}
 
 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
-	 * outside long mode
+	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
 	 */
-	ignore_bits = EFER_NX | EFER_SCE;
+	ignore_bits |= EFER_SCE;
 #ifdef CONFIG_X86_64
 	ignore_bits |= EFER_LMA | EFER_LME;
 	/* SCE is meaningful only in long mode on Intel */
 	if (guest_efer & EFER_LMA)
 		ignore_bits &= ~(u64)EFER_SCE;
 #endif
-	guest_efer &= ~ignore_bits;
-	guest_efer |= host_efer & ignore_bits;
-	vmx->guest_msrs[efer_offset].data = guest_efer;
-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
 
 	clear_atomic_switch_msr(vmx, MSR_EFER);
@@ -1815,16 +1827,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	 */
 	if (cpu_has_load_ia32_efer ||
 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
-		guest_efer = vmx->vcpu.arch.efer;
 		if (!(guest_efer & EFER_LMA))
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
 					      guest_efer, host_efer);
 		return false;
-	}
+	} else {
+		guest_efer &= ~ignore_bits;
+		guest_efer |= host_efer & ignore_bits;
 
-	return true;
+		vmx->guest_msrs[efer_offset].data = guest_efer;
+		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+		return true;
+	}
 }
 
 static unsigned long segment_base(u16 selector)


@@ -414,24 +414,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	unsigned long phys_addr, offset;
+	phys_addr_t phys_addr;
+	unsigned long offset;
 	enum pg_level level;
 	pte_t *pte;
 
 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);
 
+	/*
+	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * make 32-PAE kernel work correctly.
+	 */
 	switch (level) {
 	case PG_LEVEL_1G:
-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PUD_PAGE_MASK;
 		break;
 	case PG_LEVEL_2M:
-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PMD_PAGE_MASK;
 		break;
 	default:
-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PAGE_MASK;
 	}


@@ -176,6 +176,7 @@
 #define AT_XDMAC_MAX_CHAN	0x20
 #define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
 #define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1383,8 +1384,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	struct at_xdmac_desc	*desc, *_desc;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
-	int			residue;
-	u32			cur_nda, mask, value;
+	int			residue, retry;
+	u32			cur_nda, check_nda, cur_ubc, mask, value;
 	u8			dwidth = 0;
 	unsigned long		flags;
@@ -1421,7 +1422,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		cpu_relax();
 	}
 
+	/*
+	 * When processing the residue, we need to read two registers but we
+	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
+	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
+	 * to know how many data are remaining for the current descriptor.
+	 * Since the dma channel is not paused to not loose data, between the
+	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
+	 * descriptor.
+	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
+	 * still using the same descriptor by reading a second time
+	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
+	 * read again AT_XDMAC_CUBC.
+	 * Memory barriers are used to ensure the read order of the registers.
+	 * A max number of retries is set because unlikely it can never ends if
+	 * we are transferring a lot of data with small buffers.
+	 */
 	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+	rmb();
+	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+		rmb();
+		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+		if (likely(cur_nda == check_nda))
+			break;
+
+		cur_nda = check_nda;
+		rmb();
+		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+	}
+
+	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+		ret = DMA_ERROR;
+		goto spin_unlock;
+	}
+
 	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current
@@ -1434,7 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
 			break;
 	}
-	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+	residue += cur_ubc << dwidth;
 	dma_set_residue(txstate, residue);
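
[Editorial note: the long comment above describes a generic consistent-snapshot idiom: read register A, read B, re-read A; if A moved, the B value may belong to a different descriptor, so retry up to a budget. A driver-agnostic sketch of the same pattern; the names are invented for the example, and the rmb() barriers the driver inserts between reads are elided:

#include <stdbool.h>

#define MAX_RETRIES 5

struct regs { volatile unsigned a, b; };

/* Read a consistent (a, b) pair from hardware that can advance between
 * the two reads. Returns false if the pair never stabilised. */
static bool read_stable_pair(struct regs *r, unsigned *a, unsigned *b)
{
	int retry;

	*a = r->a;
	for (retry = 0; retry < MAX_RETRIES; retry++) {
		*b = r->b;			/* may be stale if 'a' moved */
		unsigned check = r->a;		/* re-read to detect movement */
		if (check == *a)
			return true;		/* snapshot is consistent */
		*a = check;			/* start over from the new 'a' */
	}
	return false;				/* caller reports an error */
}
]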


@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	 * In practice this won't execute very often unless on very fast
 	 * machines because the time window for this to happen is very small.
 	 */
-	while (amdgpuCrtc->enabled && repcnt--) {
+	while (amdgpuCrtc->enabled && --repcnt) {
 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
 		 * start in hpos, and to the "fudged earlier" vblank start in
 		 * vpos.
@@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 			break;
 
 		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 		if (min_udelay > vblank->framedur_ns / 2000) {
 			/* Don't wait ridiculously long - something is wrong */
 			repcnt = 0;
 			break;
 		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		usleep_range(min_udelay, 2 * min_udelay);
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	};


@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	drm_kms_helper_poll_enable(dev);
-	drm_helper_hpd_irq_event(dev);
 
 	/* set the power state here in case we are a PX system or headless */
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)


@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	 * In practice this won't execute very often unless on very fast
 	 * machines because the time window for this to happen is very small.
 	 */
-	while (radeon_crtc->enabled && repcnt--) {
+	while (radeon_crtc->enabled && --repcnt) {
 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
 		 * start in hpos, and to the "fudged earlier" vblank start in
 		 * vpos.
@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
 			break;
 
 		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 		if (min_udelay > vblank->framedur_ns / 2000) {
 			/* Don't wait ridiculously long - something is wrong */
 			repcnt = 0;
 			break;
 		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		usleep_range(min_udelay, 2 * min_udelay);
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	};


@@ -1075,6 +1075,8 @@ force:
 	/* update display watermarks based on new power state */
 	radeon_bandwidth_update(rdev);
+	/* update displays */
+	radeon_dpm_display_configuration_changed(rdev);
 
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1091,9 +1093,6 @@ force:
 	radeon_dpm_post_set_power_state(rdev);
 
-	/* update displays */
-	radeon_dpm_display_configuration_changed(rdev);
-
 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
 	rdev->pm.dpm.single_display = single_display;


@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
 		const struct ipu_platform_reg *reg = &client_reg[i];
 		struct platform_device *pdev;
+		struct device_node *of_node;
+
+		/* Associate subdevice with the corresponding port node */
+		of_node = of_graph_get_port_by_id(dev->of_node, i);
+		if (!of_node) {
+			dev_info(dev,
+				 "no port@%d node in %s, not using %s%d\n",
+				 i, dev->of_node->full_name,
+				 (i / 2) ? "DI" : "CSI", i % 2);
+			continue;
+		}
 
 		pdev = platform_device_alloc(reg->name, id++);
 		if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 			goto err_register;
 		}
 
+		pdev->dev.of_node = of_node;
 		pdev->dev.parent = dev;
 
-		/* Associate subdevice with the corresponding port node */
-		pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
-		if (!pdev->dev.of_node) {
-			dev_err(dev, "missing port@%d node in %s\n", i,
-				dev->of_node->full_name);
-			ret = -ENODEV;
-			goto err_register;
-		}
-
 		ret = platform_device_add_data(pdev, &reg->pdata,
 					       sizeof(reg->pdata));
 		if (!ret)


@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
 static void gs_destroy_candev(struct gs_can *dev)
 {
 	unregister_candev(dev->netdev);
-	free_candev(dev->netdev);
 	usb_kill_anchored_urbs(&dev->tx_submitted);
-	kfree(dev);
+	free_candev(dev->netdev);
 }
 
 static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	for (i = 0; i < icount; i++) {
 		dev->canch[i] = gs_make_candev(i, intf);
 		if (IS_ERR_OR_NULL(dev->canch[i])) {
+			/* save error code to return later */
+			rc = PTR_ERR(dev->canch[i]);
+
 			/* on failure destroy previously created candevs */
 			icount = i;
-			for (i = 0; i < icount; i++) {
+			for (i = 0; i < icount; i++)
 				gs_destroy_candev(dev->canch[i]);
-				dev->canch[i] = NULL;
-			}
+
+			usb_kill_anchored_urbs(&dev->rx_submitted);
 			kfree(dev);
 			return rc;
 		}
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
 		return;
 	}
 
-	for (i = 0; i < GS_MAX_INTF; i++) {
-		struct gs_can *can = dev->canch[i];
-
-		if (!can)
-			continue;
-
-		gs_destroy_candev(can);
-	}
+	for (i = 0; i < GS_MAX_INTF; i++)
+		if (dev->canch[i])
+			gs_destroy_candev(dev->canch[i]);
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
+	kfree(dev);
 }
 
 static const struct usb_device_id gs_usb_table[] = {
static const struct usb_device_id gs_usb_table[] = { static const struct usb_device_id gs_usb_table[] = {


@@ -421,6 +421,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		return -1;
 	}
 
+	/*
+	 * Increase the pending frames counter, so that later when a reply comes
+	 * in and the counter is decreased - we don't start getting negative
+	 * values.
+	 * Note that we don't need to make sure it isn't agg'd, since we're
+	 * TXing non-sta
+	 */
+	atomic_inc(&mvm->pending_frames[sta_id]);
+
 	return 0;
 }


@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
 void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
 {
 	static int use_dt_domains = -1;
-	int domain = of_get_pci_domain_nr(parent->of_node);
+	int domain = -1;
+	if (parent)
+		domain = of_get_pci_domain_nr(parent->of_node);
 
 	/*
 	 * Check DT domain and use_dt_domains values.
 	 *


@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
  * and function code cmd.
  * In case of an exception return 3. Otherwise return result of bitwise OR of
  * resulting condition code and DIAG return code. */
-static inline int dia250(void *iob, int cmd)
+static inline int __dia250(void *iob, int cmd)
 {
 	register unsigned long reg2 asm ("2") = (unsigned long) iob;
 	typedef union {
@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
 	int rc;
 
 	rc = 3;
-	diag_stat_inc(DIAG_STAT_X250);
 	asm volatile(
 		"	diag	2,%2,0x250\n"
 		"0:	ipm	%0\n"
@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
 	return rc;
 }
 
+static inline int dia250(void *iob, int cmd)
+{
+	diag_stat_inc(DIAG_STAT_X250);
+	return __dia250(iob, cmd);
+}
+
 /* Initialize block I/O to DIAG device using the specified blocksize and
  * block offset. On success, return zero and set end_block to contain the
  * number of blocks on the device minus the specified offset. Return non-zero


@@ -177,7 +177,6 @@ void core_tmr_abort_task(
 		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-			target_put_sess_cmd(se_cmd);
 			goto out;
 		}
 		list_del_init(&se_cmd->se_cmd_list);


@@ -843,9 +843,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 		pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
 			  __func__, ret);
-		/* Might as well let the VFS know */
-		d_instantiate(new_dentry, d_inode(old_dentry));
-		ihold(d_inode(old_dentry));
+		/*
+		 * We can't keep the target in dcache after that.
+		 * For one thing, we can't afford dentry aliases for directories.
+		 * For another, if there was a victim, we _can't_ set new inode
+		 * for that sucker and we have to trigger mount eviction - the
+		 * caller won't do it on its own since we are returning an error.
+		 */
+		d_invalidate(new_dentry);
 		new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
 		return ret;
 	}


@@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
 			d_rehash(newdent);
 		} else {
 			spin_lock(&dentry->d_lock);
-			NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
+			NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
 			spin_unlock(&dentry->d_lock);
 		}
 	} else {


@@ -618,6 +618,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
 	 * sole user of this dentry.  Too tricky...  Just unhash for
 	 * now.
 	 */
-	d_drop(dentry);
+	if (!err)
+		d_drop(dentry);
 	mutex_unlock(&dir->i_mutex);
@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	if (!overwrite && new_is_dir && !old_opaque && new_opaque)
 		ovl_remove_opaque(newdentry);
 
+	/*
+	 * Old dentry now lives in different location. Dentries in
+	 * lowerstack are stale. We cannot drop them here because
+	 * access to them is lockless. This could be only pure upper
+	 * or opaque directory - numlower is zero. Or upper non-dir
+	 * entry - its pureness is tracked by flag opaque.
+	 */
 	if (old_opaque != new_opaque) {
 		ovl_dentry_set_opaque(old, new_opaque);
 		if (!overwrite)


@@ -65,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 		mutex_lock(&upperdentry->d_inode->i_mutex);
 		err = notify_change(upperdentry, attr, NULL);
+		if (!err)
+			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
 		mutex_unlock(&upperdentry->d_inode->i_mutex);
 	}
 	ovl_drop_write(dentry);


@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
 	if (oe->__upperdentry) {
 		type = __OVL_PATH_UPPER;
 
-		if (oe->numlower) {
-			if (S_ISDIR(dentry->d_inode->i_mode))
-				type |= __OVL_PATH_MERGE;
-		} else if (!oe->opaque) {
-			type |= __OVL_PATH_PURE;
-		}
+		/*
+		 * Non-dir dentry can hold lower dentry from previous
+		 * location. Its purity depends only on opaque flag.
+		 */
+		if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
+			type |= __OVL_PATH_MERGE;
+		else if (!oe->opaque)
+			type |= __OVL_PATH_PURE;
 	} else {
 		if (oe->numlower > 1)
 			type |= __OVL_PATH_MERGE;
@@ -322,6 +324,7 @@ static const struct dentry_operations ovl_dentry_operations = {
 
 static const struct dentry_operations ovl_reval_dentry_operations = {
 	.d_release = ovl_dentry_release,
+	.d_select_inode = ovl_d_select_inode,
 	.d_revalidate = ovl_dentry_revalidate,
 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
 };


@@ -286,6 +286,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	if (unlikely(ACCESS_ONCE(ctx->released)))
 		goto out;
 
+	/*
+	 * We don't do userfault handling for the final child pid update.
+	 */
+	if (current->flags & PF_EXITING)
+		goto out;
+
 	/*
 	 * Check that we can return VM_FAULT_RETRY.
 	 *


@@ -320,11 +320,6 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
 	struct bvec_iter iter = bio->bi_iter;
 	int idx;
 
-	if (!bio_flagged(bio, BIO_CLONED)) {
-		*bv = bio->bi_io_vec[bio->bi_vcnt - 1];
-		return;
-	}
-
 	if (unlikely(!bio_multiple_segments(bio))) {
 		*bv = bio_iovec(bio);
 		return;


@@ -148,9 +148,6 @@ extern void syscall_unregfunc(void);
 		void *it_func;						\
 		void *__data;						\
 									\
-		if (!cpu_online(raw_smp_processor_id()))		\
-			return;						\
-									\
 		if (!(cond))						\
 			return;						\
 		prercu;							\
@@ -357,15 +354,19 @@ extern void syscall_unregfunc(void);
  * "void *__data, proto" as the callback prototype.
  */
 #define DECLARE_TRACE_NOARGS(name)					\
-	__DECLARE_TRACE(name, void, , 1, void *__data, __data)
+	__DECLARE_TRACE(name, void, ,					\
+			cpu_online(raw_smp_processor_id()),		\
+			void *__data, __data)
 
 #define DECLARE_TRACE(name, proto, args)				\
-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,		\
+	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
+			cpu_online(raw_smp_processor_id()),		\
 			PARAMS(void *__data, proto),			\
 			PARAMS(__data, args))
 
 #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
+			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
 			PARAMS(void *__data, proto),			\
 			PARAMS(__data, args))


@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 /* Send a single event to user space */
 void wireless_send_event(struct net_device *dev, unsigned int cmd,
 			 union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */


@@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 	}
 
 	/* prepare A-MPDU MLME for Rx aggregation */
-	tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+	tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
 	if (!tid_agg_rx)
 		goto end;


@@ -92,7 +92,7 @@ struct ieee80211_fragment_entry {
 	u16 extra_len;
 	u16 last_frag;
 	u8 rx_queue;
-	bool ccmp; /* Whether fragments were encrypted with CCMP */
+	bool check_sequential_pn; /* needed for CCMP/GCMP */
 	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };


@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
 	 * computing cur_tp
 	 */
 	tmp_mrs = &mi->r[idx].stats;
-	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
 	tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
 
 	return tmp_cur_tp;


@@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 	if (likely(sta->ampdu_mlme.tid_tx[tid]))
 		return;
 
-	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+	ieee80211_start_tx_ba_session(pubsta, tid, 0);
 }
 
 static void
@@ -871,7 +871,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 	 *  - if station is in dynamic SMPS (and streams > 1)
 	 *  - for fallback rates, to increase chances of getting through
 	 */
-	if (offset > 0 &&
+	if (offset > 0 ||
 	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
 	     group->streams > 1)) {
 		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
@@ -1334,7 +1334,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
 	prob = mi->groups[i].rates[j].prob_ewma;
 
 	/* convert tp_avg from pkt per second in kbps */
-	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+	tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
 
 	return tp_avg;
 }


@@ -1754,7 +1754,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
 	entry->seq = seq;
 	entry->rx_queue = rx_queue;
 	entry->last_frag = frag;
-	entry->ccmp = 0;
+	entry->check_sequential_pn = false;
 	entry->extra_len = 0;
 
 	return entry;
@@ -1850,15 +1850,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 					  rx->seqno_idx, &(rx->skb));
 		if (rx->key &&
 		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
+		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
 		    ieee80211_has_protected(fc)) {
 			int queue = rx->security_idx;
-			/* Store CCMP PN so that we can verify that the next
-			 * fragment has a sequential PN value. */
-			entry->ccmp = 1;
+
+			/* Store CCMP/GCMP PN so that we can verify that the
+			 * next fragment has a sequential PN value.
+			 */
+			entry->check_sequential_pn = true;
 			memcpy(entry->last_pn,
 			       rx->key->u.ccmp.rx_pn[queue],
 			       IEEE80211_CCMP_PN_LEN);
+			BUILD_BUG_ON(offsetof(struct ieee80211_key,
+					      u.ccmp.rx_pn) !=
+				     offsetof(struct ieee80211_key,
+					      u.gcmp.rx_pn));
+			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
+				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
+			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+				     IEEE80211_GCMP_PN_LEN);
 		}
 		return RX_QUEUED;
 	}
@@ -1873,15 +1885,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 		return RX_DROP_MONITOR;
 	}
 
-	/* Verify that MPDUs within one MSDU have sequential PN values.
-	 * (IEEE 802.11i, 8.3.3.4.5) */
-	if (entry->ccmp) {
+	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
+	 *  MPDU PN values are not incrementing in steps of 1."
+	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
+	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
+	 */
+	if (entry->check_sequential_pn) {
 		int i;
 		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
 		int queue;
+
 		if (!rx->key ||
 		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
+		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
 			return RX_DROP_UNUSABLE;
 		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
 		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -3367,6 +3385,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
 			return false;
 		/* ignore action frames to TDLS-peers */
 		if (ieee80211_is_action(hdr->frame_control) &&
+		    !is_broadcast_ether_addr(bssid) &&
 		    !ether_addr_equal(bssid, hdr->addr1))
 			return false;
 	}


@@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		return NOTIFY_DONE;
 	}
 
+	wireless_nlevent_flush();
+
 	return NOTIFY_OK;
 }


@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
 
 /* IW event code */
 
+void wireless_nlevent_flush(void)
+{
+	struct sk_buff *skb;
+	struct net *net;
+
+	ASSERT_RTNL();
+
+	for_each_net(net) {
+		while ((skb = skb_dequeue(&net->wext_nlevents)))
+			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+				    GFP_KERNEL);
+	}
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+				     unsigned long state, void *ptr)
+{
+	/*
+	 * When a netdev changes state in any way, flush all pending messages
+	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+	 * or similar - all of which could otherwise happen due to delays from
+	 * schedule_work().
+	 */
+	wireless_nlevent_flush();
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+	.notifier_call = wext_netdev_notifier_call,
+};
+
 static int __net_init wext_pernet_init(struct net *net)
 {
 	skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
 
 static int __init wireless_nlevent_init(void)
 {
-	return register_pernet_subsys(&wext_pernet_ops);
+	int err = register_pernet_subsys(&wext_pernet_ops);
+
+	if (err)
+		return err;
+
+	return register_netdevice_notifier(&wext_netdev_notifier);
 }
 
 subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
 /* Process events generated by the wireless layer or the driver. */
 static void wireless_nlevent_process(struct work_struct *work)
 {
-	struct sk_buff *skb;
-	struct net *net;
-
 	rtnl_lock();
-
-	for_each_net(net) {
-		while ((skb = skb_dequeue(&net->wext_nlevents)))
-			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
-				    GFP_KERNEL);
-	}
-
+	wireless_nlevent_flush();
 	rtnl_unlock();
 }


@@ -1,7 +1,7 @@
 #!/usr/bin/awk -f
 # extract linker version number from stdin and turn into single number
 {
-	gsub(".*)", "");
+	gsub(".*\\)", "");
 	split($1,a, ".");
 	print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
 	exit


@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -362,7 +362,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
 	struct wm8994 *control = wm8994->wm8994;
 	struct wm8994_pdata *pdata = &control->pdata;
 	int drc = wm8994_get_drc(kcontrol->id.name);
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 
 	if (drc < 0)
 		return drc;
@@ -469,7 +469,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
 	struct wm8994 *control = wm8994->wm8994;
 	struct wm8994_pdata *pdata = &control->pdata;
 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 
 	if (block < 0)
 		return block;
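These wm8958/wm8994 hunks (and the soc-dapm ones below) all fix the same bug: enum controls must be accessed through value.enumerated.item, an unsigned int array, not value.integer.value, a long array. The two overlay each other in one union, which happens to be harmless on 32-bit and on little-endian LP64, but reads the wrong word on 64-bit big-endian. A simplified stand-in for the union inside struct snd_ctl_elem_value (not the real ALSA definition) showing the aliasing:

```c
#include <stdio.h>

/* Simplified stand-in for the union inside struct snd_ctl_elem_value. */
union elem_value {
	struct { long value[4]; } integer;
	struct { unsigned int item[4]; } enumerated;
};

int main(void)
{
	union elem_value v = { 0 };

	v.enumerated.item[0] = 3;	/* what userspace writes for an enum */

	/*
	 * On 32-bit targets both reads alias the same four bytes, which is
	 * why the bug went unnoticed; on LP64 big-endian the long overlays
	 * item[0] and item[1], so the "integer" view reads the wrong data.
	 */
	printf("enumerated.item[0] = %u\n", v.enumerated.item[0]);
	printf("integer.value[0]   = %ld\n", v.integer.value[0]);
	return 0;
}
```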
@@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
 	unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
 	unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
 	u32 mod, mask, val = 0;
+	unsigned long flags;
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	switch (clk_id) {
 	case SAMSUNG_I2S_OPCLK:
@@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	mod = (mod & ~mask) | val;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	return 0;
 }
@@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	struct i2s_dai *i2s = to_info(dai);
 	int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
 	u32 mod, tmp = 0;
+	unsigned long flags;
 
 	lrp_shift = i2s->variant_regs->lrp_off;
 	sdf_shift = i2s->variant_regs->sdf_off;
@@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	/*
 	 * Don't change the I2S mode if any controller is active on this
@@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	 */
 	if (any_active(i2s) &&
 	    ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
-		spin_unlock(i2s->lock);
+		spin_unlock_irqrestore(i2s->lock, flags);
 		dev_err(&i2s->pdev->dev,
 			"%s:%d Other DAI busy\n", __func__, __LINE__);
 		return -EAGAIN;
@@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	mod &= ~(sdf_mask | lrp_rlow | mod_slave);
 	mod |= tmp;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	return 0;
 }
@@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
 {
 	struct i2s_dai *i2s = to_info(dai);
 	u32 mod, mask = 0, val = 0;
+	unsigned long flags;
 
 	if (!is_secondary(i2s))
 		mask |= (MOD_DC2_EN | MOD_DC1_EN);
@@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
 		return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	mod = (mod & ~mask) | val;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
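The pattern in these i2s hunks: when a spinlock can also be taken from IRQ or otherwise atomic context, plain spin_lock() risks deadlock, so the irqsave variants are used, saving the local interrupt state, disabling IRQs for the critical section, and restoring the exact prior state on unlock. A minimal kernel-style sketch of the idiom, not the driver's actual code:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mod_lock);
static unsigned int mod_shadow;

/* Safe to call from both process and hard-IRQ context on the local CPU. */
static void mod_update(unsigned int mask, unsigned int val)
{
	unsigned long flags;

	spin_lock_irqsave(&mod_lock, flags);	/* lock held, local IRQs off */
	mod_shadow = (mod_shadow & ~mask) | val;
	spin_unlock_irqrestore(&mod_lock, flags); /* prior IRQ state restored */
}
```

irqrestore (rather than a blanket spin_unlock_irq) matters here because the caller may already have interrupts disabled; restoring the saved flags preserves whatever state was in effect.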
@@ -3568,7 +3568,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
 {
 	struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
 
-	ucontrol->value.integer.value[0] = w->params_select;
+	ucontrol->value.enumerated.item[0] = w->params_select;
 
 	return 0;
 }
@@ -3582,13 +3582,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
 	if (w->power)
 		return -EBUSY;
 
-	if (ucontrol->value.integer.value[0] == w->params_select)
+	if (ucontrol->value.enumerated.item[0] == w->params_select)
 		return 0;
 
-	if (ucontrol->value.integer.value[0] >= w->num_params)
+	if (ucontrol->value.enumerated.item[0] >= w->num_params)
 		return -EINVAL;
 
-	w->params_select = ucontrol->value.integer.value[0];
+	w->params_select = ucontrol->value.enumerated.item[0];
 
 	return 0;
 }
@@ -1961,6 +1961,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 	else
 		val *= halt_poll_ns_grow;
 
+	if (val > halt_poll_ns)
+		val = halt_poll_ns;
+
 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
 }
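The added clamp keeps vcpu->halt_poll_ns from growing past the halt_poll_ns module parameter; previously, repeated grows could balloon the per-vCPU polling window without bound. A userspace C sketch of the saturating growth, with illustrative stand-in values for the module parameters:

```c
#include <stdio.h>

/* Illustrative stand-ins for the KVM module parameters. */
static unsigned int halt_poll_ns = 500000;   /* global ceiling, ns */
static unsigned int halt_poll_ns_grow = 2;   /* growth factor */

static unsigned int grow(unsigned int cur)
{
	unsigned int val = cur * halt_poll_ns_grow;

	if (val > halt_poll_ns)		/* the added clamp */
		val = halt_poll_ns;
	return val;
}

int main(void)
{
	unsigned int ns = 10000;
	int i;

	/* 20000, 40000, ... doubles until it saturates at 500000. */
	for (i = 0; i < 8; i++) {
		ns = grow(ns);
		printf("%u\n", ns);
	}
	return 0;
}
```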