This is the 4.4.175 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlxtGjsACgkQONu9yGCS
aT6jNBAAnn+z92N44iyvF/OfhvSFSqdHgRMTzaZNCirY2IjEPrwgIW3PYBBBIESq
1ZSDDvlo/tQ1R/Tg6VgRRHb69Wxx2Vu7G/JP/huVhUlsdUIzrLU/CIIF9sEwYvFp
sn4r4l/ILLcsQFFQbWxwk3QhmjCRbPNRSZN0k11z3/pI5wUlFzZ8ttMFW2MmY3C5
0o5sT7KVCDa9YmcnZkGl2cODe3drk2TbsCKIaxSsGrENTHprDe1+YyGMcE2mpWwP
iIac6o24Na8g41TFMKjSLU9uz6np1s+P62DNQu28x5mIND0KyvEoQ5Nbb2/Tg/U5
RvcNdEIDNB5CcqQRi/S2C2W/+9VvTaXM3691D4g9J7jUa9reXcIQ8sdOsuvxB3/H
NFdPGaTcqyU2xMi/25IKsKpOp8As5lIFvhH6+rlYhZ681KK2iSscJeIrEDbu2ern
7iA04FfxCZZ3G4aKCtMig//tjLpycAJ3nWlxZZDv5s3acxAkNbuYs2tKOFNpKhgc
I6fjaBMRQXCGdgvGLui+rhtfAFWSS2AP0okIgEQU5aNixu4785caizbn/xuoPLIz
z42WgJfXNWYqPOZtvKYfgbk7HVGUOGaouuEAXWC6l9xKdQAYHjMRDRjiZLXcZcml
YP5TH8JIh68cQ1rwhEEsqwbzPVV8SRXIhDnznER6aXJw1WreMDU=
=wMOh
-----END PGP SIGNATURE-----

Merge 4.4.175 into android-4.4

Changes in 4.4.175
    drm/bufs: Fix Spectre v1 vulnerability
    staging: iio: adc: ad7280a: handle error from __ad7280_read32()
    ASoC: Intel: mrfld: fix uninitialized variable access
    scsi: lpfc: Correct LCB RJT handling
    ARM: 8808/1: kexec:offline panic_smp_self_stop CPU
    dlm: Don't swamp the CPU with callbacks queued during recovery
    x86/PCI: Fix Broadcom CNB20LE unintended sign extension (redux)
    powerpc/pseries: add of_node_put() in dlpar_detach_node()
    serial: fsl_lpuart: clear parity enable bit when disable parity
    ptp: check gettime64 return code in PTP_SYS_OFFSET ioctl
    staging:iio:ad2s90: Make probe handle spi_setup failure
    staging: iio: ad7780: update voltage on read
    ARM: OMAP2+: hwmod: Fix some section annotations
    modpost: validate symbol names also in find_elf_symbol
    perf tools: Add Hygon Dhyana support
    soc/tegra: Don't leak device tree node reference
    f2fs: move dir data flush to write checkpoint process
    f2fs: fix wrong return value of f2fs_acl_create
    sunvdc: Do not spin in an infinite loop when vio_ldc_send() returns EAGAIN
    nfsd4: fix crash on writing v4_end_grace before nfsd startup
    arm64: ftrace: don't adjust the LR value
    ARM: dts: mmp2: fix TWSI2
    x86/fpu: Add might_fault() to user_insn()
    media: DaVinci-VPBE: fix error handling in vpbe_initialize()
    smack: fix access permissions for keyring
    usb: hub: delay hub autosuspend if USB3 port is still link training
    timekeeping: Use proper seqcount initializer
    ARM: dts: Fix OMAP4430 SDP Ethernet startup
    mips: bpf: fix encoding bug for mm_srlv32_op
    iommu/arm-smmu-v3: Use explicit mb() when moving cons pointer
    sata_rcar: fix deferred probing
    clk: imx6sl: ensure MMDC CH0 handshake is bypassed
    cpuidle: big.LITTLE: fix refcount leak
    i2c-axxia: check for error conditions first
    udf: Fix BUG on corrupted inode
    ARM: pxa: avoid section mismatch warning
    ASoC: fsl: Fix SND_SOC_EUKREA_TLV320 build error on i.MX8M
    memstick: Prevent memstick host from getting runtime suspended during card detection
    tty: serial: samsung: Properly set flags in autoCTS mode
    arm64: KVM: Skip MMIO insn after emulation
    powerpc/uaccess: fix warning/error with access_ok()
    mac80211: fix radiotap vendor presence bitmap handling
    xfrm6_tunnel: Fix spi check in __xfrm6_tunnel_alloc_spi
    Bluetooth: Fix unnecessary error message for HCI request completion
    cw1200: Fix concurrency use-after-free bugs in cw1200_hw_scan()
    drbd: narrow rcu_read_lock in drbd_sync_handshake
    drbd: disconnect, if the wrong UUIDs are attached on a connected peer
    drbd: skip spurious timeout (ping-timeo) when failing promote
    drbd: Avoid Clang warning about pointless switch statment
    video: clps711x-fb: release disp device node in probe()
    fbdev: fbmem: behave better with small rotated displays and many CPUs
    igb: Fix an issue that PME is not enabled during runtime suspend
    fbdev: fbcon: Fix unregister crash when more than one framebuffer
    KVM: x86: svm: report MSR_IA32_MCG_EXT_CTL as unsupported
    NFS: nfs_compare_mount_options always compare auth flavors.
    hwmon: (lm80) fix a missing check of the status of SMBus read
    hwmon: (lm80) fix a missing check of bus read in lm80 probe
    seq_buf: Make seq_buf_puts() null-terminate the buffer
    crypto: ux500 - Use proper enum in cryp_set_dma_transfer
    crypto: ux500 - Use proper enum in hash_set_dma_transfer
    cifs: check ntwrk_buf_start for NULL before dereferencing it
    um: Avoid marking pages with "changed protection"
    niu: fix missing checks of niu_pci_eeprom_read
    scripts/decode_stacktrace: only strip base path when a prefix of the path
    ocfs2: don't clear bh uptodate for block read
    isdn: hisax: hfc_pci: Fix a possible concurrency use-after-free bug in HFCPCI_l1hw()
    gdrom: fix a memory leak bug
    block/swim3: Fix -EBUSY error when re-opening device after unmount
    HID: lenovo: Add checks to fix of_led_classdev_register
    kernel/hung_task.c: break RCU locks based on jiffies
    fs/epoll: drop ovflist branch prediction
    exec: load_script: don't blindly truncate shebang string
    thermal: hwmon: inline helpers when CONFIG_THERMAL_HWMON is not set
    test_hexdump: use memcpy instead of strncpy
    tipc: use destination length for copy string
    string: drop __must_check from strscpy() and restore strscpy() usages in cgroup
    dccp: fool proof ccid_hc_[rt]x_parse_options()
    enic: fix checksum validation for IPv6
    net: dp83640: expire old TX-skb
    skge: potential memory corruption in skge_get_regs()
    net: systemport: Fix WoL with password after deep sleep
    net: dsa: slave: Don't propagate flag changes on down slave interfaces
    ALSA: compress: Fix stop handling on compressed capture streams
    ALSA: hda - Serialize codec registrations
    fuse: call pipe_buf_release() under pipe lock
    fuse: decrement NR_WRITEBACK_TEMP on the right page
    fuse: handle zero sized retrieve correctly
    dmaengine: imx-dma: fix wrong callback invoke
    usb: phy: am335x: fix race condition in _probe
    usb: gadget: udc: net2272: Fix bitwise and boolean operations
    KVM: x86: work around leak of uninitialized stack contents (CVE-2019-7222)
    KVM: nVMX: unconditionally cancel preemption timer in free_nested (CVE-2019-7221)
    perf/x86/intel/uncore: Add Node ID mask
    x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()
    perf/core: Don't WARN() for impossible ring-buffer sizes
    perf tests evsel-tp-sched: Fix bitwise operator
    mtd: rawnand: gpmi: fix MX28 bus master lockup problem
    signal: Always notice exiting tasks
    signal: Better detection of synchronous signals
    misc: vexpress: Off by one in vexpress_syscfg_exec()
    debugfs: fix debugfs_rename parameter checking
    mips: cm: reprime error cause
    MIPS: OCTEON: don't set octeon_dma_bar_type if PCI is disabled
    MIPS: VDSO: Include $(ccflags-vdso) in o32,n32 .lds builds
    ARM: iop32x/n2100: fix PCI IRQ mapping
    mac80211: ensure that mgmt tx skbs have tailroom for encryption
    drm/modes: Prevent division by zero htotal
    drm/vmwgfx: Fix setting of dma masks
    drm/vmwgfx: Return error code from vmw_execbuf_copy_fence_user
    HID: debug: fix the ring buffer implementation
    NFC: nxp-nci: Include unaligned.h instead of access_ok.h
    Revert "cifs: In Kconfig CONFIG_CIFS_POSIX needs depends on legacy (insecure cifs)"
    libceph: avoid KEEPALIVE_PENDING races in ceph_con_keepalive()
    xfrm: refine validation of template and selector families
    batman-adv: Avoid WARN on net_device without parent in netns
    batman-adv: Force mac header to start of data on xmit
    Revert "exec: load_script: don't blindly truncate shebang string"
    uapi/if_ether.h: prevent redefinition of struct ethhdr
    ARM: dts: da850-evm: Correct the sound card name
    ARM: dts: kirkwood: Fix polarity of GPIO fan lines
    gpio: pl061: handle failed allocations
    cifs: Limit memory used by lock request calls to a page
    Documentation/network: reword kernel version reference
    Revert "Input: elan_i2c - add ACPI ID for touchpad in ASUS Aspire F5-573G"
    Input: elan_i2c - add ACPI ID for touchpad in Lenovo V330-15ISK
    perf/core: Fix impossible ring-buffer sizes warning
    ALSA: hda - Add quirk for HP EliteBook 840 G5
    ALSA: usb-audio: Fix implicit fb endpoint setup by quirk
    Input: bma150 - register input device after setting private data
    Input: elantech - enable 3rd button support on Fujitsu CELSIUS H780
    alpha: fix page fault handling for r16-r18 targets
    alpha: Fix Eiger NR_IRQS to 128
    tracing/uprobes: Fix output for multiple string arguments
    x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls
    signal: Restore the stop PTRACE_EVENT_EXIT
    x86/a.out: Clear the dump structure initially
    dm thin: fix bug where bio that overwrites thin block ignores FUA
    smsc95xx: Use skb_cow_head to deal with cloned skbs
    ch9200: use skb_cow_head() to deal with cloned skbs
    kaweth: use skb_cow_head() to deal with cloned skbs
    usb: dwc2: Remove unnecessary kfree
    pinctrl: msm: fix gpio-hog related boot issues
    uapi/if_ether.h: move __UAPI_DEF_ETHHDR libc define
    Linux 4.4.175

Change-Id: I41d95e9717106bcc9342573855fec8f823d9b28c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 08d58678a9 (142 changed files with 785 additions and 364 deletions)
@@ -116,7 +116,7 @@ ipfrag_high_thresh - LONG INTEGER
 Maximum memory used to reassemble IP fragments.

 ipfrag_low_thresh - LONG INTEGER
-(Obsolete since linux-4.17)
+(Obsolete since linux-4.4.174, backported from linux-4.17)
 Maximum memory used to reassemble IP fragments before the kernel
 begins to remove incomplete fragment queues to free up resources.
 The kernel still accepts new fragments for defragmentation.
Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 174
+SUBLEVEL = 175
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -55,15 +55,15 @@

 #elif defined(CONFIG_ALPHA_DP264) || \
 defined(CONFIG_ALPHA_LYNX) || \
-defined(CONFIG_ALPHA_SHARK) || \
-defined(CONFIG_ALPHA_EIGER)
+defined(CONFIG_ALPHA_SHARK)
 # define NR_IRQS 64

 #elif defined(CONFIG_ALPHA_TITAN)
 #define NR_IRQS 80

 #elif defined(CONFIG_ALPHA_RAWHIDE) || \
-defined(CONFIG_ALPHA_TAKARA)
+defined(CONFIG_ALPHA_TAKARA) || \
+defined(CONFIG_ALPHA_EIGER)
 # define NR_IRQS 128

 #elif defined(CONFIG_ALPHA_WILDFIRE)
@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
 /* Macro for exception fixup code to access integer registers. */
 #define dpf_reg(r) \
 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
-(r) <= 18 ? (r)+8 : (r)-10])
+(r) <= 18 ? (r)+10 : (r)-10])

 asmlinkage void
 do_page_fault(unsigned long address, unsigned long mmcsr,
@@ -147,7 +147,7 @@

 sound {
 compatible = "simple-audio-card";
-simple-audio-card,name = "DA850/OMAP-L138 EVM";
+simple-audio-card,name = "DA850-OMAPL138 EVM";
 simple-audio-card,widgets =
 "Line", "Line In",
 "Line", "Line Out";
@@ -35,8 +35,8 @@
 compatible = "gpio-fan";
 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
 pinctrl-names = "default";
-gpios = <&gpio1 14 GPIO_ACTIVE_LOW
-&gpio1 13 GPIO_ACTIVE_LOW>;
+gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+&gpio1 13 GPIO_ACTIVE_HIGH>;
 gpio-fan,speed-map = <0 0
 3000 1
 6000 2>;
@@ -220,12 +220,15 @@
 status = "disabled";
 };

-twsi2: i2c@d4025000 {
+twsi2: i2c@d4031000 {
 compatible = "mrvl,mmp-twsi";
-reg = <0xd4025000 0x1000>;
-interrupts = <58>;
+reg = <0xd4031000 0x1000>;
+interrupt-parent = <&intcmux17>;
+interrupts = <0>;
 clocks = <&soc_clocks MMP2_CLK_TWSI1>;
 resets = <&soc_clocks MMP2_CLK_TWSI1>;
+#address-cells = <1>;
+#size-cells = <0>;
 status = "disabled";
 };

@@ -33,6 +33,7 @@
 gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
 enable-active-high;
 regulator-boot-on;
+startup-delay-us = <25000>;
 };

 vbat: fixedregulator-vbat {
@@ -687,6 +687,21 @@ void smp_send_stop(void)
 pr_warn("SMP: failed to stop secondary CPUs\n");
 }

+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
+* and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
+* CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
+* kdump fails. So split out the panic_smp_self_stop() and add
+* set_cpu_online(smp_processor_id(), false).
+*/
+void panic_smp_self_stop(void)
+{
+pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+smp_processor_id());
+set_cpu_online(smp_processor_id(), false);
+while (1)
+cpu_relax();
+}
+
 /*
 * not supported here
 */
@@ -118,6 +118,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 }

+/*
+* The MMIO instruction is emulated and should not be re-executed
+* in the guest.
+*/
+kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
 return 0;
 }

@@ -151,11 +157,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 vcpu->arch.mmio_decode.sign_extend = sign_extend;
 vcpu->arch.mmio_decode.rt = rt;

-/*
-* The MMIO instruction is emulated and should not be re-executed
-* in the guest.
-*/
-kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

 return 0;
 }
@@ -394,7 +394,11 @@ static int __init_refok impd1_probe(struct lm_device *dev)
 sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
 GFP_KERNEL);
 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
-mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+"lm%x:00700", dev->id);
+if (!lookup || !chipname || !mmciname)
+return -ENOMEM;
+
 lookup->dev_id = mmciname;
 /*
 * Offsets on GPIO block 1:
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
 /*
 * N2100 PCI.
 */
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 int irq;

@@ -2526,7 +2526,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 * a stub; implementing this properly requires iclk autoidle usecounting in
 * the clock code. No return value.
 */
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
 {
 struct omap_hwmod_ocp_if *os;
 struct list_head *p;
@@ -2561,7 +2561,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
 * reset. Returns 0 upon success or a negative error code upon
 * failure.
 */
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
 {
 int r;

@@ -2622,7 +2622,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
 *
 * No return value.
 */
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
 {
 u8 postsetup_state;

@@ -547,7 +547,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
 .exit = cm_x300_u2d_exit,
 };

-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
 {
 pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
 }
@@ -183,7 +183,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
 .lcd_conn = LCD_COLOR_TFT_16BPP,
 };

-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
 {
 pxa_set_fb_info(NULL, &littleton_lcd_info);
 }
@@ -558,7 +558,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
 .flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
 };

-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
 {
 /* Port 2 is shared between host and client interface. */
 UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
@@ -78,7 +78,6 @@
 .macro mcount_get_lr reg
 ldr \reg, [x29]
 ldr \reg, [\reg, #8]
-mcount_adjust_addr \reg, \reg
 .endm

 .macro mcount_get_lr_addr reg
@@ -291,8 +291,8 @@ enum mm_32a_minor_op {
 mm_ext_op = 0x02c,
 mm_pool32axf_op = 0x03c,
 mm_srl32_op = 0x040,
+mm_srlv32_op = 0x050,
 mm_sra_op = 0x080,
-mm_srlv32_op = 0x090,
 mm_rotr_op = 0x0c0,
 mm_lwxs_op = 0x118,
 mm_addu32_op = 0x150,
@@ -450,5 +450,5 @@ void mips_cm_error_report(void)
 }

 /* reprime cause register */
-write_gcr_error_cause(0);
+write_gcr_error_cause(cm_error);
 }
@@ -571,6 +571,11 @@ static int __init octeon_pci_setup(void)
 if (octeon_has_feature(OCTEON_FEATURE_PCIE))
 return 0;

+if (!octeon_is_pci_host()) {
+pr_notice("Not in host mode, PCI Controller not initialized\n");
+return 0;
+}
+
 /* Point pcibios_map_irq() to the PCI version of it */
 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;

@@ -582,11 +587,6 @@ static int __init octeon_pci_setup(void)
 else
 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;

-if (!octeon_is_pci_host()) {
-pr_notice("Not in host mode, PCI Controller not initialized\n");
-return 0;
-}
-
 /* PCI I/O and PCI MEM values */
 set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
 ioport_resource.start = 0;
@@ -107,7 +107,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
 $(call cmd,force_checksrc)
 $(call if_changed_rule,cc_o_c)

-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 $(call if_changed_dep,cpp_lds_S)

@@ -143,7 +143,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
 $(call cmd,force_checksrc)
 $(call if_changed_rule,cc_o_c)

-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 $(call if_changed_dep,cpp_lds_S)

@@ -59,7 +59,7 @@
 #endif

 #define access_ok(type, addr, size) \
-(__chk_user_ptr(addr), \
+(__chk_user_ptr(addr), (void)(type), \
 __access_ok((__force unsigned long)(addr), (size), get_fs()))

 /*
@@ -280,6 +280,8 @@ int dlpar_detach_node(struct device_node *dn)
 if (rc)
 return rc;

+of_node_put(dn);
+
 return 0;
 }

@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)

 static inline pte_t pte_wrprotect(pte_t pte)
 {
-pte_clear_bits(pte, _PAGE_RW);
+if (likely(pte_get_bits(pte, _PAGE_RW)))
+pte_clear_bits(pte, _PAGE_RW);
+else
+return pte;
 return(pte_mknewprot(pte));
 }

 static inline pte_t pte_mkread(pte_t pte)
 {
+if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+return pte;
 pte_set_bits(pte, _PAGE_USER);
 return(pte_mknewprot(pte));
 }
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)

 static inline pte_t pte_mkwrite(pte_t pte)
 {
+if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+return pte;
 pte_set_bits(pte, _PAGE_RW);
 return(pte_mknewprot(pte));
 }
@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
 /*
 * fill in the user structure for a core dump..
 */
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
 {
 u32 fs, gs;
 memset(dump, 0, sizeof(*dump));
@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
 fs = get_fs();
 set_fs(KERNEL_DS);
 has_dumped = 1;
+
+fill_dump(cprm->regs, &dump);
+
 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
 dump.u_ar0 = offsetof(struct user32, regs);
 dump.signal = cprm->siginfo->si_signo;
-dump_thread32(cprm->regs, &dump);

 /*
 * If the size of the dump file exceeds the rlimit, then see
@@ -94,6 +94,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 #define user_insn(insn, output, input...) \
 ({ \
 int err; \
+\
+might_fault(); \
+\
 asm volatile(ASM_STAC "\n" \
 "1:" #insn "\n\t" \
 "2: " ASM_CLAC "\n" \
@@ -48,7 +48,8 @@ enum {
 BIOS_STATUS_SUCCESS = 0,
 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
 BIOS_STATUS_EINVAL = -EINVAL,
-BIOS_STATUS_UNAVAIL = -EBUSY
+BIOS_STATUS_UNAVAIL = -EBUSY,
+BIOS_STATUS_ABORT = -EINTR,
 };

 /*
@@ -111,4 +112,9 @@ extern long system_serial_number;

 extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */

+/*
+* EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+*/
+extern struct semaphore __efi_uv_runtime_lock;
+
 #endif /* _ASM_X86_UV_BIOS_H */
@@ -670,6 +670,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 }

 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+m->bank = i;
 *msg = tmp;
 ret = 1;
 }
@@ -1081,6 +1081,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
 .id_table = snbep_uncore_pci_ids,
 };

+#define NODE_ID_MASK 0x7
+
 /*
 * build pci bus to socket mapping
 */
@@ -1102,7 +1104,7 @@ static int snbep_pci2phy_map_init(int devid)
 err = pci_read_config_dword(ubox_dev, 0x40, &config);
 if (err)
 break;
-nodeid = config;
+nodeid = config & NODE_ID_MASK;
 /* get the Node ID mapping */
 err = pci_read_config_dword(ubox_dev, 0x54, &config);
 if (err)
@@ -4156,6 +4156,13 @@ static bool svm_cpu_has_accelerated_tpr(void)

 static bool svm_has_emulated_msr(int index)
 {
+switch (index) {
+case MSR_IA32_MCG_EXT_CTL:
+return false;
+default:
+break;
+}
+
 return true;
 }

@@ -6965,6 +6965,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 if (!vmx->nested.vmxon)
 return;

+hrtimer_cancel(&vmx->nested.preemption_timer);
 vmx->nested.vmxon = false;
 free_vpid(vmx->nested.vpid02);
 nested_release_vmcs12(vmx);
@@ -4247,6 +4247,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 {
 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

+/*
+* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+* is returned, but our callers are not ready for that and they blindly
+* call kvm_inject_page_fault. Ensure that they at least do not leak
+* uninitialized kernel stack memory into cr2 and error code.
+*/
+memset(exception, 0, sizeof(*exception));
 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 exception);
 }
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
 word1 = read_pci_config_16(bus, slot, func, 0xc0);
 word2 = read_pci_config_16(bus, slot, func, 0xc2);
 if (word1 != word2) {
-res.start = (word1 << 16) | 0x0000;
-res.end = (word2 << 16) | 0xffff;
+res.start = ((resource_size_t) word1 << 16) | 0x0000;
+res.end = ((resource_size_t) word2 << 16) | 0xffff;
 res.flags = IORESOURCE_MEM;
 update_res(info, res.start, res.end, res.flags, 0);
 }
@@ -28,7 +28,8 @@

 static struct uv_systab uv_systab;

-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+u64 a4, u64 a5)
 {
 struct uv_systab *tab = &uv_systab;
 s64 ret;
@@ -43,6 +44,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 a1, a2, a3, a4, a5);
 return ret;
 }
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+s64 ret;
+
+if (down_interruptible(&__efi_uv_runtime_lock))
+return BIOS_STATUS_ABORT;
+
+ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+up(&__efi_uv_runtime_lock);
+
+return ret;
+}
 EXPORT_SYMBOL_GPL(uv_bios_call);

 s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -51,10 +65,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
 unsigned long bios_flags;
 s64 ret;

+if (down_interruptible(&__efi_uv_runtime_lock))
+return BIOS_STATUS_ABORT;
+
 local_irq_save(bios_flags);
-ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
 local_irq_restore(bios_flags);

+up(&__efi_uv_runtime_lock);
+
 return ret;
 }

@@ -875,7 +875,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
 int ret = 0;

 irq = platform_get_irq(pdev, 0);
-if (irq <= 0)
+if (irq < 0)
+return irq;
+if (!irq)
 return -EINVAL;

 priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
@@ -632,14 +632,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
 if (rv == SS_TWO_PRIMARIES) {
 /* Maybe the peer is detected as dead very soon...
 retry at most once more in this case. */
-int timeo;
-rcu_read_lock();
-nc = rcu_dereference(connection->net_conf);
-timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
-rcu_read_unlock();
-schedule_timeout_interruptible(timeo);
-if (try < max_tries)
+if (try < max_tries) {
+int timeo;
 try = max_tries - 1;
+rcu_read_lock();
+nc = rcu_dereference(connection->net_conf);
+timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+rcu_read_unlock();
+schedule_timeout_interruptible(timeo);
+}
 continue;
 }
 if (rv < SS_SUCCESS) {
@@ -3126,7 +3126,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
 enum drbd_conns rv = C_MASK;
 enum drbd_disk_state mydisk;
 struct net_conf *nc;
-int hg, rule_nr, rr_conflict, tentative;
+int hg, rule_nr, rr_conflict, tentative, always_asbp;

 mydisk = device->state.disk;
 if (mydisk == D_NEGOTIATING)
@@ -3168,8 +3168,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,

 rcu_read_lock();
 nc = rcu_dereference(peer_device->connection->net_conf);
+always_asbp = nc->always_asbp;
+rr_conflict = nc->rr_conflict;
+tentative = nc->tentative;
+rcu_read_unlock();

-if (hg == 100 || (hg == -100 && nc->always_asbp)) {
+if (hg == 100 || (hg == -100 && always_asbp)) {
 int pcount = (device->state.role == R_PRIMARY)
 + (peer_role == R_PRIMARY);
 int forced = (hg == -100);
@@ -3208,9 +3212,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
 "Sync from %s node\n",
 (hg < 0) ? "peer" : "this");
 }
-rr_conflict = nc->rr_conflict;
-tentative = nc->tentative;
-rcu_read_unlock();

 if (hg == -100) {
 /* FIXME this log message is not correct if we end up here
@@ -3889,7 +3890,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
 kfree(device->p_uuid);
 device->p_uuid = p_uuid;

-if (device->state.conn < C_CONNECTED &&
+if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
 device->state.disk < D_INCONSISTENT &&
 device->state.role == R_PRIMARY &&
 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define WAITING_FOR_GEN_CMD 0x04
 #define WAITING_FOR_ANY -1

+#define VDC_MAX_RETRIES 10
+
 static struct workqueue_struct *sunvdc_wq;

 struct vdc_req_entry {
@@ -419,6 +421,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 .end_idx = dr->prod,
 };
 int err, delay;
+int retries = 0;

 hdr.seq = dr->snd_nxt;
 delay = 1;
@@ -431,6 +434,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 udelay(delay);
 if ((delay <<= 1) > 128)
 delay = 128;
+if (retries++ > VDC_MAX_RETRIES)
+break;
 } while (err == -EAGAIN);

 if (err == -ENOTCONN)
@@ -1027,7 +1027,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
 struct swim3 __iomem *sw = fs->swim3;

 mutex_lock(&swim3_mutex);
-if (fs->ref_count > 0 && --fs->ref_count == 0) {
+if (fs->ref_count > 0)
+--fs->ref_count;
+else if (fs->ref_count == -1)
+fs->ref_count = 0;
+if (fs->ref_count == 0) {
 swim3_action(fs, MOTOR_OFF);
 out_8(&sw->control_bic, 0xff);
 swim3_select(fs, RELAX);
@@ -882,6 +882,7 @@ static void __exit exit_gdrom(void)
 platform_device_unregister(pd);
 platform_driver_unregister(&gdrom_driver);
 kfree(gd.toc);
+kfree(gd.cd_info);
 }

 module_init(init_gdrom);
@@ -17,6 +17,8 @@

 #include "clk.h"

+#define CCDR 0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
 #define CCSR 0xc
 #define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
 #define CACRR 0x10
@@ -414,6 +416,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
 clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
 clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);

+/* Ensure the MMDC CH0 handshake is bypassed */
+writel_relaxed(readl_relaxed(base + CCDR) |
+BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
 imx_check_clocks(clks, ARRAY_SIZE(clks));

 clk_data.clks = clks;
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
 {
 int ret;
 struct device_node *root = of_find_node_by_path("/");
+const struct of_device_id *match_id;

 if (!root)
 return -ENODEV;
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
 /*
 * Initialize the driver just for a compliant set of machines
 */
-if (!of_match_node(compatible_machine_match, root))
+match_id = of_match_node(compatible_machine_match, root);
+
+of_node_put(root);
+
+if (!match_id)
 return -ENODEV;

 if (!mcpm_is_available())
@@ -555,7 +555,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 desc = dmaengine_prep_slave_sg(channel,
 ctx->device->dma.sg_src,
 ctx->device->dma.sg_src_len,
-direction, DMA_CTRL_ACK);
+DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 break;

 case DMA_FROM_DEVICE:
@@ -579,7 +579,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 desc = dmaengine_prep_slave_sg(channel,
 ctx->device->dma.sg_dst,
 ctx->device->dma.sg_dst_len,
-direction,
+DMA_DEV_TO_MEM,
 DMA_CTRL_ACK |
 DMA_PREP_INTERRUPT);

@@ -181,7 +181,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 __func__);
 desc = dmaengine_prep_slave_sg(channel,
 ctx->device->dma.sg, ctx->device->dma.sg_len,
-direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 if (!desc) {
 dev_err(ctx->device->dev,
 "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
@@ -619,7 +619,7 @@ static void imxdma_tasklet(unsigned long data)
 {
 struct imxdma_channel *imxdmac = (void *)data;
 struct imxdma_engine *imxdma = imxdmac->imxdma;
-struct imxdma_desc *desc;
+struct imxdma_desc *desc, *next_desc;
 unsigned long flags;

 spin_lock_irqsave(&imxdma->lock, flags);
@@ -649,10 +649,10 @@ static void imxdma_tasklet(unsigned long data)
 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

 if (!list_empty(&imxdmac->ld_queue)) {
-desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-node);
+next_desc = list_first_entry(&imxdmac->ld_queue,
+struct imxdma_desc, node);
 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-if (imxdma_xfer_desc(desc) < 0)
+if (imxdma_xfer_desc(next_desc) < 0)
 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
 __func__, imxdmac->channel);
 }
@@ -87,6 +87,13 @@ static DEFINE_SPINLOCK(efi_runtime_lock);
 * context through efi_pstore_write().
 */

+/*
+* Expose the EFI runtime lock to the UV platform
+*/
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
 /*
 * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
 * the EFI specification requires that callers of the time related runtime
@@ -36,6 +36,8 @@
 #include <drm/drmP.h>
 #include "drm_legacy.h"

+#include <linux/nospec.h>
+
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 struct drm_local_map *map)
 {
@@ -1332,6 +1334,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
 idx, dma->buf_count - 1);
 return -EINVAL;
 }
+idx = array_index_nospec(idx, dma->buf_count);
 buf = dma->buflist[idx];
 if (buf->file_priv != file_priv) {
 DRM_ERROR("Process %d freeing buffer not owned\n",
@@ -722,7 +722,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
 if (mode->hsync)
 return mode->hsync;

-if (mode->htotal < 0)
+if (mode->htotal <= 0)
 return 0;

 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
@@ -594,13 +594,16 @@ out_fixup:
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 struct drm_device *dev = dev_priv->dev;
+int ret = 0;

-if (intel_iommu_enabled &&
+ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+if (dev_priv->map_mode != vmw_dma_phys &&
 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 }
-return 0;
+
+return ret;
 }
 #else
 static int vmw_dma_masks(struct vmw_private *dev_priv)
@@ -3663,7 +3663,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 *p_fence = NULL;
 }

-return 0;
+return ret;
 }

 /**
@@ -30,6 +30,7 @@

 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/kfifo.h>
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/slab.h>
@@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) {
 char *buf = NULL;

 if (!f) {
-buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
 if (!buf)
 return ERR_PTR(-ENOMEM);
 }
@@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
 /* enqueue string to 'events' ring buffer */
 void hid_debug_event(struct hid_device *hdev, char *buf)
 {
-int i;
 struct hid_debug_list *list;
 unsigned long flags;

 spin_lock_irqsave(&hdev->debug_list_lock, flags);
-list_for_each_entry(list, &hdev->debug_list, node) {
-for (i = 0; i < strlen(buf); i++)
-list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
-buf[i];
-list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
-}
+list_for_each_entry(list, &hdev->debug_list, node)
+kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);

 wake_up_interruptible(&hdev->debug_wait);
@@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
 hid_debug_event(hdev, buf);

 kfree(buf);
 wake_up_interruptible(&hdev->debug_wait);
-
 }
 EXPORT_SYMBOL_GPL(hid_dump_input);

@@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
 goto out;
 }

-if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
-err = -ENOMEM;
+err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+if (err) {
 kfree(list);
 goto out;
 }
@ -1107,77 +1102,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
|
||||||
size_t count, loff_t *ppos)
|
size_t count, loff_t *ppos)
|
||||||
{
|
{
|
||||||
struct hid_debug_list *list = file->private_data;
|
struct hid_debug_list *list = file->private_data;
|
||||||
int ret = 0, len;
|
int ret = 0, copied;
|
||||||
DECLARE_WAITQUEUE(wait, current);
|
DECLARE_WAITQUEUE(wait, current);
|
||||||
|
|
||||||
mutex_lock(&list->read_mutex);
|
mutex_lock(&list->read_mutex);
|
||||||
while (ret == 0) {
|
if (kfifo_is_empty(&list->hid_debug_fifo)) {
|
||||||
if (list->head == list->tail) {
|
add_wait_queue(&list->hdev->debug_wait, &wait);
|
||||||
add_wait_queue(&list->hdev->debug_wait, &wait);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
|
||||||
|
|
||||||
while (list->head == list->tail) {
|
while (kfifo_is_empty(&list->hid_debug_fifo)) {
|
||||||
if (file->f_flags & O_NONBLOCK) {
|
if (file->f_flags & O_NONBLOCK) {
|
||||||
ret = -EAGAIN;
|
ret = -EAGAIN;
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
if (signal_pending(current)) {
|
|
||||||
ret = -ERESTARTSYS;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!list->hdev || !list->hdev->debug) {
|
|
||||||
ret = -EIO;
|
|
||||||
set_current_state(TASK_RUNNING);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* allow O_NONBLOCK from other threads */
|
|
||||||
mutex_unlock(&list->read_mutex);
|
|
||||||
schedule();
|
|
||||||
mutex_lock(&list->read_mutex);
|
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
set_current_state(TASK_RUNNING);
|
if (signal_pending(current)) {
|
||||||
remove_wait_queue(&list->hdev->debug_wait, &wait);
|
ret = -ERESTARTSYS;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* if list->hdev is NULL we cannot remove_wait_queue().
|
||||||
|
* if list->hdev->debug is 0 then hid_debug_unregister()
|
||||||
|
* was already called and list->hdev is being destroyed.
|
||||||
|
* if we add remove_wait_queue() here we can hit a race.
|
||||||
|
*/
|
||||||
|
if (!list->hdev || !list->hdev->debug) {
|
||||||
|
ret = -EIO;
|
||||||
|
set_current_state(TASK_RUNNING);
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* allow O_NONBLOCK from other threads */
|
||||||
|
mutex_unlock(&list->read_mutex);
|
||||||
|
schedule();
|
||||||
|
mutex_lock(&list->read_mutex);
|
||||||
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
__set_current_state(TASK_RUNNING);
|
||||||
|
remove_wait_queue(&list->hdev->debug_wait, &wait);
|
||||||
|
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/* pass the ringbuffer contents to userspace */
|
|
||||||
copy_rest:
|
|
||||||
if (list->tail == list->head)
|
|
||||||
goto out;
|
|
||||||
if (list->tail > list->head) {
|
|
||||||
len = list->tail - list->head;
|
|
||||||
if (len > count)
|
|
||||||
len = count;
|
|
||||||
|
|
||||||
if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
|
|
||||||
ret = -EFAULT;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
ret += len;
|
|
||||||
list->head += len;
|
|
||||||
} else {
|
|
||||||
len = HID_DEBUG_BUFSIZE - list->head;
|
|
||||||
if (len > count)
|
|
||||||
len = count;
|
|
||||||
|
|
||||||
if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
|
|
||||||
ret = -EFAULT;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
list->head = 0;
|
|
||||||
ret += len;
|
|
||||||
count -= len;
|
|
||||||
if (count > 0)
|
|
||||||
goto copy_rest;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* pass the fifo content to userspace, locking is not needed with only
|
||||||
|
* one concurrent reader and one concurrent writer
|
||||||
|
*/
|
||||||
|
ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
ret = copied;
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&list->read_mutex);
|
mutex_unlock(&list->read_mutex);
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -1188,7 +1163,7 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
 struct hid_debug_list *list = file->private_data;

 poll_wait(file, &list->hdev->debug_wait, wait);
-if (list->head != list->tail)
+if (!kfifo_is_empty(&list->hid_debug_fifo))
 return POLLIN | POLLRDNORM;
 if (!list->hdev->debug)
 return POLLERR | POLLHUP;
@@ -1203,7 +1178,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
 list_del(&list->node);
 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
-kfree(list->hid_debug_buf);
+kfifo_free(&list->hid_debug_fifo);
 kfree(list);

 return 0;
@@ -1254,4 +1229,3 @@ void hid_debug_exit(void)
 {
 debugfs_remove_recursive(hid_debug_root);
 }
-
@@ -703,7 +703,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
 data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
 data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
 data_pointer->led_mute.dev = dev;
-led_classdev_register(dev, &data_pointer->led_mute);
+ret = led_classdev_register(dev, &data_pointer->led_mute);
+if (ret < 0)
+goto err;

 data_pointer->led_micmute.name = name_micmute;
 data_pointer->led_micmute.brightness_get =
@@ -711,7 +713,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
 data_pointer->led_micmute.brightness_set =
 lenovo_led_brightness_set_tpkbd;
 data_pointer->led_micmute.dev = dev;
-led_classdev_register(dev, &data_pointer->led_micmute);
+ret = led_classdev_register(dev, &data_pointer->led_micmute);
+if (ret < 0) {
+led_classdev_unregister(&data_pointer->led_mute);
+goto err;
+}

 lenovo_features_set_tpkbd(hdev);

@@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
 struct i2c_client *client = data->client;
 unsigned long min, val;
 u8 reg;
-int err = kstrtoul(buf, 10, &val);
-if (err < 0)
-return err;
+int rv;
+rv = kstrtoul(buf, 10, &val);
+if (rv < 0)
+return rv;

 /* Save fan_min */
 mutex_lock(&data->update_lock);
@@ -390,8 +392,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
 return -EINVAL;
 }

-reg = (lm80_read_value(client, LM80_REG_FANDIV) &
-~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
+rv = lm80_read_value(client, LM80_REG_FANDIV);
+if (rv < 0)
+return rv;
+reg = (rv & ~(3 << (2 * (nr + 1))))
+| (data->fan_div[nr] << (2 * (nr + 1)));
 lm80_write_value(client, LM80_REG_FANDIV, reg);

 /* Restore fan_min */
@@ -623,6 +628,7 @@ static int lm80_probe(struct i2c_client *client,
 struct device *dev = &client->dev;
 struct device *hwmon_dev;
 struct lm80_data *data;
+int rv;

 data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
 if (!data)
@@ -635,8 +641,14 @@ static int lm80_probe(struct i2c_client *client,
 lm80_init_client(client);

 /* A few vars need to be filled upon startup */
-data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
-data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+if (rv < 0)
+return rv;
+data->fan[f_min][0] = rv;
+rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+if (rv < 0)
+return rv;
+data->fan[f_min][1] = rv;

 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
 data, lm80_groups);
@@ -296,22 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
 i2c_int_disable(idev, MST_STATUS_TFL);
 }

-if (status & MST_STATUS_SCC) {
-/* Stop completed */
-i2c_int_disable(idev, ~MST_STATUS_TSS);
-complete(&idev->msg_complete);
-} else if (status & MST_STATUS_SNS) {
-/* Transfer done */
-i2c_int_disable(idev, ~MST_STATUS_TSS);
-if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
-axxia_i2c_empty_rx_fifo(idev);
-complete(&idev->msg_complete);
-} else if (status & MST_STATUS_TSS) {
-/* Transfer timeout */
-idev->msg_err = -ETIMEDOUT;
-i2c_int_disable(idev, ~MST_STATUS_TSS);
-complete(&idev->msg_complete);
-} else if (unlikely(status & MST_STATUS_ERR)) {
+if (unlikely(status & MST_STATUS_ERR)) {
 /* Transfer error */
 i2c_int_disable(idev, ~0);
 if (status & MST_STATUS_AL)
@@ -328,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
 readl(idev->base + MST_TX_BYTES_XFRD),
 readl(idev->base + MST_TX_XFER));
 complete(&idev->msg_complete);
+} else if (status & MST_STATUS_SCC) {
+/* Stop completed */
+i2c_int_disable(idev, ~MST_STATUS_TSS);
+complete(&idev->msg_complete);
+} else if (status & MST_STATUS_SNS) {
+/* Transfer done */
+i2c_int_disable(idev, ~MST_STATUS_TSS);
+if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+axxia_i2c_empty_rx_fifo(idev);
+complete(&idev->msg_complete);
+} else if (status & MST_STATUS_TSS) {
+/* Transfer timeout */
+idev->msg_err = -ETIMEDOUT;
+i2c_int_disable(idev, ~MST_STATUS_TSS);
+complete(&idev->msg_complete);
 }

 out:
@@ -482,13 +482,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
 idev->close = bma150_irq_close;
 input_set_drvdata(idev, bma150);

+bma150->input = idev;
+
 error = input_register_device(idev);
 if (error) {
 input_free_device(idev);
 return error;
 }

-bma150->input = idev;
 return 0;
 }

@@ -511,15 +512,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)

 bma150_init_input_device(bma150, ipoll_dev->input);

+bma150->input_polled = ipoll_dev;
+bma150->input = ipoll_dev->input;
+
 error = input_register_polled_device(ipoll_dev);
 if (error) {
 input_free_polled_device(ipoll_dev);
 return error;
 }

-bma150->input_polled = ipoll_dev;
-bma150->input = ipoll_dev->input;
-
 return 0;
 }

@@ -1240,7 +1240,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
 static const struct acpi_device_id elan_acpi_id[] = {
 { "ELAN0000", 0 },
 { "ELAN0100", 0 },
-{ "ELAN0501", 0 },
 { "ELAN0600", 0 },
 { "ELAN0602", 0 },
 { "ELAN0605", 0 },
@@ -1251,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 { "ELAN060C", 0 },
 { "ELAN0611", 0 },
 { "ELAN0612", 0 },
+{ "ELAN0617", 0 },
 { "ELAN0618", 0 },
 { "ELAN061C", 0 },
 { "ELAN061D", 0 },
@@ -1121,6 +1121,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
 * Asus UX31 0x361f00 20, 15, 0e clickpad
 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
 * Avatar AVIU-145A2 0x361f00 ? clickpad
+* Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
+* Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1173,6 +1175,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
 },
 },
+{
+/* Fujitsu H780 also has a middle button */
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+},
+},
 #endif
 { }
 };
@@ -683,7 +683,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q)
 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

 q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
-writel(q->cons, q->cons_reg);
+/*
+* Ensure that all CPU accesses (reads and writes) to the queue
+* are complete before we update the cons pointer.
+*/
+mb();
+writel_relaxed(q->cons, q->cons_reg);
 }

 static int queue_sync_prod(struct arm_smmu_queue *q)
@@ -1169,11 +1169,13 @@ HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
 if (cs->debug & L1_DEB_LAPD)
 debugl1(cs, "-> PH_REQUEST_PULL");
 #endif
+spin_lock_irqsave(&cs->lock, flags);
 if (!cs->tx_skb) {
 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
 } else
 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
+spin_unlock_irqrestore(&cs->lock, flags);
 break;
 case (HW_RESET | REQUEST):
 spin_lock_irqsave(&cs->lock, flags);
@@ -256,6 +256,7 @@ struct pool {

 spinlock_t lock;
 struct bio_list deferred_flush_bios;
+struct bio_list deferred_flush_completions;
 struct list_head prepared_mappings;
 struct list_head prepared_discards;
 struct list_head active_thins;
@@ -920,6 +921,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 mempool_free(m, m->tc->pool->mapping_pool);
 }

+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+struct pool *pool = tc->pool;
+unsigned long flags;
+
+/*
+* If the bio has the REQ_FUA flag set we must commit the metadata
+* before signaling its completion.
+*/
+if (!bio_triggers_commit(tc, bio)) {
+bio_endio(bio);
+return;
+}
+
+/*
+* Complete bio with an error if earlier I/O caused changes to the
+* metadata that can't be committed, e.g, due to I/O errors on the
+* metadata device.
+*/
+if (dm_thin_aborted_changes(tc->td)) {
+bio_io_error(bio);
+return;
+}
+
+/*
+* Batch together any bios that trigger commits and then issue a
+* single commit for them in process_deferred_bios().
+*/
+spin_lock_irqsave(&pool->lock, flags);
+bio_list_add(&pool->deferred_flush_completions, bio);
+spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 struct thin_c *tc = m->tc;
@@ -952,7 +986,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 */
 if (bio) {
 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-bio_endio(bio);
+complete_overwrite_bio(tc, bio);
 } else {
 inc_all_io_entry(tc->pool, m->cell->holder);
 remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -2228,7 +2262,7 @@ static void process_deferred_bios(struct pool *pool)
 {
 unsigned long flags;
 struct bio *bio;
-struct bio_list bios;
+struct bio_list bios, bio_completions;
 struct thin_c *tc;

 tc = get_first_thin(pool);
@@ -2239,26 +2273,36 @@ static void process_deferred_bios(struct pool *pool)
 }

 /*
-* If there are any deferred flush bios, we must commit
-* the metadata before issuing them.
+* If there are any deferred flush bios, we must commit the metadata
+* before issuing them or signaling their completion.
 */
 bio_list_init(&bios);
+bio_list_init(&bio_completions);
+
 spin_lock_irqsave(&pool->lock, flags);
 bio_list_merge(&bios, &pool->deferred_flush_bios);
 bio_list_init(&pool->deferred_flush_bios);
+
+bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+bio_list_init(&pool->deferred_flush_completions);
 spin_unlock_irqrestore(&pool->lock, flags);

-if (bio_list_empty(&bios) &&
+if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 return;

 if (commit(pool)) {
+bio_list_merge(&bios, &bio_completions);
+
 while ((bio = bio_list_pop(&bios)))
 bio_io_error(bio);
 return;
 }
 pool->last_commit_jiffies = jiffies;

+while ((bio = bio_list_pop(&bio_completions)))
+bio_endio(bio);
+
 while ((bio = bio_list_pop(&bios)))
 generic_make_request(bio);
 }
@@ -2885,6 +2929,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 spin_lock_init(&pool->lock);
 bio_list_init(&pool->deferred_flush_bios);
+bio_list_init(&pool->deferred_flush_completions);
 INIT_LIST_HEAD(&pool->prepared_mappings);
 INIT_LIST_HEAD(&pool->prepared_discards);
 INIT_LIST_HEAD(&pool->active_thins);
@@ -753,7 +753,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 if (ret) {
 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
 def_output);
-return ret;
+goto fail_kfree_amp;
 }

 printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
@@ -761,12 +761,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 if (ret) {
 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
 def_mode);
-return ret;
+goto fail_kfree_amp;
 }
 vpbe_dev->initialized = 1;
 /* TBD handling of bootargs for default output and mode */
 return 0;

+fail_kfree_amp:
+mutex_lock(&vpbe_dev->lock);
+kfree(vpbe_dev->amp);
 fail_kfree_encoders:
 kfree(vpbe_dev->encoders);
 fail_dev_unregister:
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>

 #define DRIVER_NAME "memstick"

@@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work)
 struct memstick_dev *card;

 dev_dbg(&host->dev, "memstick_check started\n");
+pm_runtime_get_noresume(host->dev.parent);
 mutex_lock(&host->lock);
 if (!host->card) {
 if (memstick_power_on(host))
@@ -479,6 +481,7 @@ out_power_off:
 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);

 mutex_unlock(&host->lock);
+pm_runtime_put(host->dev.parent);
 dev_dbg(&host->dev, "memstick_check finished\n");
 }

@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
 int tries;
 long timeout;

-if (WARN_ON(index > func->num_templates))
+if (WARN_ON(index >= func->num_templates))
 return -EINVAL;

 command = readl(syscfg->base + SYS_CFGCTRL);
@@ -168,9 +168,10 @@ int gpmi_init(struct gpmi_nand_data *this)

 /*
 * Reset BCH here, too. We got failures otherwise :(
-* See later BCH reset for explanation of MX23 handling
+* See later BCH reset for explanation of MX23 and MX28 handling
 */
-ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ret = gpmi_reset_block(r->bch_regs,
+GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
 if (ret)
 goto err_out;

@@ -274,13 +275,11 @@ int bch_set_geometry(struct gpmi_nand_data *this)

 /*
 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
-* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-* On the other hand, the MX28 needs the reset, because one case has been
-* seen where the BCH produced ECC errors constantly after 10000
-* consecutive reboots. The latter case has not been seen on the MX23
-* yet, still we don't know if it could happen there as well.
+* chip, otherwise it will lock up. So we skip resetting BCH on the MX23
+* and MX28.
 */
-ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ret = gpmi_reset_block(r->bch_regs,
+GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
 if (ret)
 goto err_out;

@@ -400,7 +400,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
 struct ethtool_wolinfo *wol)
 {
 struct bcm_sysport_priv *priv = netdev_priv(dev);
-u32 reg;

 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
 wol->wolopts = priv->wolopts;
@@ -408,11 +407,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
 if (!(priv->wolopts & WAKE_MAGICSECURE))
 return;

-/* Return the programmed SecureOn password */
-reg = umac_readl(priv, UMAC_PSW_MS);
-put_unaligned_be16(reg, &wol->sopass[0]);
-reg = umac_readl(priv, UMAC_PSW_LS);
-put_unaligned_be32(reg, &wol->sopass[2]);
+memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
 }

 static int bcm_sysport_set_wol(struct net_device *dev,
@@ -428,13 +423,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
 if (wol->wolopts & ~supported)
 return -EINVAL;

-/* Program the SecureOn password */
-if (wol->wolopts & WAKE_MAGICSECURE) {
-umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
-UMAC_PSW_MS);
-umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
-UMAC_PSW_LS);
-}
+if (wol->wolopts & WAKE_MAGICSECURE)
+memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

 /* Flag the device and relevant IRQ as wakeup capable */
 if (wol->wolopts) {
@@ -1889,12 +1879,17 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 unsigned int timeout = 1000;
 u32 reg;

-/* Password has already been programmed */
 reg = umac_readl(priv, UMAC_MPD_CTRL);
 reg |= MPD_EN;
 reg &= ~PSW_EN;
-if (priv->wolopts & WAKE_MAGICSECURE)
+if (priv->wolopts & WAKE_MAGICSECURE) {
+/* Program the SecureOn password */
+umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+UMAC_PSW_MS);
+umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+UMAC_PSW_LS);
 reg |= PSW_EN;
+}
 umac_writel(priv, reg, UMAC_MPD_CTRL);

 /* Make sure RBUF entered WoL mode as result */
@@ -11,6 +11,7 @@
 #ifndef __BCM_SYSPORT_H
 #define __BCM_SYSPORT_H

+#include <linux/ethtool.h>
 #include <linux/if_vlan.h>

 /* Receive/transmit descriptor format */
@@ -682,6 +683,7 @@ struct bcm_sysport_priv {
 unsigned int crc_fwd:1;
 u16 rev;
 u32 wolopts;
+u8 sopass[SOPASS_MAX];
 unsigned int wol_irq_disabled:1;

 /* MIB related fields */
@@ -1180,7 +1180,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 * CHECSUM_UNNECESSARY.
 */
 if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
-ipv4_csum_ok)
+(ipv4_csum_ok || ipv6))
 skb->ip_summed = CHECKSUM_UNNECESSARY;

 if (vlan_stripped)
@@ -7339,9 +7339,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 rtnl_unlock();

 #ifdef CONFIG_PM
-retval = pci_save_state(pdev);
-if (retval)
-return retval;
+if (!runtime) {
+retval = pci_save_state(pdev);
+if (retval)
+return retval;
+}
 #endif

 status = rd32(E1000_STATUS);
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 memset(p, 0, regs->len);
 memcpy_fromio(p, io, B3_RAM_ADDR);

-memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
-regs->len - B3_RI_WTO_R1);
+if (regs->len > B3_RI_WTO_R1) {
+memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+regs->len - B3_RI_WTO_R1);
+}
 }

 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
@@ -8121,6 +8121,8 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 start += 3;

 prop_len = niu_pci_eeprom_read(np, start + 4);
+if (prop_len < 0)
+return prop_len;
 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 if (err < 0)
 return err;
@@ -8165,8 +8167,12 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 netif_printk(np, probe, KERN_DEBUG, np->dev,
 "VPD_SCAN: Reading in property [%s] len[%d]\n",
 namebuf, prop_len);
-for (i = 0; i < prop_len; i++)
-*prop_buf++ = niu_pci_eeprom_read(np, off + i);
+for (i = 0; i < prop_len; i++) {
+err = niu_pci_eeprom_read(np, off + i);
+if (err >= 0)
+*prop_buf = err;
+++prop_buf;
+}
 }

 start += len;
@@ -890,14 +890,14 @@ static void decode_txts(struct dp83640_private *dp83640,
 struct phy_txts *phy_txts)
 {
 struct skb_shared_hwtstamps shhwtstamps;
+struct dp83640_skb_info *skb_info;
 struct sk_buff *skb;
-u64 ns;
 u8 overflow;
+u64 ns;

 /* We must already have the skb that triggered this. */
+again:
 skb = skb_dequeue(&dp83640->tx_queue);

 if (!skb) {
 pr_debug("have timestamp but tx_queue empty\n");
 return;
@@ -912,6 +912,11 @@ static void decode_txts(struct dp83640_private *dp83640,
 }
 return;
 }
+skb_info = (struct dp83640_skb_info *)skb->cb;
+if (time_after(jiffies, skb_info->tmo)) {
+kfree_skb(skb);
+goto again;
+}

 ns = phy2txts(phy_txts);
 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1461,6 +1466,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 static void dp83640_txtstamp(struct phy_device *phydev,
 struct sk_buff *skb, int type)
 {
+struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
 struct dp83640_private *dp83640 = phydev->priv;

 switch (dp83640->hwts_tx_en) {
@@ -1473,6 +1479,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
 /* fall through */
 case HWTSTAMP_TX_ON:
 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 skb_queue_tail(&dp83640->tx_queue, skb);
 break;

@@ -255,14 +255,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 tx_overhead = 0x40;

 len = skb->len;
-if (skb_headroom(skb) < tx_overhead) {
-struct sk_buff *skb2;
-
-skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+if (skb_cow_head(skb, tx_overhead)) {
 dev_kfree_skb_any(skb);
-skb = skb2;
-if (!skb)
-return NULL;
+return NULL;
 }

 __skb_push(skb, tx_overhead);
@@ -812,18 +812,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
 }

 /* We now decide whether we can put our special header into the sk_buff */
-if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-/* no such luck - we make our own */
-struct sk_buff *copied_skb;
-copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-dev_kfree_skb_irq(skb);
-skb = copied_skb;
-if (!copied_skb) {
-kaweth->stats.tx_errors++;
-netif_start_queue(net);
-spin_unlock_irq(&kaweth->device_lock);
-return NETDEV_TX_OK;
-}
+if (skb_cow_head(skb, 2)) {
+kaweth->stats.tx_errors++;
+netif_start_queue(net);
+spin_unlock_irq(&kaweth->device_lock);
+dev_kfree_skb_any(skb);
+return NETDEV_TX_OK;
 }

 private_header = (__le16 *)__skb_push(skb, 2);
@@ -1838,13 +1838,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 /* We do not advertise SG, so skbs should be already linearized */
 BUG_ON(skb_shinfo(skb)->nr_frags);

-if (skb_headroom(skb) < overhead) {
-struct sk_buff *skb2 = skb_copy_expand(skb,
-overhead, 0, flags);
+/* Make writable and expand header space by overhead if required */
+if (skb_cow_head(skb, overhead)) {
+/* Must deallocate here as returning NULL to indicate error
+* means the skb won't be deallocated in the caller.
+*/
 dev_kfree_skb_any(skb);
-skb = skb2;
-if (!skb)
-return NULL;
+return NULL;
 }

 if (csum) {
@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
 if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
 return -EINVAL;

+/* will be unlocked in cw1200_scan_work() */
+down(&priv->scan.lock);
+mutex_lock(&priv->conf_mutex);
+
 frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
 req->ie_len);
 if (!frame.skb)
@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
 if (req->ie_len)
 memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);

-/* will be unlocked in cw1200_scan_work() */
-down(&priv->scan.lock);
-mutex_lock(&priv->conf_mutex);
-
 ret = wsm_set_template_frame(priv, &frame);
 if (!ret) {
 /* Host want to be the probe responder. */
 ret = wsm_set_probe_responder(priv, true);
 }
 if (ret) {
+dev_kfree_skb(frame.skb);
 mutex_unlock(&priv->conf_mutex);
 up(&priv->scan.lock);
-dev_kfree_skb(frame.skb);
 return ret;
 }

@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
 ++priv->scan.n_ssids;
 }

-mutex_unlock(&priv->conf_mutex);
-
 if (frame.skb)
 dev_kfree_skb(frame.skb);
+mutex_unlock(&priv->conf_mutex);
 queue_work(priv->workqueue, &priv->scan.work);
 return 0;
 }
@@ -24,7 +24,7 @@
 #include <linux/completion.h>
 #include <linux/firmware.h>
 #include <linux/nfc.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>

 #include "nxp-nci.h"

@@ -36,7 +36,7 @@
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/platform_data/nxp-nci.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>

 #include <net/nfc/nfc.h>

@@ -806,11 +806,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
 return ret;
 }

-ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
-if (ret) {
-dev_err(pctrl->dev, "Failed to add pin range\n");
-gpiochip_remove(&pctrl->chip);
-return ret;
+/*
+* For DeviceTree-supported systems, the gpio core checks the
+* pinctrl's device node for the "gpio-ranges" property.
+* If it is present, it takes care of adding the pin ranges
+* for the driver. In this case the driver can skip ahead.
+*
+* In order to remain compatible with older, existing DeviceTree
+* files which don't set the "gpio-ranges" property or systems that
+* utilize ACPI the driver has to call gpiochip_add_pin_range().
+*/
+if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
+ret = gpiochip_add_pin_range(&pctrl->chip,
+dev_name(pctrl->dev), 0, 0, chip->ngpio);
+if (ret) {
+dev_err(pctrl->dev, "Failed to add pin range\n");
+gpiochip_remove(&pctrl->chip);
+return ret;
+}
 }

 ret = gpiochip_irqchip_add(chip,
@@ -204,7 +204,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 pct->sec = ts.tv_sec;
 pct->nsec = ts.tv_nsec;
 pct++;
-ptp->info->gettime64(ptp->info, &ts);
+err = ptp->info->gettime64(ptp->info, &ts);
+if (err)
+goto out;
 pct->sec = ts.tv_sec;
 pct->nsec = ts.tv_nsec;
 pct++;
@@ -257,6 +259,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 break;
 }

+out:
 kfree(sysoff);
 return err;
 }
@@ -5105,6 +5105,9 @@ error:
 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;

+if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 phba->fc_stat.elsXmitLSRJT++;
 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = {

 bool soc_is_tegra(void)
 {
+const struct of_device_id *match;
 struct device_node *root;

 root = of_find_node_by_path("/");
 if (!root)
 return false;

-return of_match_node(tegra_machine_match, root) != NULL;
+match = of_match_node(tegra_machine_match, root);
+of_node_put(root);
+
+return match != NULL;
 }

@@ -250,7 +250,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
 if (ret)
 return ret;

-__ad7280_read32(st, &tmp);
+ret = __ad7280_read32(st, &tmp);
+if (ret)
+return ret;

 if (ad7280_check_crc(st, tmp))
 return -EIO;
@@ -288,7 +290,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,

 ad7280_delay(st);

-__ad7280_read32(st, &tmp);
+ret = __ad7280_read32(st, &tmp);
+if (ret)
+return ret;

 if (ad7280_check_crc(st, tmp))
 return -EIO;
@@ -321,7 +325,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
 ad7280_delay(st);

 for (i = 0; i < cnt; i++) {
-__ad7280_read32(st, &tmp);
+ret = __ad7280_read32(st, &tmp);
+if (ret)
+return ret;

 if (ad7280_check_crc(st, tmp))
 return -EIO;
@@ -364,7 +370,10 @@ static int ad7280_chain_setup(struct ad7280_state *st)
 return ret;

 for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
-__ad7280_read32(st, &val);
+ret = __ad7280_read32(st, &val);
+if (ret)
+return ret;
+
 if (val == 0)
 return n - 1;

@@ -90,12 +90,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
 long m)
 {
 struct ad7780_state *st = iio_priv(indio_dev);
+int voltage_uv;

 switch (m) {
 case IIO_CHAN_INFO_RAW:
 return ad_sigma_delta_single_conversion(indio_dev, chan, val);
 case IIO_CHAN_INFO_SCALE:
-*val = st->int_vref_mv * st->gain;
+voltage_uv = regulator_get_voltage(st->reg);
+if (voltage_uv < 0)
+return voltage_uv;
+*val = (voltage_uv / 1000) * st->gain;
 *val2 = chan->scan_type.realbits - 1;
 return IIO_VAL_FRACTIONAL_LOG2;
 case IIO_CHAN_INFO_OFFSET:
@@ -86,7 +86,12 @@ static int ad2s90_probe(struct spi_device *spi)
 /* need 600ns between CS and the first falling edge of SCLK */
 spi->max_speed_hz = 830000;
 spi->mode = SPI_MODE_3;
-spi_setup(spi);
+ret = spi_setup(spi);
+
+if (ret < 0) {
+dev_err(&spi->dev, "spi_setup failed!\n");
+return ret;
+}

 return 0;
 }

@@ -34,13 +34,13 @@
 int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
 #else
-static int
+static inline int
 thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 {
 return 0;
 }

-static void
+static inline void
 thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 {
 }

@@ -1267,6 +1267,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
 else
 cr1 &= ~UARTCR1_PT;
 }
+} else {
+cr1 &= ~UARTCR1_PE;
 }

 /* ask the core to calculate the divisor */
@@ -1402,6 +1404,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
 else
 ctrl &= ~UARTCTRL_PT;
 }
+} else {
+ctrl &= ~UARTCTRL_PE;
 }

 /* ask the core to calculate the divisor */
@@ -1329,11 +1329,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 wr_regl(port, S3C2410_ULCON, ulcon);
 wr_regl(port, S3C2410_UBRDIV, quot);

+port->status &= ~UPSTAT_AUTOCTS;
+
 umcon = rd_regl(port, S3C2410_UMCON);
 if (termios->c_cflag & CRTSCTS) {
 umcon |= S3C2410_UMCOM_AFC;
 /* Disable RTS when RX FIFO contains 63 bytes */
 umcon &= ~S3C2412_UMCON_AFC_8;
+port->status = UPSTAT_AUTOCTS;
 } else {
 umcon &= ~S3C2410_UMCOM_AFC;
 }

@@ -1092,6 +1092,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 USB_PORT_FEAT_ENABLE);
 }

+/*
+* Add debounce if USB3 link is in polling/link training state.
+* Link will automatically transition to Enabled state after
+* link training completes.
+*/
+if (hub_is_superspeed(hdev) &&
+((portstatus & USB_PORT_STAT_LINK_STATE) ==
+USB_SS_PORT_LS_POLLING))
+need_debounce_delay = true;
+
 /* Clear status-change flags; we'll debounce later */
 if (portchange & USB_PORT_STAT_C_CONNECTION) {
 need_debounce_delay = true;
@@ -3164,7 +3164,6 @@ error3:
 error2:
 usb_put_hcd(hcd);
 error1:
-kfree(hsotg->core_params);

 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
 kfree(hsotg->last_frame_num_array);
@@ -2100,7 +2100,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
 #if defined(PLX_PCI_RDK2)
 /* see if PCI int for us by checking irqstat */
 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
-if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
 spin_unlock(&dev->lock);
 return IRQ_NONE;
 }
@@ -56,9 +56,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
 if (ret)
 return ret;

-ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
-if (ret)
-return ret;
 am_phy->usb_phy_gen.phy.init = am335x_init;
 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;

@@ -77,7 +74,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
 device_set_wakeup_enable(dev, false);
 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);

-return 0;
+return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
 }

 static int am335x_phy_remove(struct platform_device *pdev)
@@ -3032,7 +3032,6 @@ static int fbcon_fb_unbind(int idx)
 for (i = first_fb_vc; i <= last_fb_vc; i++) {
 if (con2fb_map[i] != idx &&
 con2fb_map[i] != -1) {
-new_idx = i;
+new_idx = con2fb_map[i];
 break;
 }
 }

@@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev)
 }

 ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
-if (ret)
+if (ret) {
+of_node_put(disp);
 goto out_fb_release;
+}

 of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
 cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");

 ret = of_property_read_u32(disp, "bits-per-pixel",
 &info->var.bits_per_pixel);
+of_node_put(disp);
 if (ret)
 goto out_fb_release;

@@ -433,7 +433,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
 image->dx += image->width + 8;
 }
 } else if (rotate == FB_ROTATE_UD) {
-for (x = 0; x < num; x++) {
+u32 dx = image->dx;
+
+for (x = 0; x < num && image->dx <= dx; x++) {
 info->fbops->fb_imageblit(info, image);
 image->dx -= image->width + 8;
 }
@@ -445,7 +447,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
 image->dy += image->height + 8;
 }
 } else if (rotate == FB_ROTATE_CCW) {
-for (x = 0; x < num; x++) {
+u32 dy = image->dy;
+
+for (x = 0; x < num && image->dy <= dy; x++) {
 info->fbops->fb_imageblit(info, image);
 image->dy -= image->height + 8;
 }
@@ -111,7 +111,7 @@ config CIFS_XATTR

 config CIFS_POSIX
 bool "CIFS POSIX Extensions"
-depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
+depends on CIFS_XATTR
 help
 Enabling this option will cause the cifs client to attempt to
 negotiate a newer dialect with servers, such as Samba 3.0.5
@@ -1081,6 +1081,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 return -EINVAL;
 }

+BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+PAGE_SIZE);
+max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+PAGE_SIZE);
 max_num = (max_buf - sizeof(struct smb_hdr)) /
 sizeof(LOCKING_ANDX_RANGE);
 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1410,6 +1414,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
 return -EINVAL;

+BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+PAGE_SIZE);
+max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+PAGE_SIZE);
 max_num = (max_buf - sizeof(struct smb_hdr)) /
 sizeof(LOCKING_ANDX_RANGE);
 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -652,7 +652,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 /* scan and find it */
 int i;
 char *cur_ent;
-char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+char *end_of_smb;
+
+if (cfile->srch_inf.ntwrk_buf_start == NULL) {
+cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
+return -EIO;
+}
+
+end_of_smb = cfile->srch_inf.ntwrk_buf_start +
 server->ops->calc_smb_size(
 cfile->srch_inf.ntwrk_buf_start);

@@ -129,6 +129,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
 if (max_buf < sizeof(struct smb2_lock_element))
 return -EINVAL;

+BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
 max_num = max_buf / sizeof(struct smb2_lock_element);
 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
 if (!buf)
@@ -265,6 +267,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
 return -EINVAL;
 }

+BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
 max_num = max_buf / sizeof(struct smb2_lock_element);
 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
 if (!buf) {
@@ -671,6 +671,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
 struct dentry *dentry = NULL, *trap;
 struct name_snapshot old_name;

+if (IS_ERR(old_dir))
+return old_dir;
+if (IS_ERR(new_dir))
+return new_dir;
+if (IS_ERR_OR_NULL(old_dentry))
+return old_dentry;
+
 trap = lock_rename(new_dir, old_dir);
 /* Source or destination directories don't exist? */
 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
fs/dlm/ast.c (10 lines changed)
@@ -290,6 +290,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 flush_workqueue(ls->ls_callback_wq);
 }

+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 struct dlm_lkb *lkb, *safe;
@@ -300,15 +302,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 if (!ls->ls_callback_wq)
 return;

+more:
 mutex_lock(&ls->ls_cb_mutex);
 list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 list_del_init(&lkb->lkb_cb_list);
 queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 count++;
+if (count == MAX_CB_QUEUE)
+break;
 }
 mutex_unlock(&ls->ls_cb_mutex);

 if (count)
 log_rinfo(ls, "dlm_callback_resume %d", count);
+if (count == MAX_CB_QUEUE) {
+count = 0;
+cond_resched();
+goto more;
+}
 }

Some files were not shown because too many files have changed in this diff.