This is the 4.4.123 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlqzaAQACgkQONu9yGCS
 aT6/iw//SWWvNvIYVs9SUcAk1lLRdxIPEJ2mR4CELU8S9WblInX8MS/w3+qGUGVt
 9LzQUfxqbwpX0luel6QnyvME66qQI+VQ0CzA7Qtuzoy2UNlr5woW112nOzkBOuKt
 +KjHg3cUwPijljEZnhPq7eEv6TrHrb/ZsU4jjTLMrlL1u/TYfJQGrU+JAFpTbHto
 9IyVu/F80E/Vln2GHwYidHUbWG70lYIY75J/CXvklbfrxuRpsrOhQla52YspzI2p
 B9Q5p6Ct7FIO1SEBdVdBYLMMBbrt/+dbQzHMRWyUtTudiCSgeQ+8JEWRo6Pu+dGs
 1cW9uCJ+Q7AolQWicG/H80YBZczkB/p77PRCua2vVpx8e/FYSujP3E5T8Mnl7k81
 mmSoNRF8SqYXSwWDCqd+c/ck4zPRKPM++eNpkor+M0QyT9MOfh9oq4vIW4HoZQEm
 j+mZWUNihLO+O7znNGLoQLWn2mzDLjcrfnM6DOaKc5FJht/7OG8yyaobINYoUCmA
 +1cQP0RsRrWo87zpcWoHKRSZ6Iw+YtEu3i7q2bqEdtb2Jc/88nQ5/eYcBLYDaPtw
 DZ8GXL8kxXCP2J2GumwHIBTY68yZPVBknABGo5odqZwfkSgjK5fL17MzWX10Occp
 0U6r+IluHkRCSxcCuyS8w10+TPn/Rw26wJCstPYargfiw/twILA=
 =H2Hj
 -----END PGP SIGNATURE-----

Merge 4.4.123 into android-4.4

Changes in 4.4.123
	blkcg: fix double free of new_blkg in blkcg_init_queue
	Input: tsc2007 - check for presence and power down tsc2007 during probe
	staging: speakup: Replace BUG_ON() with WARN_ON().
	staging: wilc1000: add check for kmalloc allocation failure.
	HID: reject input outside logical range only if null state is set
	drm: qxl: Don't alloc fbdev if emulation is not supported
	ath10k: fix a warning during channel switch with multiple vaps
	PCI/MSI: Stop disabling MSI/MSI-X in pci_device_shutdown()
	selinux: check for address length in selinux_socket_bind()
	perf sort: Fix segfault with basic block 'cycles' sort dimension
	i40e: Acquire NVM lock before reads on all devices
	i40e: fix ethtool to get EEPROM data from X722 interface
	perf tools: Make perf_event__synthesize_mmap_events() scale
	drivers: net: xgene: Fix hardware checksum setting
	drm: Defer disabling the vblank IRQ until the next interrupt (for instant-off)
	ath10k: disallow DFS simulation if DFS channel is not enabled
	perf probe: Return errno when not hitting any event
	HID: clamp input to logical range if no null state
	net/8021q: create device with all possible features in wanted_features
	ARM: dts: Adjust moxart IRQ controller and flags
	batman-adv: handle race condition for claims between gateways
	of: fix of_device_get_modalias returned length when truncating buffers
	solo6x10: release vb2 buffers in solo_stop_streaming()
	scsi: ipr: Fix missed EH wakeup
	media: i2c/soc_camera: fix ov6650 sensor getting wrong clock
	timers, sched_clock: Update timeout for clock wrap
	sysrq: Reset the watchdog timers while displaying high-resolution timers
	Input: qt1070 - add OF device ID table
	sched: act_csum: don't mangle TCP and UDP GSO packets
	ASoC: rcar: ssi: don't set SSICR.CKDV = 000 with SSIWSR.CONT
	spi: omap2-mcspi: poll OMAP2_MCSPI_CHSTAT_RXS for PIO transfer
	tcp: sysctl: Fix a race to avoid unexpected 0 window from space
	dmaengine: imx-sdma: add 1ms delay to ensure SDMA channel is stopped
	driver: (adm1275) set the m,b and R coefficients correctly for power
	mm: Fix false-positive VM_BUG_ON() in page_cache_{get,add}_speculative()
	blk-throttle: make sure expire time isn't too big
	f2fs: relax node version check for victim data in gc
	bonding: refine bond_fold_stats() wrap detection
	braille-console: Fix value returned by _braille_console_setup
	drm/vmwgfx: Fixes to vmwgfx_fb
	vxlan: vxlan dev should inherit lowerdev's gso_max_size
	NFC: nfcmrvl: Include unaligned.h instead of access_ok.h
	NFC: nfcmrvl: double free on error path
	ARM: dts: r8a7790: Correct parent of SSI[0-9] clocks
	ARM: dts: r8a7791: Correct parent of SSI[0-9] clocks
	powerpc: Avoid taking a data miss on every userspace instruction miss
	net/faraday: Add missing include of of.h
	ARM: dts: koelsch: Correct clock frequency of X2 DU clock input
	reiserfs: Make cancel_old_flush() reliable
	ALSA: firewire-digi00x: handle all MIDI messages on streaming packets
	fm10k: correctly check if interface is removed
	scsi: ses: don't get power status of SES device slot on probe
	apparmor: Make path_max parameter readonly
	iommu/iova: Fix underflow bug in __alloc_and_insert_iova_range
	video: ARM CLCD: fix dma allocation size
	drm/radeon: Fail fb creation from imported dma-bufs.
	drm/amdgpu: Fail fb creation from imported dma-bufs. (v2)
	coresight: Fixes coresight DT parse to get correct output port ID.
	MIPS: BPF: Quit clobbering callee saved registers in JIT code.
	MIPS: BPF: Fix multiple problems in JIT skb access helpers.
	MIPS: r2-on-r6-emu: Fix BLEZL and BGTZL identification
	MIPS: r2-on-r6-emu: Clear BLTZALL and BGEZALL debugfs counters
	regulator: isl9305: fix array size
	md/raid6: Fix anomily when recovering a single device in RAID6.
	usb: dwc2: Make sure we disconnect the gadget state
	usb: gadget: dummy_hcd: Fix wrong power status bit clear/reset in dummy_hub_control()
	drivers/perf: arm_pmu: handle no platform_device
	perf inject: Copy events when reordering events in pipe mode
	perf session: Don't rely on evlist in pipe mode
	scsi: sg: check for valid direction before starting the request
	scsi: sg: close race condition in sg_remove_sfp_usercontext()
	kprobes/x86: Fix kprobe-booster not to boost far call instructions
	kprobes/x86: Set kprobes pages read-only
	pwm: tegra: Increase precision in PWM rate calculation
	wil6210: fix memory access violation in wil_memcpy_from/toio_32
	drm/edid: set ELD connector type in drm_edid_to_eld()
	video/hdmi: Allow "empty" HDMI infoframes
	HID: elo: clear BTN_LEFT mapping
	ARM: dts: exynos: Correct Trats2 panel reset line
	sched: Stop switched_to_rt() from sending IPIs to offline CPUs
	sched: Stop resched_cpu() from sending IPIs to offline CPUs
	test_firmware: fix setting old custom fw path back on exit
	net: xfrm: allow clearing socket xfrm policies.
	mtd: nand: fix interpretation of NAND_CMD_NONE in nand_command[_lp]()
	ARM: dts: am335x-pepper: Fix the audio CODEC's reset pin
	ARM: dts: omap3-n900: Fix the audio CODEC's reset pin
	ath10k: update tdls teardown state to target
	cpufreq: Fix governor module removal race
	clk: qcom: msm8916: fix mnd_width for codec_digcodec
	ath10k: fix invalid STS_CAP_OFFSET_MASK
	tools/usbip: fixes build with musl libc toolchain
	spi: sun6i: disable/unprepare clocks on remove
	scsi: core: scsi_get_device_flags_keyed(): Always return device flags
	scsi: devinfo: apply to HP XP the same flags as Hitachi VSP
	scsi: dh: add new rdac devices
	media: cpia2: Fix a couple off by one bugs
	veth: set peer GSO values
	drm/amdkfd: Fix memory leaks in kfd topology
	agp/intel: Flush all chipset writes after updating the GGTT
	mac80211_hwsim: enforce PS_MANUAL_POLL to be set after PS_ENABLED
	mac80211: remove BUG() when interface type is invalid
	ASoC: nuc900: Fix a loop timeout test
	ipvlan: add L2 check for packets arriving via virtual devices
	rcutorture/configinit: Fix build directory error message
	ima: relax requiring a file signature for new files with zero length
	selftests/x86/entry_from_vm86: Exit with 1 if we fail
	selftests/x86: Add tests for User-Mode Instruction Prevention
	selftests/x86: Add tests for the STR and SLDT instructions
	selftests/x86/entry_from_vm86: Add test cases for POPF
	x86/vm86/32: Fix POPF emulation
	x86/mm: Fix vmalloc_fault to use pXd_large
	ALSA: pcm: Fix UAF in snd_pcm_oss_get_formats()
	ALSA: hda - Revert power_save option default value
	ALSA: seq: Fix possible UAF in snd_seq_check_queue()
	ALSA: seq: Clear client entry before deleting else at closing
	drm/amdgpu/dce: Don't turn off DP sink when disconnected
	fs: Teach path_connected to handle nfs filesystems with multiple roots.
	lock_parent() needs to recheck if dentry got __dentry_kill'ed under it
	fs/aio: Add explicit RCU grace period when freeing kioctx
	fs/aio: Use RCU accessors for kioctx_table->table[]
	irqchip/gic-v3-its: Ensure nr_ites >= nr_lpis
	scsi: sg: fix SG_DXFER_FROM_DEV transfers
	scsi: sg: fix static checker warning in sg_is_valid_dxfer
	scsi: sg: only check for dxfer_len greater than 256M
	ARM: dts: LogicPD Torpedo: Fix I2C1 pinmux
	btrfs: alloc_chunk: fix DUP stripe size handling
	btrfs: Fix use-after-free when cleaning up fs_devs with a single stale device
	USB: gadget: udc: Add missing platform_device_put() on error in bdc_pci_probe()
	usb: gadget: bdc: 64-bit pointer capability check
	bpf: fix incorrect sign extension in check_alu_op()
	Linux 4.4.123

Change-Id: Ieb89411248f93522dde29edb8581f8ece22e33a7
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2018-03-22 09:57:28 +01:00, commit 79e7124a51: 125 changed files with 841 additions and 331 deletions.


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 122
SUBLEVEL = 123
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -139,7 +139,7 @@
&audio_codec {
status = "okay";
gpio-reset = <&gpio1 16 GPIO_ACTIVE_LOW>;
reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
AVDD-supply = <&ldo3_reg>;
IOVDD-supply = <&ldo3_reg>;
DRVDD-supply = <&ldo3_reg>;


@ -359,7 +359,7 @@
reg = <0>;
vdd3-supply = <&lcd_vdd3_reg>;
vci-supply = <&ldo25_reg>;
reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>;
power-on-delay= <50>;
reset-delay = <100>;
init-delay = <100>;


@ -90,6 +90,8 @@
};
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@ -137,6 +139,12 @@
OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* mcbsp1_fsr.gpio_157 */
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
>;
};
};
&omap3_pmx_core2 {


@ -6,7 +6,7 @@
*/
/dts-v1/;
/include/ "moxart.dtsi"
#include "moxart.dtsi"
/ {
model = "MOXA UC-7112-LX";


@ -6,6 +6,7 @@
*/
/include/ "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
/ {
compatible = "moxa,moxart";
@ -36,8 +37,8 @@
ranges;
intc: interrupt-controller@98800000 {
compatible = "moxa,moxart-ic";
reg = <0x98800000 0x38>;
compatible = "moxa,moxart-ic", "faraday,ftintc010";
reg = <0x98800000 0x100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupt-mask = <0x00080000>;
@ -59,7 +60,7 @@
timer: timer@98400000 {
compatible = "moxa,moxart-timer";
reg = <0x98400000 0x42>;
interrupts = <19 1>;
interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
clocks = <&clk_apb>;
};
@ -80,7 +81,7 @@
dma: dma@90500000 {
compatible = "moxa,moxart-dma";
reg = <0x90500080 0x40>;
interrupts = <24 0>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
};
@ -93,7 +94,7 @@
sdhci: sdhci@98e00000 {
compatible = "moxa,moxart-sdhci";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_apb>;
dmas = <&dma 5>,
<&dma 5>;
@ -120,7 +121,7 @@
mac0: mac@90900000 {
compatible = "moxa,moxart-mac";
reg = <0x90900000 0x90>;
interrupts = <25 0>;
interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy0>;
phy-mode = "mii";
status = "disabled";
@ -129,7 +130,7 @@
mac1: mac@92000000 {
compatible = "moxa,moxart-mac";
reg = <0x92000000 0x90>;
interrupts = <27 0>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy1>;
phy-mode = "mii";
status = "disabled";
@ -138,7 +139,7 @@
uart0: uart@98200000 {
compatible = "ns16550a";
reg = <0x98200000 0x20>;
interrupts = <31 8>;
interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
clock-frequency = <14745600>;


@ -488,7 +488,7 @@
tlv320aic3x: tlv320aic3x@18 {
compatible = "ti,tlv320aic3x";
reg = <0x18>;
gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
ai3x-gpio-func = <
0 /* AIC3X_GPIO1_FUNC_DISABLED */
5 /* AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT */
@ -505,7 +505,7 @@
tlv320aic3x_aux: tlv320aic3x@19 {
compatible = "ti,tlv320aic3x";
reg = <0x19>;
gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
AVDD-supply = <&vmmc2>;
DRVDD-supply = <&vmmc2>;


@ -1360,8 +1360,11 @@
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,


@ -280,7 +280,7 @@
x2_clk: x2-clock {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <148500000>;
clock-frequency = <74250000>;
};
x13_clk: x13-clock {


@ -1374,8 +1374,11 @@
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,


@ -527,7 +527,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
emit_stack_offset(-align_sp(offset), ctx);
if (offset)
emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
@ -579,7 +580,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scrach memory */
emit_stack_offset(align_sp(offset), ctx);
if (offset)
emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
@ -626,8 +628,14 @@ static void build_prologue(struct jit_ctx *ctx)
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
/* Do not leak kernel data to userspace */
if (bpf_needs_clear_a(&ctx->skf->insns[0]))
/*
* Do not leak kernel data to userspace, we only need to clear
* r_A if it is ever used. In fact if it is never used, we
* will not save/restore it, so clearing it in this case would
* corrupt the state of the caller.
*/
if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
(ctx->flags & SEEN_A))
emit_jit_reg_move(r_A, r_zero, ctx);
}


@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
.set reorder
lh $r_A, 0(t1)
.set noreorder
lhu $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
seh $r_A, t0
wsbh $r_A, $r_A
# else
sll t0, $r_A, 24
andi t1, $r_A, 0xff00
sra t0, t0, 16
srl t1, t1, 8
sll t0, $r_A, 8
srl t1, $r_A, 8
andi t0, t0, 0xff00
or $r_A, t0, t1
# endif
#endif
@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
lb $r_A, 0(t1)
lbu $r_A, 0(t1)
jr $r_ra
move $r_ret, zero
END(sk_load_byte)
@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
* (void *to) is returned in r_s0
*
*/
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
#define bpf_slow_path_common(SIZE) \
/* Quick check. Are we within reasonable boundaries? */ \
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
PTR_LA t0, skb_copy_bits; \
PTR_S $r_ra, (5 * SZREG)($r_sp); \
/* Assign low slot to a2 */ \
move a2, $r_sp; \
PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
jalr t0; \
/* Reset our destination slot (DS but it's ok) */ \
INT_S zero, (4 * SZREG)($r_sp); \


@ -294,7 +294,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
* can result in fault, which will cause a deadlock when called with
* mmap_sem held
*/
if (user_mode(regs))
if (!is_exec && user_mode(regs))
store_update_sp = store_updates_sp(regs);
if (user_mode(regs))


@ -196,6 +196,8 @@ retry:
return (opcode != 0x62 && opcode != 0x67);
case 0x70:
return 0; /* can't boost conditional jump */
case 0x90:
return opcode != 0x9a; /* can't boost call far */
case 0xc0:
/* can't boost software-interruptions */
return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
@ -404,6 +406,8 @@ static int arch_copy_kprobe(struct kprobe *p)
{
int ret;
set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
/* Copy an instruction with recovering if other optprobe modifies it.*/
ret = __copy_instruction(p->ainsn.insn, p->addr);
if (!ret)
@ -418,6 +422,8 @@ static int arch_copy_kprobe(struct kprobe *p)
else
p->ainsn.boostable = -1;
set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
/* Check whether the instruction modifies Interrupt Flag or not */
p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);


@ -370,6 +370,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
}
buf = (u8 *)op->optinsn.insn;
set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
/* Copy instructions into the out-of-line buffer */
ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
@ -392,6 +393,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
(u8 *)op->kp.addr + op->optinsn.size);
set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
flush_icache_range((unsigned long) buf,
(unsigned long) buf + TMPL_END_IDX +
op->optinsn.size + RELATIVEJUMP_SIZE);


@ -715,7 +715,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;
check_vip:
if (VEFLAGS & X86_EFLAGS_VIP) {
if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
(X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}


@ -287,7 +287,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;
if (pmd_huge(*pmd_k))
if (pmd_large(*pmd_k))
return 0;
pte_k = pte_offset_kernel(pmd_k, address);
@ -407,7 +407,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
if (pud_huge(*pud))
if (pud_large(*pud))
return 0;
pmd = pmd_offset(pud, address);
@ -418,7 +418,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
if (pmd_huge(*pmd))
if (pmd_large(*pmd))
return 0;
pte_ref = pte_offset_kernel(pmd_ref, address);


@ -1078,10 +1078,8 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
if (IS_ERR(blkg)) {
blkg_free(new_blkg);
if (IS_ERR(blkg))
return PTR_ERR(blkg);
}
q->root_blkg = blkg;
q->root_rl.blkg = blkg;


@ -505,6 +505,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
unsigned long expires)
{
unsigned long max_expire = jiffies + 8 * throtl_slice;
/*
* Since we are adjusting the throttle limit dynamically, the sleep
* time calculated according to previous limit might be invalid. It's
* possible the cgroup sleep time is very long and no other cgroups
* have IO running so notify the limit changes. Make sure the cgroup
* doesn't sleep too long to avoid the missed notification.
*/
if (time_after(expires, max_expire))
expires = max_expire;
mod_timer(&sq->pending_timer, expires);
throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
expires - jiffies, jiffies);


@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
}
wmb();
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);


@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = {
static struct clk_rcg2 codec_digcodec_clk_src = {
.cmd_rcgr = 0x1c09c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll1_emclk_sleep_map,
.freq_tbl = ftbl_codec_clk,


@ -647,6 +647,8 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*governor = t;
err = 0;
}
if (t && !try_module_get(t->owner))
t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
}
@ -765,6 +767,10 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
if (new_policy.governor)
module_put(new_policy.governor->owner);
return ret ? ret : count;
}


@ -911,6 +911,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
sdma_disable_channel(chan);
/*
* According to NXP R&D team a delay of one BD SDMA cost time
* (maximum is 1ms) should be added after disable of the channel
* bit, to ensure SDMA core has really been stopped after SDMA
* clients call .device_terminate_all.
*/
mdelay(1);
return 0;
}
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@ -1793,7 +1808,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);


@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
int saved_dpms = connector->dpms;
/* Only turn off the display if it's physically disconnected */
if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't start link training before we have the DPCD */
if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
connector->dpms = saved_dpms;
/* Turn the connector off and back on immediately, which
* will trigger link training
*/
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}


@ -560,6 +560,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);


@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
static void kfd_topology_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static struct kobj_type sysprops_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = {
};
static struct kobj_type iolink_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = {
};
static struct kobj_type mem_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = {
};
static struct kobj_type cache_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = {
};
static struct kobj_type node_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};


@ -3219,8 +3219,7 @@ monitor_name(struct detailed_timing *t, void *data)
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
* Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
* fill in.
* HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
@ -3293,6 +3292,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
eld[5] |= sad_count << 4;
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
else
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);


@ -1271,9 +1271,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
else if (drm_vblank_offdelay < 0)
vblank_disable_fn((unsigned long)vblank);
else
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
@ -1902,6 +1902,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
wake_up(&vblank->queue);
drm_handle_vblank_events(dev, pipe);
/* With instant-off, we defer disabling the interrupt until after
* we finish processing the following vblank. The disable has to
* be last (after drm_handle_vblank_events) so that the timestamp
* is always accurate.
*/
if (dev->vblank_disable_immediate &&
drm_vblank_offdelay > 0 &&
!atomic_read(&vblank->refcount))
vblank_disable_fn((unsigned long)vblank);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return true;


@ -494,9 +494,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
int qxl_fbdev_init(struct qxl_device *qdev)
{
int ret = 0;
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct qxl_fbdev *qfbdev;
int bpp_sel = 32; /* TODO: parameter from somewhere? */
int ret;
qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
if (!qfbdev)
@ -531,6 +533,8 @@ fini:
drm_fb_helper_fini(&qfbdev->helper);
free:
kfree(qfbdev);
#endif
return ret;
}
@ -546,6 +550,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
if (!qdev->mode_info.qfbdev)
return;
drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}


@ -1374,6 +1374,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);


@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
set.y = 0;
set.mode = NULL;
set.fb = NULL;
set.num_connectors = 1;
set.num_connectors = 0;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&par->local_work);
mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
(void) vmw_fb_kms_detach(par, true, false);
drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return 0;

View file

@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
/*
* ELO devices have one Button usage in GenDesk field, which makes
* hid-input map it to BTN_LEFT; that confuses userspace, which then
* considers the device to be a mouse/touchpad instead of touchscreen.
*/
clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);


@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
/*
* Ignore out-of-range values as per HID specification,
* section 5.10 and 6.2.25.
* section 5.10 and 6.2.25, when NULL state bit is present.
* When it's not, clamp the value to match Microsoft's input
* driver as mentioned in "Required HID usages for digitizers":
* https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp
*
* The logical_minimum < logical_maximum check is done so that we
* don't unintentionally discard values sent by devices which
* don't specify logical min and max.
*/
if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
(field->logical_minimum < field->logical_maximum) &&
(value < field->logical_minimum ||
value > field->logical_maximum)) {
dbg_hid("Ignoring out-of-range value %x\n", value);
return;
(field->logical_minimum < field->logical_maximum)) {
if (field->flags & HID_MAIN_ITEM_NULL_STATE &&
(value < field->logical_minimum ||
value > field->logical_maximum)) {
dbg_hid("Ignoring out-of-range value %x\n", value);
return;
}
value = clamp(value,
field->logical_minimum,
field->logical_maximum);
}
/*


@ -95,8 +95,8 @@ static const struct coefficients adm1075_coefficients[] = {
[0] = { 27169, 0, -1 }, /* voltage */
[1] = { 806, 20475, -1 }, /* current, irange25 */
[2] = { 404, 20475, -1 }, /* current, irange50 */
[3] = { 0, -1, 8549 }, /* power, irange25 */
[4] = { 0, -1, 4279 }, /* power, irange50 */
[3] = { 8549, 0, -1 }, /* power, irange25 */
[4] = { 4279, 0, -1 }, /* power, irange50 */
};
static const struct coefficients adm1275_coefficients[] = {


@ -150,7 +150,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
continue;
/* The local out port number */
pdata->outports[i] = endpoint.id;
pdata->outports[i] = endpoint.port;
/*
* Get a handle on the remote port and parent


@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = {
};
MODULE_DEVICE_TABLE(i2c, qt1070_id);
#ifdef CONFIG_OF
static const struct of_device_id qt1070_of_match[] = {
{ .compatible = "qt1070", },
{ },
};
MODULE_DEVICE_TABLE(of, qt1070_of_match);
#endif
static struct i2c_driver qt1070_driver = {
.driver = {
.name = "qt1070",
.of_match_table = of_match_ptr(qt1070_of_match),
.pm = &qt1070_pm_ops,
},
.id_table = qt1070_id,


@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client,
tsc2007_stop(ts);
/* power down the chip (TSC2007_SETUP does not ACK on I2C) */
err = tsc2007_xfer(ts, PWRDOWN);
if (err < 0) {
dev_err(&client->dev,
"Failed to setup chip: %d\n", err);
return err; /* usually, chip does not respond */
}
err = input_register_device(input_dev);
if (err) {
dev_err(&client->dev,


@ -126,7 +126,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
break; /* found a free slot */
}
adjust_limit_pfn:
limit_pfn = curr_iova->pfn_lo - 1;
limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
prev = curr;
curr = rb_prev(curr);


@ -663,7 +663,7 @@ static struct irq_chip its_irq_chip = {
* This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
*/
#define IRQS_PER_CHUNK_SHIFT 5
#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
@ -1168,11 +1168,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* At least one bit of EventID is being used, hence a minimum
* of two entries. No, the architecture doesn't let you
* express an ITT with a single entry.
* We allocate at least one chunk worth of LPIs bet device,
* and thus that many ITEs. The device may require less though.
*/
nr_ites = max(2UL, roundup_pow_of_two(nvecs));
nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);


@ -3372,9 +3372,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
BUG_ON(sh->batch_head);
/*
* In the raid6 case if the only non-uptodate disk is P
* then we already trusted P to compute the other failed
* drives. It is safe to compute rather than re-read P.
* In other cases we only compute blocks from failed
* devices, otherwise check/repair might fail to detect
* a real inconsistency.
*/
if ((s->uptodate == disks - 1) &&
((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
(s->failed && (disk_idx == s->failed_num[0] ||
disk_idx == s->failed_num[1]))) {
disk_idx == s->failed_num[1])))) {
/* have disk failed, and we're requested to fetch it;
* do compute it
*/


@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
priv->clk = v4l2_clk_get(&client->dev, "mclk");
priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto eclkget;


@ -342,6 +342,17 @@ static void solo_stop_streaming(struct vb2_queue *q)
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
solo_stop_thread(solo_dev);
spin_lock(&solo_dev->slock);
while (!list_empty(&solo_dev->vidq_active)) {
struct solo_vb2_buf *buf = list_entry(
solo_dev->vidq_active.next,
struct solo_vb2_buf, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock(&solo_dev->slock);
INIT_LIST_HEAD(&solo_dev->vidq_active);
}


@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct camera_data *cam = video_drvdata(file);
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
buf->index > cam->num_frames)
buf->index >= cam->num_frames)
return -EINVAL;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
buf->memory != V4L2_MEMORY_MMAP ||
buf->index > cam->num_frames)
buf->index >= cam->num_frames)
return -EINVAL;
DBG("QBUF #%d\n", buf->index);


@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
for (i = 0; i < components; i++) {
edev->component[i].number = -1;
edev->component[i].slot = -1;
edev->component[i].power_status = 1;
edev->component[i].power_status = -1;
}
mutex_lock(&container_list_lock);
@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev,
if (edev->cb->get_power_status)
edev->cb->get_power_status(edev, ecomp);
/* If still uninitialized, the callback failed or does not exist. */
if (ecomp->power_status == -1)
return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
}


@ -626,7 +626,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
chip->cmd_ctrl(mtd, command, ctrl);
if (command != NAND_CMD_NONE)
chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@ -655,6 +656,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@ -717,7 +719,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (command != NAND_CMD_NONE)
chip->cmd_ctrl(mtd, command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@ -750,6 +754,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:


@ -3276,12 +3276,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
res[i] += (u32)nv - (u32)ov;
else
res[i] += nv - ov;
delta = (s64)(s32)((u32)nv - (u32)ov);
/* filter anomalies, some drivers reset their stats
* at down/up events.
*/
if (delta > 0)
res[i] += delta;
}
}


@ -604,6 +604,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);


@ -147,6 +147,7 @@ enum xgene_enet_rm {
#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)


@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>


@ -983,7 +983,7 @@ static void fm10k_self_test(struct net_device *dev,
memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
if (FM10K_REMOVED(hw)) {
if (FM10K_REMOVED(hw->hw_addr)) {
netif_err(interface, drv, dev,
"Interface removed - test blocked\n");
eth_test->flags |= ETH_TEST_FL_FAILED;


@ -1073,6 +1073,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
if (hw->mac.type == I40E_MAC_X722) {
val = X722_EEPROM_SCOPE_LIMIT + 1;
return val;
}
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;


@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
i40e_release_nvm(hw);
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
i40e_release_nvm(hw);
}
return ret_code;
}


@ -282,6 +282,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
ipvlan->phy_dev->dev_addr))
skb->pkt_type = PACKET_OTHERHOST;
ret = RX_HANDLER_ANOTHER;
success = true;
}


@ -399,6 +399,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
peer->gso_max_size = dev->gso_max_size;
peer->gso_max_segs = dev->gso_max_segs;
err = register_netdevice(peer);
put_net(net);
net = NULL;


@ -2834,6 +2834,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
needed_headroom = lowerdev->hard_header_len;
}
if (lowerdev) {
dev->gso_max_size = lowerdev->gso_max_size;
dev->gso_max_segs = lowerdev->gso_max_segs;
}
if (conf->mtu) {
err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
if (err)


@ -1892,6 +1892,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
struct ath10k_vif *arvif;
/* Just check for for the first vif alone, as all the vifs will be
* sharing the same channel and if the channel is disabled, all the
* vifs will share the same 'is_started' state.
*/
arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
if (!arvif->is_started)
return -EINVAL;
ieee80211_radar_detected(ar->hw);


@ -5497,6 +5497,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
sta,
WMI_TDLS_PEER_STATE_TEARDOWN);
if (ret)
ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
sta->addr,
WMI_TDLS_PEER_STATE_TEARDOWN, ret);
}
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@ -6427,7 +6437,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
lockdep_assert_held(&ar->data_lock);
WARN_ON(ctx && vifs);
WARN_ON(vifs && n_vifs != 1);
WARN_ON(vifs && !n_vifs);
/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
* on a linked list now. Doing a lookup peer -> vif -> chanctx for each


@ -4826,7 +4826,8 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00


@ -125,9 +125,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
u32 *d = dst;
const volatile u32 __iomem *s = src;
/* size_t is unsigned, if (count%4 != 0) it will wrap */
for (count += 4; count > 4; count -= 4)
for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = __raw_readl(s);
memcpy(d, &tmp, count);
}
}
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
@ -136,8 +142,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
volatile u32 __iomem *d = dst;
const u32 *s = src;
for (count += 4; count > 4; count -= 4)
for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = 0;
memcpy(&tmp, s, count);
__raw_writel(tmp, d);
}
}
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,


@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
val != PS_MANUAL_POLL)
return -EINVAL;
if (val == PS_MANUAL_POLL) {
if (data->ps != PS_ENABLED)
return -EINVAL;
local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
local_bh_enable();
return 0;
}
old_ps = data->ps;
data->ps = val;
local_bh_disable();
if (val == PS_MANUAL_POLL) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
data->ps_poll_pending = true;
} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps, data);


@ -17,7 +17,7 @@
*/
#include <linux/module.h>
#include <linux/unaligned/access_ok.h>
#include <asm/unaligned.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>


@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv,
/* Send the SPI packet */
err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion,
skb);
if (err != 0) {
if (err)
nfc_err(priv->dev, "spi_send failed %d", err);
kfree_skb(skb);
}
return err;
}


@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
str[i] = '_';
}
return tsize;
return repend;
}
EXPORT_SYMBOL_GPL(of_device_get_modalias);


@ -463,8 +463,6 @@ static void pci_device_shutdown(struct device *dev)
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
#ifdef CONFIG_KEXEC_CORE
/*


@ -322,10 +322,16 @@ validate_group(struct perf_event *event)
return 0;
}
static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
struct platform_device *pdev = armpmu->plat_device;
return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@ -337,8 +343,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
plat_device = armpmu->plat_device;
plat = dev_get_platdata(&plat_device->dev);
plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)


@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
unsigned long long c;
unsigned long rate, hz;
unsigned long long ns100 = NSEC_PER_SEC;
u32 val = 0;
int err;
@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* cycles at the PWM clock rate will take period_ns nanoseconds.
*/
rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
hz = NSEC_PER_SEC / period_ns;
rate = (rate + (hz / 2)) / hz;
/* Consider precision in PWM_SCALE_WIDTH rate calculation */
ns100 *= 100;
hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
rate = DIV_ROUND_CLOSEST(rate * 100, hz);
/*
* Since the actual PWM divider is the register's frequency divider


@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@ -5864,8 +5866,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@ -6255,8 +6259,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@ -6282,8 +6288,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);


@ -180,7 +180,7 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
{"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
{"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
@ -589,17 +589,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
int key)
{
struct scsi_dev_info_list *devinfo;
int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
err = PTR_ERR(devinfo);
if (err != -ENOENT)
return err;
/* nothing found, return nothing */
/* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;


@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
{"IBM", "3542", "rdac", },
{"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
{"STK", "OPENstorage D280", "rdac", },
{"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
{"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },


@ -546,7 +546,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
ecomp = &edev->component[components++];
if (!IS_ERR(ecomp)) {
ses_get_power_status(edev, ecomp);
if (addl_desc_ptr)
ses_process_descriptor(
ecomp,


@ -535,6 +535,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
@ -575,6 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
err_out:
err2 = sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
@ -674,18 +676,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
if (strcmp(current->comm, cmd)) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
strcpy(cmd, current->comm);
}
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
@ -784,11 +782,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
if (hp->dxfer_len >= SZ_256M)
return -EINVAL;
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
@ -801,6 +803,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return -ENODEV;
}
@ -1290,6 +1293,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
@ -1834,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp)
else
sg_remove_scat(sfp, req_schp);
sg_remove_request(sfp, srp);
return ret;
}
@ -2182,12 +2184,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
unsigned long iflags;
/* Cleanup any responses which were never read(). */
write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
list_del(&srp->entry);
srp->parentfp = NULL;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,


@ -457,6 +457,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
int elements = 0;
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
@ -517,8 +519,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
elements--;
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
if (!mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
@ -536,8 +538,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
return count;
}
}
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);


@ -457,7 +457,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
pm_runtime_force_suspend(&pdev->dev);
return 0;
}


@ -831,7 +831,9 @@ static ssize_t message_show(struct kobject *kobj,
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
BUG_ON(!group);
if (WARN_ON(!group))
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@ -843,7 +845,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
{
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
BUG_ON(!group);
if (WARN_ON(!group))
return -EINVAL;
return message_store_helper(buf, count, group);
}


@ -2179,6 +2179,8 @@ static s32 Handle_Get_InActiveTime(struct host_if_drv *hif_drv,
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
return -ENOMEM;
stamac = wid.val;
memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);


@ -1385,6 +1385,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dwc2_core_init(hsotg, false, -1);
dwc2_enable_global_interrupts(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hsotg_disconnect(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hsotg_core_connect(hsotg);


@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->dev = dev;
dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
temp = bdc_readl(bdc->regs, BDC_BDCSC);
temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
if ((temp & BDC_P64) &&
!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
dev_dbg(bdc->dev, "Using 64-bit address\n");


@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
dev_err(&pci->dev,
"couldn't add resources to bdc device\n");
platform_device_put(bdc);
return ret;
}


@ -2105,16 +2105,13 @@ static int dummy_hub_control(
}
break;
case USB_PORT_FEAT_POWER:
if (hcd->speed == HCD_USB3) {
if (dum_hcd->port_status & USB_PORT_STAT_POWER)
dev_dbg(dummy_dev(dum_hcd),
"power-off\n");
} else
if (dum_hcd->port_status &
USB_SS_PORT_STAT_POWER)
dev_dbg(dummy_dev(dum_hcd),
"power-off\n");
/* FALLS THROUGH */
dev_dbg(dummy_dev(dum_hcd), "power-off\n");
if (hcd->speed == HCD_USB3)
dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
else
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
default:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
@ -2285,14 +2282,13 @@ static int dummy_hub_control(
if ((dum_hcd->port_status &
USB_SS_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
set_link_state(dum_hcd);
}
} else
if ((dum_hcd->port_status &
USB_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
set_link_state(dum_hcd);
}
set_link_state(dum_hcd);
}
break;
case GetPortErrorCount:


@@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
if (err)
return err;
framesize = fb->panel->mode.xres * fb->panel->mode.yres *
fb->panel->bpp / 8;
framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
fb->panel->bpp / 8);
fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
&dma, GFP_KERNEL);
if (!fb->fb.screen_base)
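
The clcdfb hunk above rounds the computed frame buffer size up to a whole page before handing it to dma_alloc_coherent(). A worked example with hypothetical panel numbers (not from the patch): for a 320x240 panel at 16 bpp with 4 KiB pages,

    framesize = 320 * 240 * 16 / 8;     /* = 153600 bytes            */
    framesize = PAGE_ALIGN(framesize);  /* = 155648 bytes (38 pages) */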


@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
{
/* for side by side (half) we also need to provide 3D_Ext_Data */
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
return 6;
else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return 5;
else
return 4;
}
/**
* hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
u8 *ptr = buffer;
size_t length;
/* empty info frame */
if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
/* for side by side (half) we also need to provide 3D_Ext_Data */
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
frame->length = 6;
else
frame->length = 5;
frame->length = hdmi_vendor_infoframe_length(frame);
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
ptr[5] = 0x0c;
ptr[6] = 0x00;
if (frame->vic) {
ptr[7] = 0x1 << 5; /* video format */
ptr[8] = frame->vic;
} else {
if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
} else if (frame->vic) {
ptr[7] = 0x1 << 5; /* video format */
ptr[8] = frame->vic;
} else {
ptr[7] = 0x0 << 5; /* video format */
}
hdmi_infoframe_set_checksum(buffer, length);
@@ -1161,7 +1166,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
ptr[1] != 1 ||
(ptr[2] != 5 && ptr[2] != 6))
(ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
return -EINVAL;
length = ptr[2];
@@ -1189,16 +1194,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
hvf->length = length;
if (hdmi_video_format == 0x1) {
hvf->vic = ptr[4];
} else if (hdmi_video_format == 0x2) {
if (hdmi_video_format == 0x2) {
if (length != 5 && length != 6)
return -EINVAL;
hvf->s3d_struct = ptr[4] >> 4;
if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
if (length == 6)
hvf->s3d_ext_data = ptr[5] >> 4;
else
if (length != 6)
return -EINVAL;
hvf->s3d_ext_data = ptr[5] >> 4;
}
} else if (hdmi_video_format == 0x1) {
if (length != 5)
return -EINVAL;
hvf->vic = ptr[4];
} else {
if (length != 4)
return -EINVAL;
}
return 0;
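
Taken together, the hdmi hunks above derive the vendor infoframe payload length from its contents via the new (file-local) hdmi_vendor_infoframe_length() helper instead of hard-coding 5 or 6, and teach the unpack path to accept the now-legal length-4 "empty" frame. A rough reading of the rules with illustrative values, not code from the patch:

    #include <linux/hdmi.h>

    struct hdmi_vendor_infoframe frame;

    hdmi_vendor_infoframe_init(&frame);
    frame.s3d_struct = HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
    frame.s3d_ext_data = 1;
    /* length 6: 3D_Ext_Data must be carried for side-by-side (half) and beyond */

    hdmi_vendor_infoframe_init(&frame);
    frame.vic = 1;
    /* length 5: a VIC (or a simpler 3D structure) is present */

    hdmi_vendor_infoframe_init(&frame);
    /* length 4: neither vic nor s3d_struct set - an "empty" frame */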


@@ -68,9 +68,9 @@ struct aio_ring {
#define AIO_RING_PAGES 8
struct kioctx_table {
struct rcu_head rcu;
unsigned nr;
struct kioctx *table[];
struct rcu_head rcu;
unsigned nr;
struct kioctx __rcu *table[];
};
struct kioctx_cpu {
@@ -115,7 +115,8 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
struct work_struct free_work;
struct rcu_head free_rcu;
struct work_struct free_work; /* see free_ioctx() */
/*
* signals when all in-flight requests are done
@@ -326,7 +327,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
for (i = 0; i < table->nr; i++) {
struct kioctx *ctx;
ctx = table->table[i];
ctx = rcu_dereference(table->table[i]);
if (ctx && ctx->aio_ring_file == file) {
if (!atomic_read(&ctx->dead)) {
ctx->user_id = ctx->mmap_base = vma->vm_start;
@@ -573,6 +574,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
return cancel(&kiocb->common);
}
/*
* free_ioctx() should be RCU delayed to synchronize against the RCU
* protected lookup_ioctx() and also needs process context to call
* aio_free_ring(), so the double bouncing through kioctx->free_rcu and
* ->free_work.
*/
static void free_ioctx(struct work_struct *work)
{
struct kioctx *ctx = container_of(work, struct kioctx, free_work);
@@ -586,6 +593,14 @@ static void free_ioctx(struct work_struct *work)
kmem_cache_free(kioctx_cachep, ctx);
}
static void free_ioctx_rcufn(struct rcu_head *head)
{
struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
}
static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -594,8 +609,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
/* Synchronize against RCU protected table->table[] dereferences */
call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}
/*
@@ -636,9 +651,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
while (1) {
if (table)
for (i = 0; i < table->nr; i++)
if (!table->table[i]) {
if (!rcu_access_pointer(table->table[i])) {
ctx->id = i;
table->table[i] = ctx;
rcu_assign_pointer(table->table[i], ctx);
spin_unlock(&mm->ioctx_lock);
/* While kioctx setup is in progress,
@@ -813,11 +828,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
}
table = rcu_dereference_raw(mm->ioctx_table);
WARN_ON(ctx != table->table[ctx->id]);
table->table[ctx->id] = NULL;
WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
RCU_INIT_POINTER(table->table[ctx->id], NULL);
spin_unlock(&mm->ioctx_lock);
/* percpu_ref_kill() will do the necessary call_rcu() */
/* free_ioctx_reqs() will do the necessary RCU synchronization */
wake_up_all(&ctx->wait);
/*
@@ -859,7 +874,8 @@ void exit_aio(struct mm_struct *mm)
skipped = 0;
for (i = 0; i < table->nr; ++i) {
struct kioctx *ctx = table->table[i];
struct kioctx *ctx =
rcu_dereference_protected(table->table[i], true);
if (!ctx) {
skipped++;
@@ -1048,7 +1064,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;
ctx = table->table[id];
ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
percpu_ref_get(&ctx->users);
ret = ctx;
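
The aio changes above turn table->table[] into a proper __rcu pointer array: readers such as lookup_ioctx() and aio_ring_mremap() use rcu_dereference(), updates go through rcu_assign_pointer()/RCU_INIT_POINTER(), and the context is only freed after call_rcu() has waited out those readers, bouncing through a work item because the final teardown needs process context. A minimal sketch of the same general pattern with generic names (not fs/aio.c itself):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct item {
            struct rcu_head rcu;
            struct work_struct work;
    };

    static struct item __rcu *slot;         /* published pointer */

    static void item_free_work(struct work_struct *work)
    {
            /* runs in process context, may sleep */
            kfree(container_of(work, struct item, work));
    }

    static void item_free_rcu(struct rcu_head *head)
    {
            struct item *it = container_of(head, struct item, rcu);

            INIT_WORK(&it->work, item_free_work);
            schedule_work(&it->work);       /* bounce to process context */
    }

    static void item_use(void)
    {
            struct item *it;

            rcu_read_lock();
            it = rcu_dereference(slot);     /* stable for this read section */
            if (it) {
                    /* a real user (like aio with percpu_ref_get()) would
                     * take a reference here before rcu_read_unlock() */
            }
            rcu_read_unlock();
    }

    static void item_remove(struct item *it)
    {
            RCU_INIT_POINTER(slot, NULL);
            call_rcu(&it->rcu, item_free_rcu);      /* readers drain first */
    }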


@@ -568,6 +568,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev)
btrfs_sysfs_remove_fsid(fs_devs);
list_del(&fs_devs->list);
free_fs_devices(fs_devs);
break;
} else {
fs_devs->num_devices--;
list_del(&dev->dev_list);
@@ -4638,10 +4639,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (devs_max && ndevs > devs_max)
ndevs = devs_max;
/*
* the primary goal is to maximize the number of stripes, so use as many
* devices as possible, even if the stripes are not maximum sized.
* The primary goal is to maximize the number of stripes, so use as
* many devices as possible, even if the stripes are not maximum sized.
*
* The DUP profile stores more than one stripe per device, the
* max_avail is the total size so we have to adjust.
*/
stripe_size = devices_info[ndevs-1].max_avail;
stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
num_stripes = ndevs * dev_stripes;
/*
@@ -4681,8 +4685,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
}
stripe_size = div_u64(stripe_size, dev_stripes);
/* align to BTRFS_STRIPE_LEN */
stripe_size = div_u64(stripe_size, raid_stripe_len);
stripe_size *= raid_stripe_len;
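
The __btrfs_alloc_chunk hunks above move the div_u64(..., dev_stripes) up to where stripe_size is first taken from the smallest chosen device's max_avail, so profiles that place more than one stripe on a device are accounted for before the later clamping logic runs. A worked example with made-up numbers, assuming DUP (dev_stripes = 2) and max_avail = 10 GiB on the least-roomy device:

    /* example values only */
    stripe_size = div_u64(10ULL << 30, 2);  /* 5 GiB, which fits twice on the device */

whereas the old code started from the full 10 GiB and divided only after the intervening size limits had been applied.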


@@ -634,11 +634,16 @@ again:
spin_unlock(&parent->d_lock);
goto again;
}
rcu_read_unlock();
if (parent != dentry)
if (parent != dentry) {
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
else
if (unlikely(dentry->d_lockref.count < 0)) {
spin_unlock(&parent->d_lock);
parent = NULL;
}
} else {
parent = NULL;
}
rcu_read_unlock();
return parent;
}


@@ -585,9 +585,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
static bool path_connected(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct super_block *sb = mnt->mnt_sb;
/* Only bind mounts can have disconnected paths */
if (mnt->mnt_root == mnt->mnt_sb->s_root)
/* Bind mounts and multi-root filesystems can have disconnected paths */
if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
return true;
return is_subdir(path->dentry, mnt->mnt_root);


@@ -2581,6 +2581,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
/* initial superblock/root creation */
mount_info->fill_super(s, mount_info);
nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
if (!(server->flags & NFS_MOUNT_UNSHARED))
s->s_iflags |= SB_I_MULTIROOT;
}
mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);


@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
* will be requeued because superblock is being shutdown and doesn't
* have MS_ACTIVE set.
*/
cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
reiserfs_cancel_old_flush(sb);
/* wait for all commits to finish */
cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);


@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
struct reiserfs_list_bitmap *, unsigned int);
void reiserfs_schedule_old_flush(struct super_block *s);
void reiserfs_cancel_old_flush(struct super_block *s);
void add_save_link(struct reiserfs_transaction_handle *th,
struct inode *inode, int truncate);
int remove_save_link(struct inode *inode, int truncate);


@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
s = sbi->s_journal->j_work_sb;
spin_lock(&sbi->old_work_lock);
sbi->work_queued = 0;
/* Avoid clobbering the cancel state... */
if (sbi->work_queued == 1)
sbi->work_queued = 0;
spin_unlock(&sbi->old_work_lock);
reiserfs_sync_fs(s, 1);
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
spin_unlock(&sbi->old_work_lock);
}
static void cancel_old_flush(struct super_block *s)
void reiserfs_cancel_old_flush(struct super_block *s)
{
struct reiserfs_sb_info *sbi = REISERFS_SB(s);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
spin_lock(&sbi->old_work_lock);
sbi->work_queued = 0;
/* Make sure no new flushes will be queued */
sbi->work_queued = 2;
spin_unlock(&sbi->old_work_lock);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
}
static int reiserfs_freeze(struct super_block *s)
{
struct reiserfs_transaction_handle th;
cancel_old_flush(s);
reiserfs_cancel_old_flush(s);
reiserfs_write_lock(s);
if (!(s->s_flags & MS_RDONLY)) {
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
static int reiserfs_unfreeze(struct super_block *s)
{
struct reiserfs_sb_info *sbi = REISERFS_SB(s);
reiserfs_allow_writes(s);
spin_lock(&sbi->old_work_lock);
/* Allow old_work to run again */
sbi->work_queued = 0;
spin_unlock(&sbi->old_work_lock);
return 0;
}
@@ -2187,7 +2196,7 @@ error_unlocked:
if (sbi->commit_wq)
destroy_workqueue(sbi->commit_wq);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
reiserfs_cancel_old_flush(s);
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
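
Reading the reiserfs hunks above together, sbi->work_queued now acts as a small state variable guarded by old_work_lock rather than a plain flag; as far as can be inferred from the hunks shown here, the values are used as follows:

    /*
     * sbi->work_queued (under old_work_lock), as used by these hunks:
     *   0 - nothing queued / flushing allowed again (reiserfs_unfreeze()
     *       resets to this)
     *   1 - a delayed flush is pending; flush_old_commits() clears it
     *       back to 0 when it runs
     *   2 - cancelled: reiserfs_cancel_old_flush() sets this before
     *       cancel_delayed_work_sync() so nothing can requeue the work
     */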


@@ -1300,6 +1300,7 @@ struct mm_struct;
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
/* Possible states of 'frozen' field */
enum {


@@ -153,7 +153,7 @@ static inline int page_cache_get_speculative(struct page *page)
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic());
VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -191,7 +191,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic());
VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
atomic_add(count, &page->_count);


@@ -24,7 +24,7 @@
struct regulator_init_data;
struct isl9305_pdata {
struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
};
#endif


@@ -1202,9 +1202,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
static inline int tcp_win_from_space(int space)
{
return sysctl_tcp_adv_win_scale<=0 ?
(space>>(-sysctl_tcp_adv_win_scale)) :
space - (space>>sysctl_tcp_adv_win_scale);
int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
return tcp_adv_win_scale <= 0 ?
(space>>(-tcp_adv_win_scale)) :
space - (space>>tcp_adv_win_scale);
}
/* Note: caller must be prepared to deal with negative returns */
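
The tcp_win_from_space() change above caches sysctl_tcp_adv_win_scale in a local so the sign test and the shift always see the same value. For illustration, with space = 65536 and a concurrent sysctl write flipping the scale from 2 to -2: the old expression could pass the "> 0" test with the old value and then re-read the sysctl for the shift, ending up shifting by a negative count, while the new code consistently returns either 65536 - (65536 >> 2) = 49152 or 65536 >> 2 = 16384, depending on which single value it read.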


@@ -1135,7 +1135,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
regs[insn->dst_reg].type = UNKNOWN_VALUE;
regs[insn->dst_reg].map_ptr = NULL;
}
} else {
} else if (BPF_CLASS(insn->code) == BPF_ALU64 ||
insn->imm >= 0) {
/* case: R = imm
* remember the value we stored into this reg
*/


@@ -2,12 +2,13 @@
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include "console_cmdline.h"
#include "braille.h"
char *_braille_console_setup(char **str, char **brl_options)
int _braille_console_setup(char **str, char **brl_options)
{
if (!strncmp(*str, "brl,", 4)) {
*brl_options = "";
@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options)
} else if (!strncmp(*str, "brl=", 4)) {
*brl_options = *str + 4;
*str = strchr(*brl_options, ',');
if (!*str)
if (!*str) {
pr_err("need port name after brl=\n");
else
*((*str)++) = 0;
} else
return NULL;
return -EINVAL;
}
*((*str)++) = 0;
}
return *str;
return 0;
}
int


@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
c->brl_options = brl_options;
}
char *
/*
* Setup console according to braille options.
* Return -EINVAL on syntax error, 0 on success (or no braille option was
* actually given).
* Modifies str to point to the serial options
* Sets brl_options to the parsed braille options.
*/
int
_braille_console_setup(char **str, char **brl_options);
int
@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
{
}
static inline char *
static inline int
_braille_console_setup(char **str, char **brl_options)
{
return NULL;
return 0;
}
static inline int
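
The braille.h hunks above document the new convention: _braille_console_setup() now returns an int, -EINVAL on a syntax error and 0 otherwise, while still advancing *str past any braille options and pointing *brl_options at them. A rough caller-side sketch with a hypothetical console= string (illustrative only, not kernel code):

    static void parse_console_arg_example(void)
    {
            char buf[] = "brl=lt,ttyS0,115200n8"; /* hypothetical value */
            char *str = buf;
            char *brl_options = NULL;

            if (_braille_console_setup(&str, &brl_options) < 0)
                    return; /* e.g. "brl=" with no port name after the options */

            /* here: brl_options -> "lt", str -> "ttyS0,115200n8" */
    }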


@@ -621,7 +621,8 @@ void resched_cpu(int cpu)
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
resched_curr(rq);
if (cpu_online(cpu) || cpu == smp_processor_id())
resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}


@@ -2359,7 +2359,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
queue_push_tasks(rq);
#endif /* CONFIG_SMP */
if (p->prio < rq->curr->prio)
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}


@@ -205,6 +205,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
update_clock_read_data(&rd);
if (sched_clock_timer.function != NULL) {
/* update timeout for clock wrap */
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
r = rate;
if (r >= 4000000) {
r /= 1000000;


@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/nmi.h>
#include <asm/uaccess.h>
@@ -96,6 +97,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
next_one:
i = 0;
touch_nmi_watchdog();
raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = timerqueue_getnext(&base->active);
@@ -207,6 +211,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
{
struct clock_event_device *dev = td->evtdev;
touch_nmi_watchdog();
SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
if (cpu < 0)
SEQ_printf(m, "Broadcast device\n");

Some files were not shown because too many files have changed in this diff Show more