This is the 4.4.163 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlvm/EEACgkQONu9yGCS
 aT5qaw//fjbtlLntj6zurFCquQFd7MkjsY+9fxWGvrknmDQrQLVD6u5q4Ii6JUkh
 hbcnCnPH70viQsjlxnwVP7YCuHhuiuja2TCFihKyVEEJXMgwtnjjN6pgay+DCikz
 k8921xsAlpU0N5em9NExu6abQMvsFg1u3h6kLA0Gob120VM4FiK4I8WMyVDZT9ya
 gjdnAzCGfvhdBa7jUokWjOnFPg7s1Y8S4f3OR7/6NjDGupiBYq4vc19cRfofBpnI
 IMZfP9QBj+tUsj3TKBMyQyq2f6qBVaD0XvcpeEdwFxwNxfWgH1oB9tb6kugTgZ6H
 3+fX/XoSJZYKJJpTsKr16FkpLElXeAXjbVKxrNg9qLYTSnJPNkfrGvTOqXjArWC8
 92F5Q/ZlGfZhiuRXTfVoLoThUgRcyru6VPo5dBXgMqNYnV6QHEkwqHkizMHqP3nG
 dlMi40OIx02OuEy6576rLRGpF7kbZ1q6T4zxh/cGzFOz5v8v72HkZ5UildJ+DazU
 oO+tZDCP7yI42jLMafdcn1z/IK20yBiALGIQE1vMQSFxil8wn542T8eS8mAodD9V
 SIPet9oBtWIT0vf0T4JQ2W8SkFZNJwQZc7TbyiUBJrDVUSW/pGhNqAu0lLN4eIxb
 0kA043zy3+apQX6k1qbuXGApbJENk3N2/25NR/n1PQvFCIIqzfU=
 =r3vv
 -----END PGP SIGNATURE-----

Merge 4.4.163 into android-4.4

Changes in 4.4.163
	xfrm: Validate address prefix lengths in the xfrm selector.
	xfrm6: call kfree_skb when skb is toobig
	mac80211: Always report TX status
	cfg80211: reg: Init wiphy_idx in regulatory_hint_core()
	ARM: 8799/1: mm: fix pci_ioremap_io() offset check
	xfrm: validate template mode
	mac80211_hwsim: do not omit multicast announce of first added radio
	Bluetooth: SMP: fix crash in unpairing
	pxa168fb: prepare the clock
	asix: Check for supported Wake-on-LAN modes
	ax88179_178a: Check for supported Wake-on-LAN modes
	lan78xx: Check for supported Wake-on-LAN modes
	sr9800: Check for supported Wake-on-LAN modes
	r8152: Check for supported Wake-on-LAN Modes
	smsc75xx: Check for Wake-on-LAN modes
	smsc95xx: Check for Wake-on-LAN modes
	perf/ring_buffer: Prevent concurent ring buffer access
	net: cxgb3_main: fix a missing-check bug
	KEYS: put keyring if install_session_keyring_to_cred() fails
	ipv6: suppress sparse warnings in IP6_ECN_set_ce()
	net: drop write-only stack variable
	ser_gigaset: use container_of() instead of detour
	tracing: Skip more functions when doing stack tracing of events
	ARM: dts: apq8064: add ahci ports-implemented mask
	x86/mm/pat: Prevent hang during boot when mapping pages
	radix-tree: fix radix_tree_iter_retry() for tagged iterators.
	af_iucv: Move sockaddr length checks to before accessing sa_family in bind and connect handlers
	net/mlx4_en: Resolve dividing by zero in 32-bit system
	ipv6: orphan skbs in reassembly unit
	um: Avoid longjmp/setjmp symbol clashes with libpthread.a
	sched/cgroup: Fix cgroup entity load tracking tear-down
	btrfs: don't create or leak aliased root while cleaning up orphans
	thermal: allow spear-thermal driver to be a module
	thermal: allow u8500-thermal driver to be a module
	tpm: fix: return rc when devm_add_action() fails
	x86/PCI: Mark Broadwell-EP Home Agent 1 as having non-compliant BARs
	aacraid: Start adapter after updating number of MSIX vectors
	perf/core: Don't leak event in the syscall error path
	usbvision: revert commit 588afcc1
	MIPS: Fix FCSR Cause bit handling for correct SIGFPE issue
	ASoC: ak4613: Enable cache usage to fix crashes on resume
	ASoC: wm8940: Enable cache usage to fix crashes on resume
	CIFS: handle guest access errors to Windows shares
	arm64: Fix potential race with hardware DBM in ptep_set_access_flags()
	xfrm: Clear sk_dst_cache when applying per-socket policy.
	scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state
	sparc/pci: Refactor dev_archdata initialization into pci_init_dev_archdata
	sch_red: update backlog as well
	usb-storage: fix bogus hardware error messages for ATA pass-thru devices
	bpf: generally move prog destruction to RCU deferral
	drm/nouveau/fbcon: fix oops without fbdev emulation
	fuse: Dont call set_page_dirty_lock() for ITER_BVEC pages for async_dio
	ixgbevf: Fix handling of NAPI budget when multiple queues are enabled per vector
	net/mlx5e: Fix LRO modify
	net/mlx5e: Correctly handle RSS indirection table when changing number of channels
	ixgbe: fix RSS limit for X550
	ixgbe: Correct X550EM_x revision check
	ALSA: timer: Fix zero-division by continue of uninitialized instance
	vti6: flush x-netns xfrm cache when vti interface is removed
	gro: Allow tunnel stacking in the case of FOU/GUE
	brcmfmac: Fix glom_skb leak in brcmf_sdiod_recv_chain
	l2tp: hold socket before dropping lock in l2tp_ip{, 6}_recv()
	tty: serial: sprd: fix error return code in sprd_probe()
	video: fbdev: pxa3xx_gcu: fix error return code in pxa3xx_gcu_probe()
	sparc64 mm: Fix more TSB sizing issues
	gpu: host1x: fix error return code in host1x_probe()
	sparc64: Fix exception handling in UltraSPARC-III memcpy.
	gpio: msic: fix error return code in platform_msic_gpio_probe()
	usb: imx21-hcd: fix error return code in imx21_probe()
	usb: ehci-omap: fix error return code in ehci_hcd_omap_probe()
	usb: dwc3: omap: fix error return code in dwc3_omap_probe()
	spi/bcm63xx-hspi: fix error return code in bcm63xx_hsspi_probe()
	MIPS: Handle non word sized instructions when examining frame
	spi/bcm63xx: fix error return code in bcm63xx_spi_probe()
	spi: xlp: fix error return code in xlp_spi_probe()
	ASoC: spear: fix error return code in spdif_in_probe()
	PM / devfreq: tegra: fix error return code in tegra_devfreq_probe()
	bonding: avoid defaulting hard_header_len to ETH_HLEN on slave removal
	scsi: aacraid: Fix typo in blink status
	MIPS: microMIPS: Fix decoding of swsp16 instruction
	igb: Remove superfluous reset to PHY and page 0 selection
	MIPS: DEC: Fix an int-handler.S CPU_DADDI_WORKAROUNDS regression
	ARM: dts: imx53-qsb: disable 1.2GHz OPP
	fs/fat/fatent.c: add cond_resched() to fat_count_free_clusters()
	mtd: spi-nor: Add support for is25wp series chips
	perf tools: Disable parallelism for 'make clean'
	bridge: do not add port to router list when receives query with source 0.0.0.0
	net: bridge: remove ipv6 zero address check in mcast queries
	ipv6: mcast: fix a use-after-free in inet6_mc_check
	ipv6/ndisc: Preserve IPv6 control buffer if protocol error handlers are called
	net/ipv6: Fix index counter for unicast addresses in in6_dump_addrs
	net: sched: gred: pass the right attribute to gred_change_table_def()
	net: socket: fix a missing-check bug
	net: stmmac: Fix stmmac_mdio_reset() when building stmmac as modules
	r8169: fix NAPI handling under high load
	sctp: fix race on sctp_id2asoc
	net: drop skb on failure in ip_check_defrag()
	vhost: Fix Spectre V1 vulnerability
	rtnetlink: Disallow FDB configuration for non-Ethernet device
	mremap: properly flush TLB before releasing the page
	crypto: shash - Fix a sleep-in-atomic bug in shash_setkey_unaligned
	ahci: don't ignore result code of ahci_reset_controller()
	cachefiles: fix the race between cachefiles_bury_object() and rmdir(2)
	ptp: fix Spectre v1 vulnerability
	RDMA/ucma: Fix Spectre v1 vulnerability
	IB/ucm: Fix Spectre v1 vulnerability
	cdc-acm: correct counting of UART states in serial state notification
	usb: gadget: storage: Fix Spectre v1 vulnerability
	USB: fix the usbfs flag sanitization for control transfers
	Input: elan_i2c - add ACPI ID for Lenovo IdeaPad 330-15IGM
	sched/fair: Fix throttle_list starvation with low CFS quota
	x86/percpu: Fix this_cpu_read()
	cpuidle: Do not access cpuidle_devices when !CONFIG_CPU_IDLE
	l2tp: hold tunnel socket when handling control frames in l2tp_ip and l2tp_ip6
	x86/time: Correct the attribute on jiffies' definition
	Linux 4.4.163

Change-Id: Idb0efd175853886145a1fb7eaaf18797c39e5f6f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2018-11-10 07:54:26 -08:00
commit 0ca3fcabdc
130 changed files with 666 additions and 349 deletions

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 162
+SUBLEVEL = 163
 EXTRAVERSION =
 NAME = Blurry Fish Butt

View file

@@ -130,6 +130,17 @@
 };
 };
+&cpu0 {
+/* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+operating-points = <
+/* kHz uV */
+166666 850000
+400000 900000
+800000 1050000
+1000000 1200000
+>;
+};
 &esdhc1 {
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_esdhc1>;

View file

@@ -577,7 +577,7 @@
 };
 sata0: sata@29000000 {
-compatible = "generic-ahci";
+compatible = "qcom,apq8064-ahci", "generic-ahci";
 status = "disabled";
 reg = <0x29000000 0x180>;
 interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>;
@@ -599,6 +599,7 @@
 phys = <&sata_phy0>;
 phy-names = "sata-phy";
+ports-implemented = <0x1>;
 };
 /* Temporary fixed regulator */

View file

@@ -460,7 +460,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
 PCI_IO_VIRT_BASE + offset + SZ_64K,

View file

@@ -107,26 +107,27 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 /* only preserve the access flags and write permission */
 pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
-/*
-* PTE_RDONLY is cleared by default in the asm below, so set it in
-* back if necessary (read-only or clean PTE).
-*/
+/* set PTE_RDONLY if actual read-only or clean PTE */
 if (!pte_write(entry) || !pte_sw_dirty(entry))
 pte_val(entry) |= PTE_RDONLY;
 /*
 * Setting the flags must be done atomically to avoid racing with the
-* hardware update of the access/dirty state.
+* hardware update of the access/dirty state. The PTE_RDONLY bit must
+* be set to the most permissive (lowest value) of *ptep and entry
+* (calculated as: a & b == ~(~a | ~b)).
 */
+pte_val(entry) ^= PTE_RDONLY;
 asm volatile("// ptep_set_access_flags\n"
 " prfm pstl1strm, %2\n"
 "1: ldxr %0, %2\n"
-" and %0, %0, %3 // clear PTE_RDONLY\n"
+" eor %0, %0, %3 // negate PTE_RDONLY in *ptep\n"
 " orr %0, %0, %4 // set flags\n"
+" eor %0, %0, %3 // negate final PTE_RDONLY\n"
 " stxr %w1, %0, %2\n"
 " cbnz %w1, 1b\n"
 : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
-: "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+: "L" (PTE_RDONLY), "r" (pte_val(entry)));
 flush_tlb_fix_spurious_fault(vma, address);
 return 1;

View file

@@ -147,23 +147,12 @@
 * Find irq with highest priority
 */
 # open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 # open coded la t1, cpu_mask_nr_tbl
 lui t1, %hi(cpu_mask_nr_tbl)
 addiu t1, %lo(cpu_mask_nr_tbl)
-#endif
-#if (_MIPS_SZPTR == 64)
-# open coded dla t1, cpu_mask_nr_tbl
-.set push
-.set noat
-lui t1, %highest(cpu_mask_nr_tbl)
-lui AT, %hi(cpu_mask_nr_tbl)
-daddiu t1, t1, %higher(cpu_mask_nr_tbl)
-daddiu AT, AT, %lo(cpu_mask_nr_tbl)
-dsll t1, 32
-daddu t1, t1, AT
-.set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 1: lw t2,(t1)
 nop
@@ -214,23 +203,12 @@
 * Find irq with highest priority
 */
 # open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 # open coded la t1, asic_mask_nr_tbl
 lui t1, %hi(asic_mask_nr_tbl)
 addiu t1, %lo(asic_mask_nr_tbl)
-#endif
-#if (_MIPS_SZPTR == 64)
-# open coded dla t1, asic_mask_nr_tbl
-.set push
-.set noat
-lui t1, %highest(asic_mask_nr_tbl)
-lui AT, %hi(asic_mask_nr_tbl)
-daddiu t1, t1, %higher(asic_mask_nr_tbl)
-daddiu AT, AT, %lo(asic_mask_nr_tbl)
-dsll t1, 32
-daddu t1, t1, AT
-.set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 2: lw t2,(t1)
 nop

View file

@@ -864,7 +864,7 @@ struct mm16_r3_format { /* Load from global pointer format */
 struct mm16_r5_format { /* Load/store from stack pointer format */
 __BITFIELD_FIELD(unsigned int opcode : 6,
 __BITFIELD_FIELD(unsigned int rt : 5,
-__BITFIELD_FIELD(signed int simmediate : 5,
+__BITFIELD_FIELD(unsigned int imm : 5,
 __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
 ;))))
 };

View file

@@ -211,7 +211,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
 if (ip->mm16_r5_format.rt != 31)
 return 0;
-*poff = ip->mm16_r5_format.simmediate;
+*poff = ip->mm16_r5_format.imm;
 *poff = (*poff << 2) / sizeof(ulong);
 return 1;
@@ -345,6 +345,7 @@ static int get_frame_info(struct mips_frame_info *info)
 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
 union mips_instruction insn, *ip, *ip_end;
 const unsigned int max_insns = 128;
+unsigned int last_insn_size = 0;
 unsigned int i;
 info->pc_offset = -1;
@@ -356,15 +357,19 @@ static int get_frame_info(struct mips_frame_info *info)
 ip_end = (void *)ip + info->func_size;
-for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
+for (i = 0; i < max_insns && ip < ip_end; i++) {
+ip = (void *)ip + last_insn_size;
 if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
 insn.halfword[0] = 0;
 insn.halfword[1] = ip->halfword[0];
+last_insn_size = 2;
 } else if (is_mmips) {
 insn.halfword[0] = ip->halfword[1];
 insn.halfword[1] = ip->halfword[0];
+last_insn_size = 4;
 } else {
 insn.word = ip->word;
+last_insn_size = 4;
 }
 if (is_jump_ins(&insn))
@@ -386,8 +391,6 @@ static int get_frame_info(struct mips_frame_info *info)
 tmp = (ip->halfword[0] >> 1);
 info->frame_size = -(signed short)(tmp & 0xf);
 }
-ip = (void *) &ip->halfword[1];
-ip--;
 } else
 #endif
 info->frame_size = - ip->i_format.simmediate;

View file

@@ -25,6 +25,7 @@
 #define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 #ifndef __ASSEMBLY__

View file

@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
 }
 }
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+void *stc, void *host_controller,
+struct platform_device *op,
+int numa_node)
+{
+sd->iommu = iommu;
+sd->stc = stc;
+sd->host_controller = host_controller;
+sd->op = op;
+sd->numa_node = numa_node;
+}
 static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 struct device_node *node,
 struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 if (!dev)
 return NULL;
+op = of_find_device_by_node(node);
 sd = &dev->dev.archdata;
-sd->iommu = pbm->iommu;
-sd->stc = &pbm->stc;
-sd->host_controller = pbm;
-sd->op = op = of_find_device_by_node(node);
-sd->numa_node = pbm->numa_node;
+pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+pbm->numa_node);
 sd = &op->dev.archdata;
 sd->iommu = pbm->iommu;
 sd->stc = &pbm->stc;
@@ -1003,9 +1012,13 @@ int pcibios_add_device(struct pci_dev *dev)
 * Copy dev_archdata from PF to VF
 */
 if (dev->is_virtfn) {
+struct dev_archdata *psd;
 pdev = dev->physfn;
-memcpy(&dev->dev.archdata, &pdev->dev.archdata,
-sizeof(struct dev_archdata));
+psd = &pdev->dev.archdata;
+pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+psd->stc, psd->host_controller, NULL,
+psd->numa_node);
 }
 return 0;
 }

View file

@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
 ENTRY(U3_retl_o2_and_7_plus_GS)
 and %o2, 7, %o2
 retl
-add %o2, GLOBAL_SPARE, %o2
+add %o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS)
 ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
 add GLOBAL_SPARE, 8, GLOBAL_SPARE
 and %o2, 7, %o2
 retl
-add %o2, GLOBAL_SPARE, %o2
+add %o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 #endif

View file

@@ -487,6 +487,7 @@ good_area:
 tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+mm_rss *= REAL_HPAGE_PER_HPAGE;
 if (unlikely(mm_rss >
 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
 if (mm->context.tsb_block[MM_TSB_HUGE].tsb)

View file

@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 return;
 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-mm->context.thp_pte_count++;
-else
-mm->context.thp_pte_count--;
+/*
+* Note that this routine only sets pmds for THP pages.
+* Hugetlb pages are handled elsewhere. We need to check
+* for huge zero page. Huge zero pages are like hugetlb
+* pages in that there is no RSS, but there is the need
+* for TSB entries. So, huge zero page counts go into
+* hugetlb_pte_count.
+*/
+if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+if (is_huge_zero_page(pmd_page(pmd)))
+mm->context.hugetlb_pte_count++;
+else
+mm->context.thp_pte_count++;
+} else {
+if (is_huge_zero_page(pmd_page(orig)))
+mm->context.hugetlb_pte_count--;
+else
+mm->context.thp_pte_count--;
+}
 /* Do not try to allocate the TSB hash table if we
 * don't have one already. We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 }
 }
+/*
+* This routine is only called when splitting a THP
+*/
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 set_pmd_at(vma->vm_mm, address, pmdp, entry);
 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+/*
+* set_pmd_at() will not be called in a way to decrement
+* thp_pte_count when splitting a THP, so do it now.
+* Sanity check pmd before doing the actual decrement.
+*/
+if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+!is_huge_zero_page(pmd_page(entry)))
+(vma->vm_mm)->context.thp_pte_count--;
 }
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,

View file

@@ -489,8 +489,10 @@ retry_tsb_alloc:
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-unsigned long total_huge_pte_count;
+unsigned long saved_hugetlb_pte_count;
+unsigned long saved_thp_pte_count;
 #endif
 unsigned int i;
@@ -503,10 +505,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 * will re-increment the counters as the parent PTEs are
 * copied into the child address space.
 */
-total_huge_pte_count = mm->context.hugetlb_pte_count +
-mm->context.thp_pte_count;
+saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+saved_thp_pte_count = mm->context.thp_pte_count;
 mm->context.hugetlb_pte_count = 0;
 mm->context.thp_pte_count = 0;
+mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif
 /* copy_mm() copies over the parent's mm_struct before calling
@@ -519,11 +523,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 /* If this is fork, inherit the parent's TSB size. We would
 * grow it to that size on the first page fault anyways.
 */
-tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-if (unlikely(total_huge_pte_count))
-tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+tsb_grow(mm, MM_TSB_HUGE,
+(saved_hugetlb_pte_count + saved_thp_pte_count) *
+REAL_HPAGE_PER_HPAGE);
 #endif
 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))

View file

@@ -59,10 +59,14 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/um
 # Same things for in6addr_loopback and mktime - found in libc. For these two we
 # only get link-time error, luckily.
 #
+# -Dlongjmp=kernel_longjmp prevents anything from referencing the libpthread.a
+# embedded copy of longjmp, same thing for setjmp.
+#
 # These apply to USER_CFLAGS to.
 KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ \
 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
+-Dlongjmp=kernel_longjmp -Dsetjmp=kernel_setjmp \
 -Din6addr_loopback=kernel_in6addr_loopback \
 -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr

View file

@@ -184,22 +184,22 @@ do { \
 typeof(var) pfo_ret__; \
 switch (sizeof(var)) { \
 case 1: \
-asm(op "b "__percpu_arg(1)",%0" \
+asm volatile(op "b "__percpu_arg(1)",%0"\
 : "=q" (pfo_ret__) \
 : "m" (var)); \
 break; \
 case 2: \
-asm(op "w "__percpu_arg(1)",%0" \
+asm volatile(op "w "__percpu_arg(1)",%0"\
 : "=r" (pfo_ret__) \
 : "m" (var)); \
 break; \
 case 4: \
-asm(op "l "__percpu_arg(1)",%0" \
+asm volatile(op "l "__percpu_arg(1)",%0"\
 : "=r" (pfo_ret__) \
 : "m" (var)); \
 break; \
 case 8: \
-asm(op "q "__percpu_arg(1)",%0" \
+asm volatile(op "q "__percpu_arg(1)",%0"\
 : "=r" (pfo_ret__) \
 : "m" (var)); \
 break; \

View file

@@ -23,7 +23,7 @@
 #include <asm/time.h>
 #ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 #endif
 unsigned long profile_pc(struct pt_regs *regs)

View file

@@ -955,11 +955,11 @@ static void populate_pte(struct cpa_data *cpa,
 }
 }
-static int populate_pmd(struct cpa_data *cpa,
+static long populate_pmd(struct cpa_data *cpa,
 unsigned long start, unsigned long end,
 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-unsigned int cur_pages = 0;
+long cur_pages = 0;
 pmd_t *pmd;
 pgprot_t pmd_pgprot;
@@ -1029,12 +1029,12 @@ static int populate_pmd(struct cpa_data *cpa,
 return num_pages;
 }
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 pgprot_t pgprot)
 {
 pud_t *pud;
 unsigned long end;
-int cur_pages = 0;
+long cur_pages = 0;
 pgprot_t pud_pgprot;
 end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1090,7 +1090,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 /* Map trailing leftover */
 if (start < end) {
-int tmp;
+long tmp;
 pud = pud_offset(pgd, start);
 if (pud_none(*pud))
@@ -1116,7 +1116,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
 pud_t *pud = NULL; /* shut up gcc */
 pgd_t *pgd_entry;
-int ret;
+long ret;
 pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1351,7 +1351,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-int ret, numpages = cpa->numpages;
+unsigned long numpages = cpa->numpages;
+int ret;
 while (numpages) {
 /*

View file

@@ -541,9 +541,16 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+/*
+* Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+*
+* See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
+* entry BDF2.
+*/
 static void pci_bdwep_bar(struct pci_dev *dev)
 {
 dev->non_compliant_bars = 1;
 }
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);

View file

@@ -16,9 +16,9 @@
 .text
 .align 4
-.globl setjmp
-.type setjmp, @function
-setjmp:
+.globl kernel_setjmp
+.type kernel_setjmp, @function
+kernel_setjmp:
 #ifdef _REGPARM
 movl %eax,%edx
 #else
@@ -35,13 +35,13 @@ setjmp:
 movl %ecx,20(%edx) # Return address
 ret
-.size setjmp,.-setjmp
+.size kernel_setjmp,.-kernel_setjmp
 .text
 .align 4
-.globl longjmp
-.type longjmp, @function
-longjmp:
+.globl kernel_longjmp
+.type kernel_longjmp, @function
+kernel_longjmp:
 #ifdef _REGPARM
 xchgl %eax,%edx
 #else
@@ -55,4 +55,4 @@ longjmp:
 movl 16(%edx),%edi
 jmp *20(%edx)
-.size longjmp,.-longjmp
+.size kernel_longjmp,.-kernel_longjmp

View file

@@ -18,9 +18,9 @@
 .text
 .align 4
-.globl setjmp
-.type setjmp, @function
-setjmp:
+.globl kernel_setjmp
+.type kernel_setjmp, @function
+kernel_setjmp:
 pop %rsi # Return address, and adjust the stack
 xorl %eax,%eax # Return value
 movq %rbx,(%rdi)
@@ -34,13 +34,13 @@ setjmp:
 movq %rsi,56(%rdi) # Return address
 ret
-.size setjmp,.-setjmp
+.size kernel_setjmp,.-kernel_setjmp
 .text
 .align 4
-.globl longjmp
-.type longjmp, @function
-longjmp:
+.globl kernel_longjmp
+.type kernel_longjmp, @function
+kernel_longjmp:
 movl %esi,%eax # Return value (int)
 movq (%rdi),%rbx
 movq 8(%rdi),%rsp
@@ -51,4 +51,4 @@ longjmp:
 movq 48(%rdi),%r15
 jmp *56(%rdi)
-.size longjmp,.-longjmp
+.size kernel_longjmp,.-kernel_longjmp

View file

@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 int err;
 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-buffer = kmalloc(absize, GFP_KERNEL);
+buffer = kmalloc(absize, GFP_ATOMIC);
 if (!buffer)
 return -ENOMEM;

View file

@@ -619,8 +619,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 static int ahci_pci_reset_controller(struct ata_host *host)
 {
 struct pci_dev *pdev = to_pci_dev(host->dev);
+int rc;
-ahci_reset_controller(host);
+rc = ahci_reset_controller(host);
+if (rc)
+return rc;
 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
 struct ahci_host_priv *hpriv = host->private_data;

View file

@@ -230,7 +230,11 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
 chip->cdev.owner = dev->driver->owner;
 chip->cdev.kobj.parent = &chip->dev.kobj;
-devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+if (rc) {
+put_device(&chip->dev);
+return ERR_PTR(rc);
+}
 return chip;
 }

View file

@@ -688,9 +688,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
 }
 irq = platform_get_irq(pdev, 0);
-if (irq <= 0) {
-dev_err(&pdev->dev, "Failed to get IRQ\n");
-return -ENODEV;
+if (irq < 0) {
+dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+return irq;
 }
 platform_set_drvdata(pdev, tegra);

View file

@@ -266,8 +266,8 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
 int i;
 if (irq < 0) {
-dev_err(dev, "no IRQ line\n");
-return -EINVAL;
+dev_err(dev, "no IRQ line: %d\n", irq);
+return irq;
 }
 if (!pdata || !pdata->gpio_base) {

View file

@@ -235,7 +235,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
 struct nouveau_drm *drm = nouveau_drm(dev);
-if (drm->fbcon) {
+if (drm->fbcon && drm->fbcon->helper.fbdev) {
 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 }
@@ -245,7 +245,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
 struct nouveau_drm *drm = nouveau_drm(dev);
-if (drm->fbcon) {
+if (drm->fbcon && drm->fbcon->helper.fbdev) {
 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
 }
 }
@@ -257,6 +257,7 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
 struct nouveau_fbdev *fbcon = drm->fbcon;
 if (fbcon && drm->channel) {
 console_lock();
+if (fbcon->helper.fbdev)
 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 console_unlock();
 nouveau_channel_idle(drm->channel);

View file

@@ -116,8 +116,8 @@ static int host1x_probe(struct platform_device *pdev)
 syncpt_irq = platform_get_irq(pdev, 0);
 if (syncpt_irq < 0) {
-dev_err(&pdev->dev, "failed to get IRQ\n");
-return -ENXIO;
+dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+return syncpt_irq;
 }
 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);

View file

@@ -46,6 +46,8 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <asm/uaccess.h>
 #include <rdma/ib.h>
@@ -1115,6 +1117,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
 return -EINVAL;
+hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
 if (hdr.in + sizeof(hdr) > len)
 return -EINVAL;

View file

@@ -44,6 +44,8 @@
 #include <linux/module.h>
 #include <linux/nsproxy.h>
+#include <linux/nospec.h>
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
 #include <rdma/rdma_cm.h>
@@ -1627,6 +1629,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
 return -EINVAL;
+hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
 if (hdr.in + sizeof(hdr) > len)
 return -EINVAL;

View file

@@ -1251,6 +1251,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 { "ELAN0611", 0 },
 { "ELAN0612", 0 },
 { "ELAN0618", 0 },
+{ "ELAN061C", 0 },
 { "ELAN061D", 0 },
 { "ELAN0622", 0 },
 { "ELAN1000", 0 },

View file

@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
 static void gigaset_device_release(struct device *dev)
 {
-struct cardstate *cs = dev_get_drvdata(dev);
-if (!cs)
-return;
-dev_set_drvdata(dev, NULL);
-kfree(cs->hw.ser);
-cs->hw.ser = NULL;
+kfree(container_of(dev, struct ser_cardstate, dev.dev));
 }
 /*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
 cs->hw.ser = NULL;
 return rc;
 }
-dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
 tasklet_init(&cs->write_tasklet,
 gigaset_modem_fill, (unsigned long) cs);

View file

@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
 printk(KERN_INFO "%s: %s found\n", __func__,
 usbvision_device_data[model].model_string);
-/*
-* this is a security check.
-* an exploit using an incorrect bInterfaceNumber is known
-*/
-if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
-return -ENODEV;
 if (usbvision_device_data[model].interface >= 0)
 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
 else if (ifnum < dev->actconfig->desc.bNumInterfaces)

View file

@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
 return 0;
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
 static int config_hot_period(u16 val)
 {
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
 return config_hot_period(cycles32k);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
 int db8500_prcmu_stop_temp_sense(void)
 {
 return config_hot_period(0xFFFF);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
 static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
 {

View file

@@ -708,6 +708,12 @@ static const struct flash_info spi_nor_ids[] = {
 /* ISSI */
 { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 /* Macronix */
 { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },

View file

@@ -1107,11 +1107,11 @@ static void bond_compute_features(struct bonding *bond)
 gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
 gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
 }
+bond_dev->hard_header_len = max_hard_header_len;
 done:
 bond_dev->vlan_features = vlan_features;
 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
-bond_dev->hard_header_len = max_hard_header_len;
 bond_dev->gso_max_segs = gso_max_segs;
 netif_set_gso_max_size(bond_dev, gso_max_size);

View file

@@ -2147,6 +2147,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EPERM;
 if (copy_from_user(&t, useraddr, sizeof(t)))
 return -EFAULT;
+if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+return -EINVAL;
 if (t.qset_idx >= SGE_QSETS)
 return -EINVAL;
 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2246,6 +2248,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 if (copy_from_user(&t, useraddr, sizeof(t)))
 return -EFAULT;
+if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+return -EINVAL;
 /* Display qsets for all ports when offload enabled */
 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 q1 = 0;
@@ -2291,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EBUSY;
 if (copy_from_user(&edata, useraddr, sizeof(edata)))
 return -EFAULT;
+if (edata.cmd != CHELSIO_SET_QSET_NUM)
+return -EINVAL;
 if (edata.val < 1 ||
 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
 return -EINVAL;
@@ -2331,6 +2338,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EPERM;
 if (copy_from_user(&t, useraddr, sizeof(t)))
 return -EFAULT;
+if (t.cmd != CHELSIO_LOAD_FW)
+return -EINVAL;
 /* Check t.len sanity ? */
 fw_data = memdup_user(useraddr + sizeof(t), t.len);
 if (IS_ERR(fw_data))
@@ -2354,6 +2363,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EBUSY;
 if (copy_from_user(&m, useraddr, sizeof(m)))
 return -EFAULT;
+if (m.cmd != CHELSIO_SETMTUTAB)
+return -EINVAL;
 if (m.nmtus != NMTUS)
 return -EINVAL;
 if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2395,6 +2406,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EBUSY;
 if (copy_from_user(&m, useraddr, sizeof(m)))
 return -EFAULT;
+if (m.cmd != CHELSIO_SET_PM)
+return -EINVAL;
 if (!is_power_of_2(m.rx_pg_sz) ||
 !is_power_of_2(m.tx_pg_sz))
 return -EINVAL; /* not power of 2 */
@@ -2428,6 +2441,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EIO; /* need the memory controllers */
 if (copy_from_user(&t, useraddr, sizeof(t)))
 return -EFAULT;
+if (t.cmd != CHELSIO_GET_MEM)
+return -EINVAL;
 if ((t.addr & 7) || (t.len & 7))
 return -EINVAL;
 if (t.mem_id == MEM_CM)
@@ -2480,6 +2495,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 return -EAGAIN;
 if (copy_from_user(&t, useraddr, sizeof(t)))
 return -EFAULT;
+if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+return -EINVAL;
 tp = (const struct trace_params *)&t.sip;
 if (t.config_tx)

View file

@@ -223,17 +223,6 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
 E1000_STATUS_FUNC_SHIFT;
-/* Make sure the PHY is in a good state. Several people have reported
-* firmware leaving the PHY's page select register set to something
-* other than the default of zero, which causes the PHY ID read to
-* access something other than the intended register.
-*/
-ret_val = hw->phy.ops.reset(hw);
-if (ret_val) {
-hw_dbg("Error resetting the PHY.\n");
-goto out;
-}
 /* Set phy->phy_addr and phy->id. */
 ret_val = igb_get_phy_id_82575(hw);
 if (ret_val)

View file

@@ -312,7 +312,7 @@ enum ixgbe_ring_f_enum {
 };
 #define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_RSS_INDICES_X550 64
+#define IXGBE_MAX_RSS_INDICES_X550 63
 #define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
 #define IXGBE_MAX_FCOE_INDICES 8

View file

@@ -3508,7 +3508,7 @@ struct ixgbe_info {
 #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
 #define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV1 BIT(6)
+#define IXGBE_FUSES0_REV_MASK (3 << 6)
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
 #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)

View file

@@ -1873,10 +1873,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
 u32 save_autoneg;
 bool link_up;
-/* SW LPLU not required on later HW revisions. */
-if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
-return 0;
 /* If blocked by MNG FW, then don't restart AN */
 if (ixgbe_check_reset_blocked(hw))
 return 0;
@@ -2030,8 +2026,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
 }
 /* setup SW LPLU only for first revision */
-if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
-IXGBE_FUSES0_GROUP(0))))
+if (hw->mac.type == ixgbe_mac_X550EM_x &&
+!(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) &
+IXGBE_FUSES0_REV_MASK))
 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;

View file

@@ -1014,6 +1014,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 ixgbevf_for_each_ring(ring, q_vector->tx)
 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+if (budget <= 0)
+return budget;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 if (!ixgbevf_qv_lock_napi(q_vector))
 return budget;

View file

@@ -251,8 +251,11 @@ static u32 freq_to_shift(u16 freq)
 {
 u32 freq_khz = freq * 1000;
 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+u64 tmp_rounded =
+roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+max_val_cycles : tmp_rounded;
 /* calculate max possible multiplier in order to fit in 64bit */
 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);

View file

@@ -586,6 +586,8 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+int num_channels);
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 struct mlx5e_tx_wqe *wqe, int bf_sz)

View file

@@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev,
 mlx5e_close_locked(dev);
 priv->params.num_channels = count;
+mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+MLX5E_INDIR_RQT_SIZE, count);
 if (was_opened)
 err = mlx5e_open_locked(dev);

View file

@@ -1186,7 +1186,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
 ix = priv->params.indirection_rqt[ix];
-ix = ix % priv->params.num_channels;
 MLX5_SET(rqtc, rqtc, rq_num[i],
 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
 priv->channel[ix]->rq.rqn :
@@ -1304,7 +1303,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 lro_timer_supported_periods[2]));
 }
-static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
 {
 struct mlx5_core_dev *mdev = priv->mdev;
@@ -1312,6 +1311,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 void *tirc;
 int inlen;
 int err;
+int tt;
 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 in = mlx5_vzalloc(inlen);
@@ -1323,7 +1323,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 mlx5e_build_tir_ctx_lro(tirc, priv);
+for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+if (err)
+break;
+}
 kvfree(in);
@@ -1870,8 +1874,10 @@ static int mlx5e_set_features(struct net_device *netdev,
 mlx5e_close_locked(priv->netdev);
 priv->params.lro_en = !!(features & NETIF_F_LRO);
-mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
-mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+err = mlx5e_modify_tirs_lro(priv);
+if (err)
+mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+err);
 if (was_opened)
 err = mlx5e_open_locked(priv->netdev);
@@ -1976,12 +1982,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
 }
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+int num_channels)
+{
+int i;
+for (i = 0; i < len; i++)
+indirection_rqt[i] = i % num_channels;
+}
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 struct net_device *netdev,
 int num_channels)
 {
 struct mlx5e_priv *priv = netdev_priv(netdev);
-int i;
 priv->params.log_sq_size =
 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2005,8 +2019,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
 sizeof(priv->params.toeplitz_hash_key));
-for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
-priv->params.indirection_rqt[i] = i % num_channels;
+mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+MLX5E_INDIR_RQT_SIZE, num_channels);
 priv->params.lro_wqe_sz =
 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

View file

@@ -7540,16 +7540,14 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 struct net_device *dev = tp->dev;
 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
-int work_done= 0;
+int work_done;
 u16 status;
 status = rtl_get_events(tp);
 rtl_ack_events(tp, status & ~tp->event_slow);
-if (status & RTL_EVENT_NAPI_RX)
 work_done = rtl_rx(dev, tp, (u32) budget);
-if (status & RTL_EVENT_NAPI_TX)
 rtl_tx(dev, tp);
 if (status & tp->event_slow) {

View file

@@ -130,7 +130,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
 */
 int stmmac_mdio_reset(struct mii_bus *bus)
 {
-#if defined(CONFIG_STMMAC_PLATFORM)
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
 struct net_device *ndev = bus->priv;
 struct stmmac_priv *priv = netdev_priv(ndev);
 unsigned int mii_address = priv->hw->mii.addr;

View file

@@ -449,6 +449,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
 struct usbnet *dev = netdev_priv(net);
 u8 opt = 0;
+if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+return -EINVAL;
 if (wolinfo->wolopts & WAKE_PHY)
 opt |= AX_MONITOR_LINK;
 if (wolinfo->wolopts & WAKE_MAGIC)

View file

@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
 struct usbnet *dev = netdev_priv(net);
 u8 opt = 0;
+if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+return -EINVAL;
 if (wolinfo->wolopts & WAKE_PHY)
 opt |= AX_MONITOR_MODE_RWLC;
 if (wolinfo->wolopts & WAKE_MAGIC)

View file

@@ -1051,19 +1051,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
 if (ret < 0)
 return ret;
-pdata->wol = 0;
-if (wol->wolopts & WAKE_UCAST)
-pdata->wol |= WAKE_UCAST;
-if (wol->wolopts & WAKE_MCAST)
-pdata->wol |= WAKE_MCAST;
-if (wol->wolopts & WAKE_BCAST)
-pdata->wol |= WAKE_BCAST;
-if (wol->wolopts & WAKE_MAGIC)
-pdata->wol |= WAKE_MAGIC;
-if (wol->wolopts & WAKE_PHY)
-pdata->wol |= WAKE_PHY;
-if (wol->wolopts & WAKE_ARP)
-pdata->wol |= WAKE_ARP;
+if (wol->wolopts & ~WAKE_ALL)
+return -EINVAL;
+pdata->wol = wol->wolopts;
 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

View file

@@ -3663,6 +3663,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 if (!rtl_can_wakeup(tp))
 return -EOPNOTSUPP;
+if (wol->wolopts & ~WAKE_ANY)
+return -EINVAL;
 ret = usb_autopm_get_interface(tp->intf);
 if (ret < 0)
 goto out_set_wol;

View file

@@ -728,6 +728,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
 int ret;
+if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+return -EINVAL;
 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);

View file

@@ -727,6 +727,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 int ret;
+if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+return -EINVAL;
 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);

View file

@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
 struct usbnet *dev = netdev_priv(net);
 u8 opt = 0;
+if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+return -EINVAL;
 if (wolinfo->wolopts & WAKE_PHY)
 opt |= SR_MONITOR_LINK;
 if (wolinfo->wolopts & WAKE_MAGIC)


@@ -705,7 +705,7 @@ done:
 int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 struct sk_buff_head *pktq, uint totlen)
 {
-struct sk_buff *glom_skb;
+struct sk_buff *glom_skb = NULL;
 struct sk_buff *skb;
 u32 addr = sdiodev->sbwad;
 int err = 0;
@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 return -ENOMEM;
 err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
 glom_skb);
-if (err) {
-brcmu_pkt_buf_free_skb(glom_skb);
+if (err)
 goto done;
-}
 skb_queue_walk(pktq, skb) {
 memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 pktq);
 done:
+brcmu_pkt_buf_free_skb(glom_skb);
 return err;
 }


@@ -2547,7 +2547,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 list_add_tail(&data->list, &hwsim_radios);
 spin_unlock_bh(&hwsim_radio_lock);
-if (idx > 0)
 hwsim_mcast_new_radio(idx, info, param);
 return idx;


@@ -23,6 +23,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include "ptp_private.h"
 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -224,6 +226,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 err = -EINVAL;
 break;
 }
+pin_index = array_index_nospec(pin_index, ops->n_pins);
 if (mutex_lock_interruptible(&ptp->pincfg_mux))
 return -ERESTARTSYS;
 pd = ops->pin_config[pin_index];
@@ -242,6 +245,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 err = -EINVAL;
 break;
 }
+pin_index = array_index_nospec(pin_index, ops->n_pins);
 if (mutex_lock_interruptible(&ptp->pincfg_mux))
 return -ERESTARTSYS;
 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);


@@ -1416,8 +1416,8 @@ static int aac_acquire_resources(struct aac_dev *dev)
 /* After EEH recovery or suspend resume, max_msix count
 * may change, therfore updating in init as well.
 */
-aac_adapter_start(dev);
 dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+aac_adapter_start(dev);
 }
 return 0;


@@ -444,7 +444,7 @@ err_out:
 return -1;
 err_blink:
-return (status > 16) & 0xFF;
+return (status >> 16) & 0xFF;
 }
 /**


@@ -381,11 +381,12 @@ static void scsi_target_reap_ref_release(struct kref *kref)
 = container_of(kref, struct scsi_target, reap_ref);
 /*
-* if we get here and the target is still in the CREATED state that
+* if we get here and the target is still in a CREATED state that
 * means it was allocated but never made visible (because a scan
 * turned up no LUNs), so don't call device_del() on it.
 */
-if (starget->state != STARGET_CREATED) {
+if ((starget->state != STARGET_CREATED) &&
+(starget->state != STARGET_CREATED_REMOVE)) {
 transport_remove_device(&starget->dev);
 device_del(&starget->dev);
 }


@@ -1212,10 +1212,14 @@ restart:
 spin_lock_irqsave(shost->host_lock, flags);
 list_for_each_entry(starget, &shost->__targets, siblings) {
 if (starget->state == STARGET_DEL ||
-starget->state == STARGET_REMOVE)
+starget->state == STARGET_REMOVE ||
+starget->state == STARGET_CREATED_REMOVE)
 continue;
 if (starget->dev.parent == dev || &starget->dev == dev) {
 kref_get(&starget->reap_ref);
+if (starget->state == STARGET_CREATED)
+starget->state = STARGET_CREATED_REMOVE;
+else
 starget->state = STARGET_REMOVE;
 spin_unlock_irqrestore(shost->host_lock, flags);
 __scsi_remove_target(starget);


@@ -336,8 +336,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(dev, "no irq\n");
-return -ENXIO;
+dev_err(dev, "no irq: %d\n", irq);
+return irq;
 }
 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);


@@ -496,8 +496,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(dev, "no irq\n");
-return -ENXIO;
+dev_err(dev, "no irq: %d\n", irq);
+return irq;
 }
 clk = devm_clk_get(dev, "spi");


@@ -392,8 +392,8 @@ static int xlp_spi_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(&pdev->dev, "no IRQ resource found\n");
-return -EINVAL;
+dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
+return irq;
 }
 err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
 pdev->name, xspi);


@@ -195,7 +195,7 @@ config IMX_THERMAL
 passive trip is crossed.
 config SPEAR_THERMAL
-bool "SPEAr thermal sensor driver"
+tristate "SPEAr thermal sensor driver"
 depends on PLAT_SPEAR || COMPILE_TEST
 depends on OF
 help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
 framework.
 config DB8500_THERMAL
-bool "DB8500 thermal management"
-depends on ARCH_U8500
+tristate "DB8500 thermal management"
+depends on MFD_DB8500_PRCMU
 default y
 help
 Adds DB8500 thermal management implementation according to the thermal


@@ -731,8 +731,8 @@ static int sprd_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(&pdev->dev, "not provide irq resource\n");
-return -ENODEV;
+dev_err(&pdev->dev, "not provide irq resource: %d\n", irq);
+return irq;
 }
 up->irq = irq;


@@ -332,17 +332,17 @@ static void acm_ctrl_irq(struct urb *urb)
 if (difference & ACM_CTRL_DSR)
 acm->iocount.dsr++;
-if (difference & ACM_CTRL_BRK)
-acm->iocount.brk++;
-if (difference & ACM_CTRL_RI)
-acm->iocount.rng++;
 if (difference & ACM_CTRL_DCD)
 acm->iocount.dcd++;
-if (difference & ACM_CTRL_FRAMING)
+if (newctrl & ACM_CTRL_BRK)
+acm->iocount.brk++;
+if (newctrl & ACM_CTRL_RI)
+acm->iocount.rng++;
+if (newctrl & ACM_CTRL_FRAMING)
 acm->iocount.frame++;
-if (difference & ACM_CTRL_PARITY)
+if (newctrl & ACM_CTRL_PARITY)
 acm->iocount.parity++;
-if (difference & ACM_CTRL_OVERRUN)
+if (newctrl & ACM_CTRL_OVERRUN)
 acm->iocount.overrun++;
 spin_unlock(&acm->read_lock);


@@ -1329,8 +1329,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 u = 0;
 switch (uurb->type) {
 case USBDEVFS_URB_TYPE_CONTROL:
-if (is_in)
-allow_short = true;
 if (!usb_endpoint_xfer_control(&ep->desc))
 return -EINVAL;
 /* min 8 byte setup packet */
@@ -1360,6 +1358,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 is_in = 0;
 uurb->endpoint &= ~USB_DIR_IN;
 }
+if (is_in)
+allow_short = true;
 snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
 "bRequest=%02x wValue=%04x "
 "wIndex=%04x wLength=%04x\n",


@@ -469,8 +469,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(dev, "missing IRQ resource\n");
-return -EINVAL;
+dev_err(dev, "missing IRQ resource: %d\n", irq);
+return irq;
 }
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);


@@ -220,6 +220,8 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/composite.h>
+#include <linux/nospec.h>
 #include "configfs.h"
@@ -3260,6 +3262,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
 fsg_opts = to_fsg_opts(&group->cg_item);
 if (num >= FSG_MAX_LUNS)
 return ERR_PTR(-ERANGE);
+num = array_index_nospec(num, FSG_MAX_LUNS);
 mutex_lock(&fsg_opts->lock);
 if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {


@@ -130,8 +130,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(dev, "EHCI irq failed\n");
-return -ENODEV;
+dev_err(dev, "EHCI irq failed: %d\n", irq);
+return irq;
 }
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);


@@ -1849,8 +1849,10 @@ static int imx21_probe(struct platform_device *pdev)
 if (!res)
 return -ENODEV;
 irq = platform_get_irq(pdev, 0);
-if (irq < 0)
-return -ENXIO;
+if (irq < 0) {
+dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+return irq;
+}
 hcd = usb_create_hcd(&imx21_hc_driver,
 &pdev->dev, dev_name(&pdev->dev));


@@ -808,12 +808,24 @@ Retry_Sense:
 if (result == USB_STOR_TRANSPORT_GOOD) {
 srb->result = SAM_STAT_GOOD;
 srb->sense_buffer[0] = 0x0;
+}
+/*
+* ATA-passthru commands use sense data to report
+* the command completion status, and often devices
+* return Check Condition status when nothing is
+* wrong.
+*/
+else if (srb->cmnd[0] == ATA_16 ||
+srb->cmnd[0] == ATA_12) {
+/* leave the data alone */
+}
 /* If there was a problem, report an unspecified
 * hardware error to prevent the higher layers from
 * entering an infinite retry loop.
 */
-} else {
+else {
 srb->result = DID_ERROR << 16;
 if ((sshdr.response_code & 0x72) == 0x72)
 srb->sense_buffer[1] = HARDWARE_ERROR;


@@ -27,6 +27,7 @@
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sort.h>
+#include <linux/nospec.h>
 #include "vhost.h"
@@ -748,6 +749,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 if (idx >= d->nvqs)
 return -ENOBUFS;
+idx = array_index_nospec(idx, d->nvqs);
 vq = d->vqs[idx];
 mutex_lock(&vq->mutex);


@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 /*
 * enable controller clock
 */
-clk_enable(fbi->clk);
+clk_prepare_enable(fbi->clk);
 pxa168fb_set_par(info);
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_cmap:
 fb_dealloc_cmap(&info->cmap);
 failed_free_clk:
-clk_disable(fbi->clk);
+clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
 dma_free_coherent(fbi->dev, info->fix.smem_len,
 info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
 dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
 info->screen_base, info->fix.smem_start);
-clk_disable(fbi->clk);
+clk_disable_unprepare(fbi->clk);
 framebuffer_release(info);


@@ -626,8 +626,8 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
 /* request the IRQ */
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
-dev_err(dev, "no IRQ defined\n");
-return -ENODEV;
+dev_err(dev, "no IRQ defined: %d\n", irq);
+return irq;
 }
 ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,


@@ -1608,7 +1608,7 @@ fail:
 return ret;
 }
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
 u64 root_id)
 {
 struct btrfs_root *root;


@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
 struct btrfs_key *location);
 int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+u64 root_id);
 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
 struct btrfs_root *root);
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);


@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 root_key.objectid = key.offset;
 key.offset++;
+/*
+* The root might have been inserted already, as before we look
+* for orphan roots, log replay might have happened, which
+* triggers a transaction commit and qgroup accounting, which
+* in turn reads and inserts fs roots while doing backref
+* walking.
+*/
+root = btrfs_lookup_fs_root(tree_root->fs_info,
+root_key.objectid);
+if (root) {
+WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+&root->state));
+if (btrfs_root_refs(&root->root_item) == 0)
+btrfs_add_dead_root(root);
+continue;
+}
 root = btrfs_read_fs_root(tree_root, &root_key);
 err = PTR_ERR_OR_ZERO(root);
 if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 err = btrfs_insert_fs_root(root->fs_info, root);
-/*
-* The root might have been inserted already, as before we look
-* for orphan roots, log replay might have happened, which
-* triggers a transaction commit and qgroup accounting, which
-* in turn reads and inserts fs roots while doing backref
-* walking.
-*/
-if (err == -EEXIST)
-err = 0;
 if (err) {
+BUG_ON(err == -EEXIST);
 btrfs_free_fs_root(root);
 break;
 }


@@ -317,7 +317,7 @@ try_again:
 trap = lock_rename(cache->graveyard, dir);
 /* do some checks before getting the grave dentry */
-if (rep->d_parent != dir) {
+if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
 /* the entry was probably culled when we dropped the parent dir
 * lock */
 unlock_rename(cache->graveyard, dir);


@@ -3674,6 +3674,9 @@ try_mount_again:
 if (IS_ERR(tcon)) {
 rc = PTR_ERR(tcon);
 tcon = NULL;
+if (rc == -EACCES)
+goto mount_fail_check;
 goto remote_path_check;
 }


@@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
 if (ops->ent_get(&fatent) == FAT_ENT_FREE)
 free++;
 } while (fat_ent_next(sbi, &fatent));
+cond_resched();
 }
 sbi->free_clusters = free;
 sbi->free_clus_valid = 1;


@@ -625,7 +625,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
 struct fuse_io_priv *io = req->io;
 ssize_t pos = -1;
-fuse_release_user_pages(req, !io->write);
+fuse_release_user_pages(req, io->should_dirty);
 if (io->write) {
 if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1333,7 +1333,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 loff_t *ppos, int flags)
 {
 int write = flags & FUSE_DIO_WRITE;
-bool should_dirty = !write && iter_is_iovec(iter);
 int cuse = flags & FUSE_DIO_CUSE;
 struct file *file = io->file;
 struct inode *inode = file->f_mapping->host;
@@ -1362,6 +1361,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 mutex_unlock(&inode->i_mutex);
 }
+io->should_dirty = !write && iter_is_iovec(iter);
 while (count) {
 size_t nres;
 fl_owner_t owner = current->files;
@@ -1378,7 +1378,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 nres = fuse_send_read(req, io, pos, nbytes, owner);
 if (!io->async)
-fuse_release_user_pages(req, should_dirty);
+fuse_release_user_pages(req, io->should_dirty);
 if (req->out.h.error) {
 if (!res)
 res = req->out.h.error;


@@ -252,6 +252,7 @@ struct fuse_io_priv {
 size_t size;
 __u64 offset;
 bool write;
+bool should_dirty;
 int err;
 struct kiocb *iocb;
 struct file *file;


@@ -177,7 +177,6 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);
 struct bpf_prog *bpf_prog_get(u32 ufd);
 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
-void bpf_prog_put_rcu(struct bpf_prog *prog);
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -208,10 +207,6 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
-static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
-{
-}
 #endif /* CONFIG_BPF_SYSCALL */
 /* verifier prototypes for helper functions called from eBPF programs */


@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
 #else
 static inline void disable_cpuidle(void) { }
 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)


@@ -382,6 +382,7 @@ static inline __must_check
 void **radix_tree_iter_retry(struct radix_tree_iter *iter)
 {
 iter->next_index = iter->index;
+iter->tags = 0;
 return NULL;
 }


@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
 to = from | htonl(INET_ECN_CE << 20);
 *(__be32 *)iph = to;
 if (skb->ip_summed == CHECKSUM_COMPLETE)
-skb->csum = csum_add(csum_sub(skb->csum, from), to);
+skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+(__force __wsum)to);
 return 1;
 }


@@ -240,6 +240,7 @@ enum scsi_target_state {
 STARGET_CREATED = 1,
 STARGET_RUNNING,
 STARGET_REMOVE,
+STARGET_CREATED_REMOVE,
 STARGET_DEL,
 };


@@ -270,9 +270,7 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
 static void prog_fd_array_put_ptr(void *ptr)
 {
-struct bpf_prog *prog = ptr;
-bpf_prog_put_rcu(prog);
+bpf_prog_put(ptr);
 }
 /* decrement refcnt of all bpf_progs that are stored in this map */


@@ -487,7 +487,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 free_uid(user);
 }
-static void __prog_put_common(struct rcu_head *rcu)
+static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -496,17 +496,10 @@ static void __prog_put_common(struct rcu_head *rcu)
 bpf_prog_free(aux->prog);
 }
-/* version of bpf_prog_put() that is called after a grace period */
-void bpf_prog_put_rcu(struct bpf_prog *prog)
-{
-if (atomic_dec_and_test(&prog->aux->refcnt))
-call_rcu(&prog->aux->rcu, __prog_put_common);
-}
 void bpf_prog_put(struct bpf_prog *prog)
 {
 if (atomic_dec_and_test(&prog->aux->refcnt))
-__prog_put_common(&prog->aux->rcu);
+call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
@@ -514,7 +507,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 {
 struct bpf_prog *prog = filp->private_data;
-bpf_prog_put_rcu(prog);
+bpf_prog_put(prog);
 return 0;
 }


@@ -7023,6 +7023,8 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 goto unlock;
 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+if (event->cpu != smp_processor_id())
+continue;
 if (event->attr.type != PERF_TYPE_TRACEPOINT)
 continue;
 if (event->attr.config != entry->type)
@@ -7144,7 +7146,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
 prog = event->tp_event->prog;
 if (prog && event->tp_event->bpf_prog_owner == event) {
 event->tp_event->prog = NULL;
-bpf_prog_put_rcu(prog);
+bpf_prog_put(prog);
 }
 }
@@ -8538,6 +8540,7 @@ SYSCALL_DEFINE5(perf_event_open,
 f_flags);
 if (IS_ERR(event_file)) {
 err = PTR_ERR(event_file);
+event_file = NULL;
 goto err_context;
 }


@@ -8144,11 +8144,9 @@ void sched_destroy_group(struct task_group *tg)
 void sched_offline_group(struct task_group *tg)
 {
 unsigned long flags;
-int i;
 /* end participation in shares distribution */
-for_each_possible_cpu(i)
-unregister_fair_sched_group(tg, i);
+unregister_fair_sched_group(tg);
 spin_lock_irqsave(&task_group_lock, flags);
 list_del_rcu(&tg->list);


@@ -4104,9 +4104,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 /*
 * Add to the _head_ of the list, so that an already-started
-* distribute_cfs_runtime will not see us
+* distribute_cfs_runtime will not see us. If disribute_cfs_runtime is
+* not running add to the tail so that later runqueues don't get starved.
 */
+if (cfs_b->distribute_running)
 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+else
+list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 /*
 * If we're the first throttled task, make sure the bandwidth
@@ -4249,14 +4253,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 * in us over-using our runtime if it is all used during this loop, but
 * only by limited amounts in that extreme case.
 */
-while (throttled && cfs_b->runtime > 0) {
+while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 runtime = cfs_b->runtime;
+cfs_b->distribute_running = 1;
 raw_spin_unlock(&cfs_b->lock);
 /* we can't nest cfs_b->lock while distributing bandwidth */
 runtime = distribute_cfs_runtime(cfs_b, runtime,
 runtime_expires);
 raw_spin_lock(&cfs_b->lock);
+cfs_b->distribute_running = 0;
 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4367,6 +4373,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 /* confirm we're still not at a refresh boundary */
 raw_spin_lock(&cfs_b->lock);
+if (cfs_b->distribute_running) {
+raw_spin_unlock(&cfs_b->lock);
+return;
+}
 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 raw_spin_unlock(&cfs_b->lock);
 return;
@@ -4376,6 +4387,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 runtime = cfs_b->runtime;
 expires = cfs_b->runtime_expires;
+if (runtime)
+cfs_b->distribute_running = 1;
 raw_spin_unlock(&cfs_b->lock);
 if (!runtime)
@@ -4386,6 +4400,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 raw_spin_lock(&cfs_b->lock);
 if (expires == cfs_b->runtime_expires)
 cfs_b->runtime -= min(runtime, cfs_b->runtime);
+cfs_b->distribute_running = 0;
 raw_spin_unlock(&cfs_b->lock);
 }
@@ -4497,6 +4512,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 cfs_b->period_timer.function = sched_cfs_period_timer;
 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 cfs_b->slack_timer.function = sched_cfs_slack_timer;
+cfs_b->distribute_running = 0;
 }
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -10265,12 +10281,9 @@ void free_fair_sched_group(struct task_group *tg)
 for_each_possible_cpu(i) {
 if (tg->cfs_rq)
 kfree(tg->cfs_rq[i]);
-if (tg->se) {
-if (tg->se[i])
-remove_entity_load_avg(tg->se[i]);
+if (tg->se)
 kfree(tg->se[i]);
-}
 }
 kfree(tg->cfs_rq);
 kfree(tg->se);
@@ -10324,22 +10337,30 @@ err:
 return 0;
 }
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
 {
-struct rq *rq = cpu_rq(cpu);
 unsigned long flags;
+struct rq *rq;
+int cpu;
+for_each_possible_cpu(cpu) {
+if (tg->se[cpu])
+remove_entity_load_avg(tg->se[cpu]);
 /*
 * Only empty task groups can be destroyed; so we can speculatively
 * check on_list without danger of it being re-added.
 */
 if (!tg->cfs_rq[cpu]->on_list)
-return;
+continue;
+rq = cpu_rq(cpu);
 raw_spin_lock_irqsave(&rq->lock, flags);
 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
 raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
+}
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 struct sched_entity *se, int cpu,
@@ -10422,7 +10443,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 return 1;
 }
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
 #endif /* CONFIG_FAIR_GROUP_SCHED */


@@ -133,7 +133,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 */
 static void cpuidle_idle_call(void)
 {
-struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+struct cpuidle_device *dev = cpuidle_get_device();
 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 int next_state, entered_state;


@@ -235,6 +235,8 @@ struct cfs_bandwidth {
 /* statistics */
 int nr_periods, nr_throttled;
 u64 throttled_time;
+bool distribute_running;
 #endif
 };
@@ -310,7 +312,7 @@ extern int tg_nop(struct task_group *tg, void *data);
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 struct sched_entity *se, int cpu,
 struct sched_entity *parent);


@@ -1803,7 +1803,17 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 {
 __buffer_unlock_commit(buffer, event);
-ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
+/*
+* If regs is not set, then skip the following callers:
+* trace_buffer_unlock_commit_regs
+* event_trigger_unlock_commit
+* trace_event_buffer_commit
+* trace_event_raw_event_sched_switch
+* Note, we can still get here via blktrace, wakeup tracer
+* and mmiotrace, but that's ok if they lose a function or
+* two. They are that meaningful.
+*/
+ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
 ftrace_trace_userstack(buffer, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -1861,6 +1871,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 trace.nr_entries = 0;
 trace.skip = skip;
+/*
+* Add two, for this function and the call to save_stack_trace()
+* If regs is set, then these functions will not be in the way.
+*/
+if (!regs)
+trace.skip += 2;
 /*
 * Since events can happen in NMIs there's no safe way to
 * use the per cpu ftrace_stacks. We reserve it and if an interrupt


@@ -1510,7 +1510,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 spinlock_t *old_ptl, *new_ptl;
 int ret = 0;
 pmd_t pmd;
+bool force_flush = false;
 struct mm_struct *mm = vma->vm_mm;
 if ((old_addr & ~HPAGE_PMD_MASK) ||
@@ -1538,6 +1538,8 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 if (new_ptl != old_ptl)
 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+if (pmd_present(pmd))
+force_flush = true;
 VM_BUG_ON(!pmd_none(*new_pmd));
 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
@@ -1546,6 +1548,8 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 }
 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+if (force_flush)
+flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
 if (new_ptl != old_ptl)
 spin_unlock(new_ptl);
 spin_unlock(old_ptl);


@@ -96,6 +96,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 struct mm_struct *mm = vma->vm_mm;
 pte_t *old_pte, *new_pte, pte;
 spinlock_t *old_ptl, *new_ptl;
+bool force_flush = false;
+unsigned long len = old_end - old_addr;
 /*
 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -143,12 +145,26 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 if (pte_none(*old_pte))
 continue;
 pte = ptep_get_and_clear(mm, old_addr, old_pte);
+/*
+* If we are remapping a valid PTE, make sure
+* to flush TLB before we drop the PTL for the PTE.
+*
+* NOTE! Both old and new PTL matter: the old one
+* for racing with page_mkclean(), the new one to
+* make sure the physical page stays valid until
+* the TLB entry for the old mapping has been
+* flushed.
+*/
+if (pte_present(pte))
+force_flush = true;
 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 pte = move_soft_dirty_pte(pte);
 set_pte_at(mm, new_addr, new_pte, pte);
 }
 arch_leave_lazy_mmu_mode();
+if (force_flush)
+flush_tlb_range(vma, old_end - len, old_end);
 if (new_ptl != old_ptl)
 spin_unlock(new_ptl);
 pte_unmap(new_pte - 1);
@@ -168,7 +184,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
 unsigned long extent, next, old_end;
 pmd_t *old_pmd, *new_pmd;
-bool need_flush = false;
 unsigned long mmun_start; /* For mmu_notifiers */
 unsigned long mmun_end; /* For mmu_notifiers */
@@ -207,7 +222,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 anon_vma_unlock_write(vma->anon_vma);
 }
 if (err > 0) {
-need_flush = true;
 continue;
 } else if (!err) {
 split_huge_page_pmd(vma, old_addr, old_pmd);
@@ -224,10 +238,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 extent = LATENCY_LIMIT;
 move_ptes(vma, old_pmd, old_addr, old_addr + extent,
 new_vma, new_pmd, new_addr, need_rmap_locks);
-need_flush = true;
 }
-if (likely(need_flush))
-flush_tlb_range(vma, old_end-len, old_addr);
 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

Some files were not shown because too many files have changed in this diff.