From 04af12976bc6d6da87438cd9a7f7f195a34e2d60 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 24 Oct 2017 07:47:14 -0700 Subject: [PATCH 001/312] FROMLIST: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io (cherry picked from arm64/for-next/core commit 9ca255bf041ddc7698b6906dbd846c0ba64b1fe1) __memcpy_{to,from}io fall back to byte-at-a-time copying if both the source and destination pointers are not 8-byte aligned. Since one of the pointers always points at normal memory, this is unnecessary and detrimental to performance, so only do byte copying until we hit an 8-byte boundary for the device pointer. This change was motivated by performance issues in the pstore driver. On a test platform, measuring probe time for pstore, console buffer size of 1/4MB and pmsg of 1/2MB, was in the 90-107ms region. Change managed to reduce it to 10-25ms, an improvement in boot time. Cc: Kees Cook Cc: Anton Vorontsov Cc: Tony Luck Cc: Catalin Marinas Cc: Will Deacon Cc: Anton Vorontsov Cc: Robin Murphy Signed-off-by: Mark Salyzyn Signed-off-by: Will Deacon Bug: 63716230 Change-Id: I245545e8243a54b44d30fbb0d0c71a9b8a77ef63 --- arch/arm64/kernel/io.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 354be2a872ae..79b17384effa 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -25,8 +25,7 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)from, 8) || - !IS_ALIGNED((unsigned long)to, 8))) { + while (count && !IS_ALIGNED((unsigned long)from, 8)) { *(u8 *)to = __raw_readb(from); from++; to++; @@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)to, 8) || - !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb(*(volatile u8 *)from, to); + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq(*(volatile u64 *)from, to); + __raw_writeq(*(u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb(*(volatile u8 *)from, to); + __raw_writeb(*(u8 *)from, to); from++; to++; count--; From 6b6637fd4678e15dd6b4d19fb9b6539a201bdbd6 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Mon, 13 Nov 2017 09:55:21 +0100 Subject: [PATCH 002/312] ANDROID: binder: clarify deferred thread work. Rename the function to more accurately reflect what it does, and add a comment explaining why we use it. Change-Id: I8d011c017dfc6e24b5b54fc462578f8e153e5926 Signed-off-by: Martijn Coenen --- drivers/android/binder.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 6dfcc80f3fc6..1177a6617964 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -837,7 +837,7 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_thread_work_ilocked_nowake() - Add thread work + * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work * @thread: thread to queue work to * @work: struct binder_work to add to list * @@ -848,8 +848,8 @@ binder_enqueue_work_ilocked(struct binder_work *work, * Requires the proc->inner_lock to be held. 
*/ static void -binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread, - struct binder_work *work) +binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) { binder_enqueue_work_ilocked(work, &thread->todo); } @@ -3334,7 +3334,14 @@ static void binder_transaction(struct binder_proc *proc, } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); - binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete); + /* + * Defer the TRANSACTION_COMPLETE, so we don't return to + * userspace immediately; this allows the target process to + * immediately start processing this transaction, reducing + * latency. We will then return the TRANSACTION_COMPLETE when + * the target replies (or there is an error). + */ + binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; From 8a56b1b25e3cc47cafca99f18e4b17f2b7069424 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 29 Nov 2016 09:23:48 -0200 Subject: [PATCH 003/312] adv7604: Initialize drive strength to default when using DT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit da8892d410db224d9a24104529794e6e37e0c100 ] The adv7604 driver platform data contains fields for configuring the drive strength of the output pins. When probing the driver through DT these fields are not explicitly initialized, which means they are left at 0. This is a reserved setting for the drive strength configuration though and can cause signal integrity issues. Whether these signal integrity issues are visible depends on the PCB specifics (e.g. the higher the load capacitance for the output the more visible the issue). But it has been observed on existing solutions at high pixel clock rates. Initialize the drive strength settings to the power-on-reset value of the device when probing through devicetree to avoid this issue. Fixes: 0e158be0162b ("adv7604: Add DT support") Signed-off-by: Lars-Peter Clausen Reviewed-by: Laurent Pinchart Tested-by: Niklas Söderlund Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/i2c/adv7604.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 01adcdc52346..a9e2722f5e22 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2856,6 +2856,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state) state->pdata.alt_data_sat = 1; state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0; state->pdata.bus_order = ADV7604_BUS_ORDER_RGB; + state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH; + state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH; + state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH; return 0; } From 414aa11de923d1c855ca1ef3d046ee27e1cf7a0f Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 30 Jan 2017 17:39:48 +0100 Subject: [PATCH 004/312] video: fbdev: pmag-ba-fb: Remove bad `__init' annotation [ Upstream commit 879e5a0df626f39cbb3c61bb90373e56d67012c4 ] Fix: WARNING: drivers/video/fbdev/pmag-ba-fb.o(.text+0x308): Section mismatch in reference from the function pmagbafb_probe() to the function .init.text:pmagbafb_erase_cursor() The function pmagbafb_probe() references the function __init pmagbafb_erase_cursor(). 
This is often because pmagbafb_probe lacks a __init annotation or the annotation of pmagbafb_erase_cursor is wrong. -- a fallout from a missed update from commit 9625b51350cc ("VIDEO: PMAG-BA: Fix section mismatch") and then commit 48c68c4f1b54 ("Drivers: video: remove __dev* attributes.") Signed-off-by: Maciej W. Rozycki Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/pmag-ba-fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index 914a52ba8477..77837665ce89 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = { /* * Turn the hardware cursor off. */ -static void __init pmagbafb_erase_cursor(struct fb_info *info) +static void pmagbafb_erase_cursor(struct fb_info *info) { struct pmagbafb_par *par = info->par; From f3170e6607be5521f3964822d3eb4052c5517774 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 12 Dec 2016 11:30:20 -0700 Subject: [PATCH 005/312] PCI: mvebu: Handle changes to the bridge windows while enabled [ Upstream commit d9bf28e2650fe3eeefed7e34841aea07d10c6543 ] The PCI core will write to the bridge window config multiple times while they are enabled. This can lead to mbus failures like this: mvebu_mbus: cannot add window '4:e8', conflicts with another window mvebu-pcie mbus:pex@e0000000: Could not create MBus window at [mem 0xe0000000-0xe00fffff]: -22 For me this is happening during a hotplug cycle. The PCI core is not changing the values, just writing them twice while active. The patch addresses the general case of any change to an active window, but not atomically. The code is slightly refactored so io and mem can share more of the window logic. Signed-off-by: Jason Gunthorpe Signed-off-by: Bjorn Helgaas Acked-by: Jason Cooper Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/host/pci-mvebu.c | 97 +++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 39 deletions(-) diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 53b79c5f0559..379d08f76146 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -131,6 +131,12 @@ struct mvebu_pcie { int nports; }; +struct mvebu_pcie_window { + phys_addr_t base; + phys_addr_t remap; + size_t size; +}; + /* Structure representing one PCIe interface */ struct mvebu_pcie_port { char *name; @@ -148,10 +154,8 @@ struct mvebu_pcie_port { struct mvebu_sw_pci_bridge bridge; struct device_node *dn; struct mvebu_pcie *pcie; - phys_addr_t memwin_base; - size_t memwin_size; - phys_addr_t iowin_base; - size_t iowin_size; + struct mvebu_pcie_window memwin; + struct mvebu_pcie_window iowin; u32 saved_pcie_stat; }; @@ -377,23 +381,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, } } +static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + const struct mvebu_pcie_window *desired, + struct mvebu_pcie_window *cur) +{ + if (desired->base == cur->base && desired->remap == cur->remap && + desired->size == cur->size) + return; + + if (cur->size != 0) { + mvebu_pcie_del_windows(port, cur->base, cur->size); + cur->size = 0; + cur->base = 0; + + /* + * If something tries to change the window while it is enabled + * the change will not be done atomically. That would be + * difficult to do in the general case. 
+ */ + } + + if (desired->size == 0) + return; + + mvebu_pcie_add_windows(port, target, attribute, desired->base, + desired->size, desired->remap); + *cur = *desired; +} + static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { - phys_addr_t iobase; + struct mvebu_pcie_window desired = {}; /* Are the new iobase/iolimit values invalid? */ if (port->bridge.iolimit < port->bridge.iobase || port->bridge.iolimitupper < port->bridge.iobaseupper || !(port->bridge.command & PCI_COMMAND_IO)) { - - /* If a window was configured, remove it */ - if (port->iowin_base) { - mvebu_pcie_del_windows(port, port->iowin_base, - port->iowin_size); - port->iowin_base = 0; - port->iowin_size = 0; - } - + mvebu_pcie_set_window(port, port->io_target, port->io_attr, + &desired, &port->iowin); return; } @@ -410,32 +436,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) * specifications. iobase is the bus address, port->iowin_base * is the CPU address. */ - iobase = ((port->bridge.iobase & 0xF0) << 8) | - (port->bridge.iobaseupper << 16); - port->iowin_base = port->pcie->io.start + iobase; - port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | - (port->bridge.iolimitupper << 16)) - - iobase) + 1; + desired.remap = ((port->bridge.iobase & 0xF0) << 8) | + (port->bridge.iobaseupper << 16); + desired.base = port->pcie->io.start + desired.remap; + desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | + (port->bridge.iolimitupper << 16)) - + desired.remap) + + 1; - mvebu_pcie_add_windows(port, port->io_target, port->io_attr, - port->iowin_base, port->iowin_size, - iobase); + mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, + &port->iowin); } static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) { + struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; + /* Are the new membase/memlimit values invalid? */ if (port->bridge.memlimit < port->bridge.membase || !(port->bridge.command & PCI_COMMAND_MEMORY)) { - - /* If a window was configured, remove it */ - if (port->memwin_base) { - mvebu_pcie_del_windows(port, port->memwin_base, - port->memwin_size); - port->memwin_base = 0; - port->memwin_size = 0; - } - + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, + &desired, &port->memwin); return; } @@ -445,14 +466,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) * window to setup, according to the PCI-to-PCI bridge * specifications. */ - port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); - port->memwin_size = - (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - port->memwin_base + 1; + desired.base = ((port->bridge.membase & 0xFFF0) << 16); + desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - + desired.base + 1; - mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, - port->memwin_base, port->memwin_size, - MVEBU_MBUS_NO_REMAP); + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, + &port->memwin); } /* From 4e0d26b22e79219771e5ffe954986154d37b1439 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 10 Jan 2017 14:32:52 +0100 Subject: [PATCH 006/312] xen/netback: set default upper limit of tx/rx queues to 8 [ Upstream commit 56dd5af9bc23d0d5d23bb207c477715b4c2216c5 ] The default for the maximum number of tx/rx queues of one interface is the number of cpus of the system today. As each queue pair reserves 512 grant pages this default consumes a ridiculous number of grants for large guests. 
Limit the queue number to 8 as default. This value can be modified via a module parameter if required. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netback/netback.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 72ee1c305cc4..02db20b26749 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444); unsigned int rx_stall_timeout_msecs = 60000; module_param(rx_stall_timeout_msecs, uint, 0444); +#define MAX_QUEUES_DEFAULT 8 unsigned int xenvif_max_queues; module_param_named(max_queues, xenvif_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, @@ -2157,11 +2158,12 @@ static int __init netback_init(void) if (!xen_domain()) return -ENODEV; - /* Allow as many queues as there are CPUs if user has not + /* Allow as many queues as there are CPUs but max. 8 if user has not * specified a value. */ if (xenvif_max_queues == 0) - xenvif_max_queues = num_online_cpus(); + xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, + num_online_cpus()); if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", From 2393241b7a223a87761ab47fea43dbe466198629 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= Date: Thu, 26 Jan 2017 23:56:04 +0100 Subject: [PATCH 007/312] drm: drm_minor_register(): Clean up debugfs on failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a67834f8bfa1e2f48bb27d07b9a552ba7c3af82a ] Call drm_debugfs_cleanup() in case drm_debugfs_init() fails to cover for failure in the drm_driver.debugfs_init callback. Signed-off-by: Noralf Trønnes Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170126225621.12314-3-noralf@tronnes.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7dd6728dd092..ccc2044af831 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -312,7 +312,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type) ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); - return ret; + goto err_debugfs; } ret = device_add(minor->kdev); From f20065b466d4f7b2883b3ef26f4b1a51d8ddee91 Mon Sep 17 00:00:00 2001 From: Li Zhong Date: Fri, 11 Nov 2016 12:57:33 +0800 Subject: [PATCH 008/312] KVM: PPC: Book 3S: XICS: correct the real mode ICP rejecting counter [ Upstream commit 37451bc95dee0e666927d6ffdda302dbbaaae6fa ] Some counters are added in Commit 6e0365b78273 ("KVM: PPC: Book3S HV: Add ICP real mode counters"), to provide some performance statistics to determine whether further optimizing is needed for real mode functions. The n_reject counter counts how many times ICP rejects an irq because of priority in real mode. The redelivery of an lsi that is still asserted after eoi doesn't fall into this category, so the increasement there is removed. Also, it needs to be increased in icp_rm_deliver_irq() if it rejects another one. 
Signed-off-by: Li Zhong Signed-off-by: Paul Mackerras Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv_rm_xics.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 24f58076d49e..1d2bc84338bf 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -280,6 +280,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, */ if (reject && reject != XICS_IPI) { arch_spin_unlock(&ics->lock); + icp->n_reject++; new_irq = reject; goto again; } @@ -611,10 +612,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) state = &ics->irq_state[src]; /* Still asserted, resend it */ - if (state->asserted) { - icp->n_reject++; + if (state->asserted) icp_rm_deliver_irq(xics, icp, irq); - } if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) { icp->rm_action |= XICS_RM_NOTIFY_EOI; From 158bc64a4dfefa65772d8ca555f5909519dff3ce Mon Sep 17 00:00:00 2001 From: Nate Watterson Date: Tue, 20 Dec 2016 23:11:48 -0500 Subject: [PATCH 009/312] iommu/arm-smmu-v3: Clear prior settings when updating STEs [ Upstream commit 810871c57011eb3e89e6768932757f169d666cd2 ] To prevent corruption of the stage-1 context pointer field when updating STEs, rebuild the entire containing dword instead of clearing individual fields. Signed-off-by: Nate Watterson Signed-off-by: Will Deacon Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/arm-smmu-v3.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 00df3832faab..64f1eb8fdcbc 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1033,13 +1033,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, } } - /* Nuke the existing Config, as we're going to rewrite it */ - val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT); - - if (ste->valid) - val |= STRTAB_STE_0_V; - else - val &= ~STRTAB_STE_0_V; + /* Nuke the existing STE_0 value, as we're going to rewrite it */ + val = ste->valid ? STRTAB_STE_0_V : 0; if (ste->bypass) { val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT @@ -1068,7 +1063,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK << STRTAB_STE_0_S1CTXPTR_SHIFT) | STRTAB_STE_0_CFG_S1_TRANS; - } if (ste->s2_cfg) { From 83fe38f2c4a37d0d96e682f150ba13575f0ae909 Mon Sep 17 00:00:00 2001 From: Valentin Longchamp Date: Thu, 15 Dec 2016 14:22:26 +0100 Subject: [PATCH 010/312] powerpc/corenet: explicitly disable the SDHC controller on kmcoge4 [ Upstream commit a674c7d470bb47e82f4eb1fa944eadeac2f6bbaf ] It is not implemented on the kmcoge4 hardware and if not disabled it leads to error messages with the corenet32_smp_defconfig. 
Signed-off-by: Valentin Longchamp Signed-off-by: Scott Wood Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/boot/dts/fsl/kmcoge4.dts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts index 6858ec9ef295..1a953d9edf1e 100644 --- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts +++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts @@ -83,6 +83,10 @@ }; }; + sdhc@114000 { + status = "disabled"; + }; + i2c@119000 { status = "disabled"; }; From 30019ca7dc099642af1d3dec3dbe2b231cbb69a6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 20 Jan 2017 12:13:19 -0800 Subject: [PATCH 011/312] ARM: omap2plus_defconfig: Fix probe errors on UARTs 5 and 6 [ Upstream commit 4cd6a59f5c1a9b0cca0da09fbba42b9450ffc899 ] We have more than four uarts on some SoCs and that can cause noise with errors while booting. Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/configs/omap2plus_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c5e1943e5427..09ebd37e01e0 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -221,6 +221,7 @@ CONFIG_SERIO=m CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y From 26fa336d69a227061d7396f1d225a4cec8e292af Mon Sep 17 00:00:00 2001 From: Li Zhong Date: Fri, 20 Jan 2017 16:35:33 +0800 Subject: [PATCH 012/312] crypto: vmx - disable preemption to enable vsx in aes_ctr.c [ Upstream commit 7dede913fc2ab9c0d3bff3a49e26fa9e858b0c13 ] Some preemptible check warnings were reported from enable_kernel_vsx(). This patch disables preemption in aes_ctr.c before enabling vsx, and they are now consistent with other files in the same directory. 
Signed-off-by: Li Zhong Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/vmx/aes_ctr.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 72f138985e18..d83ab4bac8b1 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, int ret; struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); pagefault_enable(); + preempt_enable(); ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); return ret; @@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); pagefault_enable(); + preempt_enable(); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); @@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); @@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, &ctx->enc_key, walk.iv); pagefault_enable(); + preempt_enable(); /* We need to update IV mostly for last bytes/round */ inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; From 079822da0a00284aba8968f19d1579dd77140b8d Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Thu, 19 Jan 2017 19:47:38 -0800 Subject: [PATCH 013/312] iio: trigger: free trigger resource correctly [ Upstream commit 10e840dfb0b7fc345082dd9e5fff3c1c02e7690e ] These stand-alone trigger drivers were using iio_trigger_put() where they should have been using iio_trigger_free(). The iio_trigger_put() adds a module_put which is bad since they never did a module_get. In the sysfs driver, module_get/put's are used as triggers are added & removed. This extra module_put() occurs on an error path in the probe routine (probably rare). In the bfin-timer & interrupt trigger drivers, the module resources are not explicitly managed, so it's doing a put on something that was never get'd. It occurs on the probe error path and on the remove path (not so rare). Tested with the sysfs trigger driver. The bfin & interrupt drivers were build tested & inspected only. 
Signed-off-by: Alison Schofield Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iio/trigger/iio-trig-interrupt.c | 8 ++++---- drivers/iio/trigger/iio-trig-sysfs.c | 2 +- drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c index 572bc6f02ca8..e18f12b74610 100644 --- a/drivers/iio/trigger/iio-trig-interrupt.c +++ b/drivers/iio/trigger/iio-trig-interrupt.c @@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev) trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); if (!trig_info) { ret = -ENOMEM; - goto error_put_trigger; + goto error_free_trigger; } iio_trigger_set_drvdata(trig, trig_info); trig_info->irq = irq; @@ -83,8 +83,8 @@ error_release_irq: free_irq(irq, trig); error_free_trig_info: kfree(trig_info); -error_put_trigger: - iio_trigger_put(trig); +error_free_trigger: + iio_trigger_free(trig); error_ret: return ret; } @@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev) iio_trigger_unregister(trig); free_irq(trig_info->irq, trig); kfree(trig_info); - iio_trigger_put(trig); + iio_trigger_free(trig); return 0; } diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index 3dfab2bc6d69..202e8b89caf2 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id) return 0; out2: - iio_trigger_put(t->trig); + iio_trigger_free(t->trig); free_t: kfree(t); out1: diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c index 035dd456d7d6..737747354db6 100644 --- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c +++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c @@ -259,7 +259,7 @@ out_free_irq: out1: iio_trigger_unregister(st->trig); out: - iio_trigger_put(st->trig); + iio_trigger_free(st->trig); return ret; } @@ -272,7 +272,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev) peripheral_free(st->t->pin); free_irq(st->irq, st); iio_trigger_unregister(st->trig); - iio_trigger_put(st->trig); + iio_trigger_free(st->trig); return 0; } From ca2090aa58ba5ad8d6821b5655b7cd64f680a5a6 Mon Sep 17 00:00:00 2001 From: Volodymyr Bendiuga Date: Thu, 19 Jan 2017 17:05:04 +0100 Subject: [PATCH 014/312] phy: increase size of MII_BUS_ID_SIZE and bus_id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 4567d686f5c6d955e57a3afa1741944c1e7f4033 ] Some bus names are pretty long and do not fit into 17 chars. Increase therefore MII_BUS_ID_SIZE and phy_fixup.bus_id to larger number. Now mii_bus.id can host larger name. Signed-off-by: Volodymyr Bendiuga Signed-off-by: Magnus Öberg Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/phy.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/include/linux/phy.h b/include/linux/phy.h index b64825d6ad26..5bc4b9d563a9 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -136,11 +136,7 @@ static inline const char *phy_modes(phy_interface_t interface) /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" -/* - * Need to be a little smaller than phydev->dev.bus_id to leave room - * for the ":%02x" - */ -#define MII_BUS_ID_SIZE (20 - 3) +#define MII_BUS_ID_SIZE 61 /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ @@ -599,7 +595,7 @@ struct phy_driver { /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[20]; + char bus_id[MII_BUS_ID_SIZE + 3]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); From 6450c9a96ff368f1a652215d1e0814343e4fa2e2 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 11 Jan 2017 16:43:32 +0200 Subject: [PATCH 015/312] serial: sh-sci: Fix register offsets for the IRDA serial port [ Upstream commit a752ba18af8285e3eeda572f40dddaebff0c3621 ] Even though most of its registers are 8-bit wide, the IRDA has two 16-bit registers that make it a 16-bit peripheral and not a 8-bit peripheral with addresses shifted by one. Fix the registers offset in the driver and the platform data regshift value. Signed-off-by: Laurent Pinchart Reviewed-by: Geert Uytterhoeven Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 1 - drivers/tty/serial/sh-sci.c | 17 ++++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 538c10db3537..8dc315b212c2 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = { .scscr = SCSCR_TE | SCSCR_RE, .type = PORT_IRDA, .ops = &sh770x_sci_port_ops, - .regshift = 1, }; static struct resource scif2_resources[] = { diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 235e150d7b81..80d0ffe7abc1 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -163,18 +163,17 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { }, /* - * Common definitions for legacy IrDA ports, dependent on - * regshift value. + * Common definitions for legacy IrDA ports. */ [SCIx_IRDA_REGTYPE] = { [SCSMR] = { 0x00, 8 }, - [SCBRR] = { 0x01, 8 }, - [SCSCR] = { 0x02, 8 }, - [SCxTDR] = { 0x03, 8 }, - [SCxSR] = { 0x04, 8 }, - [SCxRDR] = { 0x05, 8 }, - [SCFCR] = { 0x06, 8 }, - [SCFDR] = { 0x07, 16 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, [SCTFDR] = sci_reg_invalid, [SCRFDR] = sci_reg_invalid, [SCSPTR] = sci_reg_invalid, From cfb2cb34d32ea6190a15626cb374cc6649b84a5f Mon Sep 17 00:00:00 2001 From: William wu Date: Fri, 13 Jan 2017 11:04:22 +0800 Subject: [PATCH 016/312] usb: hcd: initialize hcd->flags to 0 when rm hcd [ Upstream commit 76b8db0d480e8045e1a1902fc9ab143b3b9ef115 ] On some platforms(e.g. 
rk3399 board), we can call hcd_add/remove consecutively without calling usb_put_hcd/usb_create_hcd in between, so hcd->flags can be stale. If the HC dies due to whatever reason then without this patch we get the below error on next hcd_add. [173.296154] xhci-hcd xhci-hcd.2.auto: HC died; cleaning up [173.296209] xhci-hcd xhci-hcd.2.auto: xHCI Host Controller [173.296762] xhci-hcd xhci-hcd.2.auto: new USB bus registered, assigned bus number 6 [173.296931] usb usb6: We don't know the algorithms for LPM for this host, disabling LPM. [173.297179] usb usb6: New USB device found, idVendor=1d6b, idProduct=0003 [173.297203] usb usb6: New USB device strings: Mfr=3, Product=2, SerialNumber=1 [173.297222] usb usb6: Product: xHCI Host Controller [173.297240] usb usb6: Manufacturer: Linux 4.4.21 xhci-hcd [173.297257] usb usb6: SerialNumber: xhci-hcd.2.auto [173.298680] hub 6-0:1.0: USB hub found [173.298749] hub 6-0:1.0: 1 port detected [173.299382] rockchip-dwc3 usb@fe800000: USB HOST connected [173.395418] hub 5-0:1.0: activate --> -19 [173.603447] irq 228: nobody cared (try booting with the "irqpoll" option) [173.603493] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.21 #9 [173.603513] Hardware name: Google Kevin (DT) [173.603531] Call trace: [173.603568] [] dump_backtrace+0x0/0x160 [173.603596] [] show_stack+0x20/0x28 [173.603623] [] dump_stack+0x90/0xb0 [173.603650] [] __report_bad_irq+0x48/0xe8 [173.603674] [] note_interrupt+0x1e8/0x28c [173.603698] [] handle_irq_event_percpu+0x1d4/0x25c [173.603722] [] handle_irq_event+0x4c/0x7c [173.603748] [] handle_fasteoi_irq+0xb4/0x124 [173.603777] [] generic_handle_irq+0x30/0x44 [173.603804] [] __handle_domain_irq+0x90/0xbc [173.603827] [] gic_handle_irq+0xcc/0x188 ... [173.604500] [] el1_irq+0x80/0xf8 [173.604530] [] cpu_startup_entry+0x38/0x3cc [173.604558] [] rest_init+0x8c/0x94 [173.604585] [] start_kernel+0x3d0/0x3fc [173.604607] [<0000000000b16000>] 0xb16000 [173.604622] handlers: [173.604648] [] usb_hcd_irq [173.604673] Disabling IRQ #228 Signed-off-by: William wu Acked-by: Roger Quadros Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 5c0952995280..87a83d925eea 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2997,6 +2997,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) } usb_put_invalidate_rhdev(hcd); + hcd->flags = 0; } EXPORT_SYMBOL_GPL(usb_remove_hcd); From 9ee83b03365abe361937785fc97f51ef1686d0d1 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sat, 7 Jan 2017 21:33:55 +0800 Subject: [PATCH 017/312] netfilter: nft_meta: deal with PACKET_LOOPBACK in netdev family [ Upstream commit f169fd695b192dd7b23aff8e69d25a1bc881bbfa ] After adding the following nft rule, then ping 224.0.0.1: # nft add rule netdev t c pkttype host counter The warning complain message will be printed out again and again: WARNING: CPU: 0 PID: 10182 at net/netfilter/nft_meta.c:163 \ nft_meta_get_eval+0x3fe/0x460 [nft_meta] [...] Call Trace: dump_stack+0x85/0xc2 __warn+0xcb/0xf0 warn_slowpath_null+0x1d/0x20 nft_meta_get_eval+0x3fe/0x460 [nft_meta] nft_do_chain+0xff/0x5e0 [nf_tables] So we should deal with PACKET_LOOPBACK in netdev family too. For ipv4, convert it to PACKET_BROADCAST/MULTICAST according to the destination address's type; For ipv6, convert it to PACKET_MULTICAST directly. 
Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nft_meta.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 9dfaf4d55ee0..a97a5bf716be 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -151,8 +151,34 @@ void nft_meta_get_eval(const struct nft_expr *expr, else *dest = PACKET_BROADCAST; break; + case NFPROTO_NETDEV: + switch (skb->protocol) { + case htons(ETH_P_IP): { + int noff = skb_network_offset(skb); + struct iphdr *iph, _iph; + + iph = skb_header_pointer(skb, noff, + sizeof(_iph), &_iph); + if (!iph) + goto err; + + if (ipv4_is_multicast(iph->daddr)) + *dest = PACKET_MULTICAST; + else + *dest = PACKET_BROADCAST; + + break; + } + case htons(ETH_P_IPV6): + *dest = PACKET_MULTICAST; + break; + default: + WARN_ON_ONCE(1); + goto err; + } + break; default: - WARN_ON(1); + WARN_ON_ONCE(1); goto err; } break; From ffa6332b8d8002d922c06a32bd2ee8982d4c13d3 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 16 Jan 2017 13:17:55 +0200 Subject: [PATCH 018/312] IPsec: do not ignore crypto err in ah4 input [ Upstream commit ebd89a2d0675f1325c2be5b7576fd8cb7e8defd0 ] ah4 input processing uses the asynchronous hash crypto API which supplies an error code as part of the operation completion but the error code was being ignored. Treat a crypto API error indication as a verification failure. While a crypto API reported error would almost certainly result in a memcpy of the digest failing anyway and thus the security risk seems minor, performing a memory compare on what might be uninitialized memory is wrong. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ah4.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index f2a71025a770..22377c8ff14b 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err) int ihl = ip_hdrlen(skb); int ah_hlen = (ah->hdrlen + 2) << 2; + if (err) + goto out; + work_iph = AH_SKB_CB(skb)->tmp; auth_data = ah_tmp_auth(work_iph, ihl); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); From 0a731928f2649fe2cf2b00dd4b69970b7b3f8e6d Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 15 Jan 2017 14:44:30 -0800 Subject: [PATCH 019/312] Input: mpr121 - handle multiple bits change of status register [ Upstream commit 08fea55e37f58371bffc5336a59e55d1f155955a ] This driver reports input events on their interrupts which are triggered by the sensor's status register changes. But only single bit change is reported in the interrupt handler. So if there are multiple bits are changed at almost the same time, other press or release events are ignored. This fixes it by detecting all changed bits in the status register. 
Signed-off-by: Akinobu Mita Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/input/keyboard/mpr121_touchkey.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 0fd612dd76ed..088a9f7c954e 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -87,7 +87,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) struct mpr121_touchkey *mpr121 = dev_id; struct i2c_client *client = mpr121->client; struct input_dev *input = mpr121->input_dev; - unsigned int key_num, key_val, pressed; + unsigned long bit_changed; + unsigned int key_num; int reg; reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR); @@ -105,19 +106,23 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) reg &= TOUCH_STATUS_MASK; /* use old press bit to figure out which bit changed */ - key_num = ffs(reg ^ mpr121->statusbits) - 1; - pressed = reg & (1 << key_num); + bit_changed = reg ^ mpr121->statusbits; mpr121->statusbits = reg; + for_each_set_bit(key_num, &bit_changed, mpr121->keycount) { + unsigned int key_val, pressed; - key_val = mpr121->keycodes[key_num]; + pressed = reg & BIT(key_num); + key_val = mpr121->keycodes[key_num]; - input_event(input, EV_MSC, MSC_SCAN, key_num); - input_report_key(input, key_val, pressed); + input_event(input, EV_MSC, MSC_SCAN, key_num); + input_report_key(input, key_val, pressed); + + dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, + pressed ? "pressed" : "released"); + + } input_sync(input); - dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, - pressed ? "pressed" : "released"); - out: return IRQ_HANDLED; } From aa4af0fca1016e1962154572fdc8ccdd1cb0a2b5 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 15 Jan 2017 14:44:05 -0800 Subject: [PATCH 020/312] Input: mpr121 - set missing event capability [ Upstream commit 9723ddc8fe0d76ce41fe0dc16afb241ec7d0a29d ] This driver reports misc scan input events on the sensor's status register changes. But the event capability for them was not set in the device initialization, so these events were ignored. This change adds the missing event capability. Signed-off-by: Akinobu Mita Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/input/keyboard/mpr121_touchkey.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 088a9f7c954e..aaf43befffaa 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -236,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client, input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_dev->keycode = mpr121->keycodes; input_dev->keycodesize = sizeof(mpr121->keycodes[0]); From 3652b0b6f2c2184bebe9e8da00076cfd458ff868 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Wed, 28 Dec 2016 14:47:28 +0200 Subject: [PATCH 021/312] IB/ipoib: Change list_del to list_del_init in the tx object [ Upstream commit 27d41d29c7f093f6f77843624fbb080c1b4a8b9c ] Since ipoib_cm_tx_start function and ipoib_cm_tx_reap function belong to different work queues, they can run in parallel. 
In this case if ipoib_cm_tx_reap calls list_del and release the lock, ipoib_cm_tx_start may acquire it and call list_del_init on the already deleted object. Changing list_del to list_del_init in ipoib_cm_tx_reap fixes the problem. Fixes: 839fcaba355a ("IPoIB: Connected mode experimental support") Signed-off-by: Feras Daoud Signed-off-by: Erez Shitrit Reviewed-by: Alex Vesker Signed-off-by: Leon Romanovsky Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 2018d24344de..f74b11542603 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1373,7 +1373,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work) while (!list_empty(&priv->cm.reap_list)) { p = list_entry(priv->cm.reap_list.next, typeof(*p), list); - list_del(&p->list); + list_del_init(&p->list); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); ipoib_cm_tx_destroy(p); From 581243dc55284f555628eb7d4081bf235a73fdb8 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 12 Jan 2017 15:48:42 +0100 Subject: [PATCH 022/312] s390/qeth: issue STARTLAN as first IPA command [ Upstream commit 1034051045d125579ab1e8fcd5a724eeb0e70149 ] STARTLAN needs to be the first IPA command after MPC initialization completes. So move the qeth_send_startlan() call from the layer disciplines into the core path, right after the MPC handshake. While at it, replace the magic LAN OFFLINE return code with the existing enum. Signed-off-by: Julian Wiedmann Reviewed-by: Thomas Richter Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 21 +++++++++++++++++---- drivers/s390/net/qeth_l2_main.c | 15 --------------- drivers/s390/net/qeth_l3_main.c | 15 --------------- 4 files changed, 17 insertions(+), 35 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 741f3ee81cfe..5006cb6ce62d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -909,7 +909,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); -int qeth_send_startlan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d10bf3da8e5f..e5b9506698b1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2955,7 +2955,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -int qeth_send_startlan(struct qeth_card *card) +static int qeth_send_startlan(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -2968,7 +2968,6 @@ int qeth_send_startlan(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_startlan); static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -5080,6 +5079,20 @@ retriable: goto out; } + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5091,14 +5104,14 @@ retriable: if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out; } } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out; } } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bf1e0e39334d..58bcb3c9a86a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1203,21 +1203,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { if (qeth_l2_start_ipassists(card)) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 285fe0b2c753..bf3c1b2301db 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ 
-3298,21 +3298,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); From f38a3a1c8f1af4335dfdecf6e30d33750c33d1a2 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 9 Jan 2017 16:49:26 -0500 Subject: [PATCH 023/312] net: dsa: select NET_SWITCHDEV [ Upstream commit 3a89eaa65db68bf53bf92dedc60084f810e1779a ] The support for DSA Ethernet switch chips depends on TCP/IP networking, thus explicit that HAVE_NET_DSA depends on INET. DSA uses SWITCHDEV, thus select it instead of depending on it. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Tested-by: Randy Dunlap Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/dsa/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index ff7736f7ff42..fc0c09e770e6 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -1,12 +1,13 @@ config HAVE_NET_DSA def_bool y - depends on NETDEVICES && !S390 + depends on INET && NETDEVICES && !S390 # Drivers must select NET_DSA and the appropriate tagging format config NET_DSA tristate "Distributed Switch Architecture" - depends on HAVE_NET_DSA && NET_SWITCHDEV + depends on HAVE_NET_DSA + select NET_SWITCHDEV select PHYLIB ---help--- Say Y if you want to enable support for the hardware switches supported From 5ffc673161be550108bcdb34f3b4d6e277e449f8 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Sun, 9 Apr 2017 15:56:08 +0200 Subject: [PATCH 024/312] platform/x86: hp-wmi: Fix detection for dock and tablet mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 298747b7579f5bbbced793d997b333fd10a24921 upstream. The current driver code is not checking for the error values returned by 'hp_wmi_dock_state()' and 'hp_wmi_tablet_state()' before passing the returned values down to 'input_report_switch()'. This error code is being translated to '1' in the input subsystem, reporting the wrong status. The biggest problem caused by this issue is that several laptops are wrongly reported by the driver as docked, preventing them to be put to sleep using the LID (and in most cases they are not even dockable). With this patch we create the report switches only if we are able to read the dock and tablet mode status correctly from ACPI. 
Signed-off-by: Carlo Caione Signed-off-by: Darren Hart (VMware) Cc: Philip Müller Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/hp-wmi.c | 40 +++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index af2046c87806..056a671165d1 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -573,10 +573,12 @@ static void hp_wmi_notify(u32 value, void *context) switch (event_id) { case HPWMI_DOCK_EVENT: - input_report_switch(hp_wmi_input_dev, SW_DOCK, - hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); + if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_DOCK, + hp_wmi_dock_state()); + if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, + hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); break; case HPWMI_PARK_HDD: @@ -649,6 +651,7 @@ static int __init hp_wmi_input_setup(void) { acpi_status status; int err; + int val; hp_wmi_input_dev = input_allocate_device(); if (!hp_wmi_input_dev) @@ -659,17 +662,26 @@ static int __init hp_wmi_input_setup(void) hp_wmi_input_dev->id.bustype = BUS_HOST; __set_bit(EV_SW, hp_wmi_input_dev->evbit); - __set_bit(SW_DOCK, hp_wmi_input_dev->swbit); - __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + + /* Dock */ + val = hp_wmi_dock_state(); + if (!(val < 0)) { + __set_bit(SW_DOCK, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_DOCK, val); + } + + /* Tablet mode */ + val = hp_wmi_tablet_state(); + if (!(val < 0)) { + __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + } err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL); if (err) goto err_free_dev; /* Set initial hardware state */ - input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) @@ -982,10 +994,12 @@ static int hp_wmi_resume_handler(struct device *device) * changed. */ if (hp_wmi_input_dev) { - input_report_switch(hp_wmi_input_dev, SW_DOCK, - hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); + if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_DOCK, + hp_wmi_dock_state()); + if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, + hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); } From 75f82a703b309d706d7aa3b370d48ae705a7ee40 Mon Sep 17 00:00:00 2001 From: Enrico Mioso Date: Tue, 11 Jul 2017 17:21:52 +0200 Subject: [PATCH 025/312] cdc_ncm: Set NTB format again after altsetting switch for Huawei devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 2b02c20ce0c28974b44e69a2e2f5ddc6a470ad6f upstream. Some firmwares in Huawei E3372H devices have been observed to switch back to NTB 32-bit format after altsetting switch. This patch implements a driver flag to check for the device settings and set NTB format to 16-bit again if needed. The flag has been activated for devices controlled by the huawei_cdc_ncm.c driver. 
V1->V2: - fixed broken error checks - some corrections to the commit message V2->V3: - variable name changes, to clarify what's happening - check (and possibly set) the NTB format later in the common bind code path Signed-off-by: Enrico Mioso Reported-and-tested-by: Christian Panton Reviewed-by: Bjørn Mork CC: Bjørn Mork CC: Christian Panton CC: linux-usb@vger.kernel.org CC: netdev@vger.kernel.org CC: Oliver Neukum Signed-off-by: David S. Miller Cc: Porto Rio Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/cdc_ncm.c | 28 ++++++++++++++++++++++++++++ drivers/net/usb/huawei_cdc_ncm.c | 6 ++++++ include/linux/usb/cdc_ncm.h | 1 + 3 files changed, 35 insertions(+) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e0e94b855bbe..1228d0da4075 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -724,8 +724,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ u8 *buf; int len; int temp; + int err; u8 iface_no; struct usb_cdc_parsed_header hdr; + u16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -823,6 +825,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } + /* + * Some Huawei devices have been observed to come out of reset in NDP32 mode. + * Let's check if this is the case, and set the device to NDP16 mode again if + * needed. + */ + if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) { + err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, iface_no, &curr_ntb_format, 2); + if (err < 0) { + goto error2; + } + + if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + dev_info(&intf->dev, "resetting NTB format to 16-bit"); + err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_NTB16_FORMAT, + iface_no, NULL, 0); + + if (err < 0) + goto error2; + } + } + cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_find_endpoints(dev, ctx->control); if (!dev->in || !dev->out || !dev->status) { diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 2680a65cd5e4..63f28908afda 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev, * be at the end of the frame. */ drvflags |= CDC_NCM_FLAG_NDP_TO_END; + + /* Additionally, it has been reported that some Huawei E3372H devices, with + * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence + * needing to be set to the NTB16 one again. + */ + drvflags |= CDC_NCM_FLAG_RESET_NTB16; ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); if (ret) goto err; diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3a375d07d0dc..6670e9b34f20 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -82,6 +82,7 @@ /* Driver flags */ #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ +#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) From 939cafad2f5198c8cb4b067e8acbc0e935e4ffdb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 8 Jun 2017 14:49:18 +0100 Subject: [PATCH 026/312] KEYS: trusted: sanitize all key material commit ee618b4619b72527aaed765f0f0b74072b281159 upstream. 
As the previous patch did for encrypted-keys, zero sensitive any potentially sensitive data related to the "trusted" key type before it is freed. Notably, we were not zeroing the tpm_buf structures in which the actual key is stored for TPM seal and unseal, nor were we zeroing the trusted_key_payload in certain error paths. Cc: Mimi Zohar Cc: David Safford Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/keys/trusted.c | 50 ++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 509aedcf8310..e20ead4b77a5 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -69,7 +69,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen, } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -113,7 +113,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -164,7 +164,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -245,7 +245,7 @@ static int TSS_checkhmac1(unsigned char *buffer, if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -346,7 +346,7 @@ static int TSS_checkhmac2(unsigned char *buffer, if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -563,7 +563,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, *bloblen = storedsize; } out: - kfree(td); + kzfree(td); return ret; } @@ -677,7 +677,7 @@ static int key_seal(struct trusted_key_payload *p, if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); - kfree(tb); + kzfree(tb); return ret; } @@ -702,7 +702,7 @@ static int key_unseal(struct trusted_key_payload *p, /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; - kfree(tb); + kzfree(tb); return ret; } @@ -984,12 +984,12 @@ static int trusted_instantiate(struct key *key, if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: - kfree(datablob); - kfree(options); + kzfree(datablob); + kzfree(options); if (!ret) rcu_assign_keypointer(key, payload); else - kfree(payload); + kzfree(payload); return ret; } @@ -998,8 +998,7 @@ static void trusted_rcu_free(struct rcu_head *rcu) struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); - memset(p->key, 0, p->key_len); - kfree(p); + kzfree(p); } /* @@ -1041,13 +1040,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; - kfree(new_p); + kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; - kfree(new_p); + kzfree(new_p); goto out; } @@ -1061,22 +1060,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); - kfree(new_p); + kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); - kfree(new_p); + kzfree(new_p); goto out; } } 
rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: - kfree(datablob); - kfree(new_o); + kzfree(datablob); + kzfree(new_o); return ret; } @@ -1105,24 +1104,19 @@ static long trusted_read(const struct key *key, char __user *buffer, for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { - kfree(ascii_buf); + kzfree(ascii_buf); return -EFAULT; } - kfree(ascii_buf); + kzfree(ascii_buf); return 2 * p->blob_len; } /* - * trusted_destroy - before freeing the key, clear the decrypted data + * trusted_destroy - clear and free the key's payload */ static void trusted_destroy(struct key *key) { - struct trusted_key_payload *p = key->payload.data[0]; - - if (!p) - return; - memset(p->key, 0, p->key_len); - kfree(key->payload.data[0]); + kzfree(key->payload.data[0]); } struct key_type key_type_trusted = { From df5371592acafa60029bb990ca85f007be60d0f1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Nov 2017 00:47:12 +0000 Subject: [PATCH 027/312] KEYS: trusted: fix writing past end of buffer in trusted_read() commit a3c812f7cfd80cf51e8f5b7034f7418f6beb56c1 upstream. When calling keyctl_read() on a key of type "trusted", if the user-supplied buffer was too small, the kernel ignored the buffer length and just wrote past the end of the buffer, potentially corrupting userspace memory. Fix it by instead returning the size required, as per the documentation for keyctl_read(). We also don't even fill the buffer at all in this case, as this is slightly easier to implement than doing a short read, and either behavior appears to be permitted. It also makes it match the behavior of the "encrypted" key type. Fixes: d00a1c72f7f4 ("keys: add new trusted key-type") Reported-by: Ben Hutchings Signed-off-by: Eric Biggers Signed-off-by: David Howells Reviewed-by: Mimi Zohar Reviewed-by: James Morris Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/keys/trusted.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/security/keys/trusted.c b/security/keys/trusted.c index e20ead4b77a5..214ae2dc7f64 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1094,20 +1094,21 @@ static long trusted_read(const struct key *key, char __user *buffer, p = rcu_dereference_key(key); if (!p) return -EINVAL; - if (!buffer || buflen <= 0) - return 2 * p->blob_len; - ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); - if (!ascii_buf) - return -ENOMEM; - bufp = ascii_buf; - for (i = 0; i < p->blob_len; i++) - bufp = hex_byte_pack(bufp, p->blob[i]); - if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { + if (buffer && buflen >= 2 * p->blob_len) { + ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); + if (!ascii_buf) + return -ENOMEM; + + bufp = ascii_buf; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); + if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { + kzfree(ascii_buf); + return -EFAULT; + } kzfree(ascii_buf); - return -EFAULT; } - kzfree(ascii_buf); return 2 * p->blob_len; } From ce93b664540d84b87fbbf21b5e85fbba554d83ed Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Sun, 9 Apr 2017 15:56:07 +0200 Subject: [PATCH 028/312] platform/x86: hp-wmi: Fix error value for hp_wmi_tablet_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c7dfc2facbd69dad89b75e13c608da709668dcd0 upstream. 
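Background for this and the following hp-wmi change: hp_wmi_perform_query() can return 0 on success, a negative errno from the WMI layer, or a positive HP-specific error code from the firmware. The pattern both patches converge on, shown here as a sketch, keeps negative errnos and maps firmware errors to -EINVAL:

        int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
                                       sizeof(state), sizeof(state));

        if (ret)
                return ret < 0 ? ret : -EINVAL;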
hp_wmi_tablet_state() fails to return the correct error code when hp_wmi_perform_query() returns the HP WMI query specific error code that is a positive value. Signed-off-by: Carlo Caione Signed-off-by: Darren Hart (VMware) Cc: Philip Müller Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/hp-wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 056a671165d1..7703e22aa7e5 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void) int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return ret; + return -EINVAL; return (state & 0x4) ? 1 : 0; } From 44e0e2b47af248766bfe9395e50a3729fe81b018 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Wed, 19 Apr 2017 22:36:39 +0200 Subject: [PATCH 029/312] platform/x86: hp-wmi: Do not shadow error values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d313876925f3e7a480a02773fd333bcab9202d5e upstream. All the helper functions (i.e. hp_wmi_dock_state, hp_wmi_tablet_state, ...) using hp_wmi_perform_query to perform an HP WMI query shadow the returned value in case of error. We return -EINVAL only when the HP WMI query returns a positive value (the specific error code) to not mix this up with the actual value returned by the helper function. Suggested-by: Andy Shevchenko Signed-off-by: Carlo Caione Signed-off-by: Darren Hart (VMware) Cc: Philip Müller Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/hp-wmi.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 7703e22aa7e5..847f75601591 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -249,7 +249,7 @@ static int hp_wmi_display_state(void) int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -259,7 +259,7 @@ static int hp_wmi_hddtemp_state(void) int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -269,7 +269,7 @@ static int hp_wmi_als_state(void) int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -280,7 +280,7 @@ static int hp_wmi_dock_state(void) sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state & 0x1; } @@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void) int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return (state & 0x4) ? 1 : 0; } @@ -324,7 +324,7 @@ static int __init hp_wmi_enable_hotkeys(void) int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, sizeof(value), 0); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return 0; } @@ -337,7 +337,7 @@ static int hp_wmi_set_block(void *data, bool blocked) ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &query, sizeof(query), 0); if (ret) - return -EINVAL; + return ret < 0 ? 
ret : -EINVAL; return 0; } @@ -429,7 +429,7 @@ static int hp_wmi_post_code_state(void) int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -495,7 +495,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return count; } @@ -516,7 +516,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr, ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return count; } From ad8c619750c499f68f99f0bccb1a12114b7f2033 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 22 Nov 2016 10:57:15 +0100 Subject: [PATCH 030/312] x86/uaccess, sched/preempt: Verify access_ok() context commit 7c4788950ba5922fde976d80b72baf46f14dee8d upstream. I recently encountered wreckage because access_ok() was used where it should not be, add an explicit WARN when access_ok() is used wrongly. Signed-off-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar [add include/preempt.h to fix build error - gregkh] Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/uaccess.h | 14 ++++++++++++-- include/linux/preempt.h | 21 +++++++++++++-------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index b8c75f3aade8..d788b0cdc0ad 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -66,6 +67,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task()) +#else +# define WARN_ON_IN_IRQ() +#endif + /** * access_ok: - Checks if a user space pointer is valid * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that @@ -86,8 +93,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ -#define access_ok(type, addr, size) \ - likely(!__range_not_ok(addr, size, user_addr_max())) +#define access_ok(type, addr, size) \ +({ \ + WARN_ON_IN_IRQ(); \ + likely(!__range_not_ok(addr, size, user_addr_max())); \ +}) /* * The exception table consists of pairs of addresses relative to the diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..7eeceac52dea 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -65,19 +65,24 @@ /* * Are we doing bottom half or hardware interrupt processing? - * Are we in a softirq context? Interrupt context? - * in_softirq - Are we currently processing softirq or have bh disabled? - * in_serving_softirq - Are we currently processing softirq? 
+ * + * in_irq() - We're in (hard) IRQ context + * in_softirq() - We have BH disabled, or are processing softirqs + * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled + * in_serving_softirq() - We're in softirq context + * in_nmi() - We're in NMI context + * in_task() - We're in task context + * + * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really + * should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - -/* - * Are we in NMI context? - */ -#define in_nmi() (preempt_count() & NMI_MASK) +#define in_nmi() (preempt_count() & NMI_MASK) +#define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); From 44540ead8a8a159a4d2a9cb2e3e0b7388cfaa3b1 Mon Sep 17 00:00:00 2001 From: Li Bin Date: Sat, 28 Oct 2017 11:07:28 +0800 Subject: [PATCH 031/312] workqueue: Fix NULL pointer dereference commit cef572ad9bd7f85035ba8272e5352040e8be0152 upstream. When queue_work() is used in irq (not in task context), there is a potential case that trigger NULL pointer dereference. ---------------------------------------------------------------- worker_thread() |-spin_lock_irq() |-process_one_work() |-worker->current_pwq = pwq |-spin_unlock_irq() |-worker->current_func(work) |-spin_lock_irq() |-worker->current_pwq = NULL |-spin_unlock_irq() //interrupt here |-irq_handler |-__queue_work() //assuming that the wq is draining |-is_chained_work(wq) |-current_wq_worker() //Here, 'current' is the interrupted worker! |-current->current_pwq is NULL here! |-schedule() ---------------------------------------------------------------- Avoid it by checking for task context in current_wq_worker(), and if not in task context, we shouldn't use the 'current' to check the condition. Reported-by: Xiaofei Tan Signed-off-by: Li Bin Reviewed-by: Lai Jiangshan Signed-off-by: Tejun Heo Fixes: 8d03ecfe4718 ("workqueue: reimplement is_chained_work() using current_wq_worker()") Signed-off-by: Greg Kroah-Hartman --- kernel/workqueue_internal.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 45215870ac6c..3fa9c146fccb 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -9,6 +9,7 @@ #include #include +#include struct worker_pool; @@ -59,7 +60,7 @@ struct worker { */ static inline struct worker *current_wq_worker(void) { - if (current->flags & PF_WQ_WORKER) + if (in_task() && (current->flags & PF_WQ_WORKER)) return kthread_data(current); return NULL; } From 48276703a0371b3af436e3e378a1b9c1bba92528 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Mon, 16 Oct 2017 18:51:31 +0300 Subject: [PATCH 032/312] crypto: x86/sha1-mb - fix panic due to unaligned access commit d041b557792c85677f17e08eee535eafbd6b9aa2 upstream. struct sha1_ctx_mgr allocated in sha1_mb_mod_init() via kzalloc() and later passed in sha1_mb_flusher_mgr_flush_avx2() function where instructions vmovdqa used to access the struct. vmovdqa requires 16-bytes aligned argument, but nothing guarantees that struct sha1_ctx_mgr will have that alignment. Unaligned vmovdqa will generate GP fault. Fix this by replacing vmovdqa with vmovdqu which doesn't have alignment requirements. 
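For context, vmovdqa faults with #GP on any address that is not 16-byte aligned, while kzalloc() only guarantees ARCH_KMALLOC_MINALIGN (typically 8 bytes on x86). A minimal illustration of the mismatch, not code from the patch:

        struct sha1_ctx_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

        /* nothing forces 16-byte alignment here, so handing this buffer
         * to assembler code that uses vmovdqa can fault */
        if (mgr && !IS_ALIGNED((unsigned long)mgr, 16))
                pr_warn("sha1_ctx_mgr not 16-byte aligned\n");

Switching the assembler to vmovdqu removes the alignment requirement entirely, and on current CPUs it generally performs the same as vmovdqa when the data happens to be aligned.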
Fixes: 2249cbb53ead ("crypto: sha-mb - SHA1 multibuffer submit and flush routines for AVX2") Signed-off-by: Andrey Ryabinin Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S index 85c4e1cf7172..e1693457c178 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S @@ -174,8 +174,8 @@ LABEL skip_ %I .endr # Find min length - vmovdqa _lens+0*16(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 + vmovdqu _lens+0*16(state), %xmm0 + vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} @@ -195,8 +195,8 @@ LABEL skip_ %I vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 - vmovdqa %xmm0, _lens+0*16(state) - vmovdqa %xmm1, _lens+1*16(state) + vmovdqu %xmm0, _lens+0*16(state) + vmovdqu %xmm1, _lens+1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 @@ -260,8 +260,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2) jc .return_null # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 + vmovdqu _lens(state), %xmm0 + vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} From 3fc61b8dd6bbdb673d71fd06b4a0cba33805283a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 7 Nov 2017 22:29:02 +0000 Subject: [PATCH 033/312] KEYS: fix NULL pointer dereference during ASN.1 parsing [ver #2] commit 624f5ab8720b3371367327a822c267699c1823b8 upstream. syzkaller reported a NULL pointer dereference in asn1_ber_decoder(). It can be reproduced by the following command, assuming CONFIG_PKCS7_TEST_KEY=y: keyctl add pkcs7_test desc '' @s The bug is that if the data buffer is empty, an integer underflow occurs in the following check: if (unlikely(dp >= datalen - 1)) goto data_overrun_error; This results in the NULL data pointer being dereferenced. Fix it by checking for 'datalen - dp < 2' instead. Also fix the similar check for 'dp >= datalen - n' later in the same function. That one possibly could result in a buffer overread. The NULL pointer dereference was reproducible using the "pkcs7_test" key type but not the "asymmetric" key type because the "asymmetric" key type checks for a 0-length payload before calling into the ASN.1 decoder but the "pkcs7_test" key type does not. 
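The arithmetic is easiest to see with datalen == 0 and dp == 0; a standalone illustration (not the decoder itself):

static bool overrun_old(size_t dp, size_t datalen)
{
        return dp >= datalen - 1;       /* 0 >= SIZE_MAX: false, empty buffer slips through */
}

static bool overrun_new(size_t dp, size_t datalen)
{
        return datalen - dp < 2;        /* 0 < 2: true, empty buffer is rejected */
}

Because dp never exceeds datalen inside the decoder, the rewritten form cannot wrap.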
The bug report was: BUG: unable to handle kernel NULL pointer dereference at (null) IP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233 PGD 7b708067 P4D 7b708067 PUD 7b6ee067 PMD 0 Oops: 0000 [#1] SMP Modules linked in: CPU: 0 PID: 522 Comm: syz-executor1 Not tainted 4.14.0-rc8 #7 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.3-20171021_125229-anatol 04/01/2014 task: ffff9b6b3798c040 task.stack: ffff9b6b37970000 RIP: 0010:asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233 RSP: 0018:ffff9b6b37973c78 EFLAGS: 00010216 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000000000021c RDX: ffffffff814a04ed RSI: ffffb1524066e000 RDI: ffffffff910759e0 RBP: ffff9b6b37973d60 R08: 0000000000000001 R09: ffff9b6b3caa4180 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000002 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 FS: 00007f10ed1f2700(0000) GS:ffff9b6b3ea00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000007b6f3000 CR4: 00000000000006f0 Call Trace: pkcs7_parse_message+0xee/0x240 crypto/asymmetric_keys/pkcs7_parser.c:139 verify_pkcs7_signature+0x33/0x180 certs/system_keyring.c:216 pkcs7_preparse+0x41/0x70 crypto/asymmetric_keys/pkcs7_key_type.c:63 key_create_or_update+0x180/0x530 security/keys/key.c:855 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0xbf/0x250 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x4585c9 RSP: 002b:00007f10ed1f1bd8 EFLAGS: 00000216 ORIG_RAX: 00000000000000f8 RAX: ffffffffffffffda RBX: 00007f10ed1f2700 RCX: 00000000004585c9 RDX: 0000000020000000 RSI: 0000000020008ffb RDI: 0000000020008000 RBP: 0000000000000000 R08: ffffffffffffffff R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000216 R12: 00007fff1b2260ae R13: 00007fff1b2260af R14: 00007f10ed1f2700 R15: 0000000000000000 Code: dd ca ff 48 8b 45 88 48 83 e8 01 4c 39 f0 0f 86 a8 07 00 00 e8 53 dd ca ff 49 8d 46 01 48 89 85 58 ff ff ff 48 8b 85 60 ff ff ff <42> 0f b6 0c 30 89 c8 88 8d 75 ff ff ff 83 e0 1f 89 8d 28 ff ff RIP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233 RSP: ffff9b6b37973c78 CR2: 0000000000000000 Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder") Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- lib/asn1_decoder.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index faa2a3f017f8..4fa2e54b3f59 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -227,7 +227,7 @@ next_op: hdr = 2; /* Extract a tag from the data */ - if (unlikely(dp >= datalen - 1)) + if (unlikely(datalen - dp < 2)) goto data_overrun_error; tag = data[dp++]; if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) @@ -273,7 +273,7 @@ next_op: int n = len - 0x80; if (unlikely(n > 2)) goto length_too_long; - if (unlikely(dp >= datalen - n)) + if (unlikely(n > datalen - dp)) goto data_overrun_error; hdr += n; for (len = 0; n > 0; n--) { From 4d5b67a54e6d5c0bb8b6638630f2a5af671798aa Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 2 Nov 2017 18:44:28 +0100 Subject: [PATCH 034/312] ARM: 8720/1: ensure dump_instr() checks addr_limit commit b9dd05c7002ee0ca8b676428b2268c26399b5e31 upstream. When CONFIG_DEBUG_USER is enabled, it's possible for a user to deliberately trigger dump_instr() with a chosen kernel address. 
Let's avoid problems resulting from this by using get_user() rather than __get_user(), ensuring that we don't erroneously access kernel memory. So that we can use the same code to dump user instructions and kernel instructions, the common dumping code is factored out to __dump_instr(), with the fs manipulated appropriately in dump_instr() around calls to this. Signed-off-by: Mark Rutland Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/traps.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index bc698383e822..c92b535150a0 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -132,30 +132,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, set_fs(fs); } -static void dump_instr(const char *lvl, struct pt_regs *regs) +static void __dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 4 : 8; - mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. + * Note that we now dump the code first, just in case the backtrace + * kills us. */ - fs = get_fs(); - set_fs(KERNEL_DS); for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; if (thumb) - bad = __get_user(val, &((u16 *)addr)[i]); + bad = get_user(val, &((u16 *)addr)[i]); else - bad = __get_user(val, &((u32 *)addr)[i]); + bad = get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", @@ -166,8 +162,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) } } printk("%sCode: %s\n", lvl, str); +} - set_fs(fs); +static void dump_instr(const char *lvl, struct pt_regs *regs) +{ + mm_segment_t fs; + + if (!user_mode(regs)) { + fs = get_fs(); + set_fs(KERNEL_DS); + __dump_instr(lvl, regs); + set_fs(fs); + } else { + __dump_instr(lvl, regs); + } } #ifdef CONFIG_ARM_UNWIND From adc4bacd51647370d37d15f7ff167c8afa12c949 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 7 Nov 2017 16:05:24 +0100 Subject: [PATCH 035/312] ALSA: seq: Fix OSS sysex delivery in OSS emulation commit 132d358b183ac6ad8b3fea32ad5e0663456d18d1 upstream. The SYSEX event delivery in OSS sequencer emulation assumed that the event is encoded in the variable-length data with the straight buffering. This was the normal behavior in the past, but during the development, the chained buffers were introduced for carrying more data, while the OSS code was left intact. As a result, when a SYSEX event with the chained buffer data is passed to OSS sequencer port, it may end up with the wrong memory access, as if it were having a too large buffer. This patch addresses the bug, by applying the buffer data expansion by the generic snd_seq_dump_var_event() helper function. 
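For reference, snd_seq_dump_var_event() hides the chaining: it walks the event data cell by cell and calls the supplied function once per contiguous chunk. A sketch of the contract the new readq_dump_sysex() callback relies on (handle_chunk() is a made-up placeholder):

static int dump_cb(void *ptr, void *buf, int count)
{
        /* invoked once per contiguous chunk, never past a cell boundary */
        return handle_chunk(ptr, buf, count);   /* placeholder consumer */
}

/* later, for a variable-length event 'ev': */
err = snd_seq_dump_var_event(ev, dump_cb, ctx);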
Reported-by: syzbot Reported-by: Mark Salyzyn Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/oss/seq_oss_midi.c | 4 +--- sound/core/seq/oss/seq_oss_readq.c | 29 +++++++++++++++++++++++++++++ sound/core/seq/oss/seq_oss_readq.h | 2 ++ 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c index aaff9ee32695..b30b2139e3f0 100644 --- a/sound/core/seq/oss/seq_oss_midi.c +++ b/sound/core/seq/oss/seq_oss_midi.c @@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq if (!dp->timer->running) len = snd_seq_oss_timer_start(dp->timer); if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { - if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) - snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, - ev->data.ext.ptr, ev->data.ext.len); + snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev); } else { len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); if (len > 0) diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c index 046cb586fb2f..06b21226b4e7 100644 --- a/sound/core/seq/oss/seq_oss_readq.c +++ b/sound/core/seq/oss/seq_oss_readq.c @@ -117,6 +117,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in return 0; } +/* + * put MIDI sysex bytes; the event buffer may be chained, thus it has + * to be expanded via snd_seq_dump_var_event(). + */ +struct readq_sysex_ctx { + struct seq_oss_readq *readq; + int dev; +}; + +static int readq_dump_sysex(void *ptr, void *buf, int count) +{ + struct readq_sysex_ctx *ctx = ptr; + + return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count); +} + +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, + struct snd_seq_event *ev) +{ + struct readq_sysex_ctx ctx = { + .readq = q, + .dev = dev + }; + + if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) + return 0; + return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx); +} + /* * copy an event to input queue: * return zero if enqueued diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h index f1463f1f449e..8d033ca2d23f 100644 --- a/sound/core/seq/oss/seq_oss_readq.h +++ b/sound/core/seq/oss/seq_oss_readq.h @@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q); void snd_seq_oss_readq_clear(struct seq_oss_readq *readq); unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait); int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len); +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, + struct snd_seq_event *ev); int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev); int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode); int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec); From 1e54b5f72115e7354a8f20205c5baca3e2cbd56c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 6 Nov 2017 20:16:50 +0100 Subject: [PATCH 036/312] ALSA: seq: Avoid invalid lockdep class warning commit 3510c7aa069aa83a2de6dab2b41401a198317bdc upstream. The recent fix for adding rwsem nesting annotation was using the given "hop" argument as the lock subclass key. Although the idea itself works, it may trigger a kernel warning like: BUG: looking up invalid subclass: 8 .... 
since the lockdep has a smaller number of subclasses (8) than we currently allow for the hops there (10). The current definition is merely a sanity check for avoiding the too deep delivery paths, and the 8 hops are already enough. So, as a quick fix, just follow the max hops as same as the max lockdep subclasses. Fixes: 1f20f9ff57ca ("ALSA: seq: Fix nested rwsem annotation for lockdep splat") Reported-by: syzbot Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- include/sound/seq_kernel.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h index feb58d455560..4b9ee3009aa0 100644 --- a/include/sound/seq_kernel.h +++ b/include/sound/seq_kernel.h @@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t; #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200 /* max delivery path length */ -#define SNDRV_SEQ_MAX_HOPS 10 +/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */ +#define SNDRV_SEQ_MAX_HOPS 8 /* max size of event size */ #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff From ff8e97326b6ade630ec70745cc6255ada6dfbe83 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 31 Oct 2017 00:35:03 -0500 Subject: [PATCH 037/312] MIPS: microMIPS: Fix incorrect mask in insn_table_MM commit 77238e76b9156d28d86c1e31c00ed2960df0e4de upstream. It seems that this is a typo error and the proper bit masking is "RT | RS" instead of "RS | RS". This issue was detected with the help of Coccinelle. Fixes: d6b3314b49e1 ("MIPS: uasm: Add lh uam instruction") Reported-by: Julia Lawall Signed-off-by: Gustavo A. R. Silva Reviewed-by: James Hogan Patchwork: https://patchwork.linux-mips.org/patch/17551/ Signed-off-by: James Hogan [jhogan@kernel.org: Backported 3.16..4.12] Signed-off-by: Greg Kroah-Hartman --- arch/mips/mm/uasm-micromips.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index d78178daea4b..e2fe48dd67b5 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -75,7 +75,7 @@ static struct insn insn_table_MM[] = { { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_ld, 0, 0 }, - { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM }, + { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, { insn_lld, 0, 0 }, { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, From 207c2143322ce23b46061ff57dd5356744214293 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 31 Oct 2017 15:09:22 -0700 Subject: [PATCH 038/312] MIPS: Fix CM region target definitions commit 6a6cba1d945a7511cdfaf338526871195e420762 upstream. The default CM target field in the GCR_BASE register is encoded with 0 meaning memory & 1 being reserved. However the definitions we use for those bits effectively get these two values backwards - likely because they were copied from the definitions for the CM regions where the target is encoded differently. This results in use setting up GCR_BASE with the reserved target value by default, rather than targeting memory as intended. Although we currently seem to get away with this it's not a great idea to rely upon. Fix this by changing our macros to match the documentated target values. 
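With the corrected macros, programming the default target explicitly would look roughly like this (a sketch using the read_gcr_base()/write_gcr_base() accessors generated in this header; exact register widths hedged):

        u32 base = read_gcr_base();

        base &= ~CM_GCR_BASE_CMDEFTGT_MSK;
        base |= CM_GCR_BASE_CMDEFTGT_MEM << CM_GCR_BASE_CMDEFTGT_SHF;   /* encoding 0, not 1 */
        write_gcr_base(base);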
The incorrect encoding became used as of commit 9f98f3dd0c51 ("MIPS: Add generic CM probe & access code") in the Linux v3.15 cycle, and was likely carried forwards from older but unused code introduced by commit 39b8d5254246 ("[MIPS] Add support for MIPS CMP platform.") in the v2.6.26 cycle. Fixes: 9f98f3dd0c51 ("MIPS: Add generic CM probe & access code") Signed-off-by: Paul Burton Reported-by: Matt Redfearn Reviewed-by: James Hogan Cc: Matt Redfearn Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: # v3.15+ Patchwork: https://patchwork.linux-mips.org/patch/17562/ Signed-off-by: James Hogan [jhogan@kernel.org: Backported 3.15..4.13] Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/mips-cm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 6516e9da5133..b836ddec82b7 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -238,8 +238,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) #define CM_GCR_BASE_CMDEFTGT_SHF 0 #define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) -#define CM_GCR_BASE_CMDEFTGT_DISABLED 0 -#define CM_GCR_BASE_CMDEFTGT_MEM 1 +#define CM_GCR_BASE_CMDEFTGT_MEM 0 +#define CM_GCR_BASE_CMDEFTGT_RESERVED 1 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 From 175c0622ab11d6977b27875e58b42bc198e4e862 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Fri, 4 Nov 2016 09:28:56 +0000 Subject: [PATCH 039/312] MIPS: SMP: Use a completion event to signal CPU up commit a00eeede507c975087b7b8df8cf2c9f88ba285de upstream. If a secondary CPU failed to start, for any reason, the CPU requesting the secondary to start would get stuck in the loop waiting for the secondary to be present in the cpu_callin_map. Rather than that, use a completion event to signal that the secondary CPU has started and is waiting to synchronise counters. Since the CPU presence will no longer be marked in cpu_callin_map, remove the redundant test from arch_cpu_idle_dead(). Signed-off-by: Matt Redfearn Cc: Maciej W. Rozycki Cc: Jiri Slaby Cc: Paul Gortmaker Cc: Chris Metcalf Cc: Thomas Gleixner Cc: Qais Yousef Cc: James Hogan Cc: Paul Burton Cc: Marcin Nowakowski Cc: Andrew Morton Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14502/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/process.c | 4 +--- arch/mips/kernel/smp.c | 15 +++++++++------ 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 477ba026c3e5..163b3449a8de 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -49,9 +49,7 @@ #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { - /* What the heck is this check doing ? */ - if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map)) - play_dead(); + play_dead(); } #endif diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 7fef02a9eb85..526c3c7c43d1 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -64,6 +64,8 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +static DECLARE_COMPLETION(cpu_running); + /* * A logcal cpu mask containing only one VPE per core to * reduce the number of IPIs on large MT systems. 
@@ -174,7 +176,7 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); - cpumask_set_cpu(cpu, &cpu_callin_map); + complete(&cpu_running); synchronise_count_slave(cpu); set_cpu_online(cpu, true); @@ -242,7 +244,6 @@ void smp_prepare_boot_cpu(void) { set_cpu_possible(0, true); set_cpu_online(0, true); - cpumask_set_cpu(0, &cpu_callin_map); } int __cpu_up(unsigned int cpu, struct task_struct *tidle) @@ -250,11 +251,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) mp_ops->boot_secondary(cpu, tidle); /* - * Trust is futile. We should really have timeouts ... + * We must check for timeout here, as the CPU will not be marked + * online until the counters are synchronised. */ - while (!cpumask_test_cpu(cpu, &cpu_callin_map)) { - udelay(100); - schedule(); + if (!wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(1000))) { + pr_crit("CPU%u: failed to start\n", cpu); + return -EIO; } synchronise_count_master(cpu); From 857e81041f6974effc23af215c896465f91feab0 Mon Sep 17 00:00:00 2001 From: Matija Glavinic Pecotic Date: Thu, 3 Aug 2017 08:20:22 +0200 Subject: [PATCH 040/312] MIPS: Fix race on setting and getting cpu_online_mask commit 6f542ebeaee0ee552a902ce3892220fc22c7ec8e upstream. While testing cpu hoptlug (cpu down and up in loops) on kernel 4.4, it was observed that occasionally check for cpu online will fail in kernel/cpu.c, _cpu_up: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/tree/kernel/cpu.c?h=v4.4.79#n485 518 /* Arch-specific enabling code. */ 519 ret = __cpu_up(cpu, idle); 520 521 if (ret != 0) 522 goto out_notify; 523 BUG_ON(!cpu_online(cpu)); Reason is race between start_secondary and _cpu_up. cpu_callin_map is set before cpu_online_mask. In __cpu_up, cpu_callin_map is waited for, but cpu online mask is not, resulting in race in which secondary processor started and set cpu_callin_map, but not yet set the online mask,resulting in above BUG being hit. Upstream differs in the area. cpu_online check is in bringup_wait_for_ap, which is after cpu reached AP_ONLINE_IDLE,where secondary passed its start function. Nonetheless, fix makes start_secondary safe and not depending on other locks throughout the code. It protects as well against cpu_online checks put in between sometimes in the future. Fix this by moving completion after all flags are set. Signed-off-by: Matija Glavinic Pecotic Cc: Alexander Sverdlin Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16925/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/smp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 526c3c7c43d1..02bc48f5e1a8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -176,9 +176,6 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); - complete(&cpu_running); - synchronise_count_slave(cpu); - set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -186,6 +183,9 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); + complete(&cpu_running); + synchronise_count_slave(cpu); + /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. 
From 6004eb4d1adca173f1a55dc2121f25da71e96b71 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Wed, 27 Sep 2017 10:13:25 +0100 Subject: [PATCH 041/312] MIPS: SMP: Fix deadlock & online race commit 9e8c399a88f0b87e41a894911475ed2a8f8dff9e upstream. Commit 6f542ebeaee0 ("MIPS: Fix race on setting and getting cpu_online_mask") effectively reverted commit 8f46cca1e6c06 ("MIPS: SMP: Fix possibility of deadlock when bringing CPUs online") and thus has reinstated the possibility of deadlock. The commit was based on testing of kernel v4.4, where the CPU hotplug core code issued a BUG() if the starting CPU is not marked online when the boot CPU returns from __cpu_up. The commit fixes this race (in v4.4), but re-introduces the deadlock situation. As noted in the commit message, upstream differs in this area. Commit 8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu bring itself fully up") adds a completion event in the CPU hotplug core code, making this race impossible. However, people were unhappy with relying on the core code to do the right thing. To address the issues both commits were trying to fix, add a second completion event in the MIPS smp hotplug path. It removes the possibility of a race, since the MIPS smp hotplug code now synchronises both the boot and secondary CPUs before they return to the hotplug core code. It also addresses the deadlock by ensuring that the secondary CPU is not marked online before it's counters are synchronised. This fix should also be backported to fix the race condition introduced by the backport of commit 8f46cca1e6c06 ("MIPS: SMP: Fix possibility of deadlock when bringing CPUs online"), through really that race only existed before commit 8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu bring itself fully up"). Signed-off-by: Matt Redfearn Fixes: 6f542ebeaee0 ("MIPS: Fix race on setting and getting cpu_online_mask") CC: Matija Glavinic Pecotic Patchwork: https://patchwork.linux-mips.org/patch/17376/ Signed-off-by: James Hogan [jhogan@kernel.org: Backported 4.1..4.9] Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/smp.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 02bc48f5e1a8..4af08c197177 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -64,6 +64,7 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +static DECLARE_COMPLETION(cpu_starting); static DECLARE_COMPLETION(cpu_running); /* @@ -176,6 +177,12 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); + /* Notify boot CPU that we're starting & ready to sync counters */ + complete(&cpu_starting); + + synchronise_count_slave(cpu); + + /* The CPU is running and counters synchronised, now mark it online */ set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -183,8 +190,11 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); + /* + * Notify boot CPU that we're up & online and it can safely return + * from __cpu_up + */ complete(&cpu_running); - synchronise_count_slave(cpu); /* * irq will be enabled in ->smp_finish(), enabling it too early @@ -250,17 +260,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) { mp_ops->boot_secondary(cpu, tidle); - /* - * We must check for timeout here, as the CPU will not be marked - * online until the counters are synchronised. 
- */ - if (!wait_for_completion_timeout(&cpu_running, + /* Wait for CPU to start and be ready to sync counters */ + if (!wait_for_completion_timeout(&cpu_starting, msecs_to_jiffies(1000))) { pr_crit("CPU%u: failed to start\n", cpu); return -EIO; } synchronise_count_master(cpu); + + /* Wait for CPU to finish startup & mark itself online before return */ + wait_for_completion(&cpu_running); return 0; } From c08f3a82f66f6ae09a96ca0f2054da0a6d77f84d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 9 Dec 2015 14:50:25 -0800 Subject: [PATCH 042/312] test: firmware_class: report errors properly on failure commit 47e0bbb7fa985a0f1b5794a8653fae4f8f49de77 upstream. request_firmware() failures currently won't get reported at all (the error code is discarded). What's more, we get confusing messages, like: # echo -n notafile > /sys/devices/virtual/misc/test_firmware/trigger_request [ 8280.311856] test_firmware: loading 'notafile' [ 8280.317042] test_firmware: load of 'notafile' failed: -2 [ 8280.322445] test_firmware: loaded: 0 # echo $? 0 Report the failures via write() errors, and don't say we "loaded" anything. Signed-off-by: Brian Norris Acked-by: Kees Cook Signed-off-by: Shuah Khan Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- lib/test_firmware.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 86374c1c49a4..841191061816 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -65,14 +65,19 @@ static ssize_t trigger_request_store(struct device *dev, release_firmware(test_firmware); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); - if (rc) + if (rc) { pr_info("load of '%s' failed: %d\n", name, rc); - pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0); + goto out; + } + pr_info("loaded: %zu\n", test_firmware->size); + rc = count; + +out: mutex_unlock(&test_fw_mutex); kfree(name); - return count; + return rc; } static DEVICE_ATTR_WO(trigger_request); From ed1deec127d9b944e10e154a65d49af5f3d3ee4a Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 9 Dec 2015 14:50:29 -0800 Subject: [PATCH 043/312] selftests: firmware: add empty string and async tests commit 1b1fe542b6f010cf6bc7e1c92805e1c0e133e007 upstream. Now that we've added a 'trigger_async_request' knob to test the request_firmware_nowait() API, let's use it. Also add tests for the empty ("") string, since there have been a couple errors in that handling already. Since we now have real ways that the sysfs write might fail, let's add the appropriate check on the 'echo' lines too. Signed-off-by: Brian Norris Acked-by: Kees Cook Signed-off-by: Shuah Khan [AmitP: Dropped the async trigger testing parts from original commit] Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/firmware/fw_filesystem.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh index c4366dc74e01..e9f563fd200a 100755 --- a/tools/testing/selftests/firmware/fw_filesystem.sh +++ b/tools/testing/selftests/firmware/fw_filesystem.sh @@ -48,8 +48,16 @@ echo "ABCD0123" >"$FW" NAME=$(basename "$FW") +if printf '\000' >"$DIR"/trigger_request; then + echo "$0: empty filename should not succeed" >&2 + exit 1 +fi + # Request a firmware that doesn't exist, it should fail. 
-echo -n "nope-$NAME" >"$DIR"/trigger_request +if echo -n "nope-$NAME" >"$DIR"/trigger_request; then + echo "$0: firmware shouldn't have loaded" >&2 + exit 1 +fi if diff -q "$FW" /dev/test_firmware >/dev/null ; then echo "$0: firmware was not expected to match" >&2 exit 1 From 4afaa496d7f3cf3a8ede24a20daac05f52a364b8 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 16 Dec 2016 03:10:35 -0800 Subject: [PATCH 044/312] selftests: firmware: send expected errors to /dev/null commit 880444e214cfd293a2e8cc4bd3505f7ffa6ce33a upstream. Error that we expect should not be spilled to stdout. Without this we get: ./fw_filesystem.sh: line 58: printf: write error: Invalid argument ./fw_filesystem.sh: line 63: printf: write error: No such device ./fw_filesystem.sh: line 69: echo: write error: No such file or directory ./fw_filesystem.sh: filesystem loading works ./fw_filesystem.sh: async filesystem loading works With it: ./fw_filesystem.sh: filesystem loading works ./fw_filesystem.sh: async filesystem loading works Signed-off-by: Luis R. Rodriguez [AmitP: Dropped the async trigger testing parts from original commit] Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/firmware/fw_filesystem.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh index e9f563fd200a..856a1f327b3f 100755 --- a/tools/testing/selftests/firmware/fw_filesystem.sh +++ b/tools/testing/selftests/firmware/fw_filesystem.sh @@ -48,13 +48,13 @@ echo "ABCD0123" >"$FW" NAME=$(basename "$FW") -if printf '\000' >"$DIR"/trigger_request; then +if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then echo "$0: empty filename should not succeed" >&2 exit 1 fi # Request a firmware that doesn't exist, it should fail. -if echo -n "nope-$NAME" >"$DIR"/trigger_request; then +if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then echo "$0: firmware shouldn't have loaded" >&2 exit 1 fi From d0cfebd9b1db99e507eb3f543cd9d99315d96f4f Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Mon, 23 Jan 2017 08:11:07 -0800 Subject: [PATCH 045/312] tools: firmware: check for distro fallback udev cancel rule commit afb999cdef69148f366839e74470d8f5375ba5f1 upstream. Some distributions (Debian, OpenSUSE) have a udev rule in place to cancel all fallback mechanism uevents immediately. This would obviously make it hard to test against the fallback mechanism test interface, so we need to check for this. Signed-off-by: Luis R. Rodriguez Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- .../selftests/firmware/fw_userhelper.sh | 28 +++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh index b9983f8e09f6..01c626a1f226 100755 --- a/tools/testing/selftests/firmware/fw_userhelper.sh +++ b/tools/testing/selftests/firmware/fw_userhelper.sh @@ -64,9 +64,33 @@ trap "test_finish" EXIT echo "ABCD0123" >"$FW" NAME=$(basename "$FW") +DEVPATH="$DIR"/"nope-$NAME"/loading + # Test failure when doing nothing (timeout works). -echo 1 >/sys/class/firmware/timeout -echo -n "$NAME" >"$DIR"/trigger_request +echo -n 2 >/sys/class/firmware/timeout +echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null & + +# Give the kernel some time to load the loading file, must be less +# than the timeout above. +sleep 1 +if [ ! 
-f $DEVPATH ]; then + echo "$0: fallback mechanism immediately cancelled" + echo "" + echo "The file never appeared: $DEVPATH" + echo "" + echo "This might be a distribution udev rule setup by your distribution" + echo "to immediately cancel all fallback requests, this must be" + echo "removed before running these tests. To confirm look for" + echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules" + echo "and see if you have something like this:" + echo "" + echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\"" + echo "" + echo "If you do remove this file or comment out this line before" + echo "proceeding with these tests." + exit 1 +fi + if diff -q "$FW" /dev/test_firmware >/dev/null ; then echo "$0: firmware was not expected to match" >&2 exit 1 From 721d4be9e05de066527ce843ebc2fc8d3ce3aa83 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 29 Oct 2017 16:27:19 +0100 Subject: [PATCH 046/312] MIPS: AR7: Defer registration of GPIO commit e6b03ab63b4d270e0249f96536fde632409dc1dc upstream. When called from prom init code, ar7_gpio_init() will fail as it will call gpiochip_add() which relies on a working kmalloc() to alloc the gpio_desc array and kmalloc is not useable yet at prom init time. Move ar7_gpio_init() to ar7_register_devices() (a device_initcall) where kmalloc works. Fixes: 14e85c0e69d5 ("gpio: remove gpio_descs global array") Signed-off-by: Jonas Gorski Reviewed-by: Florian Fainelli Cc: Ralf Baechle Cc: Greg Kroah-Hartman Cc: Yoshihiro YUNOMAE Cc: Nicolas Schichan Cc: linux-mips@linux-mips.org Cc: linux-serial@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17542/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ar7/platform.c | 4 ++++ arch/mips/ar7/prom.c | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 58fca9ad5fcc..aa1d4ed6b1dd 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -654,6 +654,10 @@ static int __init ar7_register_devices(void) u32 val; int res; + res = ar7_gpio_init(); + if (res) + pr_warn("unable to register gpios: %d\n", res); + res = ar7_register_uarts(); if (res) pr_err("unable to setup uart(s): %d\n", res); diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c index a23adc49d50f..36aabee9cba4 100644 --- a/arch/mips/ar7/prom.c +++ b/arch/mips/ar7/prom.c @@ -246,8 +246,6 @@ void __init prom_init(void) ar7_init_cmdline(fw_arg0, (char **)fw_arg1); ar7_init_env((struct env_var *)fw_arg2); console_config(); - - ar7_gpio_init(); } #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4))) From 4694272c470ac7a2ec36e23442369ac5f135faee Mon Sep 17 00:00:00 2001 From: Oswald Buddenhagen Date: Sun, 29 Oct 2017 16:27:20 +0100 Subject: [PATCH 047/312] MIPS: AR7: Ensure that serial ports are properly set up commit b084116f8587b222a2c5ef6dcd846f40f24b9420 upstream. Without UPF_FIXED_TYPE, the data from the PORT_AR7 uart_config entry is never copied, resulting in a dead port. 
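For context, UPF_FIXED_TYPE tells the 8250/serial core to trust the pre-set port type and take its parameters from the uart_config[] table instead of auto-probing; without it an unrecognised type such as PORT_AR7 is left unconfigured. A sketch of the relevant fields (values as in the AR7 setup below, otherwise illustrative):

        struct uart_port uart_port = {
                .type           = PORT_AR7,
                .flags          = UPF_FIXED_TYPE,       /* trust .type, skip autoconfig */
                .iotype         = UPIO_MEM32,
                .regshift       = 2,
        };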
Fixes: 154615d55459 ("MIPS: AR7: Use correct UART port type") Signed-off-by: Oswald Buddenhagen [jonas.gorski: add Fixes tag] Signed-off-by: Jonas Gorski Reviewed-by: Florian Fainelli Cc: Ralf Baechle Cc: Greg Kroah-Hartman Cc: Yoshihiro YUNOMAE Cc: Nicolas Schichan Cc: Oswald Buddenhagen Cc: linux-mips@linux-mips.org Cc: linux-serial@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17543/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ar7/platform.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index aa1d4ed6b1dd..3446b6fb3acb 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -576,6 +576,7 @@ static int __init ar7_register_uarts(void) uart_port.type = PORT_AR7; uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.iotype = UPIO_MEM32; + uart_port.flags = UPF_FIXED_TYPE; uart_port.regshift = 2; uart_port.line = 0; From 05b690ccb077eccb6a1aeb610456dbd5ea7f04a1 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 7 Nov 2017 16:19:24 -0800 Subject: [PATCH 048/312] Input: elan_i2c - add ELAN060C to the ACPI table commit cdea6a30c2689cc33b34c6691b57cca277f0c5dc upstream. ELAN060C touchpad uses elan_i2c as its driver. It can be found on Lenovo ideapad 320-14AST. BugLink: https://bugs.launchpad.net/bugs/1727544 Signed-off-by: Kai-Heng Feng Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/elan_i2c_core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index b8c50d883b2c..c9d491bc85e0 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1240,6 +1240,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0605", 0 }, { "ELAN0609", 0 }, { "ELAN060B", 0 }, + { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN1000", 0 }, { } From ba4828af60e227fffb6ccca2e17c64938e7a5d0d Mon Sep 17 00:00:00 2001 From: Sinclair Yeh Date: Wed, 1 Nov 2017 10:47:05 -0700 Subject: [PATCH 049/312] drm/vmwgfx: Fix Ubuntu 17.10 Wayland black screen issue commit cef75036c40408ba3bc308bcb00a3d440da713fc upstream. This is an extension of Commit 7c20d213dd3c ("drm/vmwgfx: Work around mode set failure in 2D VMs") With Wayland desktop and atomic mode set, during the mode setting process there is a moment when two framebuffer sized surfaces are being pinned. This was not an issue with Xorg. Since this only happens during a mode change, there should be no performance impact by increasing allowable mem_size. Signed-off-by: Sinclair Yeh Reviewed-by: Thomas Hellstrom Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f3f31f995878..be3971b22a02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -708,7 +708,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) * allocation taken by fbdev */ if (!(dev_priv->capabilities & SVGA_CAP_3D)) - mem_size *= 2; + mem_size *= 3; dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->prim_bb_mem = From 493cb19b2522a23f4056c405ef4c4aa0c90333b3 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 6 Nov 2017 11:33:36 +0100 Subject: [PATCH 050/312] rbd: use GFP_NOIO for parent stat and data requests commit 1e37f2f84680fa7f8394fd444b6928e334495ccc upstream. 
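Context for the change below: these allocations sit on the writeback path, so a GFP_KERNEL allocation may enter direct reclaim, wait on further writeback to the same device, and deadlock. An alternative, scoped way to express the same constraint (a sketch only, not what this patch does) is the memalloc_noio API:

        unsigned int noio_flags = memalloc_noio_save();

        /* GFP_KERNEL allocations in this section behave as GFP_NOIO */
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);

        memalloc_noio_restore(noio_flags);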
rbd_img_obj_exists_submit() and rbd_img_obj_parent_read_full() are on the writeback path for cloned images -- we attempt a stat on the parent object to see if it exists and potentially read it in to call copyup. GFP_NOIO should be used instead of GFP_KERNEL here. Link: http://tracker.ceph.com/issues/22014 Signed-off-by: Ilya Dryomov Reviewed-by: David Disseldorp [idryomov@gmail.com: backport to < 4.9: context] Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index fbdddd6f94b8..ca3bcc81b623 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2736,7 +2736,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) * from the parent. */ page_count = (u32)calc_pages_for(0, length); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); if (IS_ERR(pages)) { result = PTR_ERR(pages); pages = NULL; @@ -2863,7 +2863,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) */ size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); page_count = (u32)calc_pages_for(0, size); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); if (IS_ERR(pages)) return PTR_ERR(pages); From cdb5a2def61c959eaa5e651ff178702c75d51ecb Mon Sep 17 00:00:00 2001 From: Gerhard Bertelsmann Date: Mon, 6 Nov 2017 18:16:56 +0100 Subject: [PATCH 051/312] can: sun4i: handle overrun in RX FIFO commit 4dcf924c2eda0c47a5c53b7703e3dc65ddaa8920 upstream. SUN4Is CAN IP has a 64 byte deep FIFO buffer. If the buffer is not drained fast enough (overrun) it's getting mangled. Already received frames are dropped - the data can't be restored. Signed-off-by: Gerhard Bertelsmann Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/sun4i_can.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index b0c80859f746..1ac2090a1721 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) } stats->rx_over_errors++; stats->rx_errors++; + + /* reset the CAN IP by entering reset mode + * ignoring timeout error + */ + set_reset_mode(dev); + set_normal_mode(dev); + /* clear bit */ sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG); } @@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) netif_wake_queue(dev); can_led_event(dev, CAN_LED_EVENT_TX); } - if (isrc & SUN4I_INT_RBUF_VLD) { - /* receive interrupt */ + if ((isrc & SUN4I_INT_RBUF_VLD) && + !(isrc & SUN4I_INT_DATA_OR)) { + /* receive interrupt - don't read if overrun occurred */ while (status & SUN4I_STA_RBUF_RDY) { /* RX buffer is not empty */ sun4i_can_rx(dev); From 4df27e6493089c571eeebea0d1475784e6435d18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20Sch=C3=BCtz?= Date: Sun, 29 Oct 2017 13:03:22 +0100 Subject: [PATCH 052/312] can: c_can: don't indicate triple sampling support for D_CAN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit fb5f0b3ef69b95e665e4bbe8a3de7201f09f1071 upstream. The D_CAN controller doesn't provide a triple sampling mode, so don't set the CAN_CTRLMODE_3_SAMPLES flag in ctrlmode_supported. Currently enabling triple sampling is a no-op. 
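The "no-op" wording is because the CAN device core only gates what userspace may request against ctrlmode_supported; honouring the bit is up to the driver and hardware. Roughly, paraphrasing the netlink handler from memory (treat as a sketch):

        /* in can_changelink(): refuse to touch bits the driver never advertised */
        if (cm->mask & ~priv->ctrlmode_supported)
                return -EOPNOTSUPP;

After this patch, requesting CAN_CTRLMODE_3_SAMPLES on a D_CAN device fails with -EOPNOTSUPP instead of being silently ignored.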
Signed-off-by: Richard Schütz Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/c_can/c_can_pci.c | 1 - drivers/net/can/c_can/c_can_platform.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index cf7c18947189..d065c0e2d18e 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev, break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; break; default: ret = -EINVAL; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e36d10520e24..717530eac70c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev) break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; priv->read_reg32 = d_can_plat_read_reg32; From b01f1d60dd1c353acdc3a0fabb7507859d629a7c Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 7 Nov 2017 18:53:07 +0100 Subject: [PATCH 053/312] x86/oprofile/ppro: Do not use __this_cpu*() in preemptible context commit a743bbeef27b9176987ec0cb7f906ab0ab52d1da upstream. The warning below says it all: BUG: using __this_cpu_read() in preemptible [00000000] code: swapper/0/1 caller is __this_cpu_preempt_check CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.0-rc8 #4 Call Trace: dump_stack check_preemption_disabled ? do_early_param __this_cpu_preempt_check arch_perfmon_init op_nmi_init ? alloc_pci_root_info oprofile_arch_init oprofile_init do_one_initcall ... These accessors should not have been used in the first place: it is PPro so no mixed silicon revisions and thus it can simply use boot_cpu_data. Reported-by: Fengguang Wu Tested-by: Fengguang Wu Fix-creation-mandated-by: Linus Torvalds Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Robert Richter Cc: x86@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/oprofile/op_model_ppro.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index d90528ea5412..12c051d19e4b 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void) eax.full = cpuid_eax(0xa); /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ - if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && - __this_cpu_read(cpu_info.x86_model) == 15) { + if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model == 15) { eax.split.version_id = 2; eax.split.num_counters = 2; eax.split.bit_width = 40; From 9c405157269dc7929280d7a9f0f07dc91fc77f58 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 27 Feb 2016 12:45:26 +0000 Subject: [PATCH 054/312] PKCS#7: fix unitialized boolean 'want' commit 06aae592425701851e02bb850cb9f4997f0ae163 upstream. The boolean want is not initialized and hence garbage. The default should be false (later it is only set to true on tne sinfo->authattrs check). 
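The bug class is easy to reproduce in isolation. A small, self-contained illustration (hypothetical userspace code, not taken from the kernel): a flag that is only conditionally set inside a loop must start from a known value, otherwise the function returns stack garbage on any path where the condition never fires.

#include <stdbool.h>
#include <stdio.h>

static bool any_negative(const int *v, int n)
{
	bool found = false;	/* without this initializer the result is undefined */
	int i;

	for (i = 0; i < n; i++)
		if (v[i] < 0)
			found = true;
	return found;
}

int main(void)
{
	int v[] = { 3, 1, 2 };

	printf("%d\n", any_negative(v, 3));	/* reliably prints 0 only because of the initializer */
	return 0;
}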
Found with static analysis using CoverityScan Signed-off-by: Colin Ian King Signed-off-by: David Howells Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- crypto/asymmetric_keys/pkcs7_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 2516e97c58f1..5e5a8adac0ba 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message); static int pkcs7_check_authattrs(struct pkcs7_message *msg) { struct pkcs7_signed_info *sinfo; - bool want; + bool want = false; sinfo = msg->signed_infos; if (!sinfo) From c4e3d53bd9e7e7723f351dba998151eca4725af6 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 15 Nov 2017 17:13:14 +0100 Subject: [PATCH 055/312] Linux 4.4.98 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fb1a40d64ba8..5d62e23347f9 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 97 +SUBLEVEL = 98 EXTRAVERSION = NAME = Blurry Fish Butt From 19ef30ef23a4de3f0bd8979a5bb5216dd381bc75 Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Fri, 10 Nov 2017 15:30:27 -0800 Subject: [PATCH 056/312] FROMLIST: binder: fix proc->files use-after-free (from https://patchwork.kernel.org/patch/10058587/) proc->files cleanup is initiated by binder_vma_close. Therefore a reference on the binder_proc is not enough to prevent the files_struct from being released while the binder_proc still has a reference. This can lead to an attempt to dereference the stale pointer obtained from proc->files prior to proc->files cleanup. This has been seen once in task_get_unused_fd_flags() when __alloc_fd() is called with a stale "files". The fix is to always use get_files_struct() to obtain struct_files so that the refcount on the files_struct is used to prevent a premature free. proc->files is removed since we get it every time. 
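A condensed sketch of the pattern the patch switches to (simplified; the hypothetical helper below is not the binder code, which also covers fd allocation and installation): instead of caching proc->files at mmap time, take a counted reference on the task's files_struct around each use and drop it as soon as the call completes, so concurrent cleanup cannot leave a stale pointer behind.

#include <linux/fdtable.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int example_close_remote_fd(struct task_struct *tsk, unsigned int fd)
{
	struct files_struct *files = get_files_struct(tsk);
	int ret;

	if (!files)
		return -ESRCH;		/* task has already released its file table */
	ret = __close_fd(files, fd);
	put_files_struct(files);	/* reference held only for the duration of the call */
	return ret;
}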
Bug: 69164715 Change-Id: I6431027d3d569e76913935c21885201505627982 Signed-off-by: Todd Kjos --- drivers/android/binder.c | 63 +++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 1177a6617964..008e448536de 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -466,9 +466,8 @@ struct binder_ref { }; enum binder_deferred_state { - BINDER_DEFERRED_PUT_FILES = 0x01, - BINDER_DEFERRED_FLUSH = 0x02, - BINDER_DEFERRED_RELEASE = 0x04, + BINDER_DEFERRED_FLUSH = 0x01, + BINDER_DEFERRED_RELEASE = 0x02, }; /** @@ -505,8 +504,6 @@ struct binder_priority { * (invariant after initialized) * @tsk task_struct for group_leader of process * (invariant after initialized) - * @files files_struct for process - * (invariant after initialized) * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -553,7 +550,6 @@ struct binder_proc { struct list_head waiting_threads; int pid; struct task_struct *tsk; - struct files_struct *files; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; @@ -949,22 +945,34 @@ static void binder_free_thread(struct binder_thread *thread); static void binder_free_proc(struct binder_proc *proc); static void binder_inc_node_tmpref_ilocked(struct binder_node *node); +struct files_struct *binder_get_files_struct(struct binder_proc *proc) +{ + return get_files_struct(proc->tsk); +} + static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { - struct files_struct *files = proc->files; + struct files_struct *files; unsigned long rlim_cur; unsigned long irqs; + int ret; + files = binder_get_files_struct(proc); if (files == NULL) return -ESRCH; - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - return __alloc_fd(files, 0, rlim_cur, flags); + ret = __alloc_fd(files, 0, rlim_cur, flags); +err: + put_files_struct(files); + return ret; } /* @@ -973,8 +981,12 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { - if (proc->files) - __fd_install(proc->files, fd, file); + struct files_struct *files = binder_get_files_struct(proc); + + if (files) { + __fd_install(files, fd, file); + put_files_struct(files); + } } /* @@ -982,18 +994,20 @@ static void task_fd_install( */ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { + struct files_struct *files = binder_get_files_struct(proc); int retval; - if (proc->files == NULL) + if (files == NULL) return -ESRCH; - retval = __close_fd(proc->files, fd); + retval = __close_fd(files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; + put_files_struct(files); return retval; } @@ -4818,7 +4832,6 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); binder_alloc_vma_close(&proc->alloc); - binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -4860,10 
+4873,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = proc; ret = binder_alloc_mmap_handler(&proc->alloc, vma); - if (ret) - return ret; - proc->files = get_files_struct(current); - return 0; + + return ret; err_bad_arg: pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", @@ -5042,8 +5053,6 @@ static void binder_deferred_release(struct binder_proc *proc) struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; - BUG_ON(proc->files); - mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); @@ -5125,8 +5134,6 @@ static void binder_deferred_release(struct binder_proc *proc) static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; - struct files_struct *files; - int defer; do { @@ -5143,21 +5150,11 @@ static void binder_deferred_func(struct work_struct *work) } mutex_unlock(&binder_deferred_lock); - files = NULL; - if (defer & BINDER_DEFERRED_PUT_FILES) { - files = proc->files; - if (files) - proc->files = NULL; - } - if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ - - if (files) - put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); From 6440f0ee8a1779f53526bccb9de00914daeb9094 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 5 Sep 2017 14:54:54 +0200 Subject: [PATCH 057/312] mac80211: accept key reinstall without changing anything commit fdf7cb4185b60c68e1a75e61691c4afdc15dea0e upstream. When a key is reinstalled we can reset the replay counters etc. which can lead to nonce reuse and/or replay detection being impossible, breaking security properties, as described in the "KRACK attacks". In particular, CVE-2017-13080 applies to GTK rekeying that happened in firmware while the host is in D3, with the second part of the attack being done after the host wakes up. In this case, the wpa_supplicant mitigation isn't sufficient since wpa_supplicant doesn't know the GTK material. In case this happens, simply silently accept the new key coming from userspace but don't take any action on it since it's the same key; this keeps the PN replay counters intact. Signed-off-by: Johannes Berg Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- net/mac80211/key.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 44388d6a1d8e..88f3fad740bf 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007-2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -617,9 +618,6 @@ int ieee80211_key_link(struct ieee80211_key *key, pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; idx = key->conf.keyidx; - key->local = sdata->local; - key->sdata = sdata; - key->sta = sta; mutex_lock(&sdata->local->key_mtx); @@ -630,6 +628,21 @@ int ieee80211_key_link(struct ieee80211_key *key, else old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); + /* + * Silently accept key re-installation without really installing the + * new version of the key to avoid nonce reuse or replay issues. 
+ */ + if (old_key && key->conf.keylen == old_key->conf.keylen && + !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) { + ieee80211_key_free_unused(key); + ret = 0; + goto out; + } + + key->local = sdata->local; + key->sdata = sdata; + key->sta = sta; + increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); @@ -645,6 +658,7 @@ int ieee80211_key_link(struct ieee80211_key *key, ret = 0; } + out: mutex_unlock(&sdata->local->key_mtx); return ret; From cdac2028c90112354febf9fd6d50ae70259cae0b Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 17 Oct 2017 20:32:07 +0200 Subject: [PATCH 058/312] mac80211: use constant time comparison with keys commit 2bdd713b92a9cade239d3c7d15205a09f556624d upstream. Otherwise we risk leaking information via timing side channel. Fixes: fdf7cb4185b6 ("mac80211: accept key reinstall without changing anything") Signed-off-by: Jason A. Donenfeld Signed-off-by: Johannes Berg Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- net/mac80211/key.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 88f3fad740bf..61286227f307 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "ieee80211_i.h" #include "driver-ops.h" @@ -633,7 +634,7 @@ int ieee80211_key_link(struct ieee80211_key *key, * new version of the key to avoid nonce reuse or replay issues. */ if (old_key && key->conf.keylen == old_key->conf.keylen && - !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) { + !crypto_memneq(key->conf.key, old_key->conf.key, key->conf.keylen)) { ieee80211_key_free_unused(key); ret = 0; goto out; From c93df40f3c73a74226e59bcfb1edb57ec979d0a7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 24 Oct 2017 21:12:13 +0200 Subject: [PATCH 059/312] mac80211: don't compare TKIP TX MIC key in reinstall prevention commit cfbb0d90a7abb289edc91833d0905931f8805f12 upstream. For the reinstall prevention, the code I had added compares the whole key. It turns out though that iwlwifi firmware doesn't provide the TKIP TX MIC key as it's not needed in client mode, and thus the comparison will always return false. For client mode, thus always zero out the TX MIC key part before doing the comparison in order to avoid accepting the reinstall of the key with identical encryption and RX MIC key, but not the same TX MIC key (since the supplicant provides the real one.) Fixes: fdf7cb4185b6 ("mac80211: accept key reinstall without changing anything") Signed-off-by: Johannes Berg Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- net/mac80211/key.c | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 61286227f307..4a72c0d1e56f 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -608,6 +608,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key) ieee80211_key_free_common(key); } +static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, + struct ieee80211_key *old, + struct ieee80211_key *new) +{ + u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; + u8 *tk_old, *tk_new; + + if (!old || new->conf.keylen != old->conf.keylen) + return false; + + tk_old = old->conf.key; + tk_new = new->conf.key; + + /* + * In station mode, don't compare the TX MIC key, as it's never used + * and offloaded rekeying may not care to send it to the host. 
This + * is the case in iwlwifi, for example. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && + new->conf.keylen == WLAN_KEY_LEN_TKIP && + !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); + memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); + memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + tk_old = tkip_old; + tk_new = tkip_new; + } + + return !crypto_memneq(tk_old, tk_new, new->conf.keylen); +} + int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) @@ -633,8 +666,7 @@ int ieee80211_key_link(struct ieee80211_key *key, * Silently accept key re-installation without really installing the * new version of the key to avoid nonce reuse or replay issues. */ - if (old_key && key->conf.keylen == old_key->conf.keylen && - !crypto_memneq(key->conf.key, old_key->conf.key, key->conf.keylen)) { + if (ieee80211_key_identical(sdata, old_key, key)) { ieee80211_key_free_unused(key); ret = 0; goto out; From de46c1adbea694109036d3e7dee1fa6250b72660 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 29 Sep 2017 10:54:24 -0400 Subject: [PATCH 060/312] usb: usbtest: fix NULL pointer dereference commit 7c80f9e4a588f1925b07134bb2e3689335f6c6d8 upstream. If the usbtest driver encounters a device with an IN bulk endpoint but no OUT bulk endpoint, it will try to dereference a NULL pointer (out->desc.bEndpointAddress). The problem can be solved by adding a missing test. Signed-off-by: Alan Stern Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Felipe Balbi Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/usbtest.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 2e947dc94e32..bc92a498ec03 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -185,12 +185,13 @@ found: return tmp; } - if (in) { + if (in) dev->in_pipe = usb_rcvbulkpipe(udev, in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + if (out) dev->out_pipe = usb_sndbulkpipe(udev, out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); - } + if (iso_in) { dev->iso_in = &iso_in->desc; dev->in_iso_pipe = usb_rcvisocpipe(udev, From b7c625ce6d279bf3e138c25c0cd3f595923825f3 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sat, 7 Oct 2017 11:07:47 -0700 Subject: [PATCH 061/312] Input: ims-psu - check if CDC union descriptor is sane commit ea04efee7635c9120d015dcdeeeb6988130cb67a upstream. Before trying to use CDC union descriptor, try to validate whether that it is sane by checking that intf->altsetting->extra is big enough and that descriptor bLength is not too big and not too small. 
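The three checks the patch adds can be summarised in a small helper (a sketch with a hypothetical name, not the driver code): a full descriptor header must still fit in the remaining extra-descriptor buffer, the advertised bLength must not run past that buffer, and it must not be smaller than the structure about to be dereferenced.

#include <linux/types.h>
#include <linux/usb/cdc.h>

static bool union_desc_sane(const struct usb_cdc_union_desc *desc, int buflen)
{
	if (buflen < (int)sizeof(*desc))
		return false;	/* not even a complete descriptor left in the buffer */
	if (desc->bLength > buflen)
		return false;	/* descriptor claims more bytes than remain */
	if (desc->bLength < sizeof(*desc))
		return false;	/* too short to hold the union-specific fields */
	return true;
}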
Reported-by: Andrey Konovalov Signed-off-by: Dmitry Torokhov Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/input/misc/ims-pcu.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index f4e8fbec6a94..b5304e264881 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) return NULL; } - while (buflen > 0) { + while (buflen >= sizeof(*union_desc)) { union_desc = (struct usb_cdc_union_desc *)buf; + if (union_desc->bLength > buflen) { + dev_err(&intf->dev, "Too large descriptor\n"); + return NULL; + } + if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); - return union_desc; + + if (union_desc->bLength >= sizeof(*union_desc)) + return union_desc; + + dev_err(&intf->dev, + "Union descriptor to short (%d vs %zd\n)", + union_desc->bLength, sizeof(*union_desc)); + return NULL; } buflen -= union_desc->bLength; From 1e98fd54c356bb2bcf827a74949c9372c455409f Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 12 Sep 2017 12:41:20 +0200 Subject: [PATCH 062/312] ALSA: seq: Cancel pending autoload work at unbinding device commit fc27fe7e8deef2f37cba3f2be2d52b6ca5eb9d57 upstream. ALSA sequencer core has a mechanism to load the enumerated devices automatically, and it's performed in an off-load work. This seems causing some race when a sequencer is removed while the pending autoload work is running. As syzkaller spotted, it may lead to some use-after-free: BUG: KASAN: use-after-free in snd_rawmidi_dev_seq_free+0x69/0x70 sound/core/rawmidi.c:1617 Write of size 8 at addr ffff88006c611d90 by task kworker/2:1/567 CPU: 2 PID: 567 Comm: kworker/2:1 Not tainted 4.13.0+ #29 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: events autoload_drivers Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x192/0x22c lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x230/0x340 mm/kasan/report.c:409 __asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435 snd_rawmidi_dev_seq_free+0x69/0x70 sound/core/rawmidi.c:1617 snd_seq_dev_release+0x4f/0x70 sound/core/seq_device.c:192 device_release+0x13f/0x210 drivers/base/core.c:814 kobject_cleanup lib/kobject.c:648 [inline] kobject_release lib/kobject.c:677 [inline] kref_put include/linux/kref.h:70 [inline] kobject_put+0x145/0x240 lib/kobject.c:694 put_device+0x25/0x30 drivers/base/core.c:1799 klist_devices_put+0x36/0x40 drivers/base/bus.c:827 klist_next+0x264/0x4a0 lib/klist.c:403 next_device drivers/base/bus.c:270 [inline] bus_for_each_dev+0x17e/0x210 drivers/base/bus.c:312 autoload_drivers+0x3b/0x50 sound/core/seq_device.c:117 process_one_work+0x9fb/0x1570 kernel/workqueue.c:2097 worker_thread+0x1e4/0x1350 kernel/workqueue.c:2231 kthread+0x324/0x3f0 kernel/kthread.c:231 ret_from_fork+0x25/0x30 arch/x86/entry/entry_64.S:425 The fix is simply to assure canceling the autoload work at removing the device. 
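The underlying rule is general: deferred work that can reference an object must be cancelled, and any running instance waited for, before that object is freed. A minimal sketch of the pattern (generic code with made-up names, not the ALSA sequencer itself, which cancels a global autoload work item):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct widget {
	struct work_struct scan_work;
	/* ... other state touched by the work function ... */
};

static void widget_destroy(struct widget *w)
{
	/* Blocks until a queued or already-running scan_work has finished,
	 * so the work function can never observe freed memory. */
	cancel_work_sync(&w->scan_work);
	kfree(w);
}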
Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Takashi Iwai Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/seq_device.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c index c4acf17e9f5e..e40a2cba5002 100644 --- a/sound/core/seq/seq_device.c +++ b/sound/core/seq/seq_device.c @@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void) flush_work(&autoload_work); } EXPORT_SYMBOL(snd_seq_device_load_drivers); +#define cancel_autoload_drivers() cancel_work_sync(&autoload_work) #else #define queue_autoload_drivers() /* NOP */ +#define cancel_autoload_drivers() /* NOP */ #endif /* @@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device) { struct snd_seq_device *dev = device->device_data; + cancel_autoload_drivers(); put_device(&dev->dev); return 0; } From 735818a8b45c90a6210454b3bd413b678a60d018 Mon Sep 17 00:00:00 2001 From: Craig Gallek Date: Mon, 30 Oct 2017 18:50:11 -0400 Subject: [PATCH 063/312] tun/tap: sanitize TUNSETSNDBUF input [ Upstream commit 93161922c658c714715686cd0cf69b090cb9bf1d ] Syzkaller found several variants of the lockup below by setting negative values with the TUNSETSNDBUF ioctl. This patch adds a sanity check to both the tun and tap versions of this ioctl. watchdog: BUG: soft lockup - CPU#0 stuck for 22s! [repro:2389] Modules linked in: irq event stamp: 329692056 hardirqs last enabled at (329692055): [] _raw_spin_unlock_irqrestore+0x31/0x75 hardirqs last disabled at (329692056): [] apic_timer_interrupt+0x98/0xb0 softirqs last enabled at (35659740): [] __do_softirq+0x328/0x48c softirqs last disabled at (35659731): [] irq_exit+0xbc/0xd0 CPU: 0 PID: 2389 Comm: repro Not tainted 4.14.0-rc7 #23 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 task: ffff880009452140 task.stack: ffff880006a20000 RIP: 0010:_raw_spin_lock_irqsave+0x11/0x80 RSP: 0018:ffff880006a27c50 EFLAGS: 00000282 ORIG_RAX: ffffffffffffff10 RAX: ffff880009ac68d0 RBX: ffff880006a27ce0 RCX: 0000000000000000 RDX: 0000000000000001 RSI: ffff880006a27ce0 RDI: ffff880009ac6900 RBP: ffff880006a27c60 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000001 R11: 000000000063ff00 R12: ffff880009ac6900 R13: ffff880006a27cf8 R14: 0000000000000001 R15: ffff880006a27cf8 FS: 00007f4be4838700(0000) GS:ffff88000cc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020101000 CR3: 0000000009616000 CR4: 00000000000006f0 Call Trace: prepare_to_wait+0x26/0xc0 sock_alloc_send_pskb+0x14e/0x270 ? remove_wait_queue+0x60/0x60 tun_get_user+0x2cc/0x19d0 ? __tun_get+0x60/0x1b0 tun_chr_write_iter+0x57/0x86 __vfs_write+0x156/0x1e0 vfs_write+0xf7/0x230 SyS_write+0x57/0xd0 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x7f4be4356df9 RSP: 002b:00007ffc18101c08 EFLAGS: 00000293 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f4be4356df9 RDX: 0000000000000046 RSI: 0000000020101000 RDI: 0000000000000005 RBP: 00007ffc18101c40 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000001 R11: 0000000000000293 R12: 0000559c75f64780 R13: 00007ffc18101d30 R14: 0000000000000000 R15: 0000000000000000 Fixes: 33dccbb050bb ("tun: Limit amount of queued packets per device") Fixes: 20d29d7a916a ("net: macvtap driver") Signed-off-by: Craig Gallek Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/macvtap.c | 2 ++ drivers/net/tun.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 79de9608ac48..ed96fdefd8e5 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1117,6 +1117,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETSNDBUF: if (get_user(s, sp)) return -EFAULT; + if (s <= 0) + return -EINVAL; q->sk.sk_sndbuf = s; return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cd191f82d816..0d9b7778f7a5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2065,6 +2065,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EFAULT; break; } + if (sndbuf <= 0) { + ret = -EINVAL; + break; + } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); From 71c4a0fc35768b59b0fbd88c3eda719ed924237c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Oct 2017 23:08:20 -0700 Subject: [PATCH 064/312] tcp: fix tcp_mtu_probe() vs highest_sack [ Upstream commit 2b7cda9c35d3b940eb9ce74b30bbd5eb30db493d ] Based on SNMP values provided by Roman, Yuchung made the observation that some crashes in tcp_sacktag_walk() might be caused by MTU probing. Looking at tcp_mtu_probe(), I found that when a new skb was placed in front of the write queue, we were not updating tcp highest sack. If one skb is freed because all its content was copied to the new skb (for MTU probing), then tp->highest_sack could point to a now freed skb. Bad things would then happen, including infinite loops. This patch renames tcp_highest_sack_combine() and uses it from tcp_mtu_probe() to fix the bug. Note that I also removed one test against tp->sacked_out, since we want to replace tp->highest_sack regardless of whatever condition, since keeping a stale pointer to freed skb is a recipe for disaster. Fixes: a47e5a988a57 ("[TCP]: Convert highest_sack to sk_buff to allow direct access") Signed-off-by: Eric Dumazet Reported-by: Alexei Starovoitov Reported-by: Roman Gushchin Reported-by: Oleksandr Natalenko Acked-by: Alexei Starovoitov Acked-by: Neal Cardwell Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/tcp.h | 6 +++--- net/ipv4/tcp_output.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index e9d7a8ef9a6d..cecb0e0eff06 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1612,12 +1612,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk) tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); } -/* Called when old skb is about to be deleted (to be combined with new skb) */ -static inline void tcp_highest_sack_combine(struct sock *sk, +/* Called when old skb is about to be deleted and replaced by new skb */ +static inline void tcp_highest_sack_replace(struct sock *sk, struct sk_buff *old, struct sk_buff *new) { - if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) + if (old == tcp_highest_sack(sk)) tcp_sk(sk)->highest_sack = new; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 850d1b5bfd81..64c7ce847584 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1951,6 +1951,7 @@ static int tcp_mtu_probe(struct sock *sk) nskb->ip_summed = skb->ip_summed; tcp_insert_write_queue_before(nskb, skb, sk); + tcp_highest_sack_replace(sk, skb, nskb); len = 0; tcp_for_write_queue_from_safe(skb, next, sk) { @@ -2464,7 +2465,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); - tcp_highest_sack_combine(sk, next_skb, skb); + tcp_highest_sack_replace(sk, next_skb, skb); tcp_unlink_write_queue(next_skb, sk); From 196f4755cc821f09f3ef2ff8cf54bbf34b6ab76c Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 13 Oct 2017 19:22:35 +0200 Subject: [PATCH 065/312] l2tp: check ps->sock before running pppol2tp_session_ioctl() [ Upstream commit 5903f594935a3841137c86b9d5b75143a5b7121c ] When pppol2tp_session_ioctl() is called by pppol2tp_tunnel_ioctl(), the session may be unconnected. That is, it was created by pppol2tp_session_create() and hasn't been connected with pppol2tp_connect(). In this case, ps->sock is NULL, so we need to check for this case in order to avoid dereferencing a NULL pointer. Fixes: 309795f4bec2 ("l2tp: Add netlink control API for L2TP") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_ppp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 8ab9c5d74416..67f2e72723b2 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1015,6 +1015,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, session->name, cmd, arg); sk = ps->sock; + if (!sk) + return -EBADR; + sock_hold(sk); switch (cmd) { From 4b27fe34a226dd9087cb2e93161ffec03952c05a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 13 Oct 2017 11:58:53 -0700 Subject: [PATCH 066/312] tun: call dev_get_valid_name() before register_netdevice() [ Upstream commit 0ad646c81b2182f7fa67ec0c8c825e0ee165696d ] register_netdevice() could fail early when we have an invalid dev name, in which case ->ndo_uninit() is not called. For tun device, this is a problem because a timer etc. are already initialized and it expects ->ndo_uninit() to clean them up. We could move these initializations into a ->ndo_init() so that register_netdevice() knows better, however this is still complicated due to the logic in tun_detach(). Therefore, I choose to just call dev_get_valid_name() before register_netdevice(), which is quicker and much easier to audit. 
And for this specific case, it is already enough. Fixes: 96442e42429e ("tuntap: choose the txq based on rxq") Reported-by: Dmitry Alexeev Cc: Jason Wang Cc: "Michael S. Tsirkin" Signed-off-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 3 +++ include/linux/netdevice.h | 3 +++ net/core/dev.c | 6 +++--- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0d9b7778f7a5..b45eea135b68 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1684,6 +1684,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; + err = dev_get_valid_name(net, dev, name); + if (err) + goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4035bbe40971..fc54049e8286 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3469,6 +3469,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs); +int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name); + #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) diff --git a/net/core/dev.c b/net/core/dev.c index dac52fa60f25..630704d8d6a2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1111,9 +1111,8 @@ static int dev_alloc_name_ns(struct net *net, return ret; } -static int dev_get_valid_name(struct net *net, - struct net_device *dev, - const char *name) +int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name) { BUG_ON(!net); @@ -1129,6 +1128,7 @@ static int dev_get_valid_name(struct net *net, return 0; } +EXPORT_SYMBOL(dev_get_valid_name); /** * dev_change_name - change name of a device From ef3a12f1dc4ae0c1d46476fe536a443b0582ae5f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 18 Oct 2017 21:37:49 +0800 Subject: [PATCH 067/312] sctp: add the missing sock_owned_by_user check in sctp_icmp_redirect [ Upstream commit 1cc276cec9ec574d41cf47dfc0f51406b6f26ab4 ] Now sctp processes icmp redirect packet in sctp_icmp_redirect where it calls sctp_transport_dst_check in which tp->dst can be released. The problem is before calling sctp_transport_dst_check, it doesn't check sock_owned_by_user, which means tp->dst could be freed while a process is accessing it with owning the socket. An use-after-free issue could be triggered by this. This patch is to fix it by checking sock_owned_by_user before calling sctp_transport_dst_check in sctp_icmp_redirect, so that it would not release tp->dst if users still hold sock lock. Besides, the same issue fixed in commit 45caeaa5ac0b ("dccp/tcp: fix routing redirect race") on sctp also needs this check. Fixes: 55be7a9c6074 ("ipv4: Add redirect support to all protocol icmp error handlers") Reported-by: Eric Dumazet Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sctp/input.c b/net/sctp/input.c index 2d7859c03fd2..71c2ef84c5b0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -420,7 +420,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, { struct dst_entry *dst; - if (!t) + if (sock_owned_by_user(sk) || !t) return; dst = sctp_transport_dst_check(t); if (dst) From d552c8c5007afe1a614f8bc92507d8789aa70307 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Oct 2017 16:14:52 -0700 Subject: [PATCH 068/312] packet: avoid panic in packet_getsockopt() [ Upstream commit 509c7a1ecc8601f94ffba8a00889fefb239c00c6 ] syzkaller got crashes in packet_getsockopt() processing PACKET_ROLLOVER_STATS command while another thread was managing to change po->rollover Using RCU will fix this bug. We might later add proper RCU annotations for sparse sake. In v2: I replaced kfree(rollover) in fanout_add() to kfree_rcu() variant, as spotted by John. Fixes: a9b6391814d5 ("packet: rollover statistics") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Cc: John Sperbeck Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 241f69039a72..1584f89c456a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1724,7 +1724,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) out: if (err && rollover) { - kfree(rollover); + kfree_rcu(rollover, rcu); po->rollover = NULL; } mutex_unlock(&fanout_mutex); @@ -1751,8 +1751,10 @@ static struct packet_fanout *fanout_release(struct sock *sk) else f = NULL; - if (po->rollover) + if (po->rollover) { kfree_rcu(po->rollover, rcu); + po->rollover = NULL; + } } mutex_unlock(&fanout_mutex); @@ -3769,6 +3771,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; + struct packet_rollover *rollover; if (level != SOL_PACKET) return -ENOPROTOOPT; @@ -3847,13 +3850,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, 0); break; case PACKET_ROLLOVER_STATS: - if (!po->rollover) + rcu_read_lock(); + rollover = rcu_dereference(po->rollover); + if (rollover) { + rstats.tp_all = atomic_long_read(&rollover->num); + rstats.tp_huge = atomic_long_read(&rollover->num_huge); + rstats.tp_failed = atomic_long_read(&rollover->num_failed); + data = &rstats; + lv = sizeof(rstats); + } + rcu_read_unlock(); + if (!rollover) return -EINVAL; - rstats.tp_all = atomic_long_read(&po->rollover->num); - rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); - rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); - data = &rstats; - lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; From 84237c54b2399076c7a22b7818b0a414099d02d0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 21 Oct 2017 12:26:23 -0700 Subject: [PATCH 069/312] ipv6: flowlabel: do not leave opt->tot_len with garbage [ Upstream commit 864e2a1f8aac05effac6063ce316b480facb46ff ] When syzkaller team brought us a C repro for the crash [1] that had been reported many times in the past, I finally could find the root cause. 
If FlowLabel info is merged by fl6_merge_options(), we leave part of the opt_space storage provided by udp/raw/l2tp with random value in opt_space.tot_len, unless a control message was provided at sendmsg() time. Then ip6_setup_cork() would use this random value to perform a kzalloc() call. Undefined behavior and crashes. Fix is to properly set tot_len in fl6_merge_options() At the same time, we can also avoid consuming memory and cpu cycles to clear it, if every option is copied via a kmemdup(). This is the change in ip6_setup_cork(). [1] kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 0 PID: 6613 Comm: syz-executor0 Not tainted 4.14.0-rc4+ #127 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 task: ffff8801cb64a100 task.stack: ffff8801cc350000 RIP: 0010:ip6_setup_cork+0x274/0x15c0 net/ipv6/ip6_output.c:1168 RSP: 0018:ffff8801cc357550 EFLAGS: 00010203 RAX: dffffc0000000000 RBX: ffff8801cc357748 RCX: 0000000000000010 RDX: 0000000000000002 RSI: ffffffff842bd1d9 RDI: 0000000000000014 RBP: ffff8801cc357620 R08: ffff8801cb17f380 R09: ffff8801cc357b10 R10: ffff8801cb64a100 R11: 0000000000000000 R12: ffff8801cc357ab0 R13: ffff8801cc357b10 R14: 0000000000000000 R15: ffff8801c3bbf0c0 FS: 00007f9c5c459700(0000) GS:ffff8801db200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020324000 CR3: 00000001d1cf2000 CR4: 00000000001406f0 DR0: 0000000020001010 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 Call Trace: ip6_make_skb+0x282/0x530 net/ipv6/ip6_output.c:1729 udpv6_sendmsg+0x2769/0x3380 net/ipv6/udp.c:1340 inet_sendmsg+0x11f/0x5e0 net/ipv4/af_inet.c:762 sock_sendmsg_nosec net/socket.c:633 [inline] sock_sendmsg+0xca/0x110 net/socket.c:643 SYSC_sendto+0x358/0x5a0 net/socket.c:1750 SyS_sendto+0x40/0x50 net/socket.c:1718 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x4520a9 RSP: 002b:00007f9c5c458c08 EFLAGS: 00000216 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 0000000000718000 RCX: 00000000004520a9 RDX: 0000000000000001 RSI: 0000000020fd1000 RDI: 0000000000000016 RBP: 0000000000000086 R08: 0000000020e0afe4 R09: 000000000000001c R10: 0000000000000000 R11: 0000000000000216 R12: 00000000004bb1ee R13: 00000000ffffffff R14: 0000000000000016 R15: 0000000000000029 Code: e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85 ea 0f 00 00 48 8d 79 04 48 b8 00 00 00 00 00 fc ff df 45 8b 74 24 04 48 89 fa 48 c1 ea 03 <0f> b6 14 02 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85 RIP: ip6_setup_cork+0x274/0x15c0 net/ipv6/ip6_output.c:1168 RSP: ffff8801cc357550 Signed-off-by: Eric Dumazet Reported-by: Dmitry Vyukov Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_flowlabel.c | 1 + net/ipv6/ip6_output.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index dc2db4f7b182..f3a0a9c0f61e 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, } opt_space->dst1opt = fopt->dst1opt; opt_space->opt_flen = fopt->opt_flen; + opt_space->tot_len = fopt->tot_len; return opt_space; } EXPORT_SYMBOL_GPL(fl6_merge_options); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e22339fad10b..71624cf26832 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1201,11 +1201,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (WARN_ON(v6_cork->opt)) return -EINVAL; - v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); + v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation); if (unlikely(!v6_cork->opt)) return -ENOBUFS; - v6_cork->opt->tot_len = opt->tot_len; + v6_cork->opt->tot_len = sizeof(*opt); v6_cork->opt->opt_flen = opt->opt_flen; v6_cork->opt->opt_nflen = opt->opt_nflen; From 93b12f202a4eae6856e2f67e002c8e4363600d4a Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Wed, 25 Oct 2017 10:16:42 -0700 Subject: [PATCH 070/312] net/unix: don't show information about sockets from other namespaces [ Upstream commit 0f5da659d8f1810f44de14acf2c80cd6499623a0 ] socket_diag shows information only about sockets from a namespace where a diag socket lives. But if we request information about one unix socket, the kernel don't check that its netns is matched with a diag socket namespace, so any user can get information about any unix socket in a system. This looks like a bug. v2: add a Fixes tag Fixes: 51d7cccf0723 ("net: make sock diag per-namespace") Signed-off-by: Andrei Vagin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/unix/diag.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/unix/diag.c b/net/unix/diag.c index 4d9679701a6d..384c84e83462 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb, err = -ENOENT; if (sk == NULL) goto out_nosk; + if (!net_eq(sock_net(sk), net)) + goto out; err = sock_diag_check_cookie(sk, req->udiag_cookie); if (err) From 5c8a0850bae234f3f58d166afd75c7e02c95c5c6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 26 Oct 2017 19:23:27 +0800 Subject: [PATCH 071/312] ip6_gre: only increase err_count for some certain type icmpv6 in ip6gre_err [ Upstream commit f8d20b46ce55cf40afb30dcef6d9288f7ef46d9b ] The similar fix in patch 'ipip: only increase err_count for some certain type icmp in ipip_err' is needed for ip6gre_err. In Jianlin's case, udp netperf broke even when receiving a TooBig icmpv6 packet. Fixes: c12b395a4664 ("gre: Support GRE over IPv6") Reported-by: Jianlin Shi Signed-off-by: Xin Long Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index eab117033b8a..c878cbf65485 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -409,13 +409,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, case ICMPV6_DEST_UNREACH: net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", t->parms.name); - break; + if (code != ICMPV6_PORT_UNREACH) + break; + return; case ICMPV6_TIME_EXCEED: if (code == ICMPV6_EXC_HOPLIMIT) { net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", t->parms.name); + break; } - break; + return; case ICMPV6_PARAMPROB: teli = 0; if (code == ICMPV6_HDR_FIELD) @@ -431,13 +434,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", t->parms.name); } - break; + return; case ICMPV6_PKT_TOOBIG: mtu = be32_to_cpu(info) - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - break; + return; } if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) From bcb3b90cf3c8369ce0ca48b6e4f67eb57abb1438 Mon Sep 17 00:00:00 2001 From: Julien Gomes Date: Wed, 25 Oct 2017 11:50:50 -0700 Subject: [PATCH 072/312] tun: allow positive return values on dev_get_valid_name() call [ Upstream commit 5c25f65fd1e42685f7ccd80e0621829c105785d9 ] If the name argument of dev_get_valid_name() contains "%d", it will try to assign it a unit number in __dev__alloc_name() and return either the unit number (>= 0) or an error code (< 0). Considering positive values as error values prevent tun device creations relying this mechanism, therefor we should only consider negative values as errors here. Signed-off-by: Julien Gomes Acked-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b45eea135b68..50bfded6d7ef 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1685,7 +1685,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; err = dev_get_valid_name(net, dev, name); - if (err) + if (err < 0) goto err_free_dev; dev_net_set(dev, net); From 4b5bb7723da1da2198a5c80e47ecfceb88995ae3 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 28 Oct 2017 02:13:29 +0800 Subject: [PATCH 073/312] sctp: reset owner sk for data chunks on out queues when migrating a sock [ Upstream commit d04adf1b355181e737b6b1e23d801b07f0b7c4c0 ] Now when migrating sock to another one in sctp_sock_migrate(), it only resets owner sk for the data in receive queues, not the chunks on out queues. It would cause that data chunks length on the sock is not consistent with sk sk_wmem_alloc. When closing the sock or freeing these chunks, the old sk would never be freed, and the new sock may crash due to the overflow sk_wmem_alloc. syzbot found this issue with this series: r0 = socket$inet_sctp() sendto$inet(r0) listen(r0) accept4(r0) close(r0) Although listen() should have returned error when one TCP-style socket is in connecting (I may fix this one in another patch), it could also be reproduced by peeling off an assoc. This issue is there since very beginning. This patch is to reset owner sk for the chunks on out queues so that sk sk_wmem_alloc has correct value after accept one sock or peeloff an assoc to one sock. 
Note that when resetting owner sk for chunks on outqueue, it has to sctp_clear_owner_w/skb_orphan chunks before changing assoc->base.sk first and then sctp_set_owner_w them after changing assoc->base.sk, due to that sctp_wfree and it's callees are using assoc->base.sk. Reported-by: Dmitry Vyukov Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3ebf3b652d60..73eec73ff733 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) sk_mem_charge(sk, chunk->skb->truesize); } +static void sctp_clear_owner_w(struct sctp_chunk *chunk) +{ + skb_orphan(chunk->skb); +} + +static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, + void (*cb)(struct sctp_chunk *)) + +{ + struct sctp_outq *q = &asoc->outqueue; + struct sctp_transport *t; + struct sctp_chunk *chunk; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) + list_for_each_entry(chunk, &t->transmitted, transmitted_list) + cb(chunk); + + list_for_each_entry(chunk, &q->retransmit, list) + cb(chunk); + + list_for_each_entry(chunk, &q->sacked, list) + cb(chunk); + + list_for_each_entry(chunk, &q->abandoned, list) + cb(chunk); + + list_for_each_entry(chunk, &q->out_chunk_list, list) + cb(chunk); +} + /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) @@ -7362,7 +7392,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); + sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); sctp_assoc_migrate(assoc, newsk); + sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. From 9bae2ffb87d69aa9b71248724376956eb8d2e656 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 6 Oct 2017 17:05:49 +0200 Subject: [PATCH 074/312] ppp: fix race in ppp device destruction [ Upstream commit 6151b8b37b119e8e3a8401b080d532520c95faf4 ] ppp_release() tries to ensure that netdevices are unregistered before decrementing the unit refcount and running ppp_destroy_interface(). This is all fine as long as the the device is unregistered by ppp_release(): the unregister_netdevice() call, followed by rtnl_unlock(), guarantee that the unregistration process completes before rtnl_unlock() returns. However, the device may be unregistered by other means (like ppp_nl_dellink()). If this happens right before ppp_release() calling rtnl_lock(), then ppp_release() has to wait for the concurrent unregistration code to release the lock. But rtnl_unlock() releases the lock before completing the device unregistration process. This allows ppp_release() to proceed and eventually call ppp_destroy_interface() before the unregistration process completes. Calling free_netdev() on this partially unregistered device will BUG(): ------------[ cut here ]------------ kernel BUG at net/core/dev.c:8141! 
invalid opcode: 0000 [#1] SMP CPU: 1 PID: 1557 Comm: pppd Not tainted 4.14.0-rc2+ #4 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014 Call Trace: ppp_destroy_interface+0xd8/0xe0 [ppp_generic] ppp_disconnect_channel+0xda/0x110 [ppp_generic] ppp_unregister_channel+0x5e/0x110 [ppp_generic] pppox_unbind_sock+0x23/0x30 [pppox] pppoe_connect+0x130/0x440 [pppoe] SYSC_connect+0x98/0x110 ? do_fcntl+0x2c0/0x5d0 SyS_connect+0xe/0x10 entry_SYSCALL_64_fastpath+0x1a/0xa5 RIP: free_netdev+0x107/0x110 RSP: ffffc28a40573d88 ---[ end trace ed294ff0cc40eeff ]--- We could set the ->needs_free_netdev flag on PPP devices and move the ppp_destroy_interface() logic in the ->priv_destructor() callback. But that'd be quite intrusive as we'd first need to unlink from the other channels and units that depend on the device (the ones that used the PPPIOCCONNECT and PPPIOCATTACH ioctls). Instead, we can just let the netdevice hold a reference on its ppp_file. This reference is dropped in ->priv_destructor(), at the very end of the unregistration process, so that neither ppp_release() nor ppp_disconnect_channel() can call ppp_destroy_interface() in the interim. Reported-by: Beniamino Galvani Fixes: 8cb775bc0a34 ("ppp: fix device unregistration upon netns deletion") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ppp/ppp_generic.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e5bb870b5461..dc454138d600 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1110,7 +1110,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { + struct ppp *ppp; + dev->qdisc_tx_busylock = &ppp_tx_busylock; + + ppp = netdev_priv(dev); + /* Let the netdevice take a reference on the ppp file. This ensures + * that ppp_destroy_interface() won't run before the device gets + * unregistered. + */ + atomic_inc(&ppp->file.refcnt); + return 0; } @@ -1133,6 +1143,15 @@ static void ppp_dev_uninit(struct net_device *dev) wake_up_interruptible(&ppp->file.rwait); } +static void ppp_dev_priv_destructor(struct net_device *dev) +{ + struct ppp *ppp; + + ppp = netdev_priv(dev); + if (atomic_dec_and_test(&ppp->file.refcnt)) + ppp_destroy_interface(ppp); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, @@ -1150,6 +1169,7 @@ static void ppp_setup(struct net_device *dev) dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->destructor = ppp_dev_priv_destructor; netif_keep_dst(dev); } From b9b0c99a426ad92fa42f0702bb5bf5d0f996cbd8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 26 Oct 2017 19:19:56 +0800 Subject: [PATCH 075/312] ipip: only increase err_count for some certain type icmp in ipip_err [ Upstream commit f3594f0a7ea36661d7fd942facd7f31a64245f1a ] t->err_count is used to count the link failure on tunnel and an err will be reported to user socket in tx path if t->err_count is not 0. udp socket could even return EHOSTUNREACH to users. Since commit fd58156e456d ("IPIP: Use ip-tunneling code.") removed the 'switch check' for icmp type in ipip_err(), err_count would be increased by the icmp packet with ICMP_EXC_FRAGTIME code. an link failure would be reported out due to this. 
In Jianlin's case, when receiving ICMP_EXC_FRAGTIME a icmp packet, udp netperf failed with the err: send_data: data send error: No route to host (errno 113) We expect this error reported from tunnel to socket when receiving some certain type icmp, but not ICMP_EXC_FRAGTIME, ICMP_SR_FAILED or ICMP_PARAMETERPROB ones. This patch is to bring 'switch check' for icmp type back to ipip_err so that it only reports link failure for the right type icmp, just as in ipgre_err() and ipip6_err(). Fixes: fd58156e456d ("IPIP: Use ip-tunneling code.") Reported-by: Jianlin Shi Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ipip.c | 58 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index a09fb0dec725..486b283a6cd1 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -129,42 +129,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly; static int ipip_err(struct sk_buff *skb, u32 info) { -/* All the routers (except for Linux) return only - 8 bytes of packet payload. It means, that precise relaying of - ICMP in the real Internet is absolutely infeasible. - */ + /* All the routers (except for Linux) return only + 8 bytes of packet payload. It means, that precise relaying of + ICMP in the real Internet is absolutely infeasible. + */ struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); const struct iphdr *iph = (const struct iphdr *)skb->data; - struct ip_tunnel *t; - int err; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + struct ip_tunnel *t; + int err = 0; + + switch (type) { + case ICMP_DEST_UNREACH: + switch (code) { + case ICMP_SR_FAILED: + /* Impossible event. */ + goto out; + default: + /* All others are translated to HOST_UNREACH. + * rfc2003 contains "deep thoughts" about NET_UNREACH, + * I believe they are just ether pollution. --ANK + */ + break; + } + break; + + case ICMP_TIME_EXCEEDED: + if (code != ICMP_EXC_TTL) + goto out; + break; + + case ICMP_REDIRECT: + break; + + default: + goto out; + } - err = -ENOENT; t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, iph->daddr, iph->saddr, 0); - if (!t) + if (!t) { + err = -ENOENT; goto out; + } if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_IPIP, 0); - err = 0; + ipv4_update_pmtu(skb, net, info, t->parms.link, 0, + iph->protocol, 0); goto out; } if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_IPIP, 0); - err = 0; + ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0); goto out; } - if (t->parms.iph.daddr == 0) + if (t->parms.iph.daddr == 0) { + err = -ENOENT; goto out; + } - err = 0; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) goto out; From 13eddc67565a7e582c3a1f87627842e0da80291c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Oct 2017 09:04:13 -0700 Subject: [PATCH 076/312] tcp/dccp: fix ireq->opt races [ Upstream commit c92e8c02fe664155ac4234516e32544bec0f113d ] syzkaller found another bug in DCCP/TCP stacks [1] For the reasons explained in commit ce1050089c96 ("tcp/dccp: fix ireq->pktopts race"), we need to make sure we do not access ireq->opt unless we own the request sock. Note the opt field is renamed to ireq_opt to ease grep games. 
[1] BUG: KASAN: use-after-free in ip_queue_xmit+0x1687/0x18e0 net/ipv4/ip_output.c:474 Read of size 1 at addr ffff8801c951039c by task syz-executor5/3295 CPU: 1 PID: 3295 Comm: syz-executor5 Not tainted 4.14.0-rc4+ #80 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:52 print_address_description+0x73/0x250 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x25b/0x340 mm/kasan/report.c:409 __asan_report_load1_noabort+0x14/0x20 mm/kasan/report.c:427 ip_queue_xmit+0x1687/0x18e0 net/ipv4/ip_output.c:474 tcp_transmit_skb+0x1ab7/0x3840 net/ipv4/tcp_output.c:1135 tcp_send_ack.part.37+0x3bb/0x650 net/ipv4/tcp_output.c:3587 tcp_send_ack+0x49/0x60 net/ipv4/tcp_output.c:3557 __tcp_ack_snd_check+0x2c6/0x4b0 net/ipv4/tcp_input.c:5072 tcp_ack_snd_check net/ipv4/tcp_input.c:5085 [inline] tcp_rcv_state_process+0x2eff/0x4850 net/ipv4/tcp_input.c:6071 tcp_child_process+0x342/0x990 net/ipv4/tcp_minisocks.c:816 tcp_v4_rcv+0x1827/0x2f80 net/ipv4/tcp_ipv4.c:1682 ip_local_deliver_finish+0x2e2/0xba0 net/ipv4/ip_input.c:216 NF_HOOK include/linux/netfilter.h:249 [inline] ip_local_deliver+0x1ce/0x6e0 net/ipv4/ip_input.c:257 dst_input include/net/dst.h:464 [inline] ip_rcv_finish+0x887/0x19a0 net/ipv4/ip_input.c:397 NF_HOOK include/linux/netfilter.h:249 [inline] ip_rcv+0xc3f/0x1820 net/ipv4/ip_input.c:493 __netif_receive_skb_core+0x1a3e/0x34b0 net/core/dev.c:4476 __netif_receive_skb+0x2c/0x1b0 net/core/dev.c:4514 netif_receive_skb_internal+0x10b/0x670 net/core/dev.c:4587 netif_receive_skb+0xae/0x390 net/core/dev.c:4611 tun_rx_batched.isra.50+0x5ed/0x860 drivers/net/tun.c:1372 tun_get_user+0x249c/0x36d0 drivers/net/tun.c:1766 tun_chr_write_iter+0xbf/0x160 drivers/net/tun.c:1792 call_write_iter include/linux/fs.h:1770 [inline] new_sync_write fs/read_write.c:468 [inline] __vfs_write+0x68a/0x970 fs/read_write.c:481 vfs_write+0x18f/0x510 fs/read_write.c:543 SYSC_write fs/read_write.c:588 [inline] SyS_write+0xef/0x220 fs/read_write.c:580 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x40c341 RSP: 002b:00007f469523ec10 EFLAGS: 00000293 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000718000 RCX: 000000000040c341 RDX: 0000000000000037 RSI: 0000000020004000 RDI: 0000000000000015 RBP: 0000000000000086 R08: 0000000000000000 R09: 0000000000000000 R10: 00000000000f4240 R11: 0000000000000293 R12: 00000000004b7fd1 R13: 00000000ffffffff R14: 0000000020000000 R15: 0000000000025000 Allocated by task 3295: save_stack_trace+0x16/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 [inline] kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551 __do_kmalloc mm/slab.c:3725 [inline] __kmalloc+0x162/0x760 mm/slab.c:3734 kmalloc include/linux/slab.h:498 [inline] tcp_v4_save_options include/net/tcp.h:1962 [inline] tcp_v4_init_req+0x2d3/0x3e0 net/ipv4/tcp_ipv4.c:1271 tcp_conn_request+0xf6d/0x3410 net/ipv4/tcp_input.c:6283 tcp_v4_conn_request+0x157/0x210 net/ipv4/tcp_ipv4.c:1313 tcp_rcv_state_process+0x8ea/0x4850 net/ipv4/tcp_input.c:5857 tcp_v4_do_rcv+0x55c/0x7d0 net/ipv4/tcp_ipv4.c:1482 tcp_v4_rcv+0x2d10/0x2f80 net/ipv4/tcp_ipv4.c:1711 ip_local_deliver_finish+0x2e2/0xba0 net/ipv4/ip_input.c:216 NF_HOOK include/linux/netfilter.h:249 [inline] ip_local_deliver+0x1ce/0x6e0 net/ipv4/ip_input.c:257 dst_input include/net/dst.h:464 [inline] ip_rcv_finish+0x887/0x19a0 net/ipv4/ip_input.c:397 NF_HOOK 
include/linux/netfilter.h:249 [inline] ip_rcv+0xc3f/0x1820 net/ipv4/ip_input.c:493 __netif_receive_skb_core+0x1a3e/0x34b0 net/core/dev.c:4476 __netif_receive_skb+0x2c/0x1b0 net/core/dev.c:4514 netif_receive_skb_internal+0x10b/0x670 net/core/dev.c:4587 netif_receive_skb+0xae/0x390 net/core/dev.c:4611 tun_rx_batched.isra.50+0x5ed/0x860 drivers/net/tun.c:1372 tun_get_user+0x249c/0x36d0 drivers/net/tun.c:1766 tun_chr_write_iter+0xbf/0x160 drivers/net/tun.c:1792 call_write_iter include/linux/fs.h:1770 [inline] new_sync_write fs/read_write.c:468 [inline] __vfs_write+0x68a/0x970 fs/read_write.c:481 vfs_write+0x18f/0x510 fs/read_write.c:543 SYSC_write fs/read_write.c:588 [inline] SyS_write+0xef/0x220 fs/read_write.c:580 entry_SYSCALL_64_fastpath+0x1f/0xbe Freed by task 3306: save_stack_trace+0x16/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 [inline] kasan_slab_free+0x71/0xc0 mm/kasan/kasan.c:524 __cache_free mm/slab.c:3503 [inline] kfree+0xca/0x250 mm/slab.c:3820 inet_sock_destruct+0x59d/0x950 net/ipv4/af_inet.c:157 __sk_destruct+0xfd/0x910 net/core/sock.c:1560 sk_destruct+0x47/0x80 net/core/sock.c:1595 __sk_free+0x57/0x230 net/core/sock.c:1603 sk_free+0x2a/0x40 net/core/sock.c:1614 sock_put include/net/sock.h:1652 [inline] inet_csk_complete_hashdance+0xd5/0xf0 net/ipv4/inet_connection_sock.c:959 tcp_check_req+0xf4d/0x1620 net/ipv4/tcp_minisocks.c:765 tcp_v4_rcv+0x17f6/0x2f80 net/ipv4/tcp_ipv4.c:1675 ip_local_deliver_finish+0x2e2/0xba0 net/ipv4/ip_input.c:216 NF_HOOK include/linux/netfilter.h:249 [inline] ip_local_deliver+0x1ce/0x6e0 net/ipv4/ip_input.c:257 dst_input include/net/dst.h:464 [inline] ip_rcv_finish+0x887/0x19a0 net/ipv4/ip_input.c:397 NF_HOOK include/linux/netfilter.h:249 [inline] ip_rcv+0xc3f/0x1820 net/ipv4/ip_input.c:493 __netif_receive_skb_core+0x1a3e/0x34b0 net/core/dev.c:4476 __netif_receive_skb+0x2c/0x1b0 net/core/dev.c:4514 netif_receive_skb_internal+0x10b/0x670 net/core/dev.c:4587 netif_receive_skb+0xae/0x390 net/core/dev.c:4611 tun_rx_batched.isra.50+0x5ed/0x860 drivers/net/tun.c:1372 tun_get_user+0x249c/0x36d0 drivers/net/tun.c:1766 tun_chr_write_iter+0xbf/0x160 drivers/net/tun.c:1792 call_write_iter include/linux/fs.h:1770 [inline] new_sync_write fs/read_write.c:468 [inline] __vfs_write+0x68a/0x970 fs/read_write.c:481 vfs_write+0x18f/0x510 fs/read_write.c:543 SYSC_write fs/read_write.c:588 [inline] SyS_write+0xef/0x220 fs/read_write.c:580 entry_SYSCALL_64_fastpath+0x1f/0xbe Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets") Fixes: 079096f103fa ("tcp/dccp: install syn_recv requests into ehash table") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/inet_sock.h | 2 +- net/dccp/ipv4.c | 13 ++++++++----- net/ipv4/cipso_ipv4.c | 24 +++++++----------------- net/ipv4/inet_connection_sock.c | 8 +++----- net/ipv4/syncookies.c | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_ipv4.c | 21 ++++++++++++--------- 7 files changed, 33 insertions(+), 39 deletions(-) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 625bdf95d673..e2113db93d5f 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -95,7 +95,7 @@ struct inet_request_sock { kmemcheck_bitfield_end(flags); u32 ir_mark; union { - struct ip_options_rcu *opt; + struct ip_options_rcu __rcu *ireq_opt; struct sk_buff *pktopts; }; }; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e217f17997a4..888ff5a24ec4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newinet->inet_saddr = ireq->ir_loc_addr; - newinet->inet_opt = ireq->opt; - ireq->opt = NULL; + RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->inet_id = jiffies; @@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); - + if (*own_req) + ireq->ireq_opt = NULL; + else + newinet->inet_opt = NULL; return newsk; exit_overflow: @@ -441,6 +443,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + newinet->inet_opt = NULL; inet_csk_prepare_forced_close(newsk); dccp_done(newsk); goto exit; @@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req ireq->ir_rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - ireq->opt); + rcu_dereference(ireq->ireq_opt)); err = net_xmit_eval(err); } @@ -546,7 +549,7 @@ out: static void dccp_v4_reqsk_destructor(struct request_sock *req) { dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); - kfree(inet_rsk(req)->opt); + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); } void dccp_syn_ack_timeout(const struct request_sock *req) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 6cc3e1d602fb..5f3b81941a6f 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -2012,7 +2012,7 @@ int cipso_v4_req_setattr(struct request_sock *req, buf = NULL; req_inet = inet_rsk(req); - opt = xchg(&req_inet->opt, opt); + opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt); if (opt) kfree_rcu(opt, rcu); @@ -2034,11 +2034,13 @@ req_setattr_failure: * values on failure. 
* */ -static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) +static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr) { + struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1); int hdr_delta = 0; - struct ip_options_rcu *opt = *opt_ptr; + if (!opt || opt->opt.cipso == 0) + return 0; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; @@ -2100,14 +2102,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) */ void cipso_v4_sock_delattr(struct sock *sk) { - int hdr_delta; - struct ip_options_rcu *opt; struct inet_sock *sk_inet; + int hdr_delta; sk_inet = inet_sk(sk); - opt = rcu_dereference_protected(sk_inet->inet_opt, 1); - if (!opt || opt->opt.cipso == 0) - return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { @@ -2127,15 +2125,7 @@ void cipso_v4_sock_delattr(struct sock *sk) */ void cipso_v4_req_delattr(struct request_sock *req) { - struct ip_options_rcu *opt; - struct inet_request_sock *req_inet; - - req_inet = inet_rsk(req); - opt = req_inet->opt; - if (!opt || opt->opt.cipso == 0) - return; - - cipso_v4_delopt(&req_inet->opt); + cipso_v4_delopt(&inet_rsk(req)->ireq_opt); } /** diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 45fa2aaa3d3f..689145a521e3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -412,9 +412,10 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, { const struct inet_request_sock *ireq = inet_rsk(req); struct net *net = read_pnet(&ireq->ireq_net); - struct ip_options_rcu *opt = ireq->opt; + struct ip_options_rcu *opt; struct rtable *rt; + opt = rcu_dereference(ireq->ireq_opt); flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -448,10 +449,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, struct flowi4 *fl4; struct rtable *rt; + opt = rcu_dereference(ireq->ireq_opt); fl4 = &newinet->cork.fl.u.ip4; - rcu_read_lock(); - opt = rcu_dereference(newinet->inet_opt); flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -464,13 +464,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, goto no_route; if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) goto route_err; - rcu_read_unlock(); return &rt->dst; route_err: ip_rt_put(rt); no_route: - rcu_read_unlock(); IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 731b91409625..c22a74374a9c 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -357,7 +357,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ - ireq->opt = tcp_v4_save_options(skb); + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb)); if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c4bbf704ff9c..9e8d70160d20 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6105,7 +6105,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, struct inet_request_sock *ireq = inet_rsk(req); kmemcheck_annotate_bitfield(ireq, flags); - ireq->opt = NULL; + ireq->ireq_opt = NULL; 
atomic64_set(&ireq->ir_cookie, 0); ireq->ireq_state = TCP_NEW_SYN_RECV; write_pnet(&ireq->ireq_net, sock_net(sk_listener)); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 198fc2314c82..85293a285c50 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -856,7 +856,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - ireq->opt); + rcu_dereference(ireq->ireq_opt)); err = net_xmit_eval(err); } @@ -868,7 +868,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, */ static void tcp_v4_reqsk_destructor(struct request_sock *req) { - kfree(inet_rsk(req)->opt); + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); } @@ -1197,7 +1197,7 @@ static void tcp_v4_init_req(struct request_sock *req, sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); ireq->no_srccheck = inet_sk(sk_listener)->transparent; - ireq->opt = tcp_v4_save_options(skb); + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb)); } static struct dst_entry *tcp_v4_route_req(const struct sock *sk, @@ -1292,10 +1292,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, ireq = inet_rsk(req); sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); - newinet->inet_saddr = ireq->ir_loc_addr; - inet_opt = ireq->opt; - rcu_assign_pointer(newinet->inet_opt, inet_opt); - ireq->opt = NULL; + newinet->inet_saddr = ireq->ir_loc_addr; + inet_opt = rcu_dereference(ireq->ireq_opt); + RCU_INIT_POINTER(newinet->inet_opt, inet_opt); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; @@ -1343,9 +1342,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); - if (*own_req) + if (likely(*own_req)) { tcp_move_syn(newtp, req); - + ireq->ireq_opt = NULL; + } else { + newinet->inet_opt = NULL; + } return newsk; exit_overflow: @@ -1356,6 +1358,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + newinet->inet_opt = NULL; inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto exit; From 6f8048cd59d7b3d0b2abb38fbdedcaa5bf69ed28 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Oct 2017 12:33:57 -0700 Subject: [PATCH 077/312] tcp/dccp: fix lockdep splat in inet_csk_route_req() [ Upstream commit a6ca7abe53633d08eea1c6756cb49c9b2d4c90bf ] This patch fixes the following lockdep splat in inet_csk_route_req() lockdep_rcu_suspicious inet_csk_route_req tcp_v4_send_synack tcp_rtx_synack inet_rtx_syn_ack tcp_fastopen_synack_time tcp_retransmit_timer tcp_write_timer_handler tcp_write_timer call_timer_fn Thread running inet_csk_route_req() owns a reference on the request socket, so we have the guarantee ireq->ireq_opt wont be changed or freed. lockdep can enforce this invariant for us. Fixes: c92e8c02fe66 ("tcp/dccp: fix ireq->opt races") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/inet_connection_sock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 689145a521e3..74bc77ae551b 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -415,7 +415,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, struct ip_options_rcu *opt; struct rtable *rt; - opt = rcu_dereference(ireq->ireq_opt); + opt = rcu_dereference_protected(ireq->ireq_opt, + atomic_read(&req->rsk_refcnt) > 0); flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), From 11fa3353205e732274bdf7d4c41a510b11397a21 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 Oct 2017 08:20:31 -0700 Subject: [PATCH 078/312] tcp/dccp: fix other lockdep splats accessing ireq_opt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 06f877d613be3621604c2520ec0351d9fbdca15f ] In my first attempt to fix the lockdep splat, I forgot we could enter inet_csk_route_req() with a freshly allocated request socket, for which refcount has not yet been elevated, due to complex SLAB_TYPESAFE_BY_RCU rules. We either are in rcu_read_lock() section _or_ we own a refcount on the request. Correct RCU verb to use here is rcu_dereference_check(), although it is not possible to prove we actually own a reference on a shared refcount :/ In v2, I added ireq_opt_deref() helper and use in three places, to fix other possible splats. [ 49.844590] lockdep_rcu_suspicious+0xea/0xf3 [ 49.846487] inet_csk_route_req+0x53/0x14d [ 49.848334] tcp_v4_route_req+0xe/0x10 [ 49.850174] tcp_conn_request+0x31c/0x6a0 [ 49.851992] ? __lock_acquire+0x614/0x822 [ 49.854015] tcp_v4_conn_request+0x5a/0x79 [ 49.855957] ? tcp_v4_conn_request+0x5a/0x79 [ 49.858052] tcp_rcv_state_process+0x98/0xdcc [ 49.859990] ? sk_filter_trim_cap+0x2f6/0x307 [ 49.862085] tcp_v4_do_rcv+0xfc/0x145 [ 49.864055] ? tcp_v4_do_rcv+0xfc/0x145 [ 49.866173] tcp_v4_rcv+0x5ab/0xaf9 [ 49.868029] ip_local_deliver_finish+0x1af/0x2e7 [ 49.870064] ip_local_deliver+0x1b2/0x1c5 [ 49.871775] ? inet_del_offload+0x45/0x45 [ 49.873916] ip_rcv_finish+0x3f7/0x471 [ 49.875476] ip_rcv+0x3f1/0x42f [ 49.876991] ? ip_local_deliver_finish+0x2e7/0x2e7 [ 49.878791] __netif_receive_skb_core+0x6d3/0x950 [ 49.880701] ? process_backlog+0x7e/0x216 [ 49.882589] __netif_receive_skb+0x1d/0x5e [ 49.884122] process_backlog+0x10c/0x216 [ 49.885812] net_rx_action+0x147/0x3df Fixes: a6ca7abe53633 ("tcp/dccp: fix lockdep splat in inet_csk_route_req()") Fixes: c92e8c02fe66 ("tcp/dccp: fix ireq->opt races") Signed-off-by: Eric Dumazet Reported-by: kernel test robot Reported-by: Maciej Żenczykowski Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/inet_sock.h | 6 ++++++ net/dccp/ipv4.c | 2 +- net/ipv4/inet_connection_sock.c | 4 ++-- net/ipv4/tcp_ipv4.c | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index e2113db93d5f..95aa999f31d7 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -113,6 +113,12 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) return sk->sk_mark; } +static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq) +{ + return rcu_dereference_check(ireq->ireq_opt, + atomic_read(&ireq->req.rsk_refcnt) > 0); +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 888ff5a24ec4..6eb2bbf9873b 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -495,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req ireq->ir_rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - rcu_dereference(ireq->ireq_opt)); + ireq_opt_deref(ireq)); err = net_xmit_eval(err); } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 74bc77ae551b..01acb94c4963 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -415,8 +415,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, struct ip_options_rcu *opt; struct rtable *rt; - opt = rcu_dereference_protected(ireq->ireq_opt, - atomic_read(&req->rsk_refcnt) > 0); + opt = ireq_opt_deref(ireq); + flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 85293a285c50..a5d790c13ef5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -856,7 +856,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - rcu_dereference(ireq->ireq_opt)); + ireq_opt_deref(ireq)); err = net_xmit_eval(err); } From c8f13916c4c9e03dd5c04a44edd4702b97b1e96d Mon Sep 17 00:00:00 2001 From: Bilal Amarni Date: Thu, 8 Jun 2017 14:47:26 +0100 Subject: [PATCH 079/312] security/keys: add CONFIG_KEYS_COMPAT to Kconfig commit 47b2c3fff4932e6fc17ce13d51a43c6969714e20 upstream. CONFIG_KEYS_COMPAT is defined in arch-specific Kconfigs and is missing for several 64-bit architectures : mips, parisc, tile. At the moment and for those architectures, calling in 32-bit userspace the keyctl syscall would return an ENOSYS error. This patch moves the CONFIG_KEYS_COMPAT option to security/keys/Kconfig, to make sure the compatibility wrapper is registered by default for any 64-bit architecture as long as it is configured with CONFIG_COMPAT. 
[DH: Modified to remove arm64 compat enablement also as requested by Eric Biggers] Signed-off-by: Bilal Amarni Signed-off-by: David Howells Reviewed-by: Arnd Bergmann cc: Eric Biggers Signed-off-by: James Morris Cc: James Cowgill Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/Kconfig | 5 ----- arch/s390/Kconfig | 3 --- arch/sparc/Kconfig | 3 --- arch/x86/Kconfig | 4 ---- security/keys/Kconfig | 4 ++++ 5 files changed, 4 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db49e0d796b1..dfb1ee8c3e06 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1082,11 +1082,6 @@ source "arch/powerpc/Kconfig.debug" source "security/Kconfig" -config KEYS_COMPAT - bool - depends on COMPAT && KEYS - default y - source "crypto/Kconfig" config PPC_LIB_RHEAP diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3a55f493c7da..5ad7b721b769 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -346,9 +346,6 @@ config COMPAT config SYSVIPC_COMPAT def_bool y if COMPAT && SYSVIPC -config KEYS_COMPAT - def_bool y if COMPAT && KEYS - config SMP def_bool y prompt "Symmetric multi-processing support" diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index eb9487470141..94f4ac21761b 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -549,9 +549,6 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y -config KEYS_COMPAT - def_bool y if COMPAT && KEYS - endmenu source "net/Kconfig" diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 436639a31624..7e40905f6d4c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2641,10 +2641,6 @@ config COMPAT_FOR_U64_ALIGNMENT config SYSVIPC_COMPAT def_bool y depends on SYSVIPC - -config KEYS_COMPAT - def_bool y - depends on KEYS endif endmenu diff --git a/security/keys/Kconfig b/security/keys/Kconfig index 72483b8f1be5..1edb37eea81d 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -20,6 +20,10 @@ config KEYS If you are unsure as to whether this is required, answer N. +config KEYS_COMPAT + def_bool y + depends on COMPAT && KEYS + config PERSISTENT_KEYRINGS bool "Enable register of persistent per-UID keyrings" depends on KEYS From d27383faf14469c5ec600a3d92aa4f1d81282472 Mon Sep 17 00:00:00 2001 From: Richard Alpe Date: Mon, 1 Feb 2016 08:19:56 +0100 Subject: [PATCH 080/312] tipc: fix link attribute propagation bug commit d01332f1acacc0cb43a61f4244dd2b846d4cd585 upstream. Changing certain link attributes (link tolerance and link priority) from the TIPC management tool is supposed to automatically take effect at both endpoints of the affected link. Currently the media address is not instantiated for the link and is used uninstantiated when crafting protocol messages designated for the peer endpoint. This means that changing a link property currently results in the property being changed on the local machine but the protocol message designated for the peer gets lost. Resulting in property discrepancy between the endpoints. In this patch we resolve this by using the media address from the link entry and using the bearer transmit function to send it. Hence, we can now eliminate the redundant function tipc_link_prot_xmit() and the redundant field tipc_link::media_addr. Fixes: 2af5ae372a4b (tipc: clean up unused code and structures) Reviewed-by: Jon Maloy Reported-by: Jason Hu Signed-off-by: Richard Alpe Signed-off-by: David S. 
Miller [backported to 4.4 by Tommi Rantala] Signed-off-by: Tommi Rantala Signed-off-by: Greg Kroah-Hartman --- net/tipc/link.c | 28 ++++++---------------------- net/tipc/link.h | 1 - 2 files changed, 6 insertions(+), 23 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 72268eac4ec7..736fffb28ab6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1084,25 +1084,6 @@ drop: return rc; } -/* - * Send protocol message to the other endpoint. - */ -void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg, - u32 gap, u32 tolerance, u32 priority) -{ - struct sk_buff *skb = NULL; - struct sk_buff_head xmitq; - - __skb_queue_head_init(&xmitq); - tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap, - tolerance, priority, &xmitq); - skb = __skb_dequeue(&xmitq); - if (!skb) - return; - tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr); - l->rcv_unacked = 0; -} - static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, u16 rcvgap, int tolerance, int priority, struct sk_buff_head *xmitq) @@ -1636,9 +1617,12 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) char *name; struct tipc_link *link; struct tipc_node *node; + struct sk_buff_head xmitq; struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; struct net *net = sock_net(skb->sk); + __skb_queue_head_init(&xmitq); + if (!info->attrs[TIPC_NLA_LINK]) return -EINVAL; @@ -1683,14 +1667,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); link->tolerance = tol; - tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); + tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, tol, 0, &xmitq); } if (props[TIPC_NLA_PROP_PRIO]) { u32 prio; prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); link->priority = prio; - tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); + tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, 0, prio, &xmitq); } if (props[TIPC_NLA_PROP_WIN]) { u32 win; @@ -1702,7 +1686,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) out: tipc_node_unlock(node); - + tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr); return res; } diff --git a/net/tipc/link.h b/net/tipc/link.h index 66d859b66c84..2a0d58671e88 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -153,7 +153,6 @@ struct tipc_stats { struct tipc_link { u32 addr; char name[TIPC_MAX_LINK_NAME]; - struct tipc_media_addr *media_addr; struct net *net; /* Management and link supervision data */ From 7ecc076a5d51776c7057ef57277badfd373f454d Mon Sep 17 00:00:00 2001 From: Chi-hsien Lin Date: Thu, 18 May 2017 17:22:19 +0800 Subject: [PATCH 081/312] brcmfmac: remove setting IBSS mode when stopping AP commit 9029679f66d976f8c720eb03c4898274803c9923 upstream. Upon stopping an AP interface the driver disable INFRA mode effectively setting the interface in IBSS mode. However, this may affect other interfaces running in INFRA mode. For instance, if user creates and stops hostap daemon on virtual interface, then association cannot work on primary interface because default BSS has been set to IBSS mode in firmware side. The IBSS mode should be set when cfg80211 changes the interface. 
Reviewed-by: Wright Feng Signed-off-by: Chi-hsien Lin [kvalo@codeaurora.org: rephased commit log based on discussion] Signed-off-by: Wright Feng Signed-off-by: Kalle Valo Cc: Philipp Rosenberger Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 5fecae0ba52e..83e5aa6a9f28 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -4295,9 +4295,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); if (err < 0) brcmf_err("setting AP mode failed %d\n", err); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); - if (err < 0) - brcmf_err("setting INFRA mode failed %d\n", err); if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) brcmf_fil_iovar_int_set(ifp, "mbss", 0); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, From 4063c209334911c61fa3807dbe4e7a9c81361b8d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 5 Jan 2017 12:39:57 +0100 Subject: [PATCH 082/312] target/iscsi: Fix iSCSI task reassignment handling commit 59b6986dbfcdab96a971f9663221849de79a7556 upstream. Allocate a task management request structure for all task management requests, including task reassignment. This change avoids that the se_tmr->response assignment dereferences an uninitialized se_tmr pointer. Reported-by: Moshe David Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Cc: Moshe David Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 19 +++++++------------ include/target/target_core_base.h | 1 + 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1ff1c83e2df5..fd493412b172 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1759,7 +1759,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; bool sess_ref = false; - u8 function; + u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; @@ -1805,10 +1805,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - - u8 tcm_function; - int ret; - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); @@ -1844,15 +1840,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } - - ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, - tcm_function, GFP_KERNEL); - if (ret < 0) - return iscsit_add_reject_cmd(cmd, + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; - } + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = ISTATE_SEND_TASKMGTRSP; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 1adf8739980c..8555321306fb 
100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -199,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN = 0xff, }; /* fabric independent task management response values */ From 01000c56caccf36ddd253bb15036a1cbb93ad27f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 6 Aug 2017 16:10:03 -0700 Subject: [PATCH 083/312] target: Fix node_acl demo-mode + uncached dynamic shutdown regression commit 6f48655facfd7f7ccfe6d252ac0fe319ab02e4dd upstream. This patch fixes a generate_node_acls = 1 + cache_dynamic_acls = 0 regression, that was introduced by commit 01d4d673558985d9a118e1e05026633c3e2ade9b Author: Nicholas Bellinger Date: Wed Dec 7 12:55:54 2016 -0800 which originally had the proper list_del_init() usage, but was dropped during list review as it was thought unnecessary by HCH. However, list_del_init() usage is required during the special generate_node_acls = 1 + cache_dynamic_acls = 0 case when transport_free_session() does a list_del(&se_nacl->acl_list), followed by target_complete_nacl() doing the same thing. This was manifesting as a general protection fault as reported by Justin: kernel: general protection fault: 0000 [#1] SMP kernel: Modules linked in: kernel: CPU: 0 PID: 11047 Comm: iscsi_ttx Not tainted 4.13.0-rc2.x86_64.1+ #20 kernel: Hardware name: Intel Corporation S5500BC/S5500BC, BIOS S5500.86B.01.00.0064.050520141428 05/05/2014 kernel: task: ffff88026939e800 task.stack: ffffc90007884000 kernel: RIP: 0010:target_put_nacl+0x49/0xb0 kernel: RSP: 0018:ffffc90007887d70 EFLAGS: 00010246 kernel: RAX: dead000000000200 RBX: ffff8802556ca000 RCX: 0000000000000000 kernel: RDX: dead000000000100 RSI: 0000000000000246 RDI: ffff8802556ce028 kernel: RBP: ffffc90007887d88 R08: 0000000000000001 R09: 0000000000000000 kernel: R10: ffffc90007887df8 R11: ffffea0009986900 R12: ffff8802556ce020 kernel: R13: ffff8802556ce028 R14: ffff8802556ce028 R15: ffffffff88d85540 kernel: FS: 0000000000000000(0000) GS:ffff88027fc00000(0000) knlGS:0000000000000000 kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 kernel: CR2: 00007fffe36f5f94 CR3: 0000000009209000 CR4: 00000000003406f0 kernel: DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 kernel: DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 kernel: Call Trace: kernel: transport_free_session+0x67/0x140 kernel: transport_deregister_session+0x7a/0xc0 kernel: iscsit_close_session+0x92/0x210 kernel: iscsit_close_connection+0x5f9/0x840 kernel: iscsit_take_action_for_connection_exit+0xfe/0x110 kernel: iscsi_target_tx_thread+0x140/0x1e0 kernel: ? wait_woken+0x90/0x90 kernel: kthread+0x124/0x160 kernel: ? iscsit_thread_get_cpumask+0x90/0x90 kernel: ? kthread_create_on_node+0x40/0x40 kernel: ret_from_fork+0x22/0x30 kernel: Code: 00 48 89 fb 4c 8b a7 48 01 00 00 74 68 4d 8d 6c 24 08 4c 89 ef e8 e8 28 43 00 48 8b 93 20 04 00 00 48 8b 83 28 04 00 00 4c 89 ef <48> 89 42 08 48 89 10 48 b8 00 01 00 00 00 00 ad de 48 89 83 20 kernel: RIP: target_put_nacl+0x49/0xb0 RSP: ffffc90007887d70 kernel: ---[ end trace f12821adbfd46fed ]--- To address this, go ahead and use proper list_del_list() for all cases of se_nacl->acl_list deletion. 
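(For reference, and not part of the patch: a minimal sketch of why only list_del_init() tolerates the double removal described above. The names foo_acl/foo_unlink_* are made up; the list semantics are those of include/linux/list.h.)

#include <linux/list.h>

struct foo_acl {
	struct list_head acl_list;
};

static void foo_unlink_unsafe(struct foo_acl *acl)
{
	list_del(&acl->acl_list);	/* next/prev are now LIST_POISON1/2 */
	list_del(&acl->acl_list);	/* second removal dereferences the poison values -> GPF */
}

static void foo_unlink_safe(struct foo_acl *acl)
{
	list_del_init(&acl->acl_list);	/* entry is reinitialised as an empty list */
	list_del_init(&acl->acl_list);	/* unlinking an already-empty entry is a harmless no-op */
}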
Reported-by: Justin Maggard Tested-by: Justin Maggard Cc: Justin Maggard Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_tpg.c | 4 ++-- drivers/target/target_core_transport.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f69f4902dc07..ee16a45f1607 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -350,7 +350,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; } - list_del(&acl->acl_list); + list_del_init(&acl->acl_list); tpg->num_node_acls--; mutex_unlock(&tpg->acl_node_mutex); @@ -572,7 +572,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) * in transport_deregister_session(). */ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); se_tpg->num_node_acls--; core_tpg_wait_for_nacl_pr_ref(nacl); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f71bedea973a..a42054edd427 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -431,7 +431,7 @@ static void target_complete_nacl(struct kref *kref) } mutex_lock(&se_tpg->acl_node_mutex); - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); mutex_unlock(&se_tpg->acl_node_mutex); core_tpg_wait_for_nacl_pr_ref(nacl); @@ -503,7 +503,7 @@ void transport_free_session(struct se_session *se_sess) spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); if (se_nacl->dynamic_stop) - list_del(&se_nacl->acl_list); + list_del_init(&se_nacl->acl_list); } mutex_unlock(&se_tpg->acl_node_mutex); From e8d650563c5fc1ef9f862a8122397e18043fd51b Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 7 Sep 2017 11:56:40 +0200 Subject: [PATCH 084/312] misc: panel: properly restore atomic counter on error path commit 93dc1774d2a4c7a298d5cdf78cc8acdcb7b1428d upstream. Commit f4757af ("staging: panel: Fix single-open policy race condition") introduced in 3.19-rc1 attempted to fix a race condition on the open, but failed to properly do it and used to exit without restoring the semaphore. This results in -EBUSY being returned after the first open error until the module is reloaded or the system restarted (ie: consecutive to a dual open resulting in -EBUSY or to a permission error). 
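(A condensed sketch of the single-open pattern the fix restores, with made-up names; the point is that every error exit taken after atomic_dec_and_test() must give the slot back, otherwise the counter stays below zero and all later opens fail with -EBUSY until the module is reloaded.)

#include <linux/atomic.h>
#include <linux/fs.h>

static atomic_t foo_available = ATOMIC_INIT(1);	/* one opener at a time */

static int foo_open(struct inode *inode, struct file *file)
{
	int ret = -EBUSY;

	if (!atomic_dec_and_test(&foo_available))
		goto fail;			/* already open elsewhere */

	ret = -EPERM;
	if (file->f_mode & FMODE_READ)		/* pretend the device is write-only */
		goto fail;

	return nonseekable_open(inode, file);

fail:
	atomic_inc(&foo_available);		/* restore the counter on any error path */
	return ret;
}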
Fixes: f4757af85 # 3.19-rc1 Cc: Mariusz Gorski Signed-off-by: Willy Tarreau [wt: driver is in staging/panel in 4.4] Signed-off-by: Greg Kroah-Hartman --- drivers/staging/panel/panel.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 70b8f4fabfad..e658e11e1829 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -1431,17 +1431,25 @@ static ssize_t lcd_write(struct file *file, static int lcd_open(struct inode *inode, struct file *file) { - if (!atomic_dec_and_test(&lcd_available)) - return -EBUSY; /* open only once at a time */ + int ret; + ret = -EBUSY; + if (!atomic_dec_and_test(&lcd_available)) + goto fail; /* open only once at a time */ + + ret = -EPERM; if (file->f_mode & FMODE_READ) /* device is write-only */ - return -EPERM; + goto fail; if (lcd.must_clear) { lcd_clear_display(); lcd.must_clear = false; } return nonseekable_open(inode, file); + + fail: + atomic_inc(&lcd_available); + return ret; } static int lcd_release(struct inode *inode, struct file *file) @@ -1704,14 +1712,21 @@ static ssize_t keypad_read(struct file *file, static int keypad_open(struct inode *inode, struct file *file) { - if (!atomic_dec_and_test(&keypad_available)) - return -EBUSY; /* open only once at a time */ + int ret; + ret = -EBUSY; + if (!atomic_dec_and_test(&keypad_available)) + goto fail; /* open only once at a time */ + + ret = -EPERM; if (file->f_mode & FMODE_WRITE) /* device is read-only */ - return -EPERM; + goto fail; keypad_buflen = 0; /* flush the buffer on opening */ return 0; + fail: + atomic_inc(&keypad_available); + return ret; } static int keypad_release(struct inode *inode, struct file *file) From 0cbac004e67307949714c176c8a7af9c1da980b9 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 18 Nov 2017 11:11:07 +0100 Subject: [PATCH 085/312] Linux 4.4.99 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5d62e23347f9..0b5d9e20eee2 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 98 +SUBLEVEL = 99 EXTRAVERSION = NAME = Blurry Fish Butt From b11250d2adedb7fc5965ff4ac410df40fb073f3a Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 10 Jan 2017 13:35:42 -0800 Subject: [PATCH 086/312] UPSTREAM: mm: Introduce lm_alias (cherry-pick from commit 568c5fe5a54f2654f5a4c599c45b8a62ed9a2013) Certain architectures may have the kernel image mapped separately to alias the linear map. Introduce a macro lm_alias to translate a kernel image symbol into its linear alias. This is used in part with work to add CONFIG_DEBUG_VIRTUAL support for arm64. Reviewed-by: Mark Rutland Tested-by: Mark Rutland Signed-off-by: Laura Abbott Signed-off-by: Will Deacon Bug: 20045882 Bug: 63737556 Change-Id: I93ba4ef8df77f7e52717ab6e208d06122ac3a72b --- include/linux/mm.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index e51aafbc4b3a..941c7f711ada 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -70,6 +70,10 @@ extern int mmap_rnd_compat_bits __read_mostly; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +#ifndef lm_alias +#define lm_alias(x) __va(__pa_symbol(x)) +#endif + /* * To prevent common memory management code establishing * a zero page mapping on a read fault. 
From 427c567580d9e492090526614ea9bfcb62a72546 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 10 Jan 2017 13:35:49 -0800 Subject: [PATCH 087/312] BACKPORT: arm64: Use __pa_symbol for kernel symbols (cherry-pick from commit 2077be6783b5936c3daa838d8addbb635667927f) __pa_symbol is technically the marcro that should be used for kernel symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which will do bounds checking. Reviewed-by: Mark Rutland Tested-by: Mark Rutland Signed-off-by: Laura Abbott Signed-off-by: Will Deacon Bug: 20045882 Bug: 63737556 Change-Id: Ibef89e5935c9562fa69e946778c705636c1ca61e --- arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/include/asm/memory.h | 1 + arch/arm64/include/asm/mmu_context.h | 6 ++-- arch/arm64/include/asm/pgtable.h | 2 +- arch/arm64/kernel/acpi_parking_protocol.c | 3 +- arch/arm64/kernel/cpufeature.c | 1 + arch/arm64/kernel/insn.c | 2 +- arch/arm64/kernel/psci.c | 4 ++- arch/arm64/kernel/setup.c | 9 +++--- arch/arm64/kernel/smp_spin_table.c | 3 +- arch/arm64/kernel/vdso.c | 8 ++++-- arch/arm64/mm/init.c | 9 +++--- arch/arm64/mm/kasan_init.c | 25 +++++++++++----- arch/arm64/mm/mmu.c | 35 +++++++++++++++-------- 14 files changed, 72 insertions(+), 38 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 819b21a9851c..2b1020a056ad 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -265,7 +265,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); } -#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) +#define kvm_virt_to_phys(x) __pa_symbol(x) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ba1b3409d7ed..2600d4a9c1c2 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -185,6 +185,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x)) +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) /* * virt_to_page(k) convert a _valid_ virtual address to struct page * diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index e53d30c6f779..386956aae2dc 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -50,7 +50,7 @@ static inline void contextidr_thread_switch(struct task_struct *next) */ static inline void cpu_set_reserved_ttbr0(void) { - unsigned long ttbr = virt_to_phys(empty_zero_page); + unsigned long ttbr = __pa_symbol(empty_zero_page); asm( " msr ttbr0_el1, %0 // set TTBR0\n" @@ -124,7 +124,7 @@ static inline void cpu_install_idmap(void) local_flush_tlb_all(); cpu_set_idmap_tcr_t0sz(); - cpu_switch_mm(idmap_pg_dir, &init_mm); + cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); } /* @@ -139,7 +139,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd) phys_addr_t pgd_phys = virt_to_phys(pgd); - replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1); + replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); cpu_install_idmap(); replace_phys(pgd_phys); diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0e7e8007e6fc..fa210858b873 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -119,7 +119,7 @@ 
extern void __pgd_error(const char *file, int line, unsigned long val); * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) +#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c index 4b1e5a7a98da..89c96bd1aab9 100644 --- a/arch/arm64/kernel/acpi_parking_protocol.c +++ b/arch/arm64/kernel/acpi_parking_protocol.c @@ -17,6 +17,7 @@ * along with this program. If not, see . */ #include +#include #include #include @@ -102,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu) * that read this address need to convert this address to the * Boot-Loader's endianness before jumping. */ - writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point); + writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point); writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ae888588024e..56cbcfba0c02 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index c08b9ad6f429..67d3100369c6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -96,7 +96,7 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) - page = virt_to_page(addr); + page = phys_to_page(__pa_symbol(addr)); else return addr; diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index f67f35b6edb1..0b6d5d3d25c3 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -113,7 +114,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu) static int cpu_psci_cpu_boot(unsigned int cpu) { - int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry)); + int err = psci_ops.cpu_on(cpu_logical_map(cpu), + __pa_symbol(secondary_entry)); if (err) pr_err("failed to boot CPU%d (%d)\n", cpu, err); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4a24719026a9..554f6e83fc2e 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -201,10 +202,10 @@ static void __init request_standard_resources(void) struct memblock_region *region; struct resource *res; - kernel_code.start = virt_to_phys(_text); - kernel_code.end = virt_to_phys(__init_begin - 1); - kernel_data.start = virt_to_phys(_sdata); - kernel_data.end = virt_to_phys(_end - 1); + kernel_code.start = __pa_symbol(_text); + kernel_code.end = __pa_symbol(__init_begin - 1); + kernel_data.start = __pa_symbol(_sdata); + kernel_data.end = __pa_symbol(_end - 1); for_each_memblock(memory, region) { res = alloc_bootmem_low(sizeof(*res)); diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index aef3605a8c47..2ccb883353d9 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ 
-96,7 +97,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * boot-loader's endianess before jumping. This is mandated by * the boot protocol. */ - writeq_relaxed(__pa(secondary_holding_pen), release_addr); + writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); __flush_dcache_area((__force void *)release_addr, sizeof(*release_addr)); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 3b8acfae7797..b7730ba782dd 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -114,6 +114,7 @@ static struct vm_special_mapping vdso_spec[2]; static int __init vdso_init(void) { int i; + unsigned long pfn; if (memcmp(&vdso_start, "\177ELF", 4)) { pr_err("vDSO is not a valid ELF object!\n"); @@ -131,11 +132,14 @@ static int __init vdso_init(void) return -ENOMEM; /* Grab the vDSO data page. */ - vdso_pagelist[0] = virt_to_page(vdso_data); + vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data)); + /* Grab the vDSO code pages. */ + pfn = sym_to_pfn(&vdso_start); + for (i = 0; i < vdso_pages; i++) - vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + vdso_pagelist[i + 1] = pfn_to_page(pfn + i); /* Populate the special mapping structures */ vdso_spec[0] = (struct vm_special_mapping) { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 678878077996..4fe090aa4637 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -190,8 +191,8 @@ void __init arm64_memblock_init(void) * linear mapping. Take care not to clip the kernel which may be * high in memory. */ - memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)), - ULLONG_MAX); + memblock_remove(max_t(u64, memstart_addr + linear_region_size, + __pa_symbol(_end)), ULLONG_MAX); if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) { /* ensure that memstart_addr remains sufficiently aligned */ memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, @@ -206,7 +207,7 @@ void __init arm64_memblock_init(void) */ if (memory_limit != (phys_addr_t)ULLONG_MAX) { memblock_enforce_memory_limit(memory_limit); - memblock_add(__pa(_text), (u64)(_end - _text)); + memblock_add(__pa_symbol(_text), (u64)(_end - _text)); } if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { @@ -230,7 +231,7 @@ void __init arm64_memblock_init(void) * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. */ - memblock_reserve(__pa(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { memblock_reserve(initrd_start, initrd_end - initrd_start); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 757009daa9ed..03588d136f93 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -26,6 +27,13 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). All the early functions are called too + * early to use lm_alias so __p*d_populate functions must be used to populate + * with the physical address from __pa_symbol. 
+ */ + static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end) { @@ -33,12 +41,13 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long next; if (pmd_none(*pmd)) - pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), + PMD_TYPE_TABLE); pte = pte_offset_kimg(pmd, addr); do { next = addr + PAGE_SIZE; - set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page), + set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL)); } while (pte++, addr = next, addr != end && pte_none(*pte)); } @@ -51,7 +60,8 @@ static void __init kasan_early_pmd_populate(pud_t *pud, unsigned long next; if (pud_none(*pud)) - pud_populate(&init_mm, pud, kasan_zero_pmd); + __pud_populate(pud, __pa_symbol(kasan_zero_pmd), + PMD_TYPE_TABLE); pmd = pmd_offset_kimg(pud, addr); do { @@ -68,7 +78,8 @@ static void __init kasan_early_pud_populate(pgd_t *pgd, unsigned long next; if (pgd_none(*pgd)) - pgd_populate(&init_mm, pgd, kasan_zero_pud); + __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), + PUD_TYPE_TABLE); pud = pud_offset_kimg(pgd, addr); do { @@ -148,7 +159,7 @@ void __init kasan_init(void) */ memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); dsb(ishst); - cpu_replace_ttbr1(tmp_pg_dir); + cpu_replace_ttbr1(lm_alias(tmp_pg_dir)); clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); @@ -199,10 +210,10 @@ void __init kasan_init(void) */ for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_zero_pte[i], - pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); + pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); memset(kasan_zero_page, 0, PAGE_SIZE); - cpu_replace_ttbr1(swapper_pg_dir); + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6d2e411b2535..e6409dc0cea9 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -385,8 +386,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) { - unsigned long kernel_start = __pa(_stext); - unsigned long kernel_end = __pa(__init_begin); + unsigned long kernel_start = __pa_symbol(_stext); + unsigned long kernel_end = __pa_symbol(__init_begin); /* * Take care not to create a writable alias for the @@ -450,14 +451,15 @@ void mark_rodata_ro(void) unsigned long section_size; section_size = (unsigned long)_etext - (unsigned long)_stext; - create_mapping_late(__pa(_stext), (unsigned long)_stext, + create_mapping_late(__pa_symbol(_stext), (unsigned long)_stext, section_size, PAGE_KERNEL_ROX); /* * mark .rodata as read only. Use __init_begin rather than __end_rodata * to cover NOTES and EXCEPTION_TABLE. 
*/ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; - create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata, + create_mapping_late(__pa_symbol(__start_rodata), + (unsigned long)__start_rodata, section_size, PAGE_KERNEL_RO); } @@ -474,7 +476,7 @@ void fixup_init(void) static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end, pgprot_t prot, struct vm_struct *vma) { - phys_addr_t pa_start = __pa(va_start); + phys_addr_t pa_start = __pa_symbol(va_start); unsigned long size = va_end - va_start; BUG_ON(!PAGE_ALIGNED(pa_start)); @@ -522,7 +524,7 @@ static void __init map_kernel(pgd_t *pgd) */ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START), - __pud(__pa(bm_pmd) | PUD_TYPE_TABLE)); + __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE)); pud_clear_fixmap(); } else { BUG(); @@ -553,7 +555,7 @@ void __init paging_init(void) */ cpu_replace_ttbr1(__va(pgd_phys)); memcpy(swapper_pg_dir, pgd, PAGE_SIZE); - cpu_replace_ttbr1(swapper_pg_dir); + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); pgd_clear_fixmap(); memblock_free(pgd_phys, PAGE_SIZE); @@ -565,7 +567,7 @@ void __init paging_init(void) * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd * allocated with it. */ - memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE, + memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE, SWAPPER_DIR_SIZE - PAGE_SIZE); bootmem_init(); @@ -678,6 +680,12 @@ static inline pte_t * fixmap_pte(unsigned long addr) return &bm_pte[pte_index(addr)]; } +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). This function is called too early to use + * lm_alias so __p*d_populate functions must be used to populate with the + * physical address from __pa_symbol. + */ void __init early_fixmap_init(void) { pgd_t *pgd; @@ -687,7 +695,7 @@ void __init early_fixmap_init(void) pgd = pgd_offset_k(addr); if (CONFIG_PGTABLE_LEVELS > 3 && - !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) { + !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) { /* * We only end up here if the kernel mapping and the fixmap * share the top level pgd entry, which should only happen on @@ -696,12 +704,15 @@ void __init early_fixmap_init(void) BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); pud = pud_offset_kimg(pgd, addr); } else { - pgd_populate(&init_mm, pgd, bm_pud); + if (pgd_none(*pgd)) + __pgd_populate(pgd, __pa_symbol(bm_pud), + PUD_TYPE_TABLE); pud = fixmap_pud(addr); } - pud_populate(&init_mm, pud, bm_pmd); + if (pud_none(*pud)) + __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); pmd = fixmap_pmd(addr); - pmd_populate_kernel(&init_mm, pmd, bm_pte); + __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE); /* * The boot-ioremap range spans multiple pmds, for which From 875988cc0f1e08db707a5f61663968e6dbd17423 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 24 Jan 2017 12:43:40 +0100 Subject: [PATCH 088/312] BACKPORT: arm64: Use __pa_symbol for empty_zero_page (cherry-pick commit from cbb999dd0b452991f4f698142aa7ffe566c0b415) If CONFIG_DEBUG_VIRTUAL=y and CONFIG_ARM64_SW_TTBR0_PAN=y: virt_to_phys used for non-linear address: ffffff8008cc0000 (empty_zero_page+0x0/0x1000) WARNING: CPU: 0 PID: 0 at arch/arm64/mm/physaddr.c:14 __virt_to_phys+0x28/0x60 ... 
[] __virt_to_phys+0x28/0x60 [] setup_arch+0x46c/0x4d4 Fixes: 2077be6783b5936c ("arm64: Use __pa_symbol for kernel symbols") Acked-by: Mark Rutland Acked-by: Laura Abbott Signed-off-by: Geert Uytterhoeven Signed-off-by: Will Deacon Bug: 20045882 Bug: 63737556 Change-Id: Ida933e532d0423e074b3621207a1e2a5f8609742 --- arch/arm64/kernel/setup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 554f6e83fc2e..45f44e9cf06c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -355,9 +355,9 @@ void __init setup_arch(char **cmdline_p) * thread. */ #ifdef CONFIG_THREAD_INFO_IN_TASK - init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page); + init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page); #else - init_thread_info.ttbr0 = virt_to_phys(empty_zero_page); + init_thread_info.ttbr0 = __pa_symbol(empty_zero_page); #endif #endif From c344019c48a92e559b7f910e53e0c6f9018680fb Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 9 Oct 2017 20:14:48 +0200 Subject: [PATCH 089/312] media: imon: Fix null-ptr-deref in imon_probe commit 58fd55e838276a0c13d1dc7c387f90f25063cbf3 upstream. It seems that the return value of usb_ifnum_to_if() can be NULL and needs to be checked. Signed-off-by: Arvind Yadav Tested-by: Andrey Konovalov Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/imon.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index eb9e7feb9b13..7a16e9ea041c 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -2419,6 +2419,11 @@ static int imon_probe(struct usb_interface *interface, mutex_lock(&driver_lock); first_if = usb_ifnum_to_if(usbdev, 0); + if (!first_if) { + ret = -ENODEV; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { From 0a418e57717d2d33275e9340c726ded671be7698 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Nov 2017 10:38:21 -0400 Subject: [PATCH 090/312] media: dib0700: fix invalid dvb_detach argument commit eb0c19942288569e0ae492476534d5a485fb8ab4 upstream. dvb_detach(arg) calls symbol_put_addr(arg), where arg should be a pointer to a function. Right now a pointer to state->dib7000p_ops is passed to dvb_detach(), which causes a BUG() in symbol_put_addr() as discovered by syzkaller. Pass state->dib7000p_ops.set_wbd_ref instead. ------------[ cut here ]------------ kernel BUG at kernel/module.c:1081! 
invalid opcode: 0000 [#1] PREEMPT SMP KASAN Modules linked in: CPU: 1 PID: 1151 Comm: kworker/1:1 Tainted: G W 4.14.0-rc1-42251-gebb2c2437d80 #224 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event task: ffff88006a336300 task.stack: ffff88006a7c8000 RIP: 0010:symbol_put_addr+0x54/0x60 kernel/module.c:1083 RSP: 0018:ffff88006a7ce210 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff880062a8d190 RCX: 0000000000000000 RDX: dffffc0000000020 RSI: ffffffff85876d60 RDI: ffff880062a8d190 RBP: ffff88006a7ce218 R08: 1ffff1000d4f9c12 R09: 1ffff1000d4f9ae4 R10: 1ffff1000d4f9bed R11: 0000000000000000 R12: ffff880062a8d180 R13: 00000000ffffffed R14: ffff880062a8d190 R15: ffff88006947c000 FS: 0000000000000000(0000) GS:ffff88006c900000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f6416532000 CR3: 00000000632f5000 CR4: 00000000000006e0 Call Trace: stk7070p_frontend_attach+0x515/0x610 drivers/media/usb/dvb-usb/dib0700_devices.c:1013 dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286 dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86 dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162 dvb_usb_device_init+0xf70/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277 dib0700_probe+0x171/0x5a0 drivers/media/usb/dvb-usb/dib0700_core.c:886 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Code: ff ff 48 85 c0 74 24 48 89 c7 e8 48 ea ff ff bf 01 00 00 00 e8 de 20 e3 ff 65 8b 05 b7 2f c2 7e 85 c0 75 c9 e8 f9 0b c1 ff eb c2 <0f> 0b 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 b8 00 00 RIP: symbol_put_addr+0x54/0x60 RSP: ffff88006a7ce210 ---[ end trace b75b357739e7e116 ]--- Signed-off-by: Andrey Konovalov Cc: Ben Hutchings Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb/dib0700_devices.c | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 7ed49646a699..7df0707a0455 100644 
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -292,7 +292,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -326,7 +326,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -479,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3036,7 +3036,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); @@ -3089,7 +3089,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3119,7 +3119,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3194,7 +3194,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, @@ -3289,7 +3289,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3364,7 +3364,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -3600,7 +3600,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? */ - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } From ceb5c560e2e46266cb8fabb0de5a4975e2a0e354 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 24 Apr 2016 00:56:03 -0400 Subject: [PATCH 091/312] ext4: fix data exposure after a crash commit 06bd3c36a733ac27962fea7d6f47168841376824 upstream. Huang has reported that in his powerfail testing he is seeing stale block contents in some of recently allocated blocks although he mounts ext4 in data=ordered mode. After some investigation I have found out that indeed when delayed allocation is used, we don't add inode to transaction's list of inodes needing flushing before commit. Originally we were doing that but commit f3b59291a69d removed the logic with a flawed argument that it is not needed. The problem is that although for delayed allocated blocks we write their contents immediately after allocating them, there is no guarantee that the IO scheduler or device doesn't reorder things and thus transaction allocating blocks and attaching them to inode can reach stable storage before actual block contents. Actually whenever we attach freshly allocated blocks to inode using a written extent, we should add inode to transaction's ordered inode list to make sure we properly wait for block contents to be written before committing the transaction. So that is what we do in this patch. This also handles other cases where stale data exposure was possible - like filling hole via mmap in data=ordered,nodelalloc mode. The only exception to the above rule are extending direct IO writes where blkdev_direct_IO() waits for IO to complete before increasing i_size and thus stale data exposure is not possible. For now we don't complicate the code with optimizing this special case since the overhead is pretty low. In case this is observed to be a performance problem we can always handle it using a special flag to ext4_map_blocks(). 
Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d Reported-by: "HUANG Weller (CM/ESW12-CN)" Tested-by: "HUANG Weller (CM/ESW12-CN)" Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o [bwh: Backported to 4.4: - Drop check for EXT4_GET_BLOCKS_ZERO flag - Adjust context] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 194a6baa4283..4df1cb19a243 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -658,6 +658,20 @@ has_zeroout: ret = check_block_validity(inode, map); if (ret != 0) return ret; + + /* + * Inodes with freshly allocated blocks where contents will be + * visible after transaction commit must be on transaction's + * ordered data list. + */ + if (map->m_flags & EXT4_MAP_NEW && + !(map->m_flags & EXT4_MAP_UNWRITTEN) && + !IS_NOQUOTA(inode) && + ext4_should_order_data(inode)) { + ret = ext4_jbd2_file_inode(handle, inode); + if (ret) + return ret; + } } return retval; } @@ -1152,15 +1166,6 @@ static int ext4_write_end(struct file *file, int i_size_changed = 0; trace_ext4_write_end(inode, pos, len, copied); - if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { - ret = ext4_jbd2_file_inode(handle, inode); - if (ret) { - unlock_page(page); - page_cache_release(page); - goto errout; - } - } - if (ext4_has_inline_data(inode)) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); From 07e3aff243cdcb70d1e81e515aea553df3080f43 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 7 Jun 2017 15:13:14 +0200 Subject: [PATCH 092/312] KVM: x86: fix singlestepping over syscall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c8401dda2f0a00cd25c0af6a95ed50e478d25de4 upstream. TF is handled a bit differently for syscall and sysret, compared to the other instructions: TF is checked after the instruction completes, so that the OS can disable #DB at a syscall by adding TF to FMASK. When the sysret is executed the #DB is taken "as if" the syscall insn just completed. KVM emulates syscall so that it can trap 32-bit syscall on Intel processors. Fix the behavior, otherwise you could get #DB on a user stack which is not nice. This does not affect Linux guests, as they use an IST or task gate for #DB. This fixes CVE-2017-7518. 
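As background for the FMASK remark above, this is roughly how a guest OS keeps #DB out of its syscall path (illustrative mask value, not code touched by this patch):

/*
 * On SYSCALL the CPU saves RFLAGS in R11 and clears every RFLAGS bit
 * that is set in IA32_FMASK, so an OS that includes TF in the mask is
 * never single-stepped into its syscall handler:
 */
wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_IF);
/*
 * SYSRET later restores RFLAGS from R11; if the user had TF set, the
 * single-step #DB is taken as if the SYSCALL instruction had just
 * completed.  Recording ctxt->tf after the mask has been applied, as
 * the em_syscall() hunk below does, makes the emulator honour that
 * convention, so a guest that masks TF via FMASK no longer takes a
 * spurious #DB right after an emulated SYSCALL.
 */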
Reported-by: Andy Lutomirski Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář [bwh: Backported to 4.4: - kvm_vcpu_check_singlestep() sets some flags differently - Drop changes to kvm_skip_emulated_instruction()] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kvm_emulate.h | 1 + arch/x86/kvm/emulate.c | 1 + arch/x86/kvm/x86.c | 52 +++++++++++++----------------- 3 files changed, 24 insertions(+), 30 deletions(-) diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 19d14ac23ef9..fc3c7e49c8e4 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -296,6 +296,7 @@ struct x86_emulate_ctxt { bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ + bool tf; /* TF value before instruction (after for syscall/sysret) */ bool have_exception; struct x86_exception exception; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 04b2f3cad7ba..684edebb4a0c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2726,6 +2726,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8e526c6fd784..3ffd5900da5b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5095,6 +5095,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); ctxt->eflags = kvm_get_rflags(vcpu); + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; + ctxt->eip = kvm_rip_read(vcpu); ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : @@ -5315,37 +5317,26 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, return dr6; } -static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) +static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; - /* - * rflags is the old, "raw" value of the flags. The new value has - * not been saved yet. - * - * This is correct even for TF set by the guest, because "the - * processor will not generate this exception after the instruction - * that sets the TF flag". - */ - if (unlikely(rflags & X86_EFLAGS_TF)) { - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | - DR6_RTM; - kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; - kvm_run->debug.arch.exception = DB_VECTOR; - kvm_run->exit_reason = KVM_EXIT_DEBUG; - *r = EMULATE_USER_EXIT; - } else { - vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; - /* - * "Certain debug exceptions may clear bit 0-3. The - * remaining contents of the DR6 register are never - * cleared by the processor". - */ - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); - } + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { + kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + *r = EMULATE_USER_EXIT; + } else { + vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". 
+ */ + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= DR6_BS | DR6_RTM; + kvm_queue_exception(vcpu, DB_VECTOR); } } @@ -5500,8 +5491,9 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE) - kvm_vcpu_check_singlestep(vcpu, rflags, &r); + if (r == EMULATE_DONE && + (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) __kvm_set_rflags(vcpu, ctxt->eflags); From 49630dd2e10a3b2fee0cec19feb63f08453b876f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 8 May 2017 00:04:09 +0200 Subject: [PATCH 093/312] bpf: don't let ldimm64 leak map addresses on unprivileged commit 0d0e57697f162da4aa218b5feafe614fb666db07 upstream. The patch fixes two things at once: 1) It checks the env->allow_ptr_leaks and only prints the map address to the log if we have the privileges to do so, otherwise it just dumps 0 as we would when kptr_restrict is enabled on %pK. Given the latter is off by default and not every distro sets it, I don't want to rely on this, hence the 0 by default for unprivileged. 2) Printing of ldimm64 in the verifier log is currently broken in that we don't print the full immediate, but only the 32 bit part of the first insn part for ldimm64. Thus, fix this up as well; it's okay to access, since we verified all ldimm64 earlier already (including just constants) through replace_map_fd_with_map_ptr(). Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)") Reported-by: Jann Horn Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller [bwh: Backported to 4.4: s/bpf_verifier_env/verifier_env/] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 70dc6dcf8649..eb759f5008b8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -313,7 +313,8 @@ static const char *const bpf_jmp_string[16] = { [BPF_EXIT >> 4] = "exit", }; -static void print_bpf_insn(struct bpf_insn *insn) +static void print_bpf_insn(const struct verifier_env *env, + const struct bpf_insn *insn) { u8 class = BPF_CLASS(insn->code); @@ -377,9 +378,19 @@ static void print_bpf_insn(struct bpf_insn *insn) insn->code, bpf_ldst_string[BPF_SIZE(insn->code) >> 3], insn->src_reg, insn->imm); - } else if (BPF_MODE(insn->code) == BPF_IMM) { - verbose("(%02x) r%d = 0x%x\n", - insn->code, insn->dst_reg, insn->imm); + } else if (BPF_MODE(insn->code) == BPF_IMM && + BPF_SIZE(insn->code) == BPF_DW) { + /* At this point, we already made sure that the second + * part of the ldimm64 insn is accessible. 
+ */ + u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; + bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; + + if (map_ptr && !env->allow_ptr_leaks) + imm = 0; + + verbose("(%02x) r%d = 0x%llx\n", insn->code, + insn->dst_reg, (unsigned long long)imm); } else { verbose("BUG_ld_%02x\n", insn->code); return; @@ -1764,7 +1775,7 @@ static int do_check(struct verifier_env *env) if (log_level) { verbose("%d: ", insn_idx); - print_bpf_insn(insn); + print_bpf_insn(env, insn); } if (class == BPF_ALU || class == BPF_ALU64) { From 11e8e55be18cd39c3d54674362aa18695b243e22 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 13 Jun 2017 16:28:27 -0400 Subject: [PATCH 094/312] xen-blkback: don't leak stack data via response ring commit 089bc0143f489bd3a4578bdff5f4ca68fb26f341 upstream. Rather than constructing a local structure instance on the stack, fill the fields directly on the shared ring, just like other backends do. Build on the fact that all response structure flavors are actually identical (the old code did make this assumption too). This is XSA-216. Signed-off-by: Jan Beulich Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk [bwh: Backported to 4.4: adjust context] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/block/xen-blkback/blkback.c | 23 ++++++++++++----------- drivers/block/xen-blkback/common.h | 25 +++++-------------------- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 33e23a7a691f..a295ad6a1674 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1407,33 +1407,34 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st) { - struct blkif_response resp; + struct blkif_response *resp; unsigned long flags; union blkif_back_rings *blk_rings = &blkif->blk_rings; int notify; - resp.id = id; - resp.operation = op; - resp.status = st; - spin_lock_irqsave(&blkif->blk_ring_lock, flags); /* Place on the response ring for the relevant domain. 
*/ switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->native, + blk_rings->native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_32, + blk_rings->x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_64, + blk_rings->x86_64.rsp_prod_pvt); break; default: BUG(); } + + resp->id = id; + resp->operation = op; + resp->status = st; + blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index c929ae22764c..04cfee719334 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -74,9 +74,8 @@ extern unsigned int xen_blkif_max_ring_order; struct blkif_common_request { char dummy; }; -struct blkif_common_response { - char dummy; -}; + +/* i386 protocol version */ struct blkif_x86_32_request_rw { uint8_t nr_segments; /* number of segments */ @@ -128,14 +127,6 @@ struct blkif_x86_32_request { } u; } __attribute__((__packed__)); -/* i386 protocol version */ -#pragma pack(push, 4) -struct blkif_x86_32_response { - uint64_t id; /* copied from request */ - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; -#pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request_rw { @@ -192,18 +183,12 @@ struct blkif_x86_64_request { } u; } __attribute__((__packed__)); -struct blkif_x86_64_response { - uint64_t __attribute__((__aligned__(8))) id; - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; - DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, - struct blkif_common_response); + struct blkif_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, - struct blkif_x86_32_response); + struct blkif_response __packed); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, - struct blkif_x86_64_response); + struct blkif_response); union blkif_back_rings { struct blkif_back_ring native; From 46bdabbca02ebabd292d0ea3f610aa54e53f0e25 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 17 Oct 2017 23:26:10 +0800 Subject: [PATCH 095/312] sctp: do not peel off an assoc from one netns to another one commit df80cd9b28b9ebaa284a41df611dbf3a2d05ca74 upstream. Now when peeling off an association to the sock in another netns, all transports in this assoc are not to be rehashed and keep use the old key in hashtable. As a transport uses sk->net as the hash key to insert into hashtable, it would miss removing these transports from hashtable due to the new netns when closing the sock and all transports are being freeed, then later an use-after-free issue could be caused when looking up an asoc and dereferencing those transports. 
This is a very old issue since very beginning, ChunYu found it with syzkaller fuzz testing with this series: socket$inet6_sctp() bind$inet6() sendto$inet6() unshare(0x40000000) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST() getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF() This patch is to block this call when peeling one assoc off from one netns to another one, so that the netns of all transport would not go out-sync with the key in hashtable. Note that this patch didn't fix it by rehashing transports, as it's difficult to handle the situation when the tuple is already in use in the new netns. Besides, no one would like to peel off one assoc to another netns, considering ipaddrs, ifaces, etc. are usually different. Reported-by: ChunYu Wang Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 73eec73ff733..7f0f689b8d2b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4453,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) struct socket *sock; int err = 0; + /* Do not peel off from one netns to another one. */ + if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) + return -EINVAL; + if (!asoc) return -EINVAL; From caeeef8438c30e7d0e43293fcb7beb9f953bb2a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Mon, 6 Nov 2017 15:37:22 +0100 Subject: [PATCH 096/312] net: cdc_ether: fix divide by 0 on bad descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 2cb80187ba065d7decad7c6614e35e07aec8a974 upstream. Setting dev->hard_mtu to 0 will cause a divide error in usbnet_probe. Protect against devices with bogus CDC Ethernet functional descriptors by ignoring a zero wMaxSegmentSize. Signed-off-by: Bjørn Mork Acked-by: Oliver Neukum Signed-off-by: David S. Miller Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/cdc_ether.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8c408aa2f208..f9343bee1de3 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -221,7 +221,7 @@ skip: goto bad_desc; } - if (header.usb_cdc_ether_desc) { + if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); /* because of Zaurus, we may be ignoring the host * side link address we were given. From e455048c7ae95633c8778a0282d1ee86f78c5349 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Mon, 6 Nov 2017 15:32:18 +0100 Subject: [PATCH 097/312] net: qmi_wwan: fix divide by 0 on bad descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7fd078337201cf7468f53c3d9ef81ff78cb6df3b upstream. 
A CDC Ethernet functional descriptor with wMaxSegmentSize = 0 will cause a divide error in usbnet_probe: divide error: 0000 [#1] PREEMPT SMP KASAN Modules linked in: CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc8-44453-g1fdc1a82c34f #56 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event task: ffff88006bef5c00 task.stack: ffff88006bf60000 RIP: 0010:usbnet_update_max_qlen+0x24d/0x390 drivers/net/usb/usbnet.c:355 RSP: 0018:ffff88006bf67508 EFLAGS: 00010246 RAX: 00000000000163c8 RBX: ffff8800621fce40 RCX: ffff8800621fcf34 RDX: 0000000000000000 RSI: ffffffff837ecb7a RDI: ffff8800621fcf34 RBP: ffff88006bf67520 R08: ffff88006bef5c00 R09: ffffed000c43f881 R10: ffffed000c43f880 R11: ffff8800621fc406 R12: 0000000000000003 R13: ffffffff85c71de0 R14: 0000000000000000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88006ca00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffe9c0d6dac CR3: 00000000614f4000 CR4: 00000000000006f0 Call Trace: usbnet_probe+0x18b5/0x2790 drivers/net/usb/usbnet.c:1783 qmi_wwan_probe+0x133/0x220 drivers/net/usb/qmi_wwan.c:1338 usb_probe_interface+0x324/0x940 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x522/0x740 drivers/base/dd.c:557 Fix by simply ignoring the bogus descriptor, as it is optional for QMI devices anyway. Fixes: 423ce8caab7e ("net: usb: qmi_wwan: New driver for Huawei QMI based WWAN devices") Reported-by: Andrey Konovalov Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/qmi_wwan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 958af3b1af7f..e325ca3ad565 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -262,7 +262,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) } /* errors aren't fatal - we can live with the dynamic address */ - if (cdc_ether) { + if (cdc_ether && cdc_ether->wMaxSegmentSize) { dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize); usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); } From 977784638f762db11674e96c311eeb41da90ab5c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Nov 2017 11:50:27 -0800 Subject: [PATCH 098/312] arm: crypto: reduce priority of bit-sliced AES cipher [ Not upstream because this is a minimal fix for a bug where arm32 kernels can use a much slower implementation of AES than is actually available, potentially forcing vendors to disable encryption on their devices.] All the aes-bs (bit-sliced) and aes-ce (cryptographic extensions) algorithms had a priority of 300. This is undesirable because it means an aes-bs algorithm may be used when an aes-ce algorithm is available. The aes-ce algorithms have much better performance (up to 10x faster). Fix it by decreasing the priority of the aes-bs algorithms to 250. This was fixed upstream by commit cc477bf64573 ("crypto: arm/aes - replace bit-sliced OpenSSL NEON code"), but it was just a small part of a complete rewrite. This patch just fixes the priority bug for older kernels. 
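For context, a minimal sketch of why the cra_priority value matters (hypothetical caller; any user of the generic crypto API behaves the same way):

/*
 * Both aes-ce and aes-bs register, e.g., "xts(aes)".  Users request the
 * algorithm by name only, and the crypto API resolves the name to the
 * registered implementation with the highest cra_priority:
 */
struct crypto_ablkcipher *tfm;

tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

/* with aes-ce left at 300 and aes-bs lowered to 250, the much faster
 * crypto-extensions implementation now wins whenever the CPU has it */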
Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel --- arch/arm/crypto/aesbs-glue.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c index 6d685298690e..648d5fac9cbf 100644 --- a/arch/arm/crypto/aesbs-glue.c +++ b/arch/arm/crypto/aesbs-glue.c @@ -357,7 +357,7 @@ static struct crypto_alg aesbs_algs[] = { { }, { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), @@ -377,7 +377,7 @@ static struct crypto_alg aesbs_algs[] = { { }, { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct async_helper_ctx), @@ -397,7 +397,7 @@ static struct crypto_alg aesbs_algs[] = { { }, { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), From ec4f8a71c0fc3c2ad0c40e01abcaf12631dd9ce2 Mon Sep 17 00:00:00 2001 From: Leif Liddy Date: Sat, 8 Jul 2017 20:55:32 +0200 Subject: [PATCH 099/312] Bluetooth: btusb: fix QCA Rome suspend/resume commit fd865802c66bc451dc515ed89360f84376ce1a56 upstream. There's been numerous reported instances where BTUSB_QCA_ROME bluetooth controllers stop functioning upon resume from suspend. These devices seem to be losing power during suspend. Patch will detect a status change on resume and perform a reset. Signed-off-by: Leif Liddy Signed-off-by: Marcel Holtmann Cc: Kai Heng Feng Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btusb.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7bb8055bd10c..1ccad79ce77c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2969,6 +2969,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. + */ + set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL From 8d9142ff444e789d8ac34db1db5214a043484f86 Mon Sep 17 00:00:00 2001 From: Adam Wallis Date: Thu, 2 Nov 2017 08:53:30 -0400 Subject: [PATCH 100/312] dmaengine: dmatest: warn user when dma test times out commit a9df21e34b422f79d9a9fa5c3eff8c2a53491be6 upstream. Commit adfa543e7314 ("dmatest: don't use set_freezable_with_signal()") introduced a bug (that is in fact documented by the patch commit text) that leaves behind a dangling pointer. Since the done_wait structure is allocated on the stack, future invocations to the DMATEST can produce undesirable results (e.g., corrupted spinlocks). Ideally, this would be cleaned up in the thread handler, but at the very least, the kernel is left in a very precarious scenario that can lead to some long debug sessions when the crash comes later. 
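The underlying hazard is the usual on-stack completion pattern; a minimal sketch of the shape of the bug (generic code, start_async_work() is a hypothetical stand-in for the dmatest submission path):

static int submit_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);	/* lives in this stack frame */

	start_async_work(&done);	/* callback will call complete(&done) */

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(3000))) {
		/*
		 * Timed out: the callback has not run yet but still holds
		 * a pointer to 'done'.  Returning frees this stack frame,
		 * so when the callback finally fires it scribbles over
		 * whatever now occupies that memory, for example another
		 * thread's spinlock.  That is the corruption the added
		 * WARN() is meant to surface early.
		 */
		return -ETIMEDOUT;
	}
	return 0;
}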
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197605 Signed-off-by: Adam Wallis Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/dmatest.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b8576fd6bd0e..1c7568c0055a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -634,6 +634,7 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ + WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); From 2bb04f1ff63db7e331179483c1f79d9d9ac30342 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 8 Dec 2016 10:45:31 +0200 Subject: [PATCH 101/312] extcon: palmas: Check the parent instance to prevent the NULL [ Upstream commit 9fe172b9be532acc23e35ba693700383ab775e66 ] extcon-palmas must be child of palmas and expects parent's drvdata to be valid. Check for non NULL parent drvdata and fail if it is NULL. Not doing so will result in a NULL pointer dereference later in the probe() parent drvdata is NULL (e.g. misplaced extcon-palmas node in device tree). Signed-off-by: Roger Quadros Signed-off-by: Chanwoo Choi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/extcon/extcon-palmas.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 93c30a885740..aa2f6bb82b32 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev) struct palmas_usb *palmas_usb; int status; + if (!palmas) { + dev_err(&pdev->dev, "failed to get valid parent\n"); + return -EINVAL; + } + palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL); if (!palmas_usb) return -ENOMEM; From 62641014202e6681f488ffa72c2956dd97faa0f8 Mon Sep 17 00:00:00 2001 From: Ngai-Mint Kwan Date: Wed, 2 Nov 2016 16:44:46 -0700 Subject: [PATCH 102/312] fm10k: request reset when mbx->state changes [ Upstream commit 2f3fc1e6200309ccf87f61dea56e57e563c4f800 ] Multiple IES API resets can cause a race condition where the mailbox interrupt request bits can be cleared before being handled. This can leave certain mailbox messages from the PF to be untreated and the PF will enter in some inactive state. If this situation occurs, the IES API will initiate a mailbox version reset which, then, trigger a mailbox state change. Once this mailbox transition occurs (from OPEN to CONNECT state), a request for reset will be returned. This ensures that PF will undergo a reset whenever IES API encounters an unknown global mailbox interrupt event or whenever the IES API terminates. 
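The FM10K_ERR_RESET_REQUESTED value only propagates a flag out of the interrupt handler; the reset itself happens later in the driver's service task, roughly along these lines (simplified sketch, helper names approximate):

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");

	/* tear the PF down and bring it back up; re-connecting the mailbox
	 * lets the previously untreated PF messages be recovered */
	fm10k_reinit(interface);
}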
Signed-off-by: Ngai-Mint Kwan Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 10 +++++++--- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 6 +++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index af09a1b272e6..6a2d1454befe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -2002,9 +2002,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, * function can also be used to respond to an error as the connection * resetting would also be a means of dealing with errors. **/ -static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, - struct fm10k_mbx_info *mbx) +static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) { + s32 err = 0; const enum fm10k_mbx_state state = mbx->state; switch (state) { @@ -2017,6 +2018,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_OPEN: /* flush any incomplete work */ fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; break; case FM10K_STATE_CONNECT: /* Update remote value to match local value */ @@ -2026,6 +2028,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, } fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; } /** @@ -2106,7 +2110,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { case 0: - fm10k_sm_mbx_process_reset(hw, mbx); + err = fm10k_sm_mbx_process_reset(hw, mbx); break; case FM10K_SM_MBX_VERSION: err = fm10k_sm_mbx_process_version_1(hw, mbx); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 7f3fb51bc37b..06f35700840b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1072,6 +1072,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) struct fm10k_hw *hw = &interface->hw; struct fm10k_mbx_info *mbx = &hw->mbx; u32 eicr; + s32 err = 0; /* unmask any set bits related to this interrupt */ eicr = fm10k_read_reg(hw, FM10K_EICR); @@ -1087,12 +1088,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { - mbx->ops.process(hw, mbx); + err = mbx->ops.process(hw, mbx); /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } + if (err == FM10K_ERR_RESET_REQUESTED) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + /* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ From 84a97ea8b7b166f7f80df6a8f794357ccd729888 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 5 Jan 2017 11:17:30 -0800 Subject: [PATCH 103/312] ARM: dts: Fix compatible for ti81xx uarts for 8250 [ Upstream commit f62280efe8934a1275fd148ef302d1afec8cd3df ] When using 8250_omap driver, we need to specify the right compatible value for the UART to work on dm814x and dm816x. 
Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/dm814x.dtsi | 6 +++--- arch/arm/boot/dts/dm816x.dtsi | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 7988b42e5764..509437839541 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -138,7 +138,7 @@ }; uart1: uart@20000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart1"; reg = <0x20000 0x2000>; clock-frequency = <48000000>; @@ -148,7 +148,7 @@ }; uart2: uart@22000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart2"; reg = <0x22000 0x2000>; clock-frequency = <48000000>; @@ -158,7 +158,7 @@ }; uart3: uart@24000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart3"; reg = <0x24000 0x2000>; clock-frequency = <48000000>; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index eee636de4cd8..e526928e6e96 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -347,7 +347,7 @@ }; uart1: uart@48020000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart1"; reg = <0x48020000 0x2000>; clock-frequency = <48000000>; @@ -357,7 +357,7 @@ }; uart2: uart@48022000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart2"; reg = <0x48022000 0x2000>; clock-frequency = <48000000>; @@ -367,7 +367,7 @@ }; uart3: uart@48024000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart3"; reg = <0x48024000 0x2000>; clock-frequency = <48000000>; From 4a23041fa5335908acb5a4fef62c1aedfe8ac0b1 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 5 Jan 2017 11:10:40 -0800 Subject: [PATCH 104/312] ARM: dts: Fix am335x and dm814x scm syscon to probe children [ Upstream commit 1aa09df0854efe16b7a80358a18f0a0bebafd246 ] Without these changes children of the scn syscon won't probe. 
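The reason "simple-bus" makes the children probe is that of_platform_populate() only descends into nodes whose compatible matches its default bus table when creating platform devices; a trimmed sketch of that table (based on drivers/of/platform.c, shortened):

/* nodes matching one of these entries are treated as buses and get
 * platform devices created for their children; a bare "syscon" is not
 * in the list, which is why the child nodes were never probed */
const struct of_device_id of_default_bus_match_table[] = {
	{ .compatible = "simple-bus" },
	{ .compatible = "simple-mfd" },
	{} /* Empty terminated list */
};

The added ranges = <0 0 0x800> property is what lets the children's reg addresses translate through the new bus node.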
Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/am33xx.dtsi | 3 ++- arch/arm/boot/dts/dm814x.dtsi | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index d23e2524d694..be9c37e89be1 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -142,10 +142,11 @@ }; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x800>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0 0x800>; scm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 509437839541..c226c3d952d8 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -189,10 +189,11 @@ ranges = <0 0x160000 0x16d000>; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x800>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0 0x800>; scm_clocks: clocks { #address-cells = <1>; From 955840ea50355cdd400683b1ee5c3ac0017cc160 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 5 Jan 2017 11:08:20 -0800 Subject: [PATCH 105/312] ARM: OMAP2+: Fix init for multiple quirks for the same SoC [ Upstream commit 6e613ebf4405fc09e2a8c16ed193b47f80a3cbed ] It's possible that there are multiple quirks that need to be initialized for the same SoC. Fix the issue by not returning on the first match. Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-omap2/pdata-quirks.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 58144779dec4..1e6e09841707 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -522,7 +522,6 @@ static void pdata_quirks_check(struct pdata_init *quirks) if (of_machine_is_compatible(quirks->compatible)) { if (quirks->fn) quirks->fn(); - break; } quirks++; } From 4fd669feacd371d39b534b6f16414bf377cf0d6e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 5 Jan 2017 11:07:18 -0800 Subject: [PATCH 106/312] ARM: dts: Fix omap3 off mode pull defines [ Upstream commit d97556c8012015901a3ce77f46960078139cd79d ] We need to also have OFFPULLUDENABLE bit set to use the off mode pull values. Otherwise the line is pulled down internally if no external pull exists. This is has some documentation at: http://processors.wiki.ti.com/index.php/Optimizing_OMAP35x_and_AM/DM37x_OFF_mode_PAD_configuration Note that the value is still glitchy during off mode transitions as documented in spz319f.pdf "Advisory 1.45". It's best to use external pulls instead of relying on the internal ones for off mode and even then anything pulled up will get driven down momentarily on off mode restore for GPIO banks other than bank1. 
Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/dt-bindings/pinctrl/omap.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index 13949259705a..0d4fe32b3ae2 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -45,8 +45,8 @@ #define PIN_OFF_NONE 0 #define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) #define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) -#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP) -#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) #define PIN_OFF_WAKEUPENABLE WAKEUP_EN /* From 2066882df7491631f16f93dc7da75e9f4497b994 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 3 Jan 2017 19:09:46 +0100 Subject: [PATCH 107/312] ata: ATA_BMDMA should depend on HAS_DMA [ Upstream commit 7bc7ab1e63dfe004931502f90ce7020e375623da ] If NO_DMA=y: ERROR: "dmam_alloc_coherent" [drivers/ata/libata.ko] undefined! Add a dependency on HAS_DMA to fix this. Signed-off-by: Geert Uytterhoeven Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/ata/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 6aaa3f81755b..fd19d180f640 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -272,6 +272,7 @@ config SATA_SX4 config ATA_BMDMA bool "ATA BMDMA support" + depends on HAS_DMA default y help This option adds support for SFF ATA controllers with BMDMA From 11cb9dedb2e6386467ac9c958c3d8cb95738e16e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 3 Jan 2017 19:09:45 +0100 Subject: [PATCH 108/312] ata: SATA_HIGHBANK should depend on HAS_DMA [ Upstream commit 2a736e0585e585c2566b5119af8381910a170e44 ] If NO_DMA=y: ERROR: "bad_dma_ops" [drivers/ata/sata_highbank.ko] undefined! Add a dependency on HAS_DMA to fix this. Signed-off-by: Geert Uytterhoeven Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/ata/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index fd19d180f640..c1e40ebaf5e2 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -319,6 +319,7 @@ config SATA_DWC_VDEBUG config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" + depends on HAS_DMA depends on ARCH_HIGHBANK || COMPILE_TEST help This option enables support for the Calxeda Highbank SoC's From b9ea0af4793b4fce9be0f07dd0cf9ea7c94c1049 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 3 Jan 2017 19:09:44 +0100 Subject: [PATCH 109/312] ata: SATA_MV should depend on HAS_DMA [ Upstream commit 62989cebd367a1aae1e009e1a5b1ec046a4c8fdc ] If NO_DMA=y: ERROR: "dma_pool_alloc" [drivers/ata/sata_mv.ko] undefined! ERROR: "dmam_pool_create" [drivers/ata/sata_mv.ko] undefined! ERROR: "dma_pool_free" [drivers/ata/sata_mv.ko] undefined! Add a dependency on HAS_DMA to fix this. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/ata/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index c1e40ebaf5e2..c2ba811993d4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -329,6 +329,7 @@ config SATA_HIGHBANK config SATA_MV tristate "Marvell SATA support" + depends on HAS_DMA depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST select GENERIC_PHY From 7878dca7ca24f7186cbf2e4635ad74fee0d2ce7f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 21 Dec 2016 11:00:12 +0530 Subject: [PATCH 110/312] drm/sti: sti_vtg: Handle return NULL error from devm_ioremap_nocache [ Upstream commit 1ae0d5af347df224a6e76334683f13a96d915a44 ] Here, If devm_ioremap_nocache will fail. It will return NULL. Kernel can run into a NULL-pointer dereference. This error check will avoid NULL pointer dereference. Signed-off-by: Arvind Yadav Acked-by: Vincent Abriou Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/sti/sti_vtg.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index d56630c60039..117a2f52fb4e 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -346,6 +346,10 @@ static int vtg_probe(struct platform_device *pdev) return -ENOMEM; } vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!vtg->regs) { + DRM_ERROR("failed to remap I/O memory\n"); + return -ENOMEM; + } np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); if (np) { From 559a208028387ffb230f502f8137c278b311803b Mon Sep 17 00:00:00 2001 From: Aaron Sierra Date: Tue, 29 Nov 2016 10:03:56 -0600 Subject: [PATCH 111/312] igb: reset the PHY before reading the PHY ID [ Upstream commit 182785335447957409282ca745aa5bc3968facee ] Several people have reported firmware leaving the I210/I211 PHY's page select register set to something other than the default of zero. This causes the first accesses, PHY_IDx register reads, to access something else, resulting in device probe failure: igb: Intel(R) Gigabit Ethernet Network Driver - version 5.4.0-k igb: Copyright (c) 2007-2014 Intel Corporation. igb: probe of 0000:01:00.0 failed with error -2 This problem began for them after a previous patch I submitted was applied: commit 2a3cdead8b408351fa1e3079b220fa331480ffbc Author: Aaron Sierra Date: Tue Nov 3 12:37:09 2015 -0600 igb: Remove GS40G specific defines/functions I personally experienced this problem after attempting to PXE boot from I210 devices using this firmware: Intel(R) Boot Agent GE v1.5.78 Copyright (C) 1997-2014, Intel Corporation Resetting the PHY before reading from it, ensures the page select register is in its default state and doesn't make assumptions about the PHY's register set before the PHY has been probed. Cc: Matwey V. Kornilov Cc: Chris Arges Cc: Jochen Henneberg Signed-off-by: Aaron Sierra Tested-by: Matwey V. 
Kornilov Tested-by: Chris J Arges Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/e1000_82575.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 97bf0c3d5c69..f3f3b95d5512 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -223,6 +223,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) From ba83011a470cc1246d24a41ca2fe6047799f27e7 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Tue, 15 Nov 2016 08:54:26 -0800 Subject: [PATCH 112/312] igb: close/suspend race in netif_device_detach [ Upstream commit 9474933caf21a4cb5147223dca1551f527aaac36 ] Similar to ixgbe, when an interface is part of a namespace it is possible that igb_close() may be called while __igb_shutdown() is running which ends up in a double free WARN and/or a BUG in free_msi_irqs(). Extend the rtnl_lock() to protect the call to netif_device_detach() and igb_clear_interrupt_scheme() in __igb_shutdown() and check for netif_device_present() to avoid calling igb_clear_interrupt_scheme() a second time in igb_close(). Also extend the rtnl lock in igb_resume() to netif_device_attach(). 
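Conceptually the fix serializes the two teardown paths under rtnl and turns whichever one runs second into a no-op; a condensed sketch (not literal driver code):

/*
 * __igb_shutdown() now does, under rtnl_lock():
 *   netif_device_detach(netdev);
 *   __igb_close(netdev, true);
 *   igb_clear_interrupt_scheme(adapter);
 *
 * igb_close() is the ndo_stop callback and is invoked with rtnl already
 * held by the core, so the detach above cannot race with the check here:
 */
static int igb_close(struct net_device *netdev)
{
	if (netif_device_present(netdev))	/* not detached by shutdown */
		return __igb_close(netdev, false);
	return 0;	/* shutdown already freed the interrupt scheme */
}

Either path therefore frees the MSI-X vectors at most once, which is what the double-free WARN and the free_msi_irqs() BUG were complaining about.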
Signed-off-by: Todd Fujinaka Acked-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a481ea64e287..ff6e57d788eb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3172,7 +3172,9 @@ static int __igb_close(struct net_device *netdev, bool suspending) static int igb_close(struct net_device *netdev) { - return __igb_close(netdev, false); + if (netif_device_present(netdev)) + return __igb_close(netdev, false); + return 0; } /** @@ -7325,12 +7327,14 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) __igb_close(netdev, true); igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -7450,16 +7454,15 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - rtnl_lock(); + rtnl_lock(); + if (!err && netif_running(netdev)) err = __igb_open(netdev, true); - rtnl_unlock(); - if (err) - return err; - } - netif_device_attach(netdev); - return 0; + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; } static int igb_runtime_idle(struct device *dev) From bb848b61967f922410421a0c606cded8974dc20d Mon Sep 17 00:00:00 2001 From: Hannu Lounento Date: Mon, 2 Jan 2017 18:26:06 +0100 Subject: [PATCH 113/312] igb: Fix hw_dbg logging in igb_update_flash_i210 [ Upstream commit 76ed5a8f47476e4984cc8c0c1bc4cee62650f7fd ] Fix an if statement with hw_dbg lines where the logic was inverted with regards to the corresponding return value used in the if statement. Signed-off-by: Hannu Lounento Signed-off-by: Peter Senna Tschudin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/e1000_i210.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 29f59c76878a..851225b5dc0f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw) ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val) - hw_dbg("Flash update complete\n"); - else hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); out: return ret_val; From 469e75ddff14da449a090e0baffcbb4dfc0ac963 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 2 Jan 2017 11:04:58 -0300 Subject: [PATCH 114/312] scsi: ufs-qcom: Fix module autoload [ Upstream commit ab3dabb3e8cf077850f20610f73a0def1fed10cb ] If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. 
Before this patch: $ modinfo drivers/scsi/ufs/ufs-qcom.ko | grep alias $ After this patch: $ modinfo drivers/scsi/ufs/ufs-qcom.ko | grep alias alias: of:N*T*Cqcom,ufshcC* alias: of:N*T*Cqcom,ufshc Signed-off-by: Javier Martinez Canillas Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/ufs/ufs-qcom.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4f38d008bfb4..4b82c3765e01 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1552,6 +1552,7 @@ static const struct of_device_id ufs_qcom_of_match[] = { { .compatible = "qcom,ufshc"}, {}, }; +MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); static const struct dev_pm_ops ufs_qcom_pm_ops = { .suspend = ufshcd_pltfrm_suspend, From 0c098158785b5c8091c0bae3aa505060414076cc Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:22 -0800 Subject: [PATCH 115/312] scsi: ufs: add capability to keep auto bkops always enabled [ Upstream commit 4e768e7645ec4ffa92ee163643777b261ae97142 ] UFS device requires to perform bkops (back ground operations) periodically but host can control (via auto-bkops parameter of device) when device can perform bkops based on its performance requirements. In general, host would like to enable the device's auto-bkops only when it's not doing any regular data transfer but sometimes device may not behave properly if host keeps the auto-bkops disabled. This change adds the capability to let the device auto-bkops always enabled except suspend. Reviewed-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/ufs/ufshcd.c | 33 ++++++++++++++++++++++----------- drivers/scsi/ufs/ufshcd.h | 13 +++++++++++++ 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 85cd2564c157..0c2482ec7d21 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3340,18 +3340,25 @@ out: } /** - * ufshcd_force_reset_auto_bkops - force enable of auto bkops + * ufshcd_force_reset_auto_bkops - force reset auto bkops state * @hba: per adapter instance * * After a device reset the device may toggle the BKOPS_EN flag * to default value. The s/w tracking variables should be updated - * as well. Do this by forcing enable of auto bkops. + * as well. This function would change the auto-bkops state based on + * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. */ -static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) +static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) { - hba->auto_bkops_enabled = false; - hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; - ufshcd_enable_auto_bkops(hba); + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { + hba->auto_bkops_enabled = false; + hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; + ufshcd_enable_auto_bkops(hba); + } else { + hba->auto_bkops_enabled = true; + hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; + ufshcd_disable_auto_bkops(hba); + } } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -5149,11 +5156,15 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto set_old_link_state; } - /* - * If BKOPs operations are urgently needed at this moment then - * keep auto-bkops enabled or else disable it. 
- */ - ufshcd_urgent_bkops(hba); + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) + ufshcd_enable_auto_bkops(hba); + else + /* + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. + */ + ufshcd_urgent_bkops(hba); + hba->clk_gating.is_suspended = false; if (ufshcd_is_clkscaling_enabled(hba)) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 2570d9477b37..bb02100ab2dc 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -528,6 +528,14 @@ struct ufs_hba { * CAUTION: Enabling this might reduce overall UFS throughput. */ #define UFSHCD_CAP_INTR_AGGR (1 << 4) + /* + * This capability allows the device auto-bkops to be always enabled + * except during suspend (both runtime and suspend). + * Enabling this capability means that device will always be allowed + * to do background operation when it's active but it might degrade + * the performance of ongoing read/write operations. + */ +#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -623,6 +631,11 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba) BUG_ON(!hba); return hba->priv; } +static inline bool ufshcd_keep_autobkops_enabled_except_suspend( + struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND; +} extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); From fdc1e9d553e1e4556b0697d3a65aab7091058db6 Mon Sep 17 00:00:00 2001 From: Galo Navarro Date: Tue, 3 Jan 2017 23:12:09 +0100 Subject: [PATCH 116/312] staging: rtl8188eu: fix incorrect ERROR tags from logs [ Upstream commit 401579c22ccbcb54244494069973e64b1fe980d2 ] Several lifecycle events in the rtl8188eu driver are logged using the DBG_88E_LEVEL macro from rtw_debug.h, which is tagged as ERROR regardless of the actual level. Below are dmesg excerpts after loading and unloading the module, the messages are misleading as there was no error. [517434.916239] usbcore: registered new interface driver r8188eu [517435.680653] R8188EU: ERROR indicate disassoc [517437.122606] R8188EU: ERROR assoc success [517797.735611] usbcore: deregistering interface driver r8188eu [517797.736069] R8188EU: ERROR indicate disassoc Remove the ERROR prefix from the logs. After the patch, logs are: [517949.873976] usbcore: registered new interface driver r8188eu [517950.592845] R8188EU: indicate disassoc [517951.993973] R8188EU: assoc success [521778.784448] usbcore: deregistering interface driver r8188eu [521778.784838] R8188EU: indicate disassoc Signed-off-by: Galo Navarro Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/include/rtw_debug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h index 971bf457f32d..e75a386344e4 100644 --- a/drivers/staging/rtl8188eu/include/rtw_debug.h +++ b/drivers/staging/rtl8188eu/include/rtw_debug.h @@ -75,7 +75,7 @@ extern u32 GlobalDebugLevel; #define DBG_88E_LEVEL(_level, fmt, arg...) \ do { \ if (_level <= GlobalDebugLevel) \ - pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg); \ + pr_info(DRIVER_PREFIX fmt, ##arg); \ } while (0) #define DBG_88E(...) 
\ From 18477baf599e5d2bb98a67e720054bec1844ff91 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:30 -0800 Subject: [PATCH 117/312] scsi: lpfc: Add missing memory barrier [ Upstream commit 6b3b3bdb83b4ad51252d21bb13596db879e51850 ] On loosely ordered memory systems (PPC for example), the WQE elements were being updated in memory, but not necessarily flushed before the separate doorbell was written to hw which would cause hw to dma the WQE element. Thus, the hardware occasionally received partially updated WQE data. Add the memory barrier after updating the WQE memory. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_sli.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 38e90d9c2ced..abba562cf145 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -118,6 +118,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); + /* ensure WQE bcopy flushed before doorbell write */ + wmb(); /* Update the host index before invoking device */ host_index = q->host_index; From a7e7d319cc963e6d1701490be9a5d5b938e2fd00 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:25 -0800 Subject: [PATCH 118/312] scsi: lpfc: FCoE VPort enable-disable does not bring up the VPort [ Upstream commit 104450eb08ca662e6b1d02da11aca9598e978f3e ] FCoE VPort enable-disable does not bring up the VPort. VPI structure needed to be initialized before being re-registered. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_vport.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 769012663a8f..861c57bc4520 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -528,6 +528,12 @@ enable_vport(struct fc_vport *fc_vport) spin_lock_irq(shost->host_lock); vport->load_flag |= FC_LOADING; + if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + spin_unlock_irq(shost->host_lock); + lpfc_issue_init_vpi(vport); + goto out; + } + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); @@ -548,6 +554,8 @@ enable_vport(struct fc_vport *fc_vport) } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } + +out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1827 Vport Enabled.\n"); return VPORT_OK; From 44eb947094c4c0bf6fd805127a72f20662f6bd8b Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:24 -0800 Subject: [PATCH 119/312] scsi: lpfc: Correct host name in symbolic_name field [ Upstream commit 6c9231f604c2575be24c96d38deb70f145172f92 ] Correct host name in symbolic_name field of nameserver registrations Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_attr.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f6446d759d7f..4639dac64e7f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5147,6 +5147,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) * Dynamic FC Host Attributes Support */ +/** + * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_symbolic_name(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); +} + /** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer. @@ -5684,6 +5697,8 @@ struct fc_function_template lpfc_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ @@ -5751,6 +5766,8 @@ struct fc_function_template lpfc_vport_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ From 8d8723c8c1879c3233f85c855922db22a965e597 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:23 -0800 Subject: [PATCH 120/312] scsi: lpfc: Correct issue leading to oops during link reset [ Upstream commit e6c6acc0e0223ddaf867628d420ee196349c6fae ] Correct issue leading to oops during link reset. Missing vport pointer. [mkp: fixed typo] Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_sli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index abba562cf145..8379fbbc60db 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9807,6 +9807,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpCommand = CMD_CLOSE_XRI_CN; abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; + abtsiocbp->vport = vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0339 Abort xri x%x, original iotag x%x, " From 51abb2a73f16e884a5bf284acb6bd59d72e054a5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:20 -0800 Subject: [PATCH 121/312] scsi: lpfc: Clear the VendorVersion in the PLOGI/PLOGI ACC payload [ Upstream commit e0165f20447c8ca1d367725ee94d8ec9f38ca275 ] Clear the VendorVersion in the PLOGI/PLOGI ACC payload Vendor version info may have been set on fabric login. Before sending PLOGI payloads, ensure that it's cleared. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_els.c | 6 ++++++ drivers/scsi/lpfc/lpfc_hw.h | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c74f74ab981c..d278362448ca 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1982,6 +1982,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); @@ -3966,6 +3969,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } else { memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2cce88e967ce..a8ad97300177 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -360,6 +360,12 @@ struct csp { * Word 1 Bit 30 in PLOGI request is random offset */ #define virtual_fabric_support randomOffset /* Word 1, bit 30 */ +/* + * Word 1 Bit 29 in common service parameter is overloaded. + * Word 1 Bit 29 in FLOGI response is multiple NPort assignment + * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level + */ +#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ From 3b985d39ed4f1a6953c83b62221d2e7cf3509f31 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Jan 2017 12:34:14 +0100 Subject: [PATCH 122/312] ALSA: vx: Don't try to update capture stream before running [ Upstream commit ed3c177d960bb5881b945ca6f784868126bb90db ] The update of stream costs significantly, and we should avoid it unless the stream really has started. Check pipe->running flag instead of pipe->prepared. Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/drivers/vx/vx_pcm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c index 11467272089e..69f252585780 100644 --- a/sound/drivers/vx/vx_pcm.c +++ b/sound/drivers/vx/vx_pcm.c @@ -1015,7 +1015,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream int size, space, count; struct snd_pcm_runtime *runtime = subs->runtime; - if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE)) + if (!pipe->running || (chip->chip_status & VX_STAT_IS_STALE)) return; size = runtime->buffer_size - snd_pcm_capture_avail(runtime); From fe21a3d688196522166686d628e2d8b3e22626e1 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Jan 2017 12:19:15 +0100 Subject: [PATCH 123/312] ALSA: vx: Fix possible transfer overflow [ Upstream commit 874e1f6fad9a5184b67f4cee37c1335cd2cc5677 ] The pseudo DMA transfer codes in VX222 and VX-pocket driver have a slight bug where they check the buffer boundary wrongly, and may overflow. Also, the zero sample count might be handled badly for the playback (although it shouldn't happen in theory). This patch addresses these issues. 
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=141541 Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/drivers/vx/vx_pcm.c | 6 ++++-- sound/pci/vx222/vx222_ops.c | 12 ++++++------ sound/pcmcia/vx/vxp_ops.c | 12 ++++++------ 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c index 69f252585780..ea7b377f0378 100644 --- a/sound/drivers/vx/vx_pcm.c +++ b/sound/drivers/vx/vx_pcm.c @@ -1048,8 +1048,10 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream /* ok, let's accelerate! */ int align = pipe->align * 3; space = (count / align) * align; - vx_pseudo_dma_read(chip, runtime, pipe, space); - count -= space; + if (space > 0) { + vx_pseudo_dma_read(chip, runtime, pipe, space); + count -= space; + } } /* read the rest of bytes */ while (count > 0) { diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c index af83b3b38052..8e457ea27f89 100644 --- a/sound/pci/vx222/vx222_ops.c +++ b/sound/pci/vx222/vx222_ops.c @@ -269,12 +269,12 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, /* Transfer using pseudo-dma. */ - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) { + for (; length > 0; length--) { outl(cpu_to_le32(*addr), port); addr++; } @@ -284,7 +284,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (count-- > 0) { + for (; count > 0; count--) { outl(cpu_to_le32(*addr), port); addr++; } @@ -307,12 +307,12 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, vx2_setup_pseudo_dma(chip, 0); /* Transfer using pseudo-dma. */ - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) + for (; length > 0; length--) *addr++ = le32_to_cpu(inl(port)); addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; @@ -320,7 +320,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (count-- > 0) + for (; count > 0; count--) *addr++ = le32_to_cpu(inl(port)); vx2_release_pseudo_dma(chip); diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c index 281972913c32..56aa1ba73ccc 100644 --- a/sound/pcmcia/vx/vxp_ops.c +++ b/sound/pcmcia/vx/vxp_ops.c @@ -369,12 +369,12 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, unsigned short *addr = (unsigned short *)(runtime->dma_area + offset); vx_setup_pseudo_dma(chip, 1); - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) { + for (; length > 0; length--) { outw(cpu_to_le16(*addr), port); addr++; } @@ -384,7 +384,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. 
*/ - while (count-- > 0) { + for (; count > 0; count--) { outw(cpu_to_le16(*addr), port); addr++; } @@ -411,12 +411,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, if (snd_BUG_ON(count % 2)) return; vx_setup_pseudo_dma(chip, 0); - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) + for (; length > 0; length--) *addr++ = le16_to_cpu(inw(port)); addr = (unsigned short *)runtime->dma_area; pipe->hw_ptr = 0; @@ -424,7 +424,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (count-- > 1) + for (; count > 1; count--) *addr++ = le16_to_cpu(inw(port)); /* Disable DMA */ pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK; From d6f21ea251182bfee9aad94eed8e242dcdd54d79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 6 Jul 2016 19:33:05 +0200 Subject: [PATCH 124/312] backlight: lcd: Fix race condition during register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit cc21942bce652d1a92dae85b785378256e1df1f7 ] Once device_register is called for a device its attributes might be accessed. As the callbacks of a lcd device's attributes make use of the lcd_ops, the respective member must be setup before calling device_register. Signed-off-by: Uwe Kleine-König Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/backlight/lcd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 7de847df224f..4b40c6a4d441 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -226,6 +226,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent, dev_set_name(&new_ld->dev, "%s", name); dev_set_drvdata(&new_ld->dev, devdata); + new_ld->ops = ops; + rc = device_register(&new_ld->dev); if (rc) { put_device(&new_ld->dev); @@ -238,8 +240,6 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent, return ERR_PTR(rc); } - new_ld->ops = ops; - return new_ld; } EXPORT_SYMBOL(lcd_device_register); From 3e899991b99e0948b1922645945ce67edf758bfe Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 9 Jul 2016 01:19:51 +0300 Subject: [PATCH 125/312] backlight: adp5520: Fix error handling in adp5520_bl_probe() [ Upstream commit 0eb3fba8c68275f0122f65f7316efaaf86448016 ] If adp5520_bl_setup() fails, sysfs group left unremoved. By the way, fix overcomplicated assignement of error code. Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Alexey Khoroshilov Acked-by: Michael Hennerich Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/backlight/adp5520_bl.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c index dd88ba1d71ce..35373e2065b2 100644 --- a/drivers/video/backlight/adp5520_bl.c +++ b/drivers/video/backlight/adp5520_bl.c @@ -332,10 +332,18 @@ static int adp5520_bl_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, bl); - ret |= adp5520_bl_setup(bl); + ret = adp5520_bl_setup(bl); + if (ret) { + dev_err(&pdev->dev, "failed to setup\n"); + if (data->pdata->en_ambl_sens) + sysfs_remove_group(&bl->dev.kobj, + &adp5520_bl_attr_group); + return ret; + } + backlight_update_status(bl); - return ret; + return 0; } static int adp5520_bl_remove(struct platform_device *pdev) From b1e8e6d4c06547ad6f1864eefef70ce8e6dbb463 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 3 Jan 2017 17:00:27 +0530 Subject: [PATCH 126/312] gpu: drm: mgag200: mgag200_main:- Handle error from pci_iomap [ Upstream commit 4b0ea93f250afc6c1128e201b0a8a115ae613e47 ] Here, pci_iomap can fail, handle this case and return -ENOMEM. Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1483443027-13444-1-git-send-email-arvind.yadav.cs@gmail.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/mgag200/mgag200_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index b1a0f5656175..44df959cbadb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -145,6 +145,8 @@ static int mga_vram_init(struct mga_device *mdev) } mem = pci_iomap(mdev->dev->pdev, 0, 0); + if (!mem) + return -ENOMEM; mdev->mc.vram_size = mga_probe_vram(mdev, mem); From e2d12bdaed6b7cee739583490325ed7a236a82d9 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Wed, 4 Jan 2017 14:49:07 +0800 Subject: [PATCH 127/312] ALSA: hda/realtek - Add new codec ID ALC299 [ Upstream commit 28f1f9b26cee161ddd3985b3eb78e3ffada08dda ] ALC299 was similar as ALC225. Add headset support for ALC299. ALC3271 was for Dell rename. 
Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a83688f8672e..af0962307b7f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -338,6 +338,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0288: case 0x10ec0295: case 0x10ec0298: + case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; case 0x10ec0285: @@ -914,6 +915,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0225, 0x1028, 0, "ALC3253" }, { 0x10ec0295, 0x1028, 0, "ALC3254" }, + { 0x10ec0299, 0x1028, 0, "ALC3271" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, @@ -3721,6 +3723,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -3823,6 +3826,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0225); @@ -3881,6 +3885,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; case 0x10ec0236: @@ -3995,6 +4000,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -4086,6 +4092,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -4171,6 +4178,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); msleep(800); val = alc_read_coef_idx(codec, 0x46); @@ -6233,6 +6241,7 @@ static int patch_alc269(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: spec->codec_variant = ALC269_TYPE_ALC225; break; case 0x10ec0234: @@ -7191,6 +7200,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269), HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), From 865fe71c0a060ff8e0d79e1738fea45d5cf750f9 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 5 Dec 2016 18:12:21 -0500 Subject: [PATCH 128/312] arm64: dts: NS2: reserve memory for Nitro firmware [ Upstream commit 0cc878d678444392ca2a31350f89f489593ef5bb ] Nitro firmware is loaded into memory by the bootloader at a specific location. Set this memory range aside to prevent the kernel from using it. 
Signed-off-by: Jon Mason Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/broadcom/ns2.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi index 3c92d92278e5..a14a6bb31887 100644 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@ -30,6 +30,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +/memreserve/ 0x81000000 0x00200000; + #include /memreserve/ 0x84b00000 0x00000008; From f12976ce82cd5ca2a95eb65b891412014a900398 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 16 Nov 2016 09:48:02 -0800 Subject: [PATCH 129/312] ixgbe: fix AER error handling [ Upstream commit 126db13fa0e6d05c9f94e0125f61e773bd5ab079 ] Make sure that we free the IRQs in ixgbe_io_error_detected() when responding to an PCIe AER error and also restore them when the interface recovers from it. Previously it was possible to trigger BUG_ON() check in free_msix_irqs() in the case where we call ixgbe_remove() after a failed recovery from AER error because the interrupts were not freed. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cd9b284bc83b..13723fa32efc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9221,7 +9221,7 @@ skip_bad_vf_detection: } if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_close_suspend(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -9291,10 +9291,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } #endif + rtnl_lock(); if (netif_running(netdev)) - ixgbe_up(adapter); + ixgbe_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { From 9d51db4caff961763f49577962730b2f2be18436 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 11 Nov 2016 10:07:47 -0800 Subject: [PATCH 130/312] ixgbe: handle close/suspend race with netif_device_detach/present [ Upstream commit f7f37e7ff2b9b7eff7fbd035569cab35896869a3 ] When an interface is part of a namespace it is possible that ixgbe_close() may be called while __ixgbe_shutdown() is running which ends up in a double free WARN and/or a BUG in free_msi_irqs(). To handle this situation we extend the rtnl_lock() to protect the call to netif_device_detach() and ixgbe_clear_interrupt_scheme() in __ixgbe_shutdown() and check for netif_device_present() to avoid clearing the interrupts second time in ixgbe_close(); Also extend the rtnl lock in ixgbe_resume() to netif_device_attach(). 
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 13723fa32efc..83645d8503d4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5878,7 +5878,8 @@ static int ixgbe_close(struct net_device *netdev) ixgbe_ptp_stop(adapter); - ixgbe_close_suspend(adapter); + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); @@ -5923,14 +5924,12 @@ static int ixgbe_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = ixgbe_open(netdev); + + if (!err) + netif_device_attach(netdev); rtnl_unlock(); - if (err) - return err; - - netif_device_attach(netdev); - - return 0; + return err; } #endif /* CONFIG_PM */ @@ -5945,14 +5944,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_unlock(); ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); From 12ec51aaafe39bb2b7698a1a79ee60aaa46a0601 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 10 Nov 2016 09:57:29 -0800 Subject: [PATCH 131/312] ixgbe: Reduce I2C retry count on X550 devices [ Upstream commit 3f0d646b720d541309b11e190db58086f446f41e ] A retry count of 10 is likely to run into problems on X550 devices that have to detect and reset unresponsive CS4227 devices. So, reduce the I2C retry count to 3 for X550 and above. This should avoid any possible regressions in existing devices. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fb8673d63806..48d97cb730d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -113,7 +113,7 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 10; + int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; @@ -1764,6 +1764,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; From b6e7fb0be1e5aa7d00484bd3da7a1b368d693e41 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 4 Nov 2016 14:03:03 -0700 Subject: [PATCH 132/312] ixgbe: add mask for 64 RSS queues [ Upstream commit 2bf1a87b903bd81b1448a1cef73de59fb6c4d340 ] The indirection table was reported incorrectly for X550 and newer where we can support up to 64 RSS queues. 
Reported-by Krishneil Singh Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f3168bcc7d87..f0de09db8283 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -307,6 +307,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +#define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -602,6 +603,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; @@ -610,7 +612,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = IXGBE_RSS_16Q_MASK; + + if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; From ade72053f4520f41c8f56b244162f81b37aa898e Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 28 Sep 2016 16:01:48 -0700 Subject: [PATCH 133/312] ixgbe: do not disable FEC from the driver [ Upstream commit 1fe954b2097bb907b4578e6a74e4c1d23785a601 ] FEC is configured by the NVM and the driver should not be overriding it. Signed-off-by: Emil Tantilov Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ebe0ac950b14..31f864fb30c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1643,8 +1643,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); From b15877443a936cb9dd2c58c8a7acf451c7be0fdc Mon Sep 17 00:00:00 2001 From: Jannik Becher Date: Tue, 20 Dec 2016 18:59:46 +0100 Subject: [PATCH 134/312] staging: rtl8712: fixed little endian problem [ Upstream commit 502c80744fcac6b16f28699469c70db499fe2f69 ] Fixed a sparse warning. Using function le16_to_cpus() to avoid double assignment. 
Signed-off-by: Jannik Becher Tested-by: Larry Finger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index edfc6805e012..2b348439242f 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -199,7 +199,7 @@ static inline char *translate_scan(struct _adapter *padapter, iwe.cmd = SIOCGIWMODE; memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs), 2); - cap = le16_to_cpu(cap); + le16_to_cpus(&cap); if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) { if (cap & WLAN_CAPABILITY_BSS) iwe.u.mode = (u32)IW_MODE_MASTER; From 7f5eb098ef9590db1998b827a38729daf9d35731 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 7 Nov 2016 11:14:09 +0000 Subject: [PATCH 135/312] MIPS: End asm function prologue macros with .insn [ Upstream commit 08889582b8aa0bbc01a1e5a0033b9f98d2e11caa ] When building a kernel targeting a microMIPS ISA, recent GNU linkers will fail the link if they cannot determine that the target of a branch or jump is microMIPS code, with errors such as the following: mips-img-linux-gnu-ld: arch/mips/built-in.o: .text+0x542c: Unsupported jump between ISA modes; consider recompiling with interlinking enabled. mips-img-linux-gnu-ld: final link failed: Bad value or: ./arch/mips/include/asm/uaccess.h:1017: warning: JALX to a non-word-aligned address Placing anything other than an instruction at the start of a function written in assembly appears to trigger such errors. In order to prepare for allowing us to follow function prologue macros with an EXPORT_SYMBOL invocation, end the prologue macros (LEAD, NESTED & FEXPORT) with a .insn directive. This ensures that the start of the function is marked as code, which always makes sense for functions & safely prevents us from hitting the link errors described above. Signed-off-by: Paul Burton Reviewed-by: Maciej W. Rozycki Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/14508/ Signed-off-by: Ralf Baechle Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/asm.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 7c26b28bf252..859cf7048347 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -54,7 +54,8 @@ .align 2; \ .type symbol, @function; \ .ent symbol, 0; \ -symbol: .frame sp, 0, ra +symbol: .frame sp, 0, ra; \ + .insn /* * NESTED - declare nested routine entry point @@ -63,8 +64,9 @@ symbol: .frame sp, 0, ra .globl symbol; \ .align 2; \ .type symbol, @function; \ - .ent symbol, 0; \ -symbol: .frame sp, framesize, rpc + .ent symbol, 0; \ +symbol: .frame sp, framesize, rpc; \ + .insn /* * END - mark end of function @@ -86,7 +88,7 @@ symbol: #define FEXPORT(symbol) \ .globl symbol; \ .type symbol, @function; \ -symbol: +symbol: .insn /* * ABS - export absolute symbol From a904ebe92a05dd86517b93bad062076e38890788 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 14 Jan 2016 15:18:33 -0800 Subject: [PATCH 136/312] mm: add PHYS_PFN, use it in __phys_to_pfn() commit 8f235d1a3eb7198affe7cadf676a10afb8a46a1a upstream. 
__phys_to_pfn and __pfn_to_phys are symmetric, PHYS_PFN and PFN_PHYS are semmetric: - y = (phys_addr_t)x << PAGE_SHIFT - y >> PAGE_SHIFT = (phys_add_t)x - (unsigned long)(y >> PAGE_SHIFT) = x [akpm@linux-foundation.org: use macro arg name `x'] [arnd@arndb.de: include linux/pfn.h for PHYS_PFN definition] Signed-off-by: Chen Gang Cc: Oleg Nesterov Signed-off-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Cc: Marcin Nowakowski Signed-off-by: Greg Kroah-Hartman --- include/asm-generic/memory_model.h | 4 +++- include/linux/pfn.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 4b4b056a6eb0..5148150cc80b 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -1,6 +1,8 @@ #ifndef __ASM_MEMORY_MODEL_H #define __ASM_MEMORY_MODEL_H +#include + #ifndef __ASSEMBLY__ #if defined(CONFIG_FLATMEM) @@ -72,7 +74,7 @@ /* * Convert a physical address to a Page Frame Number and back */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __phys_to_pfn(paddr) PHYS_PFN(paddr) #define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 7646637221f3..97f3e88aead4 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -9,5 +9,6 @@ #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) +#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) #endif From 034347aca034d8c2edd54ed8f6c67753ab2741be Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Wed, 23 Nov 2016 14:43:45 +0100 Subject: [PATCH 137/312] MIPS: init: Ensure bootmem does not corrupt reserved memory [ Upstream commit d9b5b658210f28ed9f70c757d553e679d76e2986 ] Current init code initialises bootmem allocator with all of the low memory that it assumes is available, but does not check for reserved memory block, which can lead to corruption of data that may be stored there. 
Move bootmem's allocation map to a location that does not cross any reserved regions Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/14609/ Signed-off-by: Ralf Baechle Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/setup.c | 74 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8acae316f26b..1acaf0939cb5 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -152,6 +152,35 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add add_memory_region(start, size, BOOT_MEM_RAM); } +bool __init memory_region_available(phys_addr_t start, phys_addr_t size) +{ + int i; + bool in_ram = false, free = true; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + phys_addr_t start_, end_; + + start_ = boot_mem_map.map[i].addr; + end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size; + + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + if (start >= start_ && start + size <= end_) + in_ram = true; + break; + case BOOT_MEM_RESERVED: + if ((start >= start_ && start < end_) || + (start < start_ && start + size >= start_)) + free = false; + break; + default: + continue; + } + } + + return in_ram && free; +} + static void __init print_memory_map(void) { int i; @@ -300,11 +329,19 @@ static void __init bootmem_init(void) #else /* !CONFIG_SGI_IP27 */ +static unsigned long __init bootmap_bytes(unsigned long pages) +{ + unsigned long bytes = DIV_ROUND_UP(pages, 8); + + return ALIGN(bytes, sizeof(long)); +} + static void __init bootmem_init(void) { unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + bool bootmap_valid = false; int i; /* @@ -384,12 +421,43 @@ static void __init bootmem_init(void) mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); #endif + /* + * check that mapstart doesn't overlap with any of + * memory regions that have been reserved through eg. DTB + */ + bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn); + + bootmap_valid = memory_region_available(PFN_PHYS(mapstart), + bootmap_size); + for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) { + unsigned long mapstart_addr; + + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RESERVED: + mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr + + boot_mem_map.map[i].size); + if (PHYS_PFN(mapstart_addr) < mapstart) + break; + + bootmap_valid = memory_region_available(mapstart_addr, + bootmap_size); + if (bootmap_valid) + mapstart = PHYS_PFN(mapstart_addr); + break; + default: + break; + } + } + + if (!bootmap_valid) + panic("No memory area to place a bootmap bitmap"); + /* * Initialize the boot-time allocator with low memory only. */ - bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart, - min_low_pfn, max_low_pfn); - + if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart, + min_low_pfn, max_low_pfn)) + panic("Unexpected memory size required for bootmap"); for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; From 9a8ef143951f16d37194efcf8d5ba1406be42e7d Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Wed, 23 Nov 2016 14:43:44 +0100 Subject: [PATCH 138/312] MIPS: init: Ensure reserved memory regions are not added to bootmem [ Upstream commit e89ef66d7682f031f026eee6bba03c8c2248d2a9 ] Memories managed through boot_mem_map are generally expected to define non-crossing areas. 
However, if part of a larger memory block is marked as reserved, it would still be added to bootmem allocator as an available block and could end up being overwritten by the allocator. Prevent this by explicitly marking the memory as reserved it if exists in the range used by bootmem allocator. Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/14608/ Signed-off-by: Ralf Baechle Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/setup.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 1acaf0939cb5..4f9f1ae49213 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -506,6 +506,10 @@ static void __init bootmem_init(void) continue; default: /* Not usable memory */ + if (start > min_low_pfn && end < max_low_pfn) + reserve_bootmem(boot_mem_map.map[i].addr, + boot_mem_map.map[i].size, + BOOTMEM_DEFAULT); continue; } From d2d72c0ee4c90769f43066b89b5831209af37bab Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 7 Nov 2016 11:30:41 +0000 Subject: [PATCH 139/312] MIPS: Netlogic: Exclude netlogic,xlp-pic code from XLR builds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 9799270affc53414da96e77e454a5616b39cdab0 ] Code in arch/mips/netlogic/common/irq.c which handles the XLP PIC fails to build in XLR configurations due to cpu_is_xlp9xx not being defined, leading to the following build failure: arch/mips/netlogic/common/irq.c: In function ‘xlp_of_pic_init’: arch/mips/netlogic/common/irq.c:298:2: error: implicit declaration of function ‘cpu_is_xlp9xx’ [-Werror=implicit-function-declaration] if (cpu_is_xlp9xx()) { ^ Although the code was conditional upon CONFIG_OF which is indirectly selected by CONFIG_NLM_XLP_BOARD but not CONFIG_NLM_XLR_BOARD, the failing XLR with CONFIG_OF configuration can be configured manually or by randconfig. Fix the build failure by making the affected XLP PIC code conditional upon CONFIG_CPU_XLP which is used to guard the inclusion of asm/netlogic/xlp-hal/xlp.h that provides the required cpu_is_xlp9xx function. [ralf@linux-mips.org: Fixed up as per Jayachandran's suggestion.] Signed-off-by: Paul Burton Cc: Jayachandran C Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/14524/ Signed-off-by: Ralf Baechle Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/netlogic/common/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index 3660dc67d544..f4961bc9a61d 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -275,7 +275,7 @@ asmlinkage void plat_irq_dispatch(void) do_IRQ(nlm_irq_to_xirq(node, i)); } -#ifdef CONFIG_OF +#ifdef CONFIG_CPU_XLP static const struct irq_domain_ops xlp_pic_irq_domain_ops = { .xlate = irq_domain_xlate_onetwocell, }; @@ -348,7 +348,7 @@ void __init arch_init_irq(void) #if defined(CONFIG_CPU_XLR) nlm_setup_fmn_irq(); #endif -#if defined(CONFIG_OF) +#ifdef CONFIG_CPU_XLP of_irq_init(xlp_pic_irq_ids); #endif } From d9c8d4adb5a7466cf3000c3c7d7136be5376cf22 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 13 Nov 2017 17:55:20 -0500 Subject: [PATCH 140/312] Revert "crypto: xts - Add ECB dependency" This reverts commit 6145171a6bc0abdc3eca7a4b795ede467d2ba569. The commit fixes a bug that was only introduced in 4.10, thus is irrelevant for <=4.9. 
Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 617bf4a7da56..7240821137fd 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -343,7 +343,6 @@ config CRYPTO_XTS select CRYPTO_BLKCIPHER select CRYPTO_MANAGER select CRYPTO_GF128MUL - select CRYPTO_ECB help XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, key size 256, 384 or 512 bits. This implementation currently From f5e303d54ac2d739663643f52af4972f8955734d Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 13 Nov 2017 18:03:32 -0500 Subject: [PATCH 141/312] Revert "uapi: fix linux/rds.h userspace compilation errors" This reverts commit ad50561ba7a664bc581826c9d57d137fcf17bfa5. There was a mixup with the commit message for two upstream commit that have the same subject line. This revert will be followed by the two commits with proper commit messages. Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/rds.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 7af20a136429..0f9265cb2a96 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -35,7 +35,6 @@ #define _LINUX_RDS_H #include -#include /* For __kernel_sockaddr_storage. */ #define RDS_IB_ABI_VERSION 0x301 @@ -224,7 +223,7 @@ struct rds_get_mr_args { }; struct rds_get_mr_for_dest_args { - struct __kernel_sockaddr_storage dest_addr; + struct sockaddr_storage dest_addr; struct rds_iovec vec; uint64_t cookie_addr; uint64_t flags; From 16e7973f8e47235092abad2d2632edefc64ff03f Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Thu, 16 Feb 2017 18:05:45 +0300 Subject: [PATCH 142/312] uapi: fix linux/rds.h userspace compilation error [ Upstream commit 1786dbf3702e33ce3afd2d3dbe630bd04b1d2e58 ] On the kernel side, sockaddr_storage is #define'd to __kernel_sockaddr_storage. Replacing struct sockaddr_storage with struct __kernel_sockaddr_storage defined by fixes the following linux/rds.h userspace compilation error: /usr/include/linux/rds.h:226:26: error: field 'dest_addr' has incomplete type struct sockaddr_storage dest_addr; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/rds.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 0f9265cb2a96..7af20a136429 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -35,6 +35,7 @@ #define _LINUX_RDS_H #include +#include /* For __kernel_sockaddr_storage. */ #define RDS_IB_ABI_VERSION 0x301 @@ -223,7 +224,7 @@ struct rds_get_mr_args { }; struct rds_get_mr_for_dest_args { - struct sockaddr_storage dest_addr; + struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; uint64_t cookie_addr; uint64_t flags; From 834a5d880a54008e62ba501785337b7f51c43b39 Mon Sep 17 00:00:00 2001 From: "Dmitry V. 
Levin" Date: Thu, 16 Feb 2017 18:05:13 +0300 Subject: [PATCH 143/312] uapi: fix linux/rds.h userspace compilation errors [ Upstream commit feb0869d90e51ce8b6fd8a46588465b1b5a26d09 ] Consistently use types from linux/types.h to fix the following linux/rds.h userspace compilation errors: /usr/include/linux/rds.h:106:2: error: unknown type name 'uint8_t' uint8_t name[32]; /usr/include/linux/rds.h:107:2: error: unknown type name 'uint64_t' uint64_t value; /usr/include/linux/rds.h:117:2: error: unknown type name 'uint64_t' uint64_t next_tx_seq; /usr/include/linux/rds.h:118:2: error: unknown type name 'uint64_t' uint64_t next_rx_seq; /usr/include/linux/rds.h:121:2: error: unknown type name 'uint8_t' uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ /usr/include/linux/rds.h:122:2: error: unknown type name 'uint8_t' uint8_t flags; /usr/include/linux/rds.h:129:2: error: unknown type name 'uint64_t' uint64_t seq; /usr/include/linux/rds.h:130:2: error: unknown type name 'uint32_t' uint32_t len; /usr/include/linux/rds.h:135:2: error: unknown type name 'uint8_t' uint8_t flags; /usr/include/linux/rds.h:139:2: error: unknown type name 'uint32_t' uint32_t sndbuf; /usr/include/linux/rds.h:144:2: error: unknown type name 'uint32_t' uint32_t rcvbuf; /usr/include/linux/rds.h:145:2: error: unknown type name 'uint64_t' uint64_t inum; /usr/include/linux/rds.h:153:2: error: unknown type name 'uint64_t' uint64_t hdr_rem; /usr/include/linux/rds.h:154:2: error: unknown type name 'uint64_t' uint64_t data_rem; /usr/include/linux/rds.h:155:2: error: unknown type name 'uint32_t' uint32_t last_sent_nxt; /usr/include/linux/rds.h:156:2: error: unknown type name 'uint32_t' uint32_t last_expected_una; /usr/include/linux/rds.h:157:2: error: unknown type name 'uint32_t' uint32_t last_seen_una; /usr/include/linux/rds.h:164:2: error: unknown type name 'uint8_t' uint8_t src_gid[RDS_IB_GID_LEN]; /usr/include/linux/rds.h:165:2: error: unknown type name 'uint8_t' uint8_t dst_gid[RDS_IB_GID_LEN]; /usr/include/linux/rds.h:167:2: error: unknown type name 'uint32_t' uint32_t max_send_wr; /usr/include/linux/rds.h:168:2: error: unknown type name 'uint32_t' uint32_t max_recv_wr; /usr/include/linux/rds.h:169:2: error: unknown type name 'uint32_t' uint32_t max_send_sge; /usr/include/linux/rds.h:170:2: error: unknown type name 'uint32_t' uint32_t rdma_mr_max; /usr/include/linux/rds.h:171:2: error: unknown type name 'uint32_t' uint32_t rdma_mr_size; /usr/include/linux/rds.h:212:9: error: unknown type name 'uint64_t' typedef uint64_t rds_rdma_cookie_t; /usr/include/linux/rds.h:215:2: error: unknown type name 'uint64_t' uint64_t addr; /usr/include/linux/rds.h:216:2: error: unknown type name 'uint64_t' uint64_t bytes; /usr/include/linux/rds.h:221:2: error: unknown type name 'uint64_t' uint64_t cookie_addr; /usr/include/linux/rds.h:222:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:228:2: error: unknown type name 'uint64_t' uint64_t cookie_addr; /usr/include/linux/rds.h:229:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:234:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:240:2: error: unknown type name 'uint64_t' uint64_t local_vec_addr; /usr/include/linux/rds.h:241:2: error: unknown type name 'uint64_t' uint64_t nr_local; /usr/include/linux/rds.h:242:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:243:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:248:2: error: unknown type 
name 'uint64_t' uint64_t local_addr; /usr/include/linux/rds.h:249:2: error: unknown type name 'uint64_t' uint64_t remote_addr; /usr/include/linux/rds.h:252:4: error: unknown type name 'uint64_t' uint64_t compare; /usr/include/linux/rds.h:253:4: error: unknown type name 'uint64_t' uint64_t swap; /usr/include/linux/rds.h:256:4: error: unknown type name 'uint64_t' uint64_t add; /usr/include/linux/rds.h:259:4: error: unknown type name 'uint64_t' uint64_t compare; /usr/include/linux/rds.h:260:4: error: unknown type name 'uint64_t' uint64_t swap; /usr/include/linux/rds.h:261:4: error: unknown type name 'uint64_t' uint64_t compare_mask; /usr/include/linux/rds.h:262:4: error: unknown type name 'uint64_t' uint64_t swap_mask; /usr/include/linux/rds.h:265:4: error: unknown type name 'uint64_t' uint64_t add; /usr/include/linux/rds.h:266:4: error: unknown type name 'uint64_t' uint64_t nocarry_mask; /usr/include/linux/rds.h:269:2: error: unknown type name 'uint64_t' uint64_t flags; /usr/include/linux/rds.h:270:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:274:2: error: unknown type name 'uint64_t' uint64_t user_token; /usr/include/linux/rds.h:275:2: error: unknown type name 'int32_t' int32_t status; Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/rds.h | 102 +++++++++++++++++++-------------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 7af20a136429..804c9b2bfce3 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -104,8 +104,8 @@ #define RDS_INFO_LAST 10010 struct rds_info_counter { - uint8_t name[32]; - uint64_t value; + __u8 name[32]; + __u64 value; } __attribute__((packed)); #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 @@ -115,35 +115,35 @@ struct rds_info_counter { #define TRANSNAMSIZ 16 struct rds_info_connection { - uint64_t next_tx_seq; - uint64_t next_rx_seq; + __u64 next_tx_seq; + __u64 next_rx_seq; __be32 laddr; __be32 faddr; - uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ - uint8_t flags; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; } __attribute__((packed)); #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 struct rds_info_message { - uint64_t seq; - uint32_t len; + __u64 seq; + __u32 len; __be32 laddr; __be32 faddr; __be16 lport; __be16 fport; - uint8_t flags; + __u8 flags; } __attribute__((packed)); struct rds_info_socket { - uint32_t sndbuf; + __u32 sndbuf; __be32 bound_addr; __be32 connected_addr; __be16 bound_port; __be16 connected_port; - uint32_t rcvbuf; - uint64_t inum; + __u32 rcvbuf; + __u64 inum; } __attribute__((packed)); struct rds_info_tcp_socket { @@ -151,25 +151,25 @@ struct rds_info_tcp_socket { __be16 local_port; __be32 peer_addr; __be16 peer_port; - uint64_t hdr_rem; - uint64_t data_rem; - uint32_t last_sent_nxt; - uint32_t last_expected_una; - uint32_t last_seen_una; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; } __attribute__((packed)); #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; __be32 dst_addr; - uint8_t src_gid[RDS_IB_GID_LEN]; - uint8_t dst_gid[RDS_IB_GID_LEN]; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t rdma_mr_max; - uint32_t rdma_mr_size; + __u32 max_send_wr; + __u32 
max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; }; /* @@ -210,70 +210,70 @@ struct rds_info_rdma_connection { * (so that the application does not have to worry about * alignment). */ -typedef uint64_t rds_rdma_cookie_t; +typedef __u64 rds_rdma_cookie_t; struct rds_iovec { - uint64_t addr; - uint64_t bytes; + __u64 addr; + __u64 bytes; }; struct rds_get_mr_args { struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_get_mr_for_dest_args { struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_free_mr_args { rds_rdma_cookie_t cookie; - uint64_t flags; + __u64 flags; }; struct rds_rdma_args { rds_rdma_cookie_t cookie; struct rds_iovec remote_vec; - uint64_t local_vec_addr; - uint64_t nr_local; - uint64_t flags; - uint64_t user_token; + __u64 local_vec_addr; + __u64 nr_local; + __u64 flags; + __u64 user_token; }; struct rds_atomic_args { rds_rdma_cookie_t cookie; - uint64_t local_addr; - uint64_t remote_addr; + __u64 local_addr; + __u64 remote_addr; union { struct { - uint64_t compare; - uint64_t swap; + __u64 compare; + __u64 swap; } cswp; struct { - uint64_t add; + __u64 add; } fadd; struct { - uint64_t compare; - uint64_t swap; - uint64_t compare_mask; - uint64_t swap_mask; + __u64 compare; + __u64 swap; + __u64 compare_mask; + __u64 swap_mask; } m_cswp; struct { - uint64_t add; - uint64_t nocarry_mask; + __u64 add; + __u64 nocarry_mask; } m_fadd; }; - uint64_t flags; - uint64_t user_token; + __u64 flags; + __u64 user_token; }; struct rds_rdma_notify { - uint64_t user_token; - int32_t status; + __u64 user_token; + __s32 status; }; #define RDS_RDMA_SUCCESS 0 From 965003b311e0ed764eb6c0d6f86fe14a57d43286 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 8 Nov 2017 12:23:17 -0500 Subject: [PATCH 144/312] USB: usbfs: compute urb->actual_length for isochronous commit 2ef47001b3ee3ded579b7532ebdcf8680e4d8c54 upstream. The USB kerneldoc says that the actual_length field "is read in non-iso completion functions", but the usbfs driver uses it for all URB types in processcompl(). Since not all of the host controller drivers set actual_length for isochronous URBs, programs using usbfs with some host controllers don't work properly. For example, Minas reports that a USB camera controlled by libusb doesn't work properly with a dwc2 controller. It doesn't seem worthwhile to change the HCDs and the documentation, since the in-kernel USB class drivers evidently don't rely on actual_length for isochronous transfers. The easiest solution is for usbfs to calculate the actual_length value for itself, by adding up the lengths of the individual packets in an isochronous transfer. 
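As a rough illustration of the summation just described (an editorial sketch, not part of the patch: the helper name and the trimmed-down structures below are invented, while number_of_packets, iso_frame_desc[] and actual_length mirror the struct urb fields used in the hunk that follows):

struct iso_packet {
	unsigned int actual_length;	/* bytes actually transferred in this packet */
};

struct iso_urb {
	int number_of_packets;
	unsigned int actual_length;	/* recomputed below for isochronous transfers */
	struct iso_packet iso_frame_desc[64];
};

/* Sum the per-packet results into the overall length, as usbfs now does. */
static void sum_iso_actual_length(struct iso_urb *urb)
{
	int i;

	if (urb->number_of_packets <= 0)
		return;	/* not isochronous: keep whatever the HCD reported */

	urb->actual_length = 0;
	for (i = 0; i < urb->number_of_packets; i++)
		urb->actual_length += urb->iso_frame_desc[i].actual_length;
}

With hypothetical per-packet lengths of 1024, 1024 and 512 bytes, userspace would then see actual_length = 2560 regardless of whether the host controller driver filled in the top-level field itself.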
Signed-off-by: Alan Stern CC: Minas Harutyunyan Reported-and-tested-by: wlf Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 873ba02d59e6..f4c3a37e00ba 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1653,6 +1653,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) return 0; } +static void compute_isochronous_actual_length(struct urb *urb) +{ + unsigned int i; + + if (urb->number_of_packets > 0) { + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } +} + static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; @@ -1660,6 +1672,7 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; @@ -1829,6 +1842,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; From 302dd596822ae3a276f7bdaf5c8e065fe8cfc570 Mon Sep 17 00:00:00 2001 From: Bernhard Rosenkraenzer Date: Fri, 3 Nov 2017 16:46:02 +0100 Subject: [PATCH 145/312] USB: Add delay-init quirk for Corsair K70 LUX keyboards commit a0fea6027f19c62727315aba1a7fae75a9caa842 upstream. Without this patch, K70 LUX keyboards don't work, saying usb 3-3: unable to read config index 0 descriptor/all usb 3-3: can't read configurations, error -110 usb usb3-port3: unable to enumerate USB device Signed-off-by: Bernhard Rosenkraenzer Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a6aaf2f193a4..37c418e581fb 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -221,6 +221,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, From 5cd938508c815b684f033c291d59158e2576adc1 Mon Sep 17 00:00:00 2001 From: Douglas Fischer Date: Sun, 29 Oct 2017 23:29:55 +0000 Subject: [PATCH 146/312] USB: serial: qcserial: add pid/vid for Sierra Wireless EM7355 fw update commit 771394a54148f18926ca86414e51c69eda27d0cd upstream. Add USB PID/VID for Sierra Wireless EM7355 LTE modem QDL firmware update mode. 
Signed-off-by: Douglas Fischer Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/qcserial.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index e1c1e329c877..4516291df1b8 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ + {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ From 8b36209e93df92903f2d37229abc9e730668a6a2 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 11 Oct 2017 14:02:57 +0200 Subject: [PATCH 147/312] USB: serial: garmin_gps: fix I/O after failed probe and remove commit 19a565d9af6e0d828bd0d521d3bafd5017f4ce52 upstream. Make sure to stop any submitted interrupt and bulk-out URBs before returning after failed probe and when the port is being unbound to avoid later NULL-pointer dereferences in the completion callbacks. Also fix up the related and broken I/O cancellation on failed open and on close. (Note that port->write_urb was never submitted.) Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/garmin_gps.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 37d0e8cc7af6..8ac161cbfba8 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -138,6 +138,7 @@ struct garmin_data { __u8 privpkt[4*6]; spinlock_t lock; struct list_head pktlist; + struct usb_anchor write_urbs; }; @@ -906,13 +907,19 @@ static int garmin_init_session(struct usb_serial_port *port) sizeof(GARMIN_START_SESSION_REQ), 0); if (status < 0) - break; + goto err_kill_urbs; } if (status > 0) status = 0; } + return status; + +err_kill_urbs: + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); + usb_kill_urb(port->interrupt_in_urb); + return status; } @@ -931,7 +938,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ - usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); if (garmin_data_p->state == STATE_RESET) @@ -954,7 +960,7 @@ static void garmin_close(struct usb_serial_port *port) /* shutdown our urbs */ usb_kill_urb(port->read_urb); - usb_kill_urb(port->write_urb); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) @@ -1038,12 +1044,14 @@ static int garmin_write_bulk(struct usb_serial_port *port, } /* send it down the pipe */ + usb_anchor_urb(urb, &garmin_data_p->write_urbs); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + usb_unanchor_urb(urb); kfree(buffer); } @@ -1402,6 +1410,7 @@ static int garmin_port_probe(struct usb_serial_port *port) garmin_data_p->state = 0; garmin_data_p->flags = 0; garmin_data_p->count = 0; + 
init_usb_anchor(&garmin_data_p->write_urbs); usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); @@ -1414,6 +1423,7 @@ static int garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); From f119ff8e5b6b9123ce8f22834b8d0eb3fab45d72 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 11 Oct 2017 14:02:58 +0200 Subject: [PATCH 148/312] USB: serial: garmin_gps: fix memory leak on probe errors commit 74d471b598444b7f2d964930f7234779c80960a0 upstream. Make sure to free the port private data before returning after a failed probe attempt. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/garmin_gps.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 8ac161cbfba8..2220c1b9df10 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1414,6 +1414,12 @@ static int garmin_port_probe(struct usb_serial_port *port) usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); + if (status) + goto err_free; + + return 0; +err_free: + kfree(garmin_data_p); return status; } From 26d6298789e695c9f627ce49a7bbd2286405798a Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 21 Nov 2017 09:21:23 +0100 Subject: [PATCH 149/312] Linux 4.4.100 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0b5d9e20eee2..91dd7832f499 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 99 +SUBLEVEL = 100 EXTRAVERSION = NAME = Blurry Fish Butt From 0c1282c7f046563b43c69be360b4e05d4edd34fa Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Nov 2017 12:30:25 -0700 Subject: [PATCH 150/312] tcp: do not mangle skb->cb[] in tcp_make_synack() [ Upstream commit 3b11775033dc87c3d161996c54507b15ba26414a ] Christoph Paasch sent a patch to address the following issue : tcp_make_synack() is leaving some TCP private info in skb->cb[], then send the packet by other means than tcp_transmit_skb() tcp_transmit_skb() makes sure to clear skb->cb[] to not confuse IPv4/IPV6 stacks, but we have no such cleanup for SYNACK. tcp_make_synack() should not use tcp_init_nondata_skb() : tcp_init_nondata_skb() really should be limited to skbs put in write/rtx queues (the ones that are only sent via tcp_transmit_skb()) This patch fixes the issue and should even save few cpu cycles ;) Fixes: 971f10eca186 ("tcp: better TCP_SKB_CB layout to reduce cache line misses") Signed-off-by: Eric Dumazet Reported-by: Christoph Paasch Reviewed-by: Christoph Paasch Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_output.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 64c7ce847584..39c2919fe0d3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3018,13 +3018,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, tcp_ecn_make_synack(req, th); th->source = htons(ireq->ir_num); th->dest = ireq->ir_rmt_port; - /* Setting of flags are superfluous here for callers (and ECE is - * not even correctly set) - */ - tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, - TCPHDR_SYN | TCPHDR_ACK); - - th->seq = htonl(TCP_SKB_CB(skb)->seq); + skb->ip_summed = CHECKSUM_PARTIAL; + th->seq = htonl(tcp_rsk(req)->snt_isn); /* XXX data is queued and acked as is. No buffer/window check */ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); From 001e9cbe1daee3544c804cc0f0196818f8e38dc3 Mon Sep 17 00:00:00 2001 From: Ye Yin Date: Thu, 26 Oct 2017 16:57:05 +0800 Subject: [PATCH 151/312] netfilter/ipvs: clear ipvs_property flag when SKB net namespace changed [ Upstream commit 2b5ec1a5f9738ee7bf8f5ec0526e75e00362c48f ] When run ipvs in two different network namespace at the same host, and one ipvs transport network traffic to the other network namespace ipvs. 'ipvs_property' flag will make the second ipvs take no effect. So we should clear 'ipvs_property' when SKB network namespace changed. Fixes: 621e84d6f373 ("dev: introduce skb_scrub_packet()") Signed-off-by: Ye Yin Signed-off-by: Wei Zhou Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/skbuff.h | 7 +++++++ net/core/skbuff.c | 1 + 2 files changed, 8 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3f61c647fc5c..b5421f6f155a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3400,6 +3400,13 @@ static inline void nf_reset_trace(struct sk_buff *skb) #endif } +static inline void ipvs_reset(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_IP_VS) + skb->ipvs_property = 0; +#endif +} + /* Note: This doesn't put any conntrack and bridge info in dst. */ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 73dfd7729bc9..d33609c2f276 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4229,6 +4229,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) if (!xnet) return; + ipvs_reset(skb); skb_orphan(skb); skb->mark = 0; } From 3bb6245e14ea56806ee6a3860c2380e96647593d Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 6 Nov 2017 09:01:57 +0800 Subject: [PATCH 152/312] bonding: discard lowest hash bit for 802.3ad layer3+4 [ Upstream commit b5f862180d7011d9575d0499fa37f0f25b423b12 ] After commit 07f4c90062f8 ("tcp/dccp: try to not exhaust ip_local_port_range in connect()"), we will try to use even ports for connect(). Then if an application (seen clearly with iperf) opens multiple streams to the same destination IP and port, each stream will be given an even source port. So the bonding driver's simple xmit_hash_policy based on layer3+4 addressing will always hash all these streams to the same interface. And the total throughput will limited to a single slave. Change the tcp code will impact the whole tcp behavior, only for bonding usage. Paolo Abeni suggested fix this by changing the bonding code only, which should be more reasonable, and less impact. 
Fix this by discarding the lowest hash bit because it contains little entropy. After the fix we can re-balance between slaves. Signed-off-by: Paolo Abeni Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/bonding/bond_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5dca77e0ffed..2cb34b0f3856 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3166,7 +3166,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) hash ^= (hash >> 16); hash ^= (hash >> 8); - return hash; + return hash >> 1; } /*-------------------------- Device entry points ----------------------------*/ From ef206ea779a9a356a16246a5fe3e875456dac701 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 9 Nov 2017 16:43:13 -0800 Subject: [PATCH 153/312] vlan: fix a use-after-free in vlan_device_event() [ Upstream commit 052d41c01b3a2e3371d66de569717353af489d63 ] After refcnt reaches zero, vlan_vid_del() could free dev->vlan_info via RCU: RCU_INIT_POINTER(dev->vlan_info, NULL); call_rcu(&vlan_info->rcu, vlan_info_rcu_free); However, the pointer 'grp' still points to that memory since it is set before vlan_vid_del(): vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) goto out; grp = &vlan_info->grp; Depends on when that RCU callback is scheduled, we could trigger a use-after-free in vlan_group_for_each_dev() right following this vlan_vid_del(). Fix it by moving vlan_vid_del() before setting grp. This is also symmetric to the vlan_vid_add() we call in vlan_device_event(). Reported-by: Fengguang Wu Fixes: efc73f4bbc23 ("net: Fix memory leak - vlan_info struct") Cc: Alexander Duyck Cc: Linus Torvalds Cc: Girish Moodalbail Signed-off-by: Cong Wang Reviewed-by: Girish Moodalbail Tested-by: Fengguang Wu Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/8021q/vlan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 5e4199d5a388..01abb6431fd9 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, dev->name); vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } + if (event == NETDEV_DOWN && + (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + vlan_vid_del(dev, htons(ETH_P_8021Q), 0); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) @@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, struct net_device *tmp; LIST_HEAD(close_list); - if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) - vlan_vid_del(dev, htons(ETH_P_8021Q), 0); - /* Put all VLANs for this dev in the down state too. */ vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; From 4cfc0b41af031785a37e12d8de6bc02a61ff8fda Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 9 Nov 2017 13:04:44 +0900 Subject: [PATCH 154/312] af_netlink: ensure that NLMSG_DONE never fails in dumps [ Upstream commit 0642840b8bb008528dbdf929cec9f65ac4231ad0 ] The way people generally use netlink_dump is that they fill in the skb as much as possible, breaking when nla_put returns an error. Then, they get called again and start filling out the next skb, and again, and so forth. 
The mechanism at work here is the ability for the iterative dumping function to detect when the skb is filled up and not fill it past the brim, waiting for a fresh skb for the rest of the data. However, if the attributes are small and nicely packed, it is possible that a dump callback function successfully fills in attributes until the skb is of size 4080 (libmnl's default page-sized receive buffer size). The dump function completes, satisfied, and then, if it happens to be that this is actually the last skb, and no further ones are to be sent, then netlink_dump will add on the NLMSG_DONE part: nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); It is very important that netlink_dump does this, of course. However, in this example, that call to nlmsg_put_answer will fail, because the previous filling by the dump function did not leave it enough room. And how could it possibly have done so? All of the nla_put variety of functions simply check to see if the skb has enough tailroom, independent of the context it is in. In order to keep the important assumptions of all netlink dump users, it is therefore important to give them an skb that has this end part of the tail already reserved, so that the call to nlmsg_put_answer does not fail. Otherwise, library authors are forced to find some bizarre sized receive buffer that has a large modulo relative to the common sizes of messages received, which is ugly and buggy. This patch thus saves the NLMSG_DONE for an additional message, for the case that things are dangerously close to the brim. This requires keeping track of the errno from ->dump() across calls. Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/netlink/af_netlink.c | 17 +++++++++++------ net/netlink/af_netlink.h | 1 + 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index acfb16fdcd55..9ecdd61c6463 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2077,7 +2077,7 @@ static int netlink_dump(struct sock *sk) struct sk_buff *skb = NULL; struct nlmsghdr *nlh; struct module *module; - int len, err = -ENOBUFS; + int err = -ENOBUFS; int alloc_min_size; int alloc_size; @@ -2125,9 +2125,11 @@ static int netlink_dump(struct sock *sk) skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); - len = cb->dump(skb, cb); + if (nlk->dump_done_errno > 0) + nlk->dump_done_errno = cb->dump(skb, cb); - if (len > 0) { + if (nlk->dump_done_errno > 0 || + skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) { mutex_unlock(nlk->cb_mutex); if (sk_filter(sk, skb)) @@ -2137,13 +2139,15 @@ static int netlink_dump(struct sock *sk) return 0; } - nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); - if (!nlh) + nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, + sizeof(nlk->dump_done_errno), NLM_F_MULTI); + if (WARN_ON(!nlh)) goto errout_skb; nl_dump_check_consistent(cb, nlh); - memcpy(nlmsg_data(nlh), &len, sizeof(len)); + memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, + sizeof(nlk->dump_done_errno)); if (sk_filter(sk, skb)) kfree_skb(skb); @@ -2208,6 +2212,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->skb = skb; nlk->cb_running = true; + nlk->dump_done_errno = INT_MAX; mutex_unlock(nlk->cb_mutex); diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index ea4600aea6b0..d987696c0eb4 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -38,6 
+38,7 @@ struct netlink_sock { wait_queue_head_t wait; bool bound; bool cb_running; + int dump_done_errno; struct netlink_callback cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; From 2a0e60907e54dad75e9b3568d02bec11d6e74f6b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 17 Oct 2017 23:26:10 +0800 Subject: [PATCH 155/312] sctp: do not peel off an assoc from one netns to another one [ Upstream commit df80cd9b28b9ebaa284a41df611dbf3a2d05ca74 ] Now when peeling off an association to the sock in another netns, all transports in this assoc are not to be rehashed and keep use the old key in hashtable. As a transport uses sk->net as the hash key to insert into hashtable, it would miss removing these transports from hashtable due to the new netns when closing the sock and all transports are being freeed, then later an use-after-free issue could be caused when looking up an asoc and dereferencing those transports. This is a very old issue since very beginning, ChunYu found it with syzkaller fuzz testing with this series: socket$inet6_sctp() bind$inet6() sendto$inet6() unshare(0x40000000) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST() getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF() This patch is to block this call when peeling one assoc off from one netns to another one, so that the netns of all transport would not go out-sync with the key in hashtable. Note that this patch didn't fix it by rehashing transports, as it's difficult to handle the situation when the tuple is already in use in the new netns. Besides, no one would like to peel off one assoc to another netns, considering ipaddrs, ifaces, etc. are usually different. Reported-by: ChunYu Wang Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 7f0f689b8d2b..272edd7748a0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4453,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) struct socket *sock; int err = 0; + /* Do not peel off from one netns to another one. */ + if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) + return -EINVAL; + /* Do not peel off from one netns to another one. */ if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) return -EINVAL; From ae93cefb94300fd67c48e68f08c943f2f1f363c8 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 16 Nov 2017 11:07:15 +0800 Subject: [PATCH 156/312] fealnx: Fix building error on MIPS [ Upstream commit cc54c1d32e6a4bb3f116721abf900513173e4d02 ] This patch try to fix the building error on MIPS. The reason is MIPS has already defined the LONG macro, which conflicts with the LONG enum in drivers/net/ethernet/fealnx.c. Signed-off-by: Huacai Chen Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/fealnx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index b1b9ebafb354..a3b2e23921bf 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1633,7 +1633,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. */ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; From 51b8aea7abdeb522e5450c246912b6f4a491e1c8 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 15 Nov 2017 22:17:48 -0600 Subject: [PATCH 157/312] net/sctp: Always set scope_id in sctp_inet6_skb_msgname [ Upstream commit 7c8a61d9ee1df0fb4747879fa67a99614eb62fec ] Alexandar Potapenko while testing the kernel with KMSAN and syzkaller discovered that in some configurations sctp would leak 4 bytes of kernel stack. Working with his reproducer I discovered that those 4 bytes that are leaked is the scope id of an ipv6 address returned by recvmsg. With a little code inspection and a shrewd guess I discovered that sctp_inet6_skb_msgname only initializes the scope_id field for link local ipv6 addresses to the interface index the link local address pertains to instead of initializing the scope_id field for all ipv6 addresses. That is almost reasonable as scope_id's are meaniningful only for link local addresses. Set the scope_id in all other cases to 0 which is not a valid interface index to make it clear there is nothing useful in the scope_id field. There should be no danger of breaking userspace as the stack leak guaranteed that previously meaningless random data was being returned. Fixes: 372f525b495c ("SCTP: Resync with LKSCTP tree.") History-tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reported-by: Alexander Potapenko Tested-by: Alexander Potapenko Signed-off-by: "Eric W. Biederman" Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/ipv6.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e33e9bd4ed5a..8a61ccc37e12 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -806,6 +806,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); addr->v6.sin6_scope_id = ev->iif; + } else { + addr->v6.sin6_scope_id = 0; } } From a9100b6f1a8af4251107a2d67e98854e8a61cd4b Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Tue, 7 Nov 2017 11:37:07 +0100 Subject: [PATCH 158/312] ima: do not update security.ima if appraisal status is not INTEGRITY_PASS commit 020aae3ee58c1af0e7ffc4e2cc9fe4dc630338cb upstream. Commit b65a9cfc2c38 ("Untangling ima mess, part 2: deal with counters") moved the call of ima_file_check() from may_open() to do_filp_open() at a point where the file descriptor is already opened. 
This breaks the assumption made by IMA that file descriptors being closed belong to files whose access was granted by ima_file_check(). The consequence is that security.ima and security.evm are updated with good values, regardless of the current appraisal status. For example, if a file does not have security.ima, IMA will create it after opening the file for writing, even if access is denied. Access to the file will be allowed afterwards. Avoid this issue by checking the appraisal status before updating security.ima. Signed-off-by: Roberto Sassu Signed-off-by: Mimi Zohar Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/integrity/ima/ima_appraise.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 9ce9d5003dcc..19014293f927 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -297,6 +297,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) if (iint->flags & IMA_DIGSIG) return; + if (iint->ima_file_status != INTEGRITY_PASS) + return; + rc = ima_collect_measurement(iint, file, NULL, NULL); if (rc < 0) return; From 1df403abfa9e1714161420412876beb7b23fb162 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 21 Oct 2017 10:50:18 +0200 Subject: [PATCH 159/312] serial: omap: Fix EFR write on RTS deassertion commit 2a71de2f7366fb1aec632116d0549ec56d6a3940 upstream. Commit 348f9bb31c56 ("serial: omap: Fix RTS handling") sought to enable auto RTS upon manual RTS assertion and disable it on deassertion. However it seems the latter was done incorrectly, it clears all bits in the Extended Features Register *except* auto RTS. Fixes: 348f9bb31c56 ("serial: omap: Fix RTS handling") Cc: Peter Hurley Signed-off-by: Lukas Wunner Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/omap-serial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index de1c143b475f..21fc9b3a27cf 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) up->efr |= UART_EFR_RTS; else - up->efr &= UART_EFR_RTS; + up->efr &= ~UART_EFR_RTS; serial_out(up, UART_EFR, up->efr); serial_out(up, UART_LCR, lcr); From 4310b6bfa8e0f3b16a3919db709136d87c45574c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 13 Jun 2016 11:15:14 +0100 Subject: [PATCH 160/312] arm64: fix dump_instr when PAN and UAO are in use commit c5cea06be060f38e5400d796e61cfc8c36e52924 upstream. If the kernel is set to show unhandled signals, and a user task does not handle a SIGILL as a result of an instruction abort, we will attempt to log the offending instruction with dump_instr before killing the task. We use dump_instr to log the encoding of the offending userspace instruction. However, dump_instr is also used to dump instructions from kernel space, and internally always switches to KERNEL_DS before dumping the instruction with get_user. When both PAN and UAO are in use, reading a user instruction via get_user while in KERNEL_DS will result in a permission fault, which leads to an Oops. As we have regs corresponding to the context of the original instruction abort, we can inspect this and only flip to KERNEL_DS if the original abort was taken from the kernel, avoiding this issue. 
At the same time, remove the redundant (and incorrect) comments regarding the order dump_mem and dump_instr are called in. Cc: Catalin Marinas Cc: James Morse Cc: Robin Murphy Signed-off-by: Mark Rutland Reported-by: Vladimir Murzin Tested-by: Vladimir Murzin Fixes: 57f4959bad0a154a ("arm64: kernel: Add support for User Access Override") Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/traps.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 210826d5bba5..9119722eb347 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, /* * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. + * to safely read from kernel space. */ fs = get_fs(); set_fs(KERNEL_DS); @@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where) print_ip_sym(where); } -static void dump_instr(const char *lvl, struct pt_regs *regs) +static void __dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); - mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. - */ - fs = get_fs(); - set_fs(KERNEL_DS); - for (i = -4; i < 1; i++) { unsigned int val, bad; @@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) } } printk("%sCode: %s\n", lvl, str); +} - set_fs(fs); +static void dump_instr(const char *lvl, struct pt_regs *regs) +{ + if (!user_mode(regs)) { + mm_segment_t fs = get_fs(); + set_fs(KERNEL_DS); + __dump_instr(lvl, regs); + set_fs(fs); + } else { + __dump_instr(lvl, regs); + } } static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) From 8c325770546a3ae6ec47e4fee68890eb5dbb8939 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 16 Nov 2017 16:57:10 -0700 Subject: [PATCH 161/312] nvme: Fix memory order on async queue deletion This patch is a fix specific to the 3.19 - 4.4 kernels. The 4.5 kernel inadvertently fixed this bug differently (db3cbfff5bcc0), but is not a stable candidate due it being a complicated re-write of the entire feature. This patch fixes a potential timing bug with nvme's asynchronous queue deletion, which causes an allocated request to be accidentally released due to the ordering of the shared completion context among the sq/cq pair. The completion context saves the request that issued the queue deletion. If the submission side deletion happens to reset the active request, the completion side will release the wrong request tag back into the pool of available tags. This means the driver will create multiple commands with the same tag, corrupting the queue context. The error is observable in the kernel logs like: "nvme XX:YY:ZZ completed id XX twice on qid:0" In this particular case, this message occurs because the queue is corrupted. 
The following timing sequence demonstrates the error: CPU A CPU B ----------------------- ----------------------------- nvme_irq nvme_process_cq async_completion queue_kthread_work -----------> nvme_del_sq_work_handler nvme_delete_cq adapter_async_del_queue nvme_submit_admin_async_cmd cmdinfo->req = req; blk_mq_free_request(cmdinfo->req); <-- wrong request!!! This patch fixes the bug by releasing the request in the completion side prior to waking the submission thread, such that that thread can't muck with the shared completion context. Fixes: a4aea5623d4a5 ("NVMe: Convert to blk-mq") Signed-off-by: Keith Busch Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 669edbd47602..d6ceb8b91cd6 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -350,8 +350,8 @@ static void async_completion(struct nvme_queue *nvmeq, void *ctx, struct async_cmd_info *cmdinfo = ctx; cmdinfo->result = le32_to_cpup(&cqe->result); cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; - queue_kthread_work(cmdinfo->worker, &cmdinfo->work); blk_mq_free_request(cmdinfo->req); + queue_kthread_work(cmdinfo->worker, &cmdinfo->work); } static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq, From c4baa4a5870cb02f713def1620052bfca7a82bbb Mon Sep 17 00:00:00 2001 From: alex chen Date: Wed, 15 Nov 2017 17:31:40 -0800 Subject: [PATCH 162/312] ocfs2: should wait dio before inode lock in ocfs2_setattr() commit 28f5a8a7c033cbf3e32277f4cc9c6afd74f05300 upstream. we should wait dio requests to finish before inode lock in ocfs2_setattr(), otherwise the following deadlock will happen: process 1 process 2 process 3 truncate file 'A' end_io of writing file 'A' receiving the bast messages ocfs2_setattr ocfs2_inode_lock_tracker ocfs2_inode_lock_full inode_dio_wait __inode_dio_wait -->waiting for all dio requests finish dlm_proxy_ast_handler dlm_do_local_bast ocfs2_blocking_ast ocfs2_generic_handle_bast set OCFS2_LOCK_BLOCKED flag dio_end_io dio_bio_end_aio dio_complete ocfs2_dio_end_io ocfs2_dio_end_io_write ocfs2_inode_lock __ocfs2_cluster_lock ocfs2_wait_for_mask -->waiting for OCFS2_LOCK_BLOCKED flag to be cleared, that is waiting for 'process 1' unlocking the inode lock inode_dio_end -->here dec the i_dio_count, but will never be called, so a deadlock happened. 
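In short, the ordering change can be pictured as follows (an editorial summary of the hunk below, with error handling and the non-size-change path omitted; ocfs2_inode_lock() stands in for the ocfs2_inode_lock_tracker() path shown in the diagram above):

	before: ocfs2_rw_lock(inode, 1) -> ocfs2_inode_lock(...) -> inode_dio_wait(inode)
	after:  inode_dio_wait(inode) -> ocfs2_rw_lock(inode, 1) -> ocfs2_inode_lock(...)

Waiting for direct I/O while no cluster locks are held means ocfs2_dio_end_io_write() can still take the inode lock, drop i_dio_count and let the truncate proceed.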
Link: http://lkml.kernel.org/r/59F81636.70508@huawei.com Signed-off-by: Alex Chen Reviewed-by: Jun Piao Reviewed-by: Joseph Qi Acked-by: Changwei Ge Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/file.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 1d738723a41a..501ecc4a1ac4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1166,6 +1166,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { + /* + * Here we should wait dio to finish before inode lock + * to avoid a deadlock between ocfs2_setattr() and + * ocfs2_dio_end_io_write() + */ + inode_dio_wait(inode); + status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); @@ -1186,8 +1193,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) if (status) goto bail_unlock; - inode_dio_wait(inode); - if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, From 4ecf752738acf14848546b0c7d2e7f3753f1ed3f Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Sat, 29 Jul 2017 21:14:55 -0500 Subject: [PATCH 163/312] ipmi: fix unsigned long underflow commit 392a17b10ec4320d3c0e96e2a23ebaad1123b989 upstream. When I set the timeout to a specific value such as 500ms, the timeout event will not happen in time due to the overflow in function check_msg_timeout: ... ent->timeout -= timeout_period; if (ent->timeout > 0) return; ... The type of timeout_period is long, but ent->timeout is unsigned long. This patch makes the type consistent. Reported-by: Weilong Chen Signed-off-by: Corey Minyard Tested-by: Weilong Chen Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_msghandler.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 25372dc381d4..5cb5e8ff0224 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4029,7 +4029,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, - struct list_head *timeouts, long timeout_period, + struct list_head *timeouts, + unsigned long timeout_period, int slot, unsigned long *flags, unsigned int *waiting_msgs) { @@ -4042,8 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, if (!ent->inuse) return; - ent->timeout -= timeout_period; - if (ent->timeout > 0) { + if (timeout_period < ent->timeout) { + ent->timeout -= timeout_period; (*waiting_msgs)++; return; } @@ -4109,7 +4110,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; From c1b3703b643f0abce65f10cc7716c9ddc9c0decb Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 15 Nov 2017 17:38:41 -0800 Subject: [PATCH 164/312] mm/page_alloc.c: broken deferred calculation commit d135e5750205a21a212a19dbb05aeb339e2cbea7 upstream. In reset_deferred_meminit() we determine number of pages that must not be deferred. 
We initialize pages for at least 2G of memory, but also pages for reserved memory in this node. The reserved memory is determined in this function: memblock_reserved_memory_within(), which operates over physical addresses, and returns size in bytes. However, reset_deferred_meminit() assumes that that this function operates with pfns, and returns page count. The result is that in the best case machine boots slower than expected due to initializing more pages than needed in single thread, and in the worst case panics because fewer than needed pages are initialized early. Link: http://lkml.kernel.org/r/20171021011707.15191-1-pasha.tatashin@oracle.com Fixes: 864b9a393dcb ("mm: consider memblock reservations for deferred memory initialization sizing") Signed-off-by: Pavel Tatashin Acked-by: Michal Hocko Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/mmzone.h | 3 ++- mm/page_alloc.c | 27 ++++++++++++++++++--------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5b609a3ce3d7..ff88d6189411 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -688,7 +688,8 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. */ unsigned long first_deferred_pfn; - unsigned long static_init_size; + /* Number of non-deferred pages */ + unsigned long static_init_pgcnt; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ } pg_data_t; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6b5421ae86c6..35ce37b187f9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -267,28 +267,37 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + +/* + * Determine how many pages need to be initialized durig early boot + * (non-deferred initialization). + * The value of first_deferred_pfn will be set later, once non-deferred pages + * are initialized, but for now set it ULONG_MAX. + */ static inline void reset_deferred_meminit(pg_data_t *pgdat) { - unsigned long max_initialise; - unsigned long reserved_lowmem; + phys_addr_t start_addr, end_addr; + unsigned long max_pgcnt; + unsigned long reserved; /* * Initialise at least 2G of a node but also take into account that * two large system hashes that can take up 1GB for 0.25TB/node. */ - max_initialise = max(2UL << (30 - PAGE_SHIFT), - (pgdat->node_spanned_pages >> 8)); + max_pgcnt = max(2UL << (30 - PAGE_SHIFT), + (pgdat->node_spanned_pages >> 8)); /* * Compensate the all the memblock reservations (e.g. crash kernel) * from the initial estimation to make sure we will initialize enough * memory to boot. 
*/ - reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, - pgdat->node_start_pfn + max_initialise); - max_initialise += reserved_lowmem; + start_addr = PFN_PHYS(pgdat->node_start_pfn); + end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt); + reserved = memblock_reserved_memory_within(start_addr, end_addr); + max_pgcnt += PHYS_PFN(reserved); - pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); + pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages); pgdat->first_deferred_pfn = ULONG_MAX; } @@ -324,7 +333,7 @@ static inline bool update_defer_init(pg_data_t *pgdat, return true; /* Initialise at least 2G of the highest zone */ (*nr_initialised)++; - if ((*nr_initialised > pgdat->static_init_size) && + if ((*nr_initialised > pgdat->static_init_pgcnt) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { pgdat->first_deferred_pfn = pfn; return false; From 7b7a1c39e8394d4232714093882b733b3e435c67 Mon Sep 17 00:00:00 2001 From: Jan Harkes Date: Wed, 27 Sep 2017 15:52:12 -0400 Subject: [PATCH 165/312] coda: fix 'kernel memory exposure attempt' in fsync commit d337b66a4c52c7b04eec661d86c2ef6e168965a2 upstream. When an application called fsync on a file in Coda a small request with just the file identifier was allocated, but the declared length was set to the size of union of all possible upcall requests. This bug has been around for a very long time and is now caught by the extra checking in usercopy that was introduced in Linux-4.8. The exposure happens when the Coda cache manager process reads the fsync upcall request at which point it is killed. As a result there is nobody servicing any further upcalls, trapping any processes that try to access the mounted Coda filesystem. Signed-off-by: Jan Harkes Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/coda/upcall.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index f6c6c8adbc01..7289f0a7670b 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid) UPARG(CODA_FSYNC); inp->coda_fsync.VFid = *fid; - error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs), - &outsize, inp); + error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); CODA_FREE(inp, insize); return error; From e34e744f70a68f8f16f945a286802898c56a8b5a Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Fri, 3 Jun 2016 14:55:38 -0700 Subject: [PATCH 166/312] mm: check the return value of lookup_page_ext for all call sites commit f86e4271978bd93db466d6a95dad4b0fdcdb04f6 upstream. Per the discussion with Joonsoo Kim [1], we need check the return value of lookup_page_ext() for all call sites since it might return NULL in some cases, although it is unlikely, i.e. memory hotplug. Tested with ltp with "page_owner=0". 
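Condensed from the hunks below, every caller is converted to the same defensive shape before page_ext is dereferenced (the early-return value differs per caller — void, false, 0 or continue — so this is the pattern, not a literal hunk):

	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;

	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);

Note that the mm/debug-pagealloc.c hunks below apparently carry the inverted test "if (page_ext)" instead, which is what the later fixup in PATCH 170 corrects.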
[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE [akpm@linux-foundation.org: fix build-breaking typos] [arnd@arndb.de: fix build problems from lookup_page_ext] Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org Signed-off-by: Yang Shi Signed-off-by: Arnd Bergmann Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Michal Hocko Signed-off-by: Greg Kroah-Hartman --- include/linux/page_idle.h | 43 ++++++++++++++++++++++++++++++++------- mm/debug-pagealloc.c | 6 ++++++ mm/page_alloc.c | 6 ++++++ mm/page_owner.c | 16 +++++++++++++++ mm/vmstat.c | 2 ++ 5 files changed, 66 insertions(+), 7 deletions(-) diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { - return test_and_clear_bit(PAGE_EXT_YOUNG, - &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c index 5bf5906ce13b..fe1c61f7cf26 100644 --- a/mm/debug-pagealloc.c +++ b/mm/debug-pagealloc.c @@ -34,6 +34,8 @@ static inline void set_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); + if (page_ext) + return; __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -42,6 +44,8 @@ static inline void clear_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); + if (page_ext) + return; __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -50,6 +54,8 @@ static inline bool page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); + if (page_ext) + return false; return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 
35ce37b187f9..3c70f03d91ec 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -569,6 +569,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); INIT_LIST_HEAD(&page->lru); @@ -586,6 +589,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); set_page_private(page, 0); diff --git a/mm/page_owner.c b/mm/page_owner.c index 983c3a10fa07..dd6b9cebf981 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -53,6 +53,8 @@ void __reset_page_owner(struct page *page, unsigned int order) for (i = 0; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); + if (unlikely(!page_ext)) + continue; __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); } } @@ -60,6 +62,7 @@ void __reset_page_owner(struct page *page, unsigned int order) void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { struct page_ext *page_ext = lookup_page_ext(page); + struct stack_trace trace = { .nr_entries = 0, .max_entries = ARRAY_SIZE(page_ext->trace_entries), @@ -67,6 +70,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) .skip = 3, }; + if (unlikely(!page_ext)) + return; + save_stack_trace(&trace); page_ext->order = order; @@ -79,6 +85,12 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) gfp_t __get_page_owner_gfp(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + /* + * The caller just returns 0 if no valid gfp + * So return 0 here too. + */ + return 0; return page_ext->gfp_mask; } @@ -194,6 +206,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) } page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* * Some pages could be missed by concurrent allocation or free, @@ -257,6 +271,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* Maybe overraping zone */ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) diff --git a/mm/vmstat.c b/mm/vmstat.c index c54fd2924f25..c344e3609c53 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1091,6 +1091,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) continue; From 3630b28019075639a9db00491349e05fbf0f901e Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Wed, 15 Nov 2017 17:39:07 -0800 Subject: [PATCH 167/312] mm/page_ext.c: check if page_ext is not prepared commit e492080e640c2d1235ddf3441cae634cfffef7e1 upstream. online_page_ext() and page_ext_init() allocate page_ext for each section, but they do not allocate if the first PFN is !pfn_present(pfn) or !pfn_valid(pfn). Then section->page_ext remains as NULL. lookup_page_ext checks NULL only if CONFIG_DEBUG_VM is enabled. For a valid PFN, __set_page_owner will try to get page_ext through lookup_page_ext. Without CONFIG_DEBUG_VM lookup_page_ext will misuse NULL pointer as value 0. This incurrs invalid address access. This is the panic example when PFN 0x100000 is not valid but PFN 0x13FC00 is being used for page_ext. 
section->page_ext is NULL, get_entry returned invalid page_ext address as 0x1DFA000 for a PFN 0x13FC00. To avoid this panic, CONFIG_DEBUG_VM should be removed so that page_ext will be checked at all times. Unable to handle kernel paging request at virtual address 01dfa014 ------------[ cut here ]------------ Kernel BUG at ffffff80082371e0 [verbose debug info unavailable] Internal error: Oops: 96000045 [#1] PREEMPT SMP Modules linked in: PC is at __set_page_owner+0x48/0x78 LR is at __set_page_owner+0x44/0x78 __set_page_owner+0x48/0x78 get_page_from_freelist+0x880/0x8e8 __alloc_pages_nodemask+0x14c/0xc48 __do_page_cache_readahead+0xdc/0x264 filemap_fault+0x2ac/0x550 ext4_filemap_fault+0x3c/0x58 __do_fault+0x80/0x120 handle_mm_fault+0x704/0xbb0 do_page_fault+0x2e8/0x394 do_mem_abort+0x88/0x124 Pre-4.7 kernels also need commit f86e4271978b ("mm: check the return value of lookup_page_ext for all call sites"). Link: http://lkml.kernel.org/r/20171107094131.14621-1-jaewon31.kim@samsung.com Fixes: eefa864b701d ("mm/page_ext: resurrect struct page extending code for debugging") Signed-off-by: Jaewon Kim Acked-by: Michal Hocko Cc: Vlastimil Babka Cc: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Michal Hocko Signed-off-by: Greg Kroah-Hartman --- mm/page_ext.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/page_ext.c b/mm/page_ext.c index 292ca7b8debd..4d1eac0d4fc5 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -106,7 +106,6 @@ struct page_ext *lookup_page_ext(struct page *page) struct page_ext *base; base = NODE_DATA(page_to_nid(page))->node_page_ext; -#ifdef CONFIG_DEBUG_VM /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are @@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (unlikely(!base)) return NULL; -#endif offset = pfn - round_down(node_start_pfn(page_to_nid(page)), MAX_ORDER_NR_PAGES); return base + offset; @@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct page *page) { unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); -#ifdef CONFIG_DEBUG_VM /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are @@ -189,7 +186,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (!section->page_ext) return NULL; -#endif return section->page_ext + pfn; } From a3805b10de80953db316985f567453fc18329423 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 14 Nov 2017 01:03:44 +0100 Subject: [PATCH 168/312] mm/pagewalk.c: report holes in hugetlb ranges commit 373c4557d2aa362702c4c2d41288fb1e54990b7c upstream. This matters at least for the mincore syscall, which will otherwise copy uninitialized memory from the page allocator to userspace. It is probably also a correctness error for /proc/$pid/pagemap, but I haven't tested that. Removing the `walk->hugetlb_entry` condition in walk_hugetlb_range() has no effect because the caller already checks for that. This only reports holes in hugetlb ranges to callers who have specified a hugetlb_entry callback. This issue was found using an AFL-based fuzzer. 
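For context, a minimal sketch of a pagewalk user under the pre-5.x walk_page_range()/struct mm_walk interface assumed by this backport (hypothetical callback names, not part of the patch): with this change, a caller that supplies both hugetlb_entry and pte_hole now sees holes in hugetlb ranges through pte_hole instead of getting no callback at all.

  #include <linux/mm.h>

  static int sample_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                  unsigned long addr, unsigned long next,
                                  struct mm_walk *walk)
  {
          /* present hugetlb mapping: account [addr, next) as resident */
          return 0;
  }

  static int sample_pte_hole(unsigned long addr, unsigned long next,
                             struct mm_walk *walk)
  {
          /* no page table backing [addr, next): account it as a hole */
          return 0;
  }

  /* caller holds mm->mmap_sem for read */
  static int sample_walk(struct mm_struct *mm, unsigned long start,
                         unsigned long end)
  {
          struct mm_walk walk = {
                  .hugetlb_entry = sample_hugetlb_entry,
                  .pte_hole      = sample_pte_hole, /* now also called for hugetlb holes */
                  .mm            = mm,
          };

          return walk_page_range(start, end, &walk);
  }
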
v2: - don't crash on ->pte_hole==NULL (Andrew Morton) - add Cc stable (Andrew Morton) Changed for 4.4/4.9 stable backport: - fix up conflict in the huge_pte_offset() call Fixes: 1e25a271c8ac ("mincore: apply page table walker on do_mincore()") Signed-off-by: Jann Horn Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/pagewalk.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 29f2f8b853ae..c2cbd2620169 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end, do { next = hugetlb_entry_end(h, addr, end); pte = huge_pte_offset(walk->mm, addr & hmask); - if (pte && walk->hugetlb_entry) + + if (pte) err = walk->hugetlb_entry(pte, hmask, addr, next, walk); + else if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) break; } while (addr = next, addr != end); From 5baf0fb260fc59d632ba150f442a5904650d6170 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 24 Nov 2017 08:32:25 +0100 Subject: [PATCH 169/312] Linux 4.4.101 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 91dd7832f499..0d7b050427ed 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 100 +SUBLEVEL = 101 EXTRAVERSION = NAME = Blurry Fish Butt From 0208fabf7256245125fbabf03207a0da4000ea2d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 24 Nov 2017 11:13:07 +0100 Subject: [PATCH 170/312] mm, hwpoison: fixup "mm: check the return value of lookup_page_ext for all call sites" Backport of the upstream commit f86e4271978b ("mm: check the return value of lookup_page_ext for all call sites") is wrong for hwpoison pages. I have accidentally negated the condition for bailout. This basically disables hwpoison pages tracking while the code still might crash on unusual configurations when struct pages do not have page_ext allocated. The fix is trivial to invert the condition. 
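Spelled out, the intended bail-out reads as follows (a restatement of the corrected hunk below, set_page_poison shown; clear_page_poison and page_poison follow the same pattern):

  static inline void set_page_poison(struct page *page)
  {
          struct page_ext *page_ext;

          page_ext = lookup_page_ext(page);
          if (!page_ext)          /* the broken backport tested "if (page_ext)" */
                  return;
          __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
  }
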
Reported-by: Jiri Slaby Signed-off-by: Michal Hocko Signed-off-by: Greg Kroah-Hartman --- mm/debug-pagealloc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c index fe1c61f7cf26..3b8f1b83610e 100644 --- a/mm/debug-pagealloc.c +++ b/mm/debug-pagealloc.c @@ -34,7 +34,7 @@ static inline void set_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); - if (page_ext) + if (!page_ext) return; __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -44,7 +44,7 @@ static inline void clear_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); - if (page_ext) + if (!page_ext) return; __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -54,7 +54,7 @@ static inline bool page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); - if (page_ext) + if (!page_ext) return false; return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } From 29ffb9c1fb4acbda207985ad1558191ffb776bee Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 24 Nov 2017 11:26:29 +0100 Subject: [PATCH 171/312] Linux 4.4.102 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0d7b050427ed..9e036fac9c04 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 101 +SUBLEVEL = 102 EXTRAVERSION = NAME = Blurry Fish Butt From 663d2e5444a6791c41d690b02c2ae027b1cdb062 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 5 Sep 2017 10:56:13 +0200 Subject: [PATCH 172/312] UPSTREAM: android: binder: fix type mismatch warning Allowing binder to expose the 64-bit API on 32-bit kernels caused a build warning: drivers/android/binder.c: In function 'binder_transaction_buffer_release': drivers/android/binder.c:2220:15: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] fd_array = (u32 *)(parent_buffer + fda->parent_offset); ^ drivers/android/binder.c: In function 'binder_translate_fd_array': drivers/android/binder.c:2445:13: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] fd_array = (u32 *)(parent_buffer + fda->parent_offset); ^ drivers/android/binder.c: In function 'binder_fixup_parent': drivers/android/binder.c:2511:18: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] This adds extra type casts to avoid the warning. However, there is another problem with the Kconfig option: turning it on or off creates two incompatible ABI versions, a kernel that has this enabled cannot run user space that was built without it or vice versa. A better solution might be to leave the option hidden until the binder code is fixed to deal with both ABI versions. 
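The underlying C issue is generic: on a 32-bit kernel a pointer is 32 bits while binder offsets are u64, so folding the 64-bit sum straight into a pointer truncates it and GCC emits the size-mismatch warning. A user-space sketch of the same pattern, with hypothetical names rather than binder code:

  #include <stdint.h>

  /* Offsets arrive as 64-bit values even on 32-bit builds. */
  static uint32_t *fd_array_at(uintptr_t parent_buffer, uint64_t parent_offset)
  {
          /*
           * (uint32_t *)(parent_buffer + parent_offset) would widen the sum
           * to 64 bits and, on a 32-bit target, trigger "cast to pointer
           * from integer of different size".  Narrowing the offset to
           * uintptr_t first keeps the arithmetic pointer-sized, which is
           * essentially what the hunks below do with fda->parent_offset.
           */
          return (uint32_t *)(parent_buffer + (uintptr_t)parent_offset);
  }
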
Fixes: e8d2ed7db7c3 ("Revert "staging: Fix build issues with new binder API"") Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 1c363eaece2752c5f8b1b874cb4ae435de06aa66) Change-Id: Id09185a6f86905926699e92a2b30201b8a5e83e5 --- drivers/android/binder.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 008e448536de..d559596988d7 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2468,7 +2468,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, debug_id, (u64)fda->num_fds); continue; } - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); for (fd_index = 0; fd_index < fda->num_fds; fd_index++) task_close_fd(proc, fd_array[fd_index]); } break; @@ -2692,7 +2692,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ parent_buffer = parent->buffer - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); @@ -2758,7 +2758,7 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - parent_buffer = (u8 *)(parent->buffer - + parent_buffer = (u8 *)((uintptr_t)parent->buffer - binder_alloc_get_user_buffer_offset( &target_proc->alloc)); *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; From 353c16247d72937c90e7ef04cbd603502581019a Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Sat, 23 Sep 2017 17:02:18 +0800 Subject: [PATCH 173/312] f2fs: updates on 4.15-rc1 Pull f2fs updates from Jaegeuk Kim: "In this round, we introduce sysfile-based quota support which is required for Android by default. In addition, we allow that users are able to reserve some blocks in runtime to mitigate performance drops in low free space. Enhancements: - assign proper data segments according to write_hints given by user - issue cache_flush on dirty devices only among multiple devices - exploit cp_error flag and add more faults to enhance fault injection test - conduct more readaheads during f2fs_readdir - add a range for discard commands Bug fixes: - fix zero stat->st_blocks when inline_data is set - drop crypto key and free stale memory pointer while evict_inode is failing - fix some corner cases in free space and segment management - fix wrong last_disk_size This series includes lots of clean-ups and code enhancement in terms of xattr operations, discard/flush command control. 
In addition, it adds versatile debugfs entries to monitor f2fs status" Cherry-picked from origin/upstream-f2fs-stable-linux-4.4.y: 56a07b070510 f2fs: deny accessing encryption policy if encryption is off c394842e26e5 f2fs: inject fault in inc_valid_node_count 926292251022 f2fs: fix to clear FI_NO_PREALLOC e6cfc5de2d05 f2fs: expose quota information in debugfs c4cd2efe835b f2fs: separate nat entry mem alloc from nat_tree_lock 48c72b4c8c50 f2fs: validate before set/clear free nat bitmap baf9275a4bbd f2fs: avoid opened loop codes in __add_ino_entry 47af6c72d944 f2fs: apply write hints to select the type of segments for buffered write ac9819160586 f2fs: introduce scan_curseg_cache for cleanup ca28e9670e80 f2fs: optimize the way of traversing free_nid_bitmap 460688b59e8b f2fs: keep scanning until enough free nids are acquired 0186182c0c4d f2fs: trace checkpoint reason in fsync() 5d4b6efcfd09 f2fs: keep isize once block is reserved cross EOF 3c8f767e1374 f2fs: avoid race in between GC and block exchange 4423778adf0e f2fs: save a multiplication for last_nid calculation 3e3b40557525 f2fs: fix summary info corruption 44889e487981 f2fs: remove dead code in update_meta_page 55c7b9595bb9 f2fs: remove unneeded semicolon 8b92814117d5 f2fs: don't bother with inode->i_version 42c7c71824fc f2fs: check curseg space before foreground GC c5470498e59b f2fs: use rw_semaphore to protect SIT cache 82750d346ab7 f2fs: support quota sys files 26dfec49b25a f2fs: add quota_ino feature infra ddb8e2ae9811 f2fs: optimize __update_nat_bits f46ae958c701 f2fs: modify for accurate fggc node io stat c713fdb5a23c Revert "f2fs: handle dirty segments inside refresh_sit_entry" 873ec505cb07 f2fs: add a function to move nid ae66786296b4 f2fs: export SSR allocation threshold 90c28a18d2a4 f2fs: give correct trimmed blocks in fstrim 5612922fb0ac f2fs: support bio allocation error injection 583b7a274c27 f2fs: support get_page error injection 09a073cc8c56 f2fs: add missing sysfs description e945474a9c1b f2fs: support soft block reservation b7b2e629b6f6 f2fs: handle error case when adding xattr entry 7368e30495c5 f2fs: support flexible inline xattr size ada4061e191b f2fs: show current cp state 5b8ff1301a61 f2fs: add missing quota_initialize 46d4a691f035 f2fs: show # of dirty segments via sysfs fc13f9d7ce1e f2fs: stop all the operations by cp_error flag 91bea0c391b3 f2fs: remove several redundant assignments 807486c79534 f2fs: avoid using timespec 03b1cb0bb4a2 f2fs: fix to correct no_fggc_candidate 5c15033ceaea Revert "f2fs: return wrong error number on f2fs_quota_write" 5f5f59322240 f2fs: remove obsolete pointer for truncate_xattr_node 032a6906825a f2fs: retry ENOMEM for quota_read|write 171b638fc49b f2fs: limit # of inmemory pages 83ed7a615f0a f2fs: update ctx->pos correctly when hitting hole in directory 4d6e68be2534 f2fs: relocate readahead codes in readdir() c8be47b54018 f2fs: allow readdir() to be interrupted 2b903fe94cd0 f2fs: trace f2fs_readdir bb0db666d4bc f2fs: trace f2fs_lookup 40d6250f046a f2fs: skip searching non-exist range in truncate_hole 8e84f379df61 f2fs: expose some sectors to user in inline data or dentry case cb98f70dea02 f2fs: avoid stale fi->gdirty_list pointer 5562a3c53963 f2fs/crypto: drop crypto key at evict_inode only 85853e7e38d7 f2fs: fix to avoid race when accessing last_disk_size 0c47a892d555 f2fs: Fix bool initialization/comparison 68e801abc520 f2fs: give up CP_TRIMMED_FLAG if it drops discards df74eacb2075 f2fs: trace f2fs_remove_discard bd502c6e3e7a f2fs: reduce cmd_lock coverage in __issue_discard_cmd 
a34ab5ca4f94 f2fs: split discard policy 1e65afd14d32 f2fs: wrap discard policy 684447dad138 f2fs: support issuing/waiting discard in range 27eaad09380f f2fs: fix to flush multiple device in checkpoint 08bb9d68d51b f2fs: enhance multiple device flush 9c2526ac2ecb f2fs: fix to show ino management cache size correctly 814b463d262f f2fs: drop FI_UPDATE_WRITE tag after f2fs_issue_flush f555b0a117d3 f2fs: obsolete ALLOC_NID_LIST list 75d3164ae128 f2fs: convert inline data for direct I/O & FI_NO_PREALLOC 4de0ceb6b7ef f2fs: allow readpages with NULL file pointer 322a45d17212 f2fs: show flush list status in sysfs 6d625a93b4a8 f2fs: introduce read_xattr_block 8ea6e1c327c5 f2fs: introduce read_inline_xattr dbce11e9ee5b Revert "f2fs: reuse nids more aggressively" 131bc9f6b7f9 Revert "f2fs: node segment is prior to data segment selected victim" Change-Id: I93b9cd867b859a667a448b39299ff44a2b841b8c Signed-off-by: Jaegeuk Kim --- Documentation/ABI/testing/sysfs-fs-f2fs | 43 +- fs/f2fs/acl.c | 3 + fs/f2fs/checkpoint.c | 64 ++- fs/f2fs/data.c | 38 +- fs/f2fs/debug.c | 31 +- fs/f2fs/dir.c | 32 +- fs/f2fs/f2fs.h | 223 +++++++--- fs/f2fs/file.c | 120 ++++-- fs/f2fs/gc.c | 37 +- fs/f2fs/inline.c | 1 + fs/f2fs/inode.c | 26 +- fs/f2fs/namei.c | 101 ++++- fs/f2fs/node.c | 420 ++++++++++--------- fs/f2fs/node.h | 16 +- fs/f2fs/recovery.c | 8 +- fs/f2fs/segment.c | 516 ++++++++++++++++++------ fs/f2fs/segment.h | 39 +- fs/f2fs/shrinker.c | 2 +- fs/f2fs/super.c | 219 ++++++++-- fs/f2fs/sysfs.c | 53 ++- fs/f2fs/xattr.c | 178 ++++---- include/linux/f2fs_fs.h | 10 +- include/trace/events/f2fs.h | 116 +++++- 23 files changed, 1664 insertions(+), 632 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 500c60403653..2baed1151eac 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -51,6 +51,18 @@ Description: Controls the dirty page count condition for the in-place-update policies. +What: /sys/fs/f2fs//min_hot_blocks +Date: March 2017 +Contact: "Jaegeuk Kim" +Description: + Controls the dirty page count condition for redefining hot data. + +What: /sys/fs/f2fs//min_ssr_sections +Date: October 2017 +Contact: "Chao Yu" +Description: + Controls the fee section threshold to trigger SSR allocation. + What: /sys/fs/f2fs//max_small_discards Date: November 2013 Contact: "Jaegeuk Kim" @@ -96,6 +108,18 @@ Contact: "Jaegeuk Kim" Description: Controls the checkpoint timing. +What: /sys/fs/f2fs//idle_interval +Date: January 2016 +Contact: "Jaegeuk Kim" +Description: + Controls the idle timing. + +What: /sys/fs/f2fs//iostat_enable +Date: August 2017 +Contact: "Chao Yu" +Description: + Controls to enable/disable IO stat. + What: /sys/fs/f2fs//ra_nid_pages Date: October 2015 Contact: "Chao Yu" @@ -116,6 +140,12 @@ Contact: "Shuoran Liu" Description: Shows total written kbytes issued to disk. +What: /sys/fs/f2fs//feature +Date: July 2017 +Contact: "Jaegeuk Kim" +Description: + Shows all enabled features in current device. + What: /sys/fs/f2fs//inject_rate Date: May 2016 Contact: "Sheng Yong" @@ -132,7 +162,18 @@ What: /sys/fs/f2fs//reserved_blocks Date: June 2017 Contact: "Chao Yu" Description: - Controls current reserved blocks in system. + Controls target reserved blocks in system, the threshold + is soft, it could exceed current available user space. 
+ +What: /sys/fs/f2fs//current_reserved_blocks +Date: October 2017 +Contact: "Yunlong Song" +Contact: "Chao Yu" +Description: + Shows current reserved blocks in system, it may be temporarily + smaller than target_reserved_blocks, but will gradually + increase to target_reserved_blocks when more free blocks are + freed by user later. What: /sys/fs/f2fs//gc_urgent Date: August 2017 diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 112f8e04c549..3f52efa0f94f 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -253,6 +253,9 @@ static int __f2fs_set_acl(struct inode *inode, int type, int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + return __f2fs_set_acl(inode, type, acl, NULL); } diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index e86f67ac96c6..2eb778174a9b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -29,7 +29,6 @@ struct kmem_cache *inode_entry_slab; void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) { set_ckpt_flags(sbi, CP_ERROR_FLAG); - sbi->sb->s_flags |= MS_RDONLY; if (!end_io) f2fs_flush_merged_writes(sbi); } @@ -402,24 +401,23 @@ const struct address_space_operations f2fs_meta_aops = { #endif }; -static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) +static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) { struct inode_management *im = &sbi->im[type]; struct ino_entry *e, *tmp; tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS); -retry: + radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); if (!e) { e = tmp; - if (radix_tree_insert(&im->ino_root, ino, e)) { - spin_unlock(&im->ino_lock); - radix_tree_preload_end(); - goto retry; - } + if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) + f2fs_bug_on(sbi, 1); + memset(e, 0, sizeof(struct ino_entry)); e->ino = ino; @@ -427,6 +425,10 @@ retry: if (type != ORPHAN_INO) im->ino_num++; } + + if (type == FLUSH_INO) + f2fs_set_bit(devidx, (char *)&e->dirty_device); + spin_unlock(&im->ino_lock); radix_tree_preload_end(); @@ -455,7 +457,7 @@ static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) { /* add new dirty ino entry into list */ - __add_ino_entry(sbi, ino, type); + __add_ino_entry(sbi, ino, 0, type); } void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) @@ -481,7 +483,7 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all) struct ino_entry *e, *tmp; int i; - for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) { + for (i = all ? 
ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) { struct inode_management *im = &sbi->im[i]; spin_lock(&im->ino_lock); @@ -495,6 +497,27 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all) } } +void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) +{ + __add_ino_entry(sbi, ino, devidx, type); +} + +bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) +{ + struct inode_management *im = &sbi->im[type]; + struct ino_entry *e; + bool is_dirty = false; + + spin_lock(&im->ino_lock); + e = radix_tree_lookup(&im->ino_root, ino); + if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) + is_dirty = true; + spin_unlock(&im->ino_lock); + return is_dirty; +} + int acquire_orphan_inode(struct f2fs_sb_info *sbi) { struct inode_management *im = &sbi->im[ORPHAN_INO]; @@ -531,7 +554,7 @@ void release_orphan_inode(struct f2fs_sb_info *sbi) void add_orphan_inode(struct inode *inode) { /* add new orphan ino entry into list */ - __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO); + __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); update_inode_page(inode); } @@ -555,7 +578,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) return err; } - __add_ino_entry(sbi, ino, ORPHAN_INO); + __add_ino_entry(sbi, ino, 0, ORPHAN_INO); inode = f2fs_iget_retry(sbi->sb, ino); if (IS_ERR(inode)) { @@ -591,6 +614,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) block_t start_blk, orphan_blocks, i, j; unsigned int s_flags = sbi->sb->s_flags; int err = 0; +#ifdef CONFIG_QUOTA + int quota_enabled; +#endif if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) return 0; @@ -603,8 +629,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sbi->sb->s_flags |= MS_ACTIVE; + /* Turn on quotas so that they are updated correctly */ - f2fs_enable_quota_files(sbi); + quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY); #endif start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); @@ -632,7 +659,8 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) out: #ifdef CONFIG_QUOTA /* Turn quotas off */ - f2fs_quota_off_umount(sbi->sb); + if (quota_enabled) + f2fs_quota_off_umount(sbi->sb); #endif sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ @@ -987,7 +1015,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) update_inode_page(inode); iput(inode); } - }; + } return 0; } @@ -1147,6 +1175,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct super_block *sb = sbi->sb; struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); u64 kbytes_written; + int err; /* Flush all the NAT/SIT pages */ while (get_pages(sbi, F2FS_DIRTY_META)) { @@ -1240,6 +1269,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (unlikely(f2fs_cp_error(sbi))) return -EIO; + /* flush all device cache */ + err = f2fs_flush_device_cache(sbi); + if (err) + return err; + /* write out checkpoint buffer at block 0 */ update_meta_page(sbi, ckpt, start_blk++); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c8583d7a1845..cdccc429325b 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -172,7 +172,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, { struct bio *bio; - bio = f2fs_bio_alloc(npages); + bio = f2fs_bio_alloc(sbi, npages, true); f2fs_target_device(sbi, blk_addr, bio); bio->bi_end_io = is_read ? 
f2fs_read_end_io : f2fs_write_end_io; @@ -417,8 +417,8 @@ next: bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; - /* set submitted = 1 as a return value */ - fio->submitted = 1; + /* set submitted = true as a return value */ + fio->submitted = true; inc_page_count(sbi, WB_DATA_TYPE(bio_page)); @@ -472,7 +472,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, f2fs_wait_on_block_writeback(sbi, blkaddr); } - bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES)); + bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false); if (!bio) { if (ctx) fscrypt_release_ctx(ctx); @@ -832,6 +832,13 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) struct f2fs_map_blocks map; int err = 0; + /* convert inline data for Direct I/O*/ + if (iocb->ki_flags & IOCB_DIRECT) { + err = f2fs_convert_inline_inode(inode); + if (err) + return err; + } + if (is_inode_flag_set(inode, FI_NO_PREALLOC)) return 0; @@ -844,15 +851,11 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) map.m_next_pgofs = NULL; - if (iocb->ki_flags & IOCB_DIRECT) { - err = f2fs_convert_inline_inode(inode); - if (err) - return err; + if (iocb->ki_flags & IOCB_DIRECT) return f2fs_map_blocks(inode, &map, 1, __force_buffered_io(inode, WRITE) ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO); - } if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { err = f2fs_convert_inline_inode(inode); if (err) @@ -1332,7 +1335,7 @@ static int f2fs_read_data_pages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - struct inode *inode = file->f_mapping->host; + struct inode *inode = mapping->host; struct page *page = list_last_entry(pages, struct page, lru); trace_f2fs_readpages(inode, page, nr_pages); @@ -1493,6 +1496,7 @@ static int __write_data_page(struct page *page, bool *submitted, int err = 0; struct f2fs_io_info fio = { .sbi = sbi, + .ino = inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), @@ -1564,8 +1568,11 @@ write: err = do_write_data_page(&fio); } } + + down_write(&F2FS_I(inode)->i_sem); if (F2FS_I(inode)->last_disk_size < psize) F2FS_I(inode)->last_disk_size = psize; + up_write(&F2FS_I(inode)->i_sem); done: if (err && err != -ENOENT) @@ -1945,6 +1952,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, } trace_f2fs_write_begin(inode, pos, len, flags); + if (f2fs_is_atomic_file(inode) && + !available_free_memory(sbi, INMEM_PAGES)) { + err = -ENOMEM; + goto fail; + } + /* * We should check this at this moment to avoid deadlock on inode page * and #0 page. The locking rule for inline_data conversion should be: @@ -1960,7 +1973,8 @@ repeat: * Do not use grab_cache_page_write_begin() to avoid deadlock due to * wait_for_stable_page. Will wait that below with our IO control. 
*/ - page = grab_cache_page(mapping, index); + page = f2fs_pagecache_get_page(mapping, index, + FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); if (!page) { err = -ENOMEM; goto fail; @@ -2021,6 +2035,8 @@ repeat: fail: f2fs_put_page(page, 1); f2fs_write_failed(mapping, pos + len); + if (f2fs_is_atomic_file(inode)) + drop_inmem_pages_all(sbi); return err; } diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 87f449845f5f..ecada8425268 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -45,9 +45,18 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA); + si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA); si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA); si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; + + si->nquota_files = 0; + if (f2fs_sb_has_quota_ino(sbi->sb)) { + for (i = 0; i < MAXQUOTAS; i++) { + if (f2fs_qf_ino(sbi->sb, i)) + si->nquota_files++; + } + } si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); si->aw_cnt = atomic_read(&sbi->aw_cnt); @@ -61,6 +70,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) atomic_read(&SM_I(sbi)->fcc_info->issued_flush); si->nr_flushing = atomic_read(&SM_I(sbi)->fcc_info->issing_flush); + si->flush_list_empty = + llist_empty(&SM_I(sbi)->fcc_info->issue_list); } if (SM_I(sbi) && SM_I(sbi)->dcc_info) { si->nr_discarded = @@ -96,9 +107,9 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; si->sits = MAIN_SEGS(sbi); si->dirty_sits = SIT_I(sbi)->dirty_sentries; - si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST]; + si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; si->avail_nids = NM_I(sbi)->available_nids; - si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]; + si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID]; si->bg_gc = sbi->bg_gc; si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg) * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) @@ -231,14 +242,14 @@ get_cache: } /* free nids */ - si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] + - NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) * + si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + + NM_I(sbi)->nid_cnt[PREALLOC_NID]) * sizeof(struct free_nid); si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); si->cache_mem += NM_I(sbi)->dirty_nat_cnt * sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); - for (i = 0; i <= ORPHAN_INO; i++) + for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); si->cache_mem += atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree); @@ -262,9 +273,10 @@ static int stat_show(struct seq_file *s, void *v) list_for_each_entry(si, &f2fs_stat_list, stat_list) { update_general_status(si->sbi); - seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n", + seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n", si->sbi->sb->s_bdev, i++, - f2fs_readonly(si->sbi->sb) ? "RO": "RW"); + f2fs_readonly(si->sbi->sb) ? "RO": "RW", + f2fs_cp_error(si->sbi) ? 
"Error": "Good"); seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", si->sit_area_segs, si->nat_area_segs); seq_printf(s, "[SSA: %d] [MAIN: %d", @@ -349,10 +361,11 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", si->ext_tree, si->zombie_tree, si->ext_node); seq_puts(s, "\nBalancing F2FS Async:\n"); - seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d), " + seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), " "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n", si->nr_wb_cp_data, si->nr_wb_data, si->nr_flushing, si->nr_flushed, + si->flush_list_empty, si->nr_discarding, si->nr_discarded, si->nr_discard_cmd, si->undiscard_blks); seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), " @@ -365,6 +378,8 @@ static int stat_show(struct seq_file *s, void *v) si->ndirty_dent, si->ndirty_dirs, si->ndirty_all); seq_printf(s, " - datas: %4d in files:%4d\n", si->ndirty_data, si->ndirty_files); + seq_printf(s, " - quota datas: %4d in quota files:%4d\n", + si->ndirty_qdata, si->nquota_files); seq_printf(s, " - meta: %4d in %4d\n", si->ndirty_meta, si->meta_pages); seq_printf(s, " - imeta: %4d\n", diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 4f2a8fedb313..1955707b138b 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -10,10 +10,12 @@ */ #include #include +#include #include "f2fs.h" #include "node.h" #include "acl.h" #include "xattr.h" +#include static unsigned long dir_blocks(struct inode *inode) { @@ -847,6 +849,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) struct f2fs_dentry_block *dentry_blk = NULL; struct page *dentry_page = NULL; struct file_ra_state *ra = &file->f_ra; + loff_t start_pos = ctx->pos; unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); struct f2fs_dentry_ptr d; struct fscrypt_str fstr = FSTR_INIT(NULL, 0); @@ -855,24 +858,32 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) if (f2fs_encrypted_inode(inode)) { err = fscrypt_get_encryption_info(inode); if (err && err != -ENOKEY) - return err; + goto out; err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr); if (err < 0) - return err; + goto out; } if (f2fs_has_inline_dentry(inode)) { err = f2fs_read_inline_dir(file, ctx, &fstr); - goto out; + goto out_free; } - /* readahead for multi pages of dir */ - if (npages - n > 1 && !ra_has_index(ra, n)) - page_cache_sync_readahead(inode->i_mapping, ra, file, n, + for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) { + + /* allow readdir() to be interrupted */ + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + goto out_free; + } + cond_resched(); + + /* readahead for multi pages of dir */ + if (npages - n > 1 && !ra_has_index(ra, n)) + page_cache_sync_readahead(inode->i_mapping, ra, file, n, min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); - for (; n < npages; n++) { dentry_page = get_lock_data_page(inode, n, false); if (IS_ERR(dentry_page)) { err = PTR_ERR(dentry_page); @@ -880,7 +891,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) err = 0; continue; } else { - goto out; + goto out_free; } } @@ -896,12 +907,13 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) break; } - ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK; kunmap(dentry_page); f2fs_put_page(dentry_page, 1); } -out: +out_free: fscrypt_fname_free_buffer(&fstr); +out: + trace_f2fs_readdir(inode, start_pos, ctx->pos, err); return err < 0 ? 
err : 0; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c1a0aef8efc6..081ec493baae 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -47,6 +47,8 @@ enum { FAULT_KMALLOC, FAULT_PAGE_ALLOC, + FAULT_PAGE_GET, + FAULT_ALLOC_BIO, FAULT_ALLOC_NID, FAULT_ORPHAN, FAULT_BLOCK, @@ -94,6 +96,7 @@ extern char *fault_name[FAULT_MAX]; #define F2FS_MOUNT_GRPQUOTA 0x00100000 #define F2FS_MOUNT_PRJQUOTA 0x00200000 #define F2FS_MOUNT_QUOTA 0x00400000 +#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 #define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option) #define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option) @@ -119,6 +122,8 @@ struct f2fs_mount_info { #define F2FS_FEATURE_EXTRA_ATTR 0x0008 #define F2FS_FEATURE_PRJQUOTA 0x0010 #define F2FS_FEATURE_INODE_CHKSUM 0x0020 +#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040 +#define F2FS_FEATURE_QUOTA_INO 0x0080 #define F2FS_HAS_FEATURE(sb, mask) \ ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0) @@ -214,7 +219,7 @@ enum { #define BATCHED_TRIM_BLOCKS(sbi) \ (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg) #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) -#define DISCARD_ISSUE_RATE 8 +#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ #define DEF_CP_INTERVAL 60 /* 60 secs */ @@ -225,7 +230,6 @@ struct cp_control { __u64 trim_start; __u64 trim_end; __u64 trim_minlen; - __u64 trimmed; }; /* @@ -244,12 +248,14 @@ enum { ORPHAN_INO, /* for orphan ino list */ APPEND_INO, /* for append ino list */ UPDATE_INO, /* for update ino list */ + FLUSH_INO, /* for multiple device flushing */ MAX_INO_ENTRY, /* max. list */ }; struct ino_entry { - struct list_head list; /* list head */ - nid_t ino; /* inode number */ + struct list_head list; /* list head */ + nid_t ino; /* inode number */ + unsigned int dirty_device; /* dirty device bitmap */ }; /* for the list of inodes to be GCed */ @@ -273,10 +279,6 @@ struct discard_entry { #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? 
\ (MAX_PLIST_NUM - 1) : (blk_num - 1)) -#define P_ACTIVE 0x01 -#define P_TRIM 0x02 -#define plist_issue(tag) (((tag) & P_ACTIVE) || ((tag) & P_TRIM)) - enum { D_PREP, D_SUBMIT, @@ -308,12 +310,32 @@ struct discard_cmd { int error; /* bio error */ }; +enum { + DPOLICY_BG, + DPOLICY_FORCE, + DPOLICY_FSTRIM, + DPOLICY_UMOUNT, + MAX_DPOLICY, +}; + +struct discard_policy { + int type; /* type of discard */ + unsigned int min_interval; /* used for candidates exist */ + unsigned int max_interval; /* used for candidates not exist */ + unsigned int max_requests; /* # of discards issued per round */ + unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ + bool io_aware; /* issue discard in idle time */ + bool sync; /* submit discard with REQ_SYNC flag */ + unsigned int granularity; /* discard granularity */ +}; + struct discard_cmd_control { struct task_struct *f2fs_issue_discard; /* discard thread */ struct list_head entry_list; /* 4KB discard entry list */ struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */ unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */ struct list_head wait_list; /* store on-flushing entries */ + struct list_head fstrim_list; /* in-flight discard from fstrim */ wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ unsigned int discard_wake; /* to wake up discard thread */ struct mutex cmd_lock; @@ -443,11 +465,14 @@ struct f2fs_flush_device { /* for inline stuff */ #define DEF_INLINE_RESERVED_SIZE 1 +#define DEF_MIN_INLINE_SIZE 1 static inline int get_extra_isize(struct inode *inode); -#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ - (CUR_ADDRS_PER_INODE(inode) - \ - DEF_INLINE_RESERVED_SIZE - \ - F2FS_INLINE_XATTR_ADDRS)) +static inline int get_inline_xattr_addrs(struct inode *inode); +#define F2FS_INLINE_XATTR_ADDRS(inode) get_inline_xattr_addrs(inode) +#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ + (CUR_ADDRS_PER_INODE(inode) - \ + F2FS_INLINE_XATTR_ADDRS(inode) - \ + DEF_INLINE_RESERVED_SIZE)) /* for inline dir */ #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ @@ -647,6 +672,7 @@ struct f2fs_inode_info { #endif struct list_head dirty_list; /* dirty list for dirs and files */ struct list_head gdirty_list; /* linked in global dirty list */ + struct list_head inmem_ilist; /* list for inmem inodes */ struct list_head inmem_pages; /* inmemory pages managed by f2fs */ struct task_struct *inmem_task; /* store inmemory task */ struct mutex inmem_lock; /* lock for inmemory pages */ @@ -657,6 +683,7 @@ struct f2fs_inode_info { int i_extra_isize; /* size of extra space located in i_addr */ kprojid_t i_projid; /* id for project quota */ + int i_inline_xattr_size; /* inline xattr size */ }; static inline void get_extent_info(struct extent_info *ext, @@ -730,10 +757,13 @@ static inline void __try_update_largest_extent(struct inode *inode, } } -enum nid_list { - FREE_NID_LIST, - ALLOC_NID_LIST, - MAX_NID_LIST, +/* + * For free nid management + */ +enum nid_state { + FREE_NID, /* newly added to free nid list */ + PREALLOC_NID, /* it is preallocated */ + MAX_NID_STATE, }; struct f2fs_nm_info { @@ -756,8 +786,8 @@ struct f2fs_nm_info { /* free node ids management */ struct radix_tree_root free_nid_root;/* root of the free_nid cache */ - struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */ - unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */ + struct list_head free_nid_list; /* list for free nids excluding preallocated nids */ + unsigned 
int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ spinlock_t nid_list_lock; /* protect nid lists ops */ struct mutex build_lock; /* lock for build free nids */ unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; @@ -835,6 +865,7 @@ enum { struct flush_cmd { struct completion wait; struct llist_node llnode; + nid_t ino; int ret; }; @@ -853,6 +884,8 @@ struct f2fs_sm_info { struct dirty_seglist_info *dirty_info; /* dirty segment information */ struct curseg_info *curseg_array; /* active segment information */ + struct rw_semaphore curseg_lock; /* for preventing curseg change */ + block_t seg0_blkaddr; /* block address of 0'th segment */ block_t main_blkaddr; /* start block address of main area */ block_t ssa_blkaddr; /* start block address of SSA area */ @@ -874,6 +907,7 @@ struct f2fs_sm_info { unsigned int min_ipu_util; /* in-place-update threshold */ unsigned int min_fsync_blocks; /* threshold for fsync */ unsigned int min_hot_blocks; /* threshold for hot block allocation */ + unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ /* for flush command control */ struct flush_cmd_control *fcc_info; @@ -895,6 +929,7 @@ struct f2fs_sm_info { enum count_type { F2FS_DIRTY_DENTS, F2FS_DIRTY_DATA, + F2FS_DIRTY_QDATA, F2FS_DIRTY_NODES, F2FS_DIRTY_META, F2FS_INMEM_PAGES, @@ -943,6 +978,18 @@ enum need_lock_type { LOCK_RETRY, }; +enum cp_reason_type { + CP_NO_NEEDED, + CP_NON_REGULAR, + CP_HARDLINK, + CP_SB_NEED_CP, + CP_WRONG_PINO, + CP_NO_SPC_ROLL, + CP_NODE_NEED_CP, + CP_FASTBOOT_MODE, + CP_SPEC_LOG_NUM, +}; + enum iostat_type { APP_DIRECT_IO, /* app direct IOs */ APP_BUFFERED_IO, /* app buffered IOs */ @@ -962,6 +1009,7 @@ enum iostat_type { struct f2fs_io_info { struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ + nid_t ino; /* inode number */ enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ enum temp_type temp; /* contains HOT/WARM/COLD */ int op; /* contains REQ_OP_ */ @@ -1006,6 +1054,7 @@ enum inode_type { DIR_INODE, /* for dirty dir inode */ FILE_INODE, /* for dirty regular/symlink inode */ DIRTY_META, /* for all dirtied inode metadata */ + ATOMIC_FILE, /* for all atomic files */ NR_INODE_TYPE, }; @@ -1108,12 +1157,15 @@ struct f2fs_sb_info { loff_t max_file_blocks; /* max block index of file */ int active_logs; /* # of active logs */ int dir_level; /* directory level */ + int inline_xattr_size; /* inline xattr size */ + unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */ block_t user_block_count; /* # of user blocks */ block_t total_valid_block_count; /* # of valid blocks */ block_t discard_blks; /* discard command candidats */ block_t last_valid_block_count; /* for recovery */ block_t reserved_blocks; /* configurable reserved blocks */ + block_t current_reserved_blocks; /* current reserved blocks */ u32 s_next_generation; /* for NFS support */ @@ -1179,6 +1231,8 @@ struct f2fs_sb_info { struct list_head s_list; int s_ndevs; /* number of devices */ struct f2fs_dev_info *devs; /* for device list */ + unsigned int dirty_device; /* for checkpoint data flush */ + spinlock_t dev_lock; /* protect dirty_device */ struct mutex umount_mutex; unsigned int shrinker_run_no; @@ -1242,8 +1296,7 @@ static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) { - struct timespec ts = {sbi->interval_time[type], 0}; - unsigned long interval = timespec_to_jiffies(&ts); + unsigned long interval = sbi->interval_time[type] * HZ; return time_after(jiffies, 
sbi->last_time[type] + interval); } @@ -1410,6 +1463,13 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) return le64_to_cpu(cp->checkpoint_ver); } +static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) +{ + if (type < F2FS_MAX_QUOTAS) + return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); + return 0; +} + static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) { size_t crc_offset = le32_to_cpu(cp->checksum_offset); @@ -1588,7 +1648,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, spin_lock(&sbi->stat_lock); sbi->total_valid_block_count += (block_t)(*count); - avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks; + avail_user_block_count = sbi->user_block_count - + sbi->current_reserved_blocks; if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { diff = sbi->total_valid_block_count - avail_user_block_count; *count -= diff; @@ -1622,6 +1683,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); f2fs_bug_on(sbi, inode->i_blocks < sectors); sbi->total_valid_block_count -= (block_t)count; + if (sbi->reserved_blocks && + sbi->current_reserved_blocks < sbi->reserved_blocks) + sbi->current_reserved_blocks = min(sbi->reserved_blocks, + sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); f2fs_i_blocks_write(inode, count, false, true); } @@ -1642,6 +1707,8 @@ static inline void inode_inc_dirty_pages(struct inode *inode) atomic_inc(&F2FS_I(inode)->dirty_pages); inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); + if (IS_NOQUOTA(inode)) + inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) @@ -1658,6 +1725,8 @@ static inline void inode_dec_dirty_pages(struct inode *inode) atomic_dec(&F2FS_I(inode)->dirty_pages); dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); + if (IS_NOQUOTA(inode)) + dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) @@ -1765,10 +1834,17 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, return ret; } +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(sbi, FAULT_BLOCK)) { + f2fs_show_injection_info(FAULT_BLOCK); + goto enospc; + } +#endif + spin_lock(&sbi->stat_lock); valid_block_count = sbi->total_valid_block_count + 1; - if (unlikely(valid_block_count + sbi->reserved_blocks > + if (unlikely(valid_block_count + sbi->current_reserved_blocks > sbi->user_block_count)) { spin_unlock(&sbi->stat_lock); goto enospc; @@ -1811,6 +1887,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, sbi->total_valid_node_count--; sbi->total_valid_block_count--; + if (sbi->reserved_blocks && + sbi->current_reserved_blocks < sbi->reserved_blocks) + sbi->current_reserved_blocks++; spin_unlock(&sbi->stat_lock); @@ -1857,6 +1936,19 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); } +static inline struct page *f2fs_pagecache_get_page( + struct address_space *mapping, pgoff_t index, + int fgp_flags, gfp_t gfp_mask) +{ +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { + f2fs_show_injection_info(FAULT_PAGE_GET); + return NULL; + } +#endif + return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); +} + static inline void f2fs_copy_page(struct page *src, struct page *dst) { char *src_kaddr = kmap(src); @@ -1906,15 +1998,25 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, return entry; } -static inline struct bio *f2fs_bio_alloc(int npages) +static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, + int npages, bool no_fail) { struct bio *bio; - /* No failure on bio allocation */ - bio = bio_alloc(GFP_NOIO, npages); - if (!bio) - bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); - return bio; + if (no_fail) { + /* No failure on bio allocation */ + bio = bio_alloc(GFP_NOIO, npages); + if (!bio) + bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); + return bio; + } +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(sbi, FAULT_ALLOC_BIO)) { + f2fs_show_injection_info(FAULT_ALLOC_BIO); + return NULL; + } +#endif + return bio_alloc(GFP_KERNEL, npages); } static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, @@ -2224,25 +2326,20 @@ static inline int f2fs_has_inline_xattr(struct inode *inode) static inline unsigned int addrs_per_inode(struct inode *inode) { - if (f2fs_has_inline_xattr(inode)) - return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS; - return CUR_ADDRS_PER_INODE(inode); + return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS(inode); } -static inline void *inline_xattr_addr(struct page *page) +static inline void *inline_xattr_addr(struct inode *inode, struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - - F2FS_INLINE_XATTR_ADDRS]); + F2FS_INLINE_XATTR_ADDRS(inode)]); } static inline int inline_xattr_size(struct inode *inode) { - if (f2fs_has_inline_xattr(inode)) - return F2FS_INLINE_XATTR_ADDRS << 2; - else - return 0; + return get_inline_xattr_addrs(inode) * sizeof(__le32); } static inline int f2fs_has_inline_data(struct inode *inode) @@ -2323,9 +2420,10 @@ static inline void clear_file(struct inode *inode, int type) static 
inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) { + bool ret; + if (dsync) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - bool ret; spin_lock(&sbi->inode_lock[DIRTY_META]); ret = list_empty(&F2FS_I(inode)->gdirty_list); @@ -2336,9 +2434,15 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) file_keep_isize(inode) || i_size_read(inode) & PAGE_MASK) return false; - return F2FS_I(inode)->last_disk_size == i_size_read(inode); + + down_read(&F2FS_I(inode)->i_sem); + ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); + up_read(&F2FS_I(inode)->i_sem); + + return ret; } +#define sb_rdonly f2fs_readonly static inline int f2fs_readonly(struct super_block *sb) { return sb->s_flags & MS_RDONLY; @@ -2406,6 +2510,12 @@ static inline int get_extra_isize(struct inode *inode) return F2FS_I(inode)->i_extra_isize / sizeof(__le32); } +static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb); +static inline int get_inline_xattr_addrs(struct inode *inode) +{ + return F2FS_I(inode)->i_inline_xattr_size; +} + #define get_inode_mode(i) \ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) @@ -2534,7 +2644,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) */ int f2fs_inode_dirtied(struct inode *inode, bool sync); void f2fs_inode_synced(struct inode *inode); -void f2fs_enable_quota_files(struct f2fs_sb_info *sbi); +int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); void f2fs_quota_off_umount(struct super_block *sb); int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); int f2fs_sync_fs(struct super_block *sb, int sync); @@ -2562,7 +2672,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni); pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); int truncate_inode_blocks(struct inode *inode, pgoff_t from); -int truncate_xattr_node(struct inode *inode, struct page *page); +int truncate_xattr_node(struct inode *inode); int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino); int remove_inode_page(struct inode *inode); struct page *new_inode_page(struct inode *inode); @@ -2597,19 +2707,22 @@ void destroy_node_manager_caches(void); */ bool need_SSR(struct f2fs_sb_info *sbi); void register_inmem_page(struct inode *inode, struct page *page); +void drop_inmem_pages_all(struct f2fs_sb_info *sbi); void drop_inmem_pages(struct inode *inode); void drop_inmem_page(struct inode *inode, struct page *page); int commit_inmem_pages(struct inode *inode); void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi); -int f2fs_issue_flush(struct f2fs_sb_info *sbi); +int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); int create_flush_cmd_control(struct f2fs_sb_info *sbi); +int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); -void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); +void init_discard_policy(struct discard_policy *dpolicy, int discard_type, + unsigned int granularity); void stop_discard_thread(struct f2fs_sb_info *sbi); -void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); +bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); void 
clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); void release_discard_addrs(struct f2fs_sb_info *sbi); int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); @@ -2664,6 +2777,10 @@ void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void release_ino_entry(struct f2fs_sb_info *sbi, bool all); bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); +void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type); +bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type); int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); int acquire_orphan_inode(struct f2fs_sb_info *sbi); void release_orphan_inode(struct f2fs_sb_info *sbi); @@ -2751,14 +2868,16 @@ struct f2fs_stat_info { unsigned long long hit_largest, hit_cached, hit_rbtree; unsigned long long hit_total, total_ext; int ext_tree, zombie_tree, ext_node; - int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta; + int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; + int ndirty_data, ndirty_qdata; int inmem_pages; - unsigned int ndirty_dirs, ndirty_files, ndirty_all; + unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; int nats, dirty_nats, sits, dirty_sits; int free_nids, avail_nids, alloc_nids; int total_count, utilization; int bg_gc, nr_wb_cp_data, nr_wb_data; - int nr_flushing, nr_flushed, nr_discarding, nr_discarded; + int nr_flushing, nr_flushed, flush_list_empty; + int nr_discarding, nr_discarded; int nr_discard_cmd; unsigned int undiscard_blks; int inline_xattr, inline_inode, inline_dir, append, update, orphans; @@ -3066,6 +3185,16 @@ static inline int f2fs_sb_has_inode_chksum(struct super_block *sb) return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM); } +static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb) +{ + return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR); +} + +static inline int f2fs_sb_has_quota_ino(struct super_block *sb) +{ + return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO); +} + #ifdef CONFIG_BLK_DEV_ZONED static inline int get_blkz_type(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkaddr) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index a9e1655a6bf8..bfff53f658e1 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -56,6 +56,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, struct dnode_of_data dn; int err; + if (unlikely(f2fs_cp_error(sbi))) { + err = -EIO; + goto err; + } + sb_start_pagefault(inode->i_sb); f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); @@ -117,6 +122,7 @@ out_sem: out: sb_end_pagefault(inode->i_sb); f2fs_update_time(sbi, REQ_TIME); +err: return block_page_mkwrite_return(err); } @@ -141,27 +147,29 @@ static int get_parent_ino(struct inode *inode, nid_t *pino) return 1; } -static inline bool need_do_checkpoint(struct inode *inode) +static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - bool need_cp = false; + enum cp_reason_type cp_reason = CP_NO_NEEDED; - if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) - need_cp = true; + if (!S_ISREG(inode->i_mode)) + cp_reason = CP_NON_REGULAR; + else if (inode->i_nlink != 1) + cp_reason = CP_HARDLINK; else if (is_sbi_flag_set(sbi, SBI_NEED_CP)) - need_cp = true; + cp_reason = CP_SB_NEED_CP; else if (file_wrong_pino(inode)) - need_cp = true; + cp_reason = CP_WRONG_PINO; else if 
(!space_for_roll_forward(sbi)) - need_cp = true; + cp_reason = CP_NO_SPC_ROLL; else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) - need_cp = true; + cp_reason = CP_NODE_NEED_CP; else if (test_opt(sbi, FASTBOOT)) - need_cp = true; + cp_reason = CP_FASTBOOT_MODE; else if (sbi->active_logs == 2) - need_cp = true; + cp_reason = CP_SPEC_LOG_NUM; - return need_cp; + return cp_reason; } static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) @@ -196,7 +204,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t ino = inode->i_ino; int ret = 0; - bool need_cp = false; + enum cp_reason_type cp_reason = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, @@ -215,7 +223,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, clear_inode_flag(inode, FI_NEED_IPU); if (ret) { - trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); return ret; } @@ -246,10 +254,10 @@ go_write: * sudden-power-off. */ down_read(&F2FS_I(inode)->i_sem); - need_cp = need_do_checkpoint(inode); + cp_reason = need_do_checkpoint(inode); up_read(&F2FS_I(inode)->i_sem); - if (need_cp) { + if (cp_reason) { /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); @@ -297,19 +305,24 @@ sync_nodes: remove_ino_entry(sbi, ino, APPEND_INO); clear_inode_flag(inode, FI_APPEND_WRITE); flush_out: - remove_ino_entry(sbi, ino, UPDATE_INO); - clear_inode_flag(inode, FI_UPDATE_WRITE); if (!atomic) - ret = f2fs_issue_flush(sbi); + ret = f2fs_issue_flush(sbi, inode->i_ino); + if (!ret) { + remove_ino_entry(sbi, ino, UPDATE_INO); + clear_inode_flag(inode, FI_UPDATE_WRITE); + remove_ino_entry(sbi, ino, FLUSH_INO); + } f2fs_update_time(sbi, REQ_TIME); out: - trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); f2fs_trace_ios(NULL, 1); return ret; } int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) + return -EIO; return f2fs_do_sync_file(file, start, end, datasync, false); } @@ -446,6 +459,9 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) struct inode *inode = file_inode(file); int err; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + /* we don't need to use inline_data strictly */ err = f2fs_convert_inline_inode(inode); if (err) @@ -632,6 +648,9 @@ int f2fs_truncate(struct inode *inode) { int err; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return 0; @@ -667,7 +686,8 @@ int f2fs_getattr(struct vfsmount *mnt, generic_fillattr(inode, stat); /* we need to show initial sectors used for inline_data/dentries */ - if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) + if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || + f2fs_has_inline_dentry(inode)) stat->blocks += (stat->size + 511) >> 9; return 0; @@ -709,6 +729,9 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) int err; bool size_changed = false; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + err = inode_change_ok(inode, attr); if (err) return err; @@ -761,6 +784,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) inode->i_mtime = inode->i_ctime = current_time(inode); } + 
down_write(&F2FS_I(inode)->i_sem); + F2FS_I(inode)->last_disk_size = i_size_read(inode); + up_write(&F2FS_I(inode)->i_sem); + size_changed = true; } @@ -834,7 +861,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE); if (err) { if (err == -ENOENT) { - pg_start++; + pg_start = get_next_page_offset(&dn, pg_start); continue; } return err; @@ -1149,11 +1176,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) if (ret) goto out; + /* avoid gc operation during block exchange */ + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + truncate_pagecache(inode, offset); ret = f2fs_do_collapse(inode, pg_start, pg_end); if (ret) - goto out; + goto out_unlock; /* write out all moved pages, if possible */ filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); @@ -1165,7 +1195,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) ret = truncate_blocks(inode, new_size, true); if (!ret) f2fs_i_size_write(inode, new_size); - +out_unlock: + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); out: up_write(&F2FS_I(inode)->i_mmap_sem); return ret; @@ -1348,6 +1379,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) if (ret) goto out; + /* avoid gc operation during block exchange */ + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + truncate_pagecache(inode, offset); pg_start = offset >> PAGE_SHIFT; @@ -1375,6 +1409,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) if (!ret) f2fs_i_size_write(inode, new_size); + + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); out: up_write(&F2FS_I(inode)->i_mmap_sem); return ret; @@ -1424,8 +1460,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset, new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end; } - if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) - f2fs_i_size_write(inode, new_size); + if (new_size > i_size_read(inode)) { + if (mode & FALLOC_FL_KEEP_SIZE) + file_set_keep_isize(inode); + else + f2fs_i_size_write(inode, new_size); + } return err; } @@ -1436,6 +1476,9 @@ static long f2fs_fallocate(struct file *file, int mode, struct inode *inode = file_inode(file); long ret = 0; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + /* f2fs only support ->fallocate for regular file */ if (!S_ISREG(inode->i_mode)) return -EINVAL; @@ -1469,8 +1512,6 @@ static long f2fs_fallocate(struct file *file, int mode, if (!ret) { inode->i_mtime = inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, false); - if (mode & FALLOC_FL_KEEP_SIZE) - file_set_keep_isize(inode); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); } @@ -1864,6 +1905,9 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); + if (!f2fs_sb_has_crypto(inode->i_sb)) + return -EOPNOTSUPP; + f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); @@ -1871,6 +1915,8 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) { + if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb)) + return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); } @@ -2226,9 +2272,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, } inode_lock(src); + down_write(&F2FS_I(src)->dio_rwsem[WRITE]); if (src != dst) { - if 
(!inode_trylock(dst)) { - ret = -EBUSY; + ret = -EBUSY; + if (!inode_trylock(dst)) + goto out; + if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) { + inode_unlock(dst); goto out; } } @@ -2288,9 +2338,12 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, } f2fs_unlock_op(sbi); out_unlock: - if (src != dst) + if (src != dst) { + up_write(&F2FS_I(dst)->dio_rwsem[WRITE]); inode_unlock(dst); + } out: + up_write(&F2FS_I(src)->dio_rwsem[WRITE]); inode_unlock(src); return ret; } @@ -2412,6 +2465,9 @@ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) + return -EIO; + switch (cmd) { case F2FS_IOC_GETFLAGS: return f2fs_ioc_getflags(filp, arg); @@ -2465,6 +2521,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct blk_plug plug; ssize_t ret; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret > 0) { @@ -2475,6 +2534,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) err = f2fs_preallocate_blocks(iocb, from); if (err) { + clear_inode_flag(inode, FI_NO_PREALLOC); inode_unlock(inode); return err; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index bd16e6631cf3..be9fd616736b 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -267,16 +267,6 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); } -static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi, - unsigned int segno) -{ - unsigned int valid_blocks = - get_valid_blocks(sbi, segno, true); - - return IS_DATASEG(get_seg_entry(sbi, segno)->type) ? 
- valid_blocks * 2 : valid_blocks; -} - static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, struct victim_sel_policy *p) { @@ -285,7 +275,7 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, /* alloc_mode == LFS */ if (p->gc_mode == GC_GREEDY) - return get_greedy_cost(sbi, segno); + return get_valid_blocks(sbi, segno, true); else return get_cb_cost(sbi, segno); } @@ -466,10 +456,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi, struct seg_entry *sentry; int ret; - mutex_lock(&sit_i->sentry_lock); + down_read(&sit_i->sentry_lock); sentry = get_seg_entry(sbi, segno); ret = f2fs_test_bit(offset, sentry->cur_valid_map); - mutex_unlock(&sit_i->sentry_lock); + up_read(&sit_i->sentry_lock); return ret; } @@ -608,6 +598,7 @@ static void move_data_block(struct inode *inode, block_t bidx, { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), + .ino = inode->i_ino, .type = DATA, .temp = COLD, .op = REQ_OP_READ, @@ -659,8 +650,8 @@ static void move_data_block(struct inode *inode, block_t bidx, allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); - fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr, - FGP_LOCK | FGP_CREAT, GFP_NOFS); + fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), + newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; goto recover_block; @@ -738,6 +729,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type, } else { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), + .ino = inode->i_ino, .type = DATA, .temp = COLD, .op = REQ_OP_WRITE, @@ -840,10 +832,17 @@ next_step: continue; } + if (!down_write_trylock( + &F2FS_I(inode)->dio_rwsem[WRITE])) { + iput(inode); + continue; + } + start_bidx = start_bidx_of_node(nofs, inode); data_page = get_read_data_page(inode, start_bidx + ofs_in_node, REQ_RAHEAD, true); + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); if (IS_ERR(data_page)) { iput(inode); continue; @@ -901,10 +900,10 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, struct sit_info *sit_i = SIT_I(sbi); int ret; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); return ret; } @@ -952,8 +951,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, /* * this is to avoid deadlock: * - lock_page(sum_page) - f2fs_replace_block - * - check_valid_map() - mutex_lock(sentry_lock) - * - mutex_lock(sentry_lock) - change_curseg() + * - check_valid_map() - down_write(sentry_lock) + * - down_read(sentry_lock) - change_curseg() * - lock_page(sum_page) */ if (type == SUM_TYPE_NODE) diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index fbf22b0f667f..91d5d831be72 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -130,6 +130,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(dn->inode), + .ino = dn->inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_NOIDLE | REQ_PRIO, diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 50c88e37ed66..9684d53563f1 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -232,6 +232,23 @@ static int do_read_inode(struct inode *inode) fi->i_extra_isize = f2fs_has_extra_attr(inode) ? 
le16_to_cpu(ri->i_extra_isize) : 0; + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) { + f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); + fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size); + } else if (f2fs_has_inline_xattr(inode) || + f2fs_has_inline_dentry(inode)) { + fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + } else { + + /* + * Previous inline data or directory always reserved 200 bytes + * in inode layout, even if inline_xattr is disabled. In order + * to keep inline_dentry's structure for backward compatibility, + * we get the space back only from inline_data. + */ + fi->i_inline_xattr_size = 0; + } + /* check data exist */ if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) __recover_inline_status(inode, node_page); @@ -384,6 +401,10 @@ int update_inode(struct inode *inode, struct page *node_page) if (f2fs_has_extra_attr(inode)) { ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize); + if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb)) + ri->i_inline_xattr_size = + cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size); + if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) && F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, i_projid)) { @@ -480,6 +501,7 @@ void f2fs_evict_inode(struct inode *inode) remove_ino_entry(sbi, inode->i_ino, APPEND_INO); remove_ino_entry(sbi, inode->i_ino, UPDATE_INO); + remove_ino_entry(sbi, inode->i_ino, FLUSH_INO); sb_start_intwrite(inode->i_sb); set_inode_flag(inode, FI_NO_ALLOC); @@ -519,8 +541,10 @@ no_delete: stat_dec_inline_dir(inode); stat_dec_inline_inode(inode); - if (!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) + if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG))) f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE)); + else + f2fs_inode_synced(inode); /* ino == 0, if f2fs_new_inode() was failed t*/ if (inode->i_ino) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index d92b8e9064cb..cf8f4370d256 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -29,6 +29,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) nid_t ino; struct inode *inode; bool nid_free = false; + int xattr_size = 0; int err; inode = new_inode(dir->i_sb); @@ -86,11 +87,23 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) if (test_opt(sbi, INLINE_XATTR)) set_inode_flag(inode, FI_INLINE_XATTR); + if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) set_inode_flag(inode, FI_INLINE_DATA); if (f2fs_may_inline_dentry(inode)) set_inode_flag(inode, FI_INLINE_DENTRY); + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) { + f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); + if (f2fs_has_inline_xattr(inode)) + xattr_size = sbi->inline_xattr_size; + /* Otherwise, will be 0 */ + } else if (f2fs_has_inline_xattr(inode) || + f2fs_has_inline_dentry(inode)) { + xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + } + F2FS_I(inode)->i_inline_xattr_size = xattr_size; + f2fs_init_extent_tree(inode, NULL); stat_inc_inline_xattr(inode); @@ -177,6 +190,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, nid_t ino = 0; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -221,6 +237,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct f2fs_sb_info *sbi = F2FS_I_SB(dir); int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if (f2fs_encrypted_inode(dir) && !fscrypt_has_permitted_context(dir, inode)) return -EPERM; @@ -331,12 +350,15 @@ static struct dentry *f2fs_lookup(struct inode *dir, 
struct dentry *dentry, struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; - nid_t ino; + struct dentry *new; + nid_t ino = -1; int err = 0; unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); + trace_f2fs_lookup_start(dir, dentry, flags); + if (f2fs_encrypted_inode(dir)) { - int res = fscrypt_get_encryption_info(dir); + err = fscrypt_get_encryption_info(dir); /* * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is @@ -346,18 +368,22 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, if (fscrypt_has_encryption_key(dir)) fscrypt_set_encrypted_dentry(dentry); fscrypt_set_d_op(dentry); - if (res && res != -ENOKEY) - return ERR_PTR(res); + if (err && err != -ENOKEY) + goto out; } - if (dentry->d_name.len > F2FS_NAME_LEN) - return ERR_PTR(-ENAMETOOLONG); + if (dentry->d_name.len > F2FS_NAME_LEN) { + err = -ENAMETOOLONG; + goto out; + } de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { - if (IS_ERR(page)) - return (struct dentry *)page; - return d_splice_alias(inode, dentry); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto out; + } + goto out_splice; } ino = le32_to_cpu(de->ino); @@ -365,19 +391,21 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); - if (IS_ERR(inode)) - return ERR_CAST(inode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { err = __recover_dot_dentries(dir, root_ino); if (err) - goto err_out; + goto out_iput; } if (f2fs_has_inline_dots(inode)) { err = __recover_dot_dentries(inode, dir->i_ino); if (err) - goto err_out; + goto out_iput; } if (f2fs_encrypted_inode(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && @@ -386,12 +414,18 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, "Inconsistent encryption contexts: %lu/%lu", dir->i_ino, inode->i_ino); err = -EPERM; - goto err_out; + goto out_iput; } - return d_splice_alias(inode, dentry); - -err_out: +out_splice: + new = d_splice_alias(inode, dentry); + if (IS_ERR(new)) + err = PTR_ERR(new); + trace_f2fs_lookup_end(dir, dentry, ino, err); + return new; +out_iput: iput(inode); +out: + trace_f2fs_lookup_end(dir, dentry, ino, err); return ERR_PTR(err); } @@ -405,7 +439,13 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) trace_f2fs_unlink_enter(dir, dentry); + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); + if (err) + return err; + err = dquot_initialize(inode); if (err) return err; @@ -457,6 +497,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, struct fscrypt_symlink_data *sd = NULL; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if (f2fs_encrypted_inode(dir)) { err = fscrypt_get_encryption_info(dir); if (err) @@ -563,6 +606,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct inode *inode; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -615,6 +661,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, struct inode *inode; int err = 0; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -709,6 +758,9 @@ out: static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) + return -EIO; + if (f2fs_encrypted_inode(dir)) { int err = 
fscrypt_get_encryption_info(dir); if (err) @@ -720,6 +772,9 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) + return -EIO; + return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout); } @@ -739,6 +794,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, bool is_old_inline = f2fs_has_inline_dentry(old_dir); int err = -ENOENT; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if ((f2fs_encrypted_inode(old_dir) && !fscrypt_has_encryption_key(old_dir)) || (f2fs_encrypted_inode(new_dir) && @@ -764,6 +822,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, if (err) goto out; + if (new_inode) { + err = dquot_initialize(new_inode); + if (err) + goto out; + } + old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) @@ -932,6 +996,9 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, int old_nlink = 0, new_nlink = 0; int err = -ENOENT; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if ((f2fs_encrypted_inode(old_dir) && !fscrypt_has_encryption_key(old_dir)) || (f2fs_encrypted_inode(new_dir) && diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 32474db18ad9..964c99655942 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -46,7 +46,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) * give 25%, 25%, 50%, 50%, 50% memory for each components respectively */ if (type == FREE_NIDS) { - mem_size = (nm_i->nid_cnt[FREE_NID_LIST] * + mem_size = (nm_i->nid_cnt[FREE_NID] * sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { @@ -63,7 +63,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) } else if (type == INO_ENTRIES) { int i; - for (i = 0; i <= UPDATE_INO; i++) + for (i = 0; i < MAX_INO_ENTRY; i++) mem_size += sbi->im[i].ino_num * sizeof(struct ino_entry); mem_size >>= PAGE_SHIFT; @@ -74,6 +74,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) atomic_read(&sbi->total_ext_node) * sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); + } else if (type == INMEM_PAGES) { + /* it allows 20% / total_ram for inmemory pages */ + mem_size = get_pages(sbi, F2FS_INMEM_PAGES); + res = mem_size < (val.totalram / 5); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; @@ -134,6 +138,44 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) return dst_page; } +static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail) +{ + struct nat_entry *new; + + if (no_fail) + new = f2fs_kmem_cache_alloc(nat_entry_slab, + GFP_NOFS | __GFP_ZERO); + else + new = kmem_cache_alloc(nat_entry_slab, + GFP_NOFS | __GFP_ZERO); + if (new) { + nat_set_nid(new, nid); + nat_reset_flag(new); + } + return new; +} + +static void __free_nat_entry(struct nat_entry *e) +{ + kmem_cache_free(nat_entry_slab, e); +} + +/* must be locked by nat_tree_lock */ +static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, + struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail) +{ + if (no_fail) + f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); + else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) + return NULL; + + if (raw_ne) + node_info_from_raw_nat(&ne->ni, raw_ne); + 
list_add_tail(&ne->list, &nm_i->nat_entries); + nm_i->nat_cnt++; + return ne; +} + static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->nat_root, n); @@ -150,7 +192,7 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) list_del(&e->list); radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); nm_i->nat_cnt--; - kmem_cache_free(nat_entry_slab, e); + __free_nat_entry(e); } static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, @@ -246,49 +288,29 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) return need_update; } -static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, - bool no_fail) -{ - struct nat_entry *new; - - if (no_fail) { - new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); - f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); - } else { - new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS); - if (!new) - return NULL; - if (radix_tree_insert(&nm_i->nat_root, nid, new)) { - kmem_cache_free(nat_entry_slab, new); - return NULL; - } - } - - memset(new, 0, sizeof(struct nat_entry)); - nat_set_nid(new, nid); - nat_reset_flag(new); - list_add_tail(&new->list, &nm_i->nat_entries); - nm_i->nat_cnt++; - return new; -} - +/* must be locked by nat_tree_lock */ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nat_entry *ne) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct nat_entry *e; + struct nat_entry *new, *e; + new = __alloc_nat_entry(nid, false); + if (!new) + return; + + down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); - if (!e) { - e = grab_nat_entry(nm_i, nid, false); - if (e) - node_info_from_raw_nat(&e->ni, ne); - } else { + if (!e) + e = __init_nat_entry(nm_i, new, ne, false); + else f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || nat_get_blkaddr(e) != le32_to_cpu(ne->block_addr) || nat_get_version(e) != ne->version); - } + up_write(&nm_i->nat_tree_lock); + if (e != new) + __free_nat_entry(new); } static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, @@ -296,11 +318,12 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; + struct nat_entry *new = __alloc_nat_entry(ni->nid, true); down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { - e = grab_nat_entry(nm_i, ni->nid, true); + e = __init_nat_entry(nm_i, new, NULL, true); copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { @@ -312,6 +335,9 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); } + /* let's free early to reduce memory consumption */ + if (e != new) + __free_nat_entry(new); /* sanity check */ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); @@ -327,10 +353,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { unsigned char version = nat_get_version(e); nat_set_version(e, inc_node_version(version)); - - /* in order to reuse the nid */ - if (nm_i->next_scan_nid > ni->nid) - nm_i->next_scan_nid = ni->nid; } /* change address */ @@ -424,9 +446,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) f2fs_put_page(page, 1); cache: /* cache nat entry */ - down_write(&nm_i->nat_tree_lock); cache_nat_entry(sbi, nid, &ne); - 
up_write(&nm_i->nat_tree_lock); } /* @@ -962,7 +982,8 @@ fail: return err > 0 ? 0 : err; } -int truncate_xattr_node(struct inode *inode, struct page *page) +/* caller must lock inode page */ +int truncate_xattr_node(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t nid = F2FS_I(inode)->i_xattr_nid; @@ -978,10 +999,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page) f2fs_i_xnid_write(inode, 0); - set_new_dnode(&dn, inode, page, npage, nid); - - if (page) - dn.inode_page_locked = true; + set_new_dnode(&dn, inode, NULL, npage, nid); truncate_node(&dn); return 0; } @@ -1000,7 +1018,7 @@ int remove_inode_page(struct inode *inode) if (err) return err; - err = truncate_xattr_node(inode, dn.inode_page); + err = truncate_xattr_node(inode); if (err) { f2fs_put_dnode(&dn); return err; @@ -1220,7 +1238,8 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) if (!inode) return; - page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0); + page = f2fs_pagecache_get_page(inode->i_mapping, 0, + FGP_LOCK|FGP_NOWAIT, 0); if (!page) goto iput_out; @@ -1244,37 +1263,6 @@ iput_out: iput(inode); } -void move_node_page(struct page *node_page, int gc_type) -{ - if (gc_type == FG_GC) { - struct f2fs_sb_info *sbi = F2FS_P_SB(node_page); - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 1, - .for_reclaim = 0, - }; - - set_page_dirty(node_page); - f2fs_wait_on_page_writeback(node_page, NODE, true); - - f2fs_bug_on(sbi, PageWriteback(node_page)); - if (!clear_page_dirty_for_io(node_page)) - goto out_page; - - if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc)) - unlock_page(node_page); - goto release_page; - } else { - /* set page dirty and write it */ - if (!PageWriteback(node_page)) - set_page_dirty(node_page); - } -out_page: - unlock_page(node_page); -release_page: - f2fs_put_page(node_page, 0); -} - static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index, end; @@ -1344,6 +1332,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, + .ino = ino_of_node(page), .type = NODE, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), @@ -1416,6 +1405,37 @@ redirty_out: return AOP_WRITEPAGE_ACTIVATE; } +void move_node_page(struct page *node_page, int gc_type) +{ + if (gc_type == FG_GC) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + .for_reclaim = 0, + }; + + set_page_dirty(node_page); + f2fs_wait_on_page_writeback(node_page, NODE, true); + + f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page)); + if (!clear_page_dirty_for_io(node_page)) + goto out_page; + + if (__write_node_page(node_page, false, NULL, + &wbc, false, FS_GC_NODE_IO)) + unlock_page(node_page); + goto release_page; + } else { + /* set page dirty and write it */ + if (!PageWriteback(node_page)) + set_page_dirty(node_page); + } +out_page: + unlock_page(node_page); +release_page: + f2fs_put_page(node_page, 0); +} + static int f2fs_write_node_page(struct page *page, struct writeback_control *wbc) { @@ -1764,35 +1784,54 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, return radix_tree_lookup(&nm_i->free_nid_root, n); } -static int __insert_nid_to_list(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_list list, bool new) +static int __insert_free_nid(struct f2fs_sb_info *sbi, + struct free_nid *i, enum nid_state state) { struct f2fs_nm_info *nm_i = 
NM_I(sbi); - if (new) { - int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); - if (err) - return err; - } + int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); + if (err) + return err; - f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW : - i->state != NID_ALLOC); - nm_i->nid_cnt[list]++; - list_add_tail(&i->list, &nm_i->nid_list[list]); + f2fs_bug_on(sbi, state != i->state); + nm_i->nid_cnt[state]++; + if (state == FREE_NID) + list_add_tail(&i->list, &nm_i->free_nid_list); return 0; } -static void __remove_nid_from_list(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_list list, bool reuse) +static void __remove_free_nid(struct f2fs_sb_info *sbi, + struct free_nid *i, enum nid_state state) { struct f2fs_nm_info *nm_i = NM_I(sbi); - f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW : - i->state != NID_ALLOC); - nm_i->nid_cnt[list]--; - list_del(&i->list); - if (!reuse) - radix_tree_delete(&nm_i->free_nid_root, i->nid); + f2fs_bug_on(sbi, state != i->state); + nm_i->nid_cnt[state]--; + if (state == FREE_NID) + list_del(&i->list); + radix_tree_delete(&nm_i->free_nid_root, i->nid); +} + +static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, + enum nid_state org_state, enum nid_state dst_state) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + + f2fs_bug_on(sbi, org_state != i->state); + i->state = dst_state; + nm_i->nid_cnt[org_state]--; + nm_i->nid_cnt[dst_state]++; + + switch (dst_state) { + case PREALLOC_NID: + list_del(&i->list); + break; + case FREE_NID: + list_add_tail(&i->list, &nm_i->free_nid_list); + break; + default: + BUG_ON(1); + } } /* return if the nid is recognized as free */ @@ -1810,7 +1849,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; - i->state = NID_NEW; + i->state = FREE_NID; if (radix_tree_preload(GFP_NOFS)) goto err; @@ -1823,7 +1862,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) * - f2fs_create * - f2fs_new_inode * - alloc_nid - * - __insert_nid_to_list(ALLOC_NID_LIST) + * - __insert_nid_to_list(PREALLOC_NID) * - f2fs_balance_fs_bg * - build_free_nids * - __build_free_nids @@ -1836,8 +1875,8 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) * - new_node_page * - set_node_addr * - alloc_nid_done - * - __remove_nid_from_list(ALLOC_NID_LIST) - * - __insert_nid_to_list(FREE_NID_LIST) + * - __remove_nid_from_list(PREALLOC_NID) + * - __insert_nid_to_list(FREE_NID) */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || @@ -1846,13 +1885,13 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) e = __lookup_free_nid_list(nm_i, nid); if (e) { - if (e->state == NID_NEW) + if (e->state == FREE_NID) ret = true; goto err_out; } } ret = true; - err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); + err = __insert_free_nid(sbi, i, FREE_NID); err_out: spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); @@ -1870,8 +1909,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); - if (i && i->state == NID_NEW) { - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + if (i && i->state == FREE_NID) { + __remove_free_nid(sbi, i, FREE_NID); need_free = true; } spin_unlock(&nm_i->nid_list_lock); @@ -1890,15 +1929,18 @@ static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, if (!test_bit_le(nat_ofs, 
nm_i->nat_block_bitmap)) return; - if (set) + if (set) { + if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) + return; __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); - else - __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); - - if (set) nm_i->free_nid_count[nat_ofs]++; - else if (!build) - nm_i->free_nid_count[nat_ofs]--; + } else { + if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) + return; + __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + if (!build) + nm_i->free_nid_count[nat_ofs]--; + } } static void scan_nat_page(struct f2fs_sb_info *sbi, @@ -1933,34 +1975,12 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, } } -static void scan_free_nid_bits(struct f2fs_sb_info *sbi) +static void scan_curseg_cache(struct f2fs_sb_info *sbi) { - struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; - unsigned int i, idx; + int i; - down_read(&nm_i->nat_tree_lock); - - for (i = 0; i < nm_i->nat_blocks; i++) { - if (!test_bit_le(i, nm_i->nat_block_bitmap)) - continue; - if (!nm_i->free_nid_count[i]) - continue; - for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { - nid_t nid; - - if (!test_bit_le(idx, nm_i->free_nid_bitmap[i])) - continue; - - nid = i * NAT_ENTRY_PER_BLOCK + idx; - add_free_nid(sbi, nid, true); - - if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) - goto out; - } - } -out: down_read(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { block_t addr; @@ -1974,14 +1994,43 @@ out: remove_free_nid(sbi, nid); } up_read(&curseg->journal_rwsem); +} + +static void scan_free_nid_bits(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned int i, idx; + nid_t nid; + + down_read(&nm_i->nat_tree_lock); + + for (i = 0; i < nm_i->nat_blocks; i++) { + if (!test_bit_le(i, nm_i->nat_block_bitmap)) + continue; + if (!nm_i->free_nid_count[i]) + continue; + for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { + idx = find_next_bit_le(nm_i->free_nid_bitmap[i], + NAT_ENTRY_PER_BLOCK, idx); + if (idx >= NAT_ENTRY_PER_BLOCK) + break; + + nid = i * NAT_ENTRY_PER_BLOCK + idx; + add_free_nid(sbi, nid, true); + + if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) + goto out; + } + } +out: + scan_curseg_cache(sbi); + up_read(&nm_i->nat_tree_lock); } static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); - struct f2fs_journal *journal = curseg->journal; int i = 0; nid_t nid = nm_i->next_scan_nid; @@ -1989,7 +2038,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) nid = 0; /* Enough entries */ - if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK) + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return; if (!sync && !available_free_memory(sbi, FREE_NIDS)) @@ -1999,7 +2048,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) /* try to find free nids in free_nid_bitmap */ scan_free_nid_bits(sbi); - if (nm_i->nid_cnt[FREE_NID_LIST]) + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return; } @@ -2027,18 +2076,8 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) nm_i->next_scan_nid = nid; /* find free nids from current sum_pages */ - down_read(&curseg->journal_rwsem); - for (i = 0; i < nats_in_cursum(journal); i++) { - block_t addr; + scan_curseg_cache(sbi); - addr = le32_to_cpu(nat_in_journal(journal, 
i).block_addr); - nid = le32_to_cpu(nid_in_journal(journal, i)); - if (addr == NULL_ADDR) - add_free_nid(sbi, nid, true); - else - remove_free_nid(sbi, nid); - } - up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), @@ -2076,15 +2115,13 @@ retry: } /* We should not use stale free nids created by build_free_nids */ - if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) { - f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST])); - i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], + if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) { + f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); + i = list_first_entry(&nm_i->free_nid_list, struct free_nid, list); *nid = i->nid; - __remove_nid_from_list(sbi, i, FREE_NID_LIST, true); - i->state = NID_ALLOC; - __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); + __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); nm_i->available_nids--; update_free_nid_bitmap(sbi, *nid, false, false); @@ -2110,7 +2147,7 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); + __remove_free_nid(sbi, i, PREALLOC_NID); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); @@ -2133,12 +2170,10 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) f2fs_bug_on(sbi, !i); if (!available_free_memory(sbi, FREE_NIDS)) { - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); + __remove_free_nid(sbi, i, PREALLOC_NID); need_free = true; } else { - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true); - i->state = NID_NEW; - __insert_nid_to_list(sbi, i, FREE_NID_LIST, false); + __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); } nm_i->available_nids++; @@ -2157,20 +2192,19 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) struct free_nid *i, *next; int nr = nr_shrink; - if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) + if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) return 0; if (!mutex_trylock(&nm_i->build_lock)) return 0; spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST], - list) { + list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { if (nr_shrink <= 0 || - nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) + nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) break; - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + __remove_free_nid(sbi, i, FREE_NID); kmem_cache_free(free_nid_slab, i); nr_shrink--; } @@ -2196,8 +2230,8 @@ void recover_inline_xattr(struct inode *inode, struct page *page) goto update_inode; } - dst_addr = inline_xattr_addr(ipage); - src_addr = inline_xattr_addr(page); + dst_addr = inline_xattr_addr(inode, ipage); + src_addr = inline_xattr_addr(inode, page); inline_size = inline_xattr_size(inode); f2fs_wait_on_page_writeback(ipage, NODE, true); @@ -2286,6 +2320,12 @@ retry: dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); if (dst->i_inline & F2FS_EXTRA_ATTR) { dst->i_extra_isize = src->i_extra_isize; + + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) && + F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), + i_inline_xattr_size)) + dst->i_inline_xattr_size = src->i_inline_xattr_size; + if (f2fs_sb_has_project_quota(sbi->sb) && F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), i_projid)) @@ -2357,8 +2397,8 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) ne = 
__lookup_nat_cache(nm_i, nid); if (!ne) { - ne = grab_nat_entry(nm_i, nid, true); - node_info_from_raw_nat(&ne->ni, &raw_ne); + ne = __alloc_nat_entry(nid, true); + __init_nat_entry(nm_i, ne, &raw_ne, true); } /* @@ -2404,15 +2444,17 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; struct f2fs_nat_block *nat_blk = page_address(page); int valid = 0; - int i; + int i = 0; if (!enabled_nat_bits(sbi, NULL)) return; - for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { - if (start_nid == 0 && i == 0) - valid++; - if (nat_blk->entries[i].block_addr) + if (nat_index == 0) { + valid = 1; + i = 1; + } + for (; i < NAT_ENTRY_PER_BLOCK; i++) { + if (nat_blk->entries[i].block_addr != NULL_ADDR) valid++; } if (valid == 0) { @@ -2607,7 +2649,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) __set_bit_le(i, nm_i->nat_block_bitmap); nid = i * NAT_ENTRY_PER_BLOCK; - last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; + last_nid = nid + NAT_ENTRY_PER_BLOCK; spin_lock(&NM_I(sbi)->nid_list_lock); for (; nid < last_nid; nid++) @@ -2642,16 +2684,15 @@ static int init_node_manager(struct f2fs_sb_info *sbi) /* not used nids: 0, node, meta, (and root counted as valid node) */ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - F2FS_RESERVED_NODE_NUM; - nm_i->nid_cnt[FREE_NID_LIST] = 0; - nm_i->nid_cnt[ALLOC_NID_LIST] = 0; + nm_i->nid_cnt[FREE_NID] = 0; + nm_i->nid_cnt[PREALLOC_NID] = 0; nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); - INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]); - INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]); + INIT_LIST_HEAD(&nm_i->free_nid_list); INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); INIT_LIST_HEAD(&nm_i->nat_entries); @@ -2743,16 +2784,15 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) /* destroy free nid list */ spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST], - list) { - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { + __remove_free_nid(sbi, i, FREE_NID); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); spin_lock(&nm_i->nid_list_lock); } - f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]); - f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]); - f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST])); + f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); + f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); + f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); spin_unlock(&nm_i->nid_list_lock); /* destroy nat cache */ diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index bb53e9955ff2..0ee3e5ff49a3 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -140,6 +140,7 @@ enum mem_type { DIRTY_DENTS, /* indicates dirty dentry pages */ INO_ENTRIES, /* indicates inode entries */ EXTENT_CACHE, /* indicates extent cache */ + INMEM_PAGES, /* indicates inmemory pages */ BASE_CHECK, /* check kernel status */ }; @@ -150,18 +151,10 @@ struct nat_entry_set { unsigned int entry_cnt; /* the # of nat entries in set */ }; -/* - * For free nid mangement - */ -enum nid_state { - NID_NEW, /* newly added to free nid list */ - NID_ALLOC /* it is allocated */ -}; - struct free_nid { struct list_head list; /* for free node id list */ nid_t nid; /* node id */ - int 
state; /* in use or not: NID_NEW or NID_ALLOC */ + int state; /* in use or not: FREE_NID or PREALLOC_NID */ }; static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) @@ -170,12 +163,11 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) struct free_nid *fnid; spin_lock(&nm_i->nid_list_lock); - if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) { + if (nm_i->nid_cnt[FREE_NID] <= 0) { spin_unlock(&nm_i->nid_list_lock); return; } - fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], - struct free_nid, list); + fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list); *nid = fnid->nid; spin_unlock(&nm_i->nid_list_lock); } diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 9626758bc762..92c57ace1939 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -594,6 +594,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) int ret = 0; unsigned long s_flags = sbi->sb->s_flags; bool need_writecp = false; +#ifdef CONFIG_QUOTA + int quota_enabled; +#endif if (s_flags & MS_RDONLY) { f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); @@ -604,7 +607,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) /* Needed for iput() to work correctly and not trash data */ sbi->sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ - f2fs_enable_quota_files(sbi); + quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY); #endif fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", @@ -665,7 +668,8 @@ skip: out: #ifdef CONFIG_QUOTA /* Turn quotas off */ - f2fs_quota_off_umount(sbi->sb); + if (quota_enabled) + f2fs_quota_off_umount(sbi->sb); #endif sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index f5c494389483..94939a5a96c8 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -181,11 +181,12 @@ bool need_SSR(struct f2fs_sb_info *sbi) return true; return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + - 2 * reserved_sections(sbi)); + SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); } void register_inmem_page(struct inode *inode, struct page *page) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *new; @@ -204,6 +205,10 @@ void register_inmem_page(struct inode *inode, struct page *page) mutex_lock(&fi->inmem_lock); get_page(page); list_add_tail(&new->list, &fi->inmem_pages); + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (list_empty(&fi->inmem_ilist)) + list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); mutex_unlock(&fi->inmem_lock); @@ -262,12 +267,41 @@ next: return err; } +void drop_inmem_pages_all(struct f2fs_sb_info *sbi) +{ + struct list_head *head = &sbi->inode_list[ATOMIC_FILE]; + struct inode *inode; + struct f2fs_inode_info *fi; +next: + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (list_empty(head)) { + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + return; + } + fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist); + inode = igrab(&fi->vfs_inode); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + + if (inode) { + drop_inmem_pages(inode); + iput(inode); + } + congestion_wait(BLK_RW_ASYNC, HZ/50); + cond_resched(); + goto next; +} + void drop_inmem_pages(struct inode *inode) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); 
mutex_lock(&fi->inmem_lock); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (!list_empty(&fi->inmem_ilist)) + list_del_init(&fi->inmem_ilist); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_FILE); @@ -313,6 +347,7 @@ static int __commit_inmem_pages(struct inode *inode, struct inmem_pages *cur, *tmp; struct f2fs_io_info fio = { .sbi = sbi, + .ino = inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_PRIO, @@ -398,6 +433,10 @@ int commit_inmem_pages(struct inode *inode) /* drop all uncommitted pages */ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); } + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (!list_empty(&fi->inmem_ilist)) + list_del_init(&fi->inmem_ilist); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_COMMIT); @@ -472,7 +511,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) static int __submit_flush_wait(struct f2fs_sb_info *sbi, struct block_device *bdev) { - struct bio *bio = f2fs_bio_alloc(0); + struct bio *bio = f2fs_bio_alloc(sbi, 0, true); int ret; bio->bi_rw = REQ_OP_WRITE; @@ -485,15 +524,17 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi, return ret; } -static int submit_flush_wait(struct f2fs_sb_info *sbi) +static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) { - int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev); + int ret = 0; int i; - if (!sbi->s_ndevs || ret) - return ret; + if (!sbi->s_ndevs) + return __submit_flush_wait(sbi, sbi->sb->s_bdev); - for (i = 1; i < sbi->s_ndevs; i++) { + for (i = 0; i < sbi->s_ndevs; i++) { + if (!is_dirty_device(sbi, ino, i, FLUSH_INO)) + continue; ret = __submit_flush_wait(sbi, FDEV(i).bdev); if (ret) break; @@ -519,7 +560,9 @@ repeat: fcc->dispatch_list = llist_del_all(&fcc->issue_list); fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); - ret = submit_flush_wait(sbi); + cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode); + + ret = submit_flush_wait(sbi, cmd->ino); atomic_inc(&fcc->issued_flush); llist_for_each_entry_safe(cmd, next, @@ -537,7 +580,7 @@ repeat: goto repeat; } -int f2fs_issue_flush(struct f2fs_sb_info *sbi) +int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) { struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; struct flush_cmd cmd; @@ -547,19 +590,20 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) return 0; if (!test_opt(sbi, FLUSH_MERGE)) { - ret = submit_flush_wait(sbi); + ret = submit_flush_wait(sbi, ino); atomic_inc(&fcc->issued_flush); return ret; } - if (atomic_inc_return(&fcc->issing_flush) == 1) { - ret = submit_flush_wait(sbi); + if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) { + ret = submit_flush_wait(sbi, ino); atomic_dec(&fcc->issing_flush); atomic_inc(&fcc->issued_flush); return ret; } + cmd.ino = ino; init_completion(&cmd.wait); llist_add(&cmd.llnode, &fcc->issue_list); @@ -583,7 +627,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) } else { struct flush_cmd *tmp, *next; - ret = submit_flush_wait(sbi); + ret = submit_flush_wait(sbi, ino); llist_for_each_entry_safe(tmp, next, list, llnode) { if (tmp == &cmd) { @@ -653,6 +697,28 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) } } +int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) +{ + int ret = 0, i; + + if (!sbi->s_ndevs) + return 0; + + for (i = 1; i < sbi->s_ndevs; i++) { + if (!f2fs_test_bit(i, (char 
*)&sbi->dirty_device)) + continue; + ret = __submit_flush_wait(sbi, FDEV(i).bdev); + if (ret) + break; + + spin_lock(&sbi->dev_lock); + f2fs_clear_bit(i, (char *)&sbi->dirty_device); + spin_unlock(&sbi->dev_lock); + } + + return ret; +} + static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { @@ -794,6 +860,8 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); + f2fs_bug_on(sbi, dc->ref); if (dc->error == -EOPNOTSUPP) @@ -875,7 +943,7 @@ static int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (ret) return ret; } - bio = f2fs_bio_alloc(1); + bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, 1); bio->bi_iter.bi_sector = sector; bio->bi_bdev = bdev; bio_set_op_attrs(bio, op, 0); @@ -926,10 +994,14 @@ void __check_sit_bitmap(struct f2fs_sb_info *sbi, /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, - struct discard_cmd *dc) + struct discard_policy *dpolicy, + struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? + &(dcc->fstrim_list) : &(dcc->wait_list); struct bio *bio = NULL; + int flag = dpolicy->sync ? REQ_SYNC : 0; if (dc->state != D_PREP) return; @@ -948,8 +1020,8 @@ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, if (bio) { bio->bi_private = dc; bio->bi_end_io = f2fs_submit_discard_endio; - submit_bio(REQ_SYNC, bio); - list_move_tail(&dc->list, &dcc->wait_list); + submit_bio(flag, bio); + list_move_tail(&dc->list, wait_list); __check_sit_bitmap(sbi, dc->start, dc->start + dc->len); f2fs_update_iostat(sbi, FS_DISCARD, 1); @@ -966,7 +1038,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, struct rb_node *insert_parent) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct rb_node **p = &dcc->root.rb_node; + struct rb_node **p; struct rb_node *parent = NULL; struct discard_cmd *dc = NULL; @@ -1134,58 +1206,107 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi, return 0; } -static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond) +static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + unsigned int start, unsigned int end) +{ + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct discard_cmd *prev_dc = NULL, *next_dc = NULL; + struct rb_node **insert_p = NULL, *insert_parent = NULL; + struct discard_cmd *dc; + struct blk_plug plug; + int issued; + +next: + issued = 0; + + mutex_lock(&dcc->cmd_lock); + f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); + + dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, + NULL, start, + (struct rb_entry **)&prev_dc, + (struct rb_entry **)&next_dc, + &insert_p, &insert_parent, true); + if (!dc) + dc = next_dc; + + blk_start_plug(&plug); + + while (dc && dc->lstart <= end) { + struct rb_node *node; + + if (dc->len < dpolicy->granularity) + goto skip; + + if (dc->state != D_PREP) { + list_move_tail(&dc->list, &dcc->fstrim_list); + goto skip; + } + + __submit_discard_cmd(sbi, dpolicy, dc); + + if (++issued >= dpolicy->max_requests) { + start = dc->lstart + dc->len; + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); + + schedule(); + + goto next; + } +skip: + node = rb_next(&dc->rb_node); + dc = rb_entry_safe(node, struct 
discard_cmd, rb_node); + + if (fatal_signal_pending(current)) + break; + } + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); +} + +static int __issue_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc, *tmp; struct blk_plug plug; - int iter = 0, issued = 0; - int i; + int i, iter = 0, issued = 0; bool io_interrupted = false; - mutex_lock(&dcc->cmd_lock); - f2fs_bug_on(sbi, - !__check_rb_tree_consistence(sbi, &dcc->root)); - blk_start_plug(&plug); - for (i = MAX_PLIST_NUM - 1; - i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) { + for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { + if (i + 1 < dpolicy->granularity) + break; pend_list = &dcc->pend_list[i]; + + mutex_lock(&dcc->cmd_lock); + f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); + blk_start_plug(&plug); list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); - /* Hurry up to finish fstrim */ - if (dcc->pend_list_tag[i] & P_TRIM) { - __submit_discard_cmd(sbi, dc); - issued++; - - if (fatal_signal_pending(current)) - break; - continue; - } - - if (!issue_cond) { - __submit_discard_cmd(sbi, dc); - issued++; - continue; - } - - if (is_idle(sbi)) { - __submit_discard_cmd(sbi, dc); - issued++; - } else { + if (dpolicy->io_aware && i < dpolicy->io_aware_gran && + !is_idle(sbi)) { io_interrupted = true; + goto skip; } - if (++iter >= DISCARD_ISSUE_RATE) - goto out; + __submit_discard_cmd(sbi, dpolicy, dc); + issued++; +skip: + if (++iter >= dpolicy->max_requests) + break; } - if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM) - dcc->pend_list_tag[i] &= (~P_TRIM); + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); + + if (iter >= dpolicy->max_requests) + break; } -out: - blk_finish_plug(&plug); - mutex_unlock(&dcc->cmd_lock); if (!issued && io_interrupted) issued = -1; @@ -1193,12 +1314,13 @@ out: return issued; } -static void __drop_discard_cmd(struct f2fs_sb_info *sbi) +static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc, *tmp; int i; + bool dropped = false; mutex_lock(&dcc->cmd_lock); for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { @@ -1206,39 +1328,58 @@ static void __drop_discard_cmd(struct f2fs_sb_info *sbi) list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); __remove_discard_cmd(sbi, dc); + dropped = true; } } mutex_unlock(&dcc->cmd_lock); + + return dropped; } -static void __wait_one_discard_bio(struct f2fs_sb_info *sbi, +static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + unsigned int len = 0; wait_for_completion_io(&dc->wait); mutex_lock(&dcc->cmd_lock); f2fs_bug_on(sbi, dc->state != D_DONE); dc->ref--; - if (!dc->ref) + if (!dc->ref) { + if (!dc->error) + len = dc->len; __remove_discard_cmd(sbi, dc); + } mutex_unlock(&dcc->cmd_lock); + + return len; } -static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond) +static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + block_t start, block_t end) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct list_head *wait_list = &(dcc->wait_list); + struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? 
+ &(dcc->fstrim_list) : &(dcc->wait_list); struct discard_cmd *dc, *tmp; bool need_wait; + unsigned int trimmed = 0; next: need_wait = false; mutex_lock(&dcc->cmd_lock); list_for_each_entry_safe(dc, tmp, wait_list, list) { - if (!wait_cond || (dc->state == D_DONE && !dc->ref)) { + if (dc->lstart + dc->len <= start || end <= dc->lstart) + continue; + if (dc->len < dpolicy->granularity) + continue; + if (dc->state == D_DONE && !dc->ref) { wait_for_completion_io(&dc->wait); + if (!dc->error) + trimmed += dc->len; __remove_discard_cmd(sbi, dc); } else { dc->ref++; @@ -1249,9 +1390,17 @@ next: mutex_unlock(&dcc->cmd_lock); if (need_wait) { - __wait_one_discard_bio(sbi, dc); + trimmed += __wait_one_discard_bio(sbi, dc); goto next; } + + return trimmed; +} + +static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy) +{ + __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); } /* This should be covered by global mutex, &sit_i->sentry_lock */ @@ -1289,23 +1438,19 @@ void stop_discard_thread(struct f2fs_sb_info *sbi) } } -/* This comes from f2fs_put_super and f2fs_trim_fs */ -void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount) -{ - __issue_discard_cmd(sbi, false); - __drop_discard_cmd(sbi); - __wait_discard_cmd(sbi, !umount); -} - -static void mark_discard_range_all(struct f2fs_sb_info *sbi) +/* This comes from f2fs_put_super */ +bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - int i; + struct discard_policy dpolicy; + bool dropped; - mutex_lock(&dcc->cmd_lock); - for (i = 0; i < MAX_PLIST_NUM; i++) - dcc->pend_list_tag[i] |= P_TRIM; - mutex_unlock(&dcc->cmd_lock); + init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity); + __issue_discard_cmd(sbi, &dpolicy); + dropped = __drop_discard_cmd(sbi); + __wait_all_discard_cmd(sbi, &dpolicy); + + return dropped; } static int issue_discard_thread(void *data) @@ -1313,12 +1458,16 @@ static int issue_discard_thread(void *data) struct f2fs_sb_info *sbi = data; struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; wait_queue_head_t *q = &dcc->discard_wait_queue; + struct discard_policy dpolicy; unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME; int issued; set_freezable(); do { + init_discard_policy(&dpolicy, DPOLICY_BG, + dcc->discard_granularity); + wait_event_interruptible_timeout(*q, kthread_should_stop() || freezing(current) || dcc->discard_wake, @@ -1331,17 +1480,18 @@ static int issue_discard_thread(void *data) if (dcc->discard_wake) { dcc->discard_wake = 0; if (sbi->gc_thread && sbi->gc_thread->gc_urgent) - mark_discard_range_all(sbi); + init_discard_policy(&dpolicy, + DPOLICY_FORCE, 1); } sb_start_intwrite(sbi->sb); - issued = __issue_discard_cmd(sbi, true); + issued = __issue_discard_cmd(sbi, &dpolicy); if (issued) { - __wait_discard_cmd(sbi, true); - wait_ms = DEF_MIN_DISCARD_ISSUE_TIME; + __wait_all_discard_cmd(sbi, &dpolicy); + wait_ms = dpolicy.min_interval; } else { - wait_ms = DEF_MAX_DISCARD_ISSUE_TIME; + wait_ms = dpolicy.max_interval; } sb_end_intwrite(sbi->sb); @@ -1605,7 +1755,6 @@ find_next: f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, len); - cpc->trimmed += len; total_len += len; } else { next_pos = find_next_bit_le(entry->discard_map, @@ -1626,6 +1775,37 @@ skip: wake_up_discard_thread(sbi, false); } +void init_discard_policy(struct discard_policy *dpolicy, + int discard_type, unsigned int granularity) +{ + /* common policy */ + dpolicy->type = discard_type; + dpolicy->sync = true; + 
dpolicy->granularity = granularity; + + if (discard_type == DPOLICY_BG) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = true; + } else if (discard_type == DPOLICY_FORCE) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = true; + } else if (discard_type == DPOLICY_FSTRIM) { + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = false; + } else if (discard_type == DPOLICY_UMOUNT) { + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = false; + } +} + static int create_discard_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; @@ -1643,12 +1823,10 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi) dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; INIT_LIST_HEAD(&dcc->entry_list); - for (i = 0; i < MAX_PLIST_NUM; i++) { + for (i = 0; i < MAX_PLIST_NUM; i++) INIT_LIST_HEAD(&dcc->pend_list[i]); - if (i >= dcc->discard_granularity - 1) - dcc->pend_list_tag[i] |= P_ACTIVE; - } INIT_LIST_HEAD(&dcc->wait_list); + INIT_LIST_HEAD(&dcc->fstrim_list); mutex_init(&dcc->cmd_lock); atomic_set(&dcc->issued_discard, 0); atomic_set(&dcc->issing_discard, 0); @@ -1796,16 +1974,6 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) get_sec_entry(sbi, segno)->valid_blocks += del; } -void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) -{ - update_sit_entry(sbi, new, 1); - if (GET_SEGNO(sbi, old) != NULL_SEGNO) - update_sit_entry(sbi, old, -1); - - locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); - locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); -} - void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) { unsigned int segno = GET_SEGNO(sbi, addr); @@ -1816,14 +1984,14 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) return; /* add it into sit main buffer */ - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); update_sit_entry(sbi, addr, -1); /* add it into dirty seglist */ locate_dirty_segment(sbi, segno); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); } bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) @@ -1836,7 +2004,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) return true; - mutex_lock(&sit_i->sentry_lock); + down_read(&sit_i->sentry_lock); segno = GET_SEGNO(sbi, blkaddr); se = get_seg_entry(sbi, segno); @@ -1845,7 +2013,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) if (f2fs_test_bit(offset, se->ckpt_valid_map)) is_cp = true; - mutex_unlock(&sit_i->sentry_lock); + up_read(&sit_i->sentry_lock); return is_cp; } @@ -1903,12 +2071,8 @@ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) { struct page *page = grab_meta_page(sbi, blk_addr); - void *dst = page_address(page); - if (src) - memcpy(dst, src, PAGE_SIZE); - else - memset(dst, 0, PAGE_SIZE); + memcpy(page_address(page), src, PAGE_SIZE); set_page_dirty(page); f2fs_put_page(page, 1); } @@ -2007,7 +2171,6 @@ 
find_other_zone: } secno = left_start; skip_left: - hint = secno; segno = GET_SEG_FROM_SEC(sbi, secno); zoneno = GET_ZONE_FROM_SEC(sbi, secno); @@ -2242,12 +2405,16 @@ void allocate_new_segments(struct f2fs_sb_info *sbi) unsigned int old_segno; int i; + down_write(&SIT_I(sbi)->sentry_lock); + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { curseg = CURSEG_I(sbi, i); old_segno = curseg->segno; SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); locate_dirty_segment(sbi, old_segno); } + + up_write(&SIT_I(sbi)->sentry_lock); } static const struct segment_allocation default_salloc_ops = { @@ -2259,14 +2426,14 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) __u64 trim_start = cpc->trim_start; bool has_candidate = false; - mutex_lock(&SIT_I(sbi)->sentry_lock); + down_write(&SIT_I(sbi)->sentry_lock); for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { if (add_discard_addrs(sbi, cpc, true)) { has_candidate = true; break; } } - mutex_unlock(&SIT_I(sbi)->sentry_lock); + up_write(&SIT_I(sbi)->sentry_lock); cpc->trim_start = trim_start; return has_candidate; @@ -2276,14 +2443,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; - unsigned int start_segno, end_segno; + unsigned int start_segno, end_segno, cur_segno; + block_t start_block, end_block; struct cp_control cpc; + struct discard_policy dpolicy; + unsigned long long trimmed = 0; int err = 0; if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) return -EINVAL; - cpc.trimmed = 0; if (end <= MAIN_BLKADDR(sbi)) goto out; @@ -2297,12 +2466,14 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); end_segno = (end >= MAX_BLKADDR(sbi)) ? 
MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); + cpc.reason = CP_DISCARD; cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); /* do checkpoint to issue discard commands safely */ - for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { - cpc.trim_start = start_segno; + for (cur_segno = start_segno; cur_segno <= end_segno; + cur_segno = cpc.trim_end + 1) { + cpc.trim_start = cur_segno; if (sbi->discard_blks == 0) break; @@ -2310,7 +2481,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) cpc.trim_end = end_segno; else cpc.trim_end = min_t(unsigned int, - rounddown(start_segno + + rounddown(cur_segno + BATCHED_TRIM_SEGMENTS(sbi), sbi->segs_per_sec) - 1, end_segno); @@ -2322,11 +2493,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) schedule(); } - /* It's time to issue all the filed discards */ - mark_discard_range_all(sbi); - f2fs_wait_discard_bios(sbi, false); + + start_block = START_BLOCK(sbi, start_segno); + end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1); + + init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); + __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); + trimmed = __wait_discard_cmd_range(sbi, &dpolicy, + start_block, end_block); out: - range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); + range->len = F2FS_BLK_TO_BYTES(trimmed); return err; } @@ -2338,6 +2514,20 @@ static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) return false; } +#if 0 +int rw_hint_to_seg_type(enum rw_hint hint) +{ + switch (hint) { + case WRITE_LIFE_SHORT: + return CURSEG_HOT_DATA; + case WRITE_LIFE_EXTREME: + return CURSEG_COLD_DATA; + default: + return CURSEG_WARM_DATA; + } +} +#endif + static int __get_segment_type_2(struct f2fs_io_info *fio) { if (fio->type == DATA) @@ -2372,6 +2562,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) return CURSEG_COLD_DATA; if (is_inode_flag_set(inode, FI_HOT_DATA)) return CURSEG_HOT_DATA; + /* rw_hint_to_seg_type(inode->i_write_hint); */ return CURSEG_WARM_DATA; } else { if (IS_DNODE(fio->page)) @@ -2416,8 +2607,10 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); + down_read(&SM_I(sbi)->curseg_lock); + mutex_lock(&curseg->curseg_mutex); - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); @@ -2434,15 +2627,26 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, stat_inc_block_count(sbi, curseg); + /* + * SIT information should be updated before segment allocation, + * since SSR needs latest valid block information. + */ + update_sit_entry(sbi, *new_blkaddr, 1); + if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) + update_sit_entry(sbi, old_blkaddr, -1); + if (!__has_curseg_space(sbi, type)) sit_i->s_ops->allocate_segment(sbi, type, false); - /* - * SIT information should be updated after segment allocation, - * since we need to keep dirty segments precisely under SSR. - */ - refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); - mutex_unlock(&sit_i->sentry_lock); + /* + * segment dirty status should be updated after segment allocation, + * so we just need to update status only one time after previous + * segment being closed. 
+ */ + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); + + up_write(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) { fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); @@ -2462,6 +2666,29 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, } mutex_unlock(&curseg->curseg_mutex); + + up_read(&SM_I(sbi)->curseg_lock); +} + +static void update_device_state(struct f2fs_io_info *fio) +{ + struct f2fs_sb_info *sbi = fio->sbi; + unsigned int devidx; + + if (!sbi->s_ndevs) + return; + + devidx = f2fs_target_device_index(sbi, fio->new_blkaddr); + + /* update device state for fsync */ + set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO); + + /* update device state for checkpoint */ + if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { + spin_lock(&sbi->dev_lock); + f2fs_set_bit(devidx, (char *)&sbi->dirty_device); + spin_unlock(&sbi->dev_lock); + } } static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) @@ -2478,6 +2705,8 @@ reallocate: if (err == -EAGAIN) { fio->old_blkaddr = fio->new_blkaddr; goto reallocate; + } else if (!err) { + update_device_state(fio); } } @@ -2538,12 +2767,26 @@ int rewrite_data_page(struct f2fs_io_info *fio) stat_inc_inplace_blocks(fio->sbi); err = f2fs_submit_page_bio(fio); + if (!err) + update_device_state(fio); f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); return err; } +static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + int i; + + for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { + if (CURSEG_I(sbi, i)->segno == segno) + break; + } + return i; +} + void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr, bool recover_curseg, bool recover_newaddr) @@ -2559,6 +2802,8 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, se = get_seg_entry(sbi, segno); type = se->type; + down_write(&SM_I(sbi)->curseg_lock); + if (!recover_curseg) { /* for recovery flow */ if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { @@ -2568,14 +2813,19 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, type = CURSEG_WARM_DATA; } } else { - if (!IS_CURSEG(sbi, segno)) + if (IS_CURSEG(sbi, segno)) { + /* se->type is volatile as SSR allocation */ + type = __f2fs_get_curseg(sbi, segno); + f2fs_bug_on(sbi, type == NO_CHECK_TYPE); + } else { type = CURSEG_WARM_DATA; + } } curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); old_cursegno = curseg->segno; old_blkoff = curseg->next_blkoff; @@ -2607,8 +2857,9 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, curseg->next_blkoff = old_blkoff; } - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); + up_write(&SM_I(sbi)->curseg_lock); } void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, @@ -3062,7 +3313,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) bool to_journal = true; struct seg_entry *se; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); if (!sit_i->dirty_sentries) goto out; @@ -3156,7 +3407,7 @@ out: cpc->trim_start = trim_start; } - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); set_prefree_as_free_segments(sbi); } @@ -3249,7 +3500,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi) 
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; - mutex_init(&sit_i->sentry_lock); + init_rwsem(&sit_i->sentry_lock); return 0; } @@ -3490,7 +3741,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi) struct sit_info *sit_i = SIT_I(sbi); unsigned int segno; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); sit_i->min_mtime = LLONG_MAX; @@ -3507,7 +3758,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi) sit_i->min_mtime = mtime; } sit_i->max_mtime = get_mtime(sbi); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); } int build_segment_manager(struct f2fs_sb_info *sbi) @@ -3540,11 +3791,14 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; + sm_info->min_ssr_sections = reserved_sections(sbi); sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); + init_rwsem(&sm_info->curseg_lock); + if (!f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index ffa11274b0ce..5264b6ed120c 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -231,7 +231,7 @@ struct sit_info { unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ unsigned int dirty_sentries; /* # of dirty sentries */ unsigned int sents_per_block; /* # of SIT entries per block */ - struct mutex sentry_lock; /* to protect SIT cache */ + struct rw_semaphore sentry_lock; /* to protect SIT cache */ struct seg_entry *sentries; /* SIT segment-level cache */ struct sec_entry *sec_entries; /* SIT section-level cache */ @@ -497,6 +497,33 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi) return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi)); } +static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi) +{ + unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) + + get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int segno, left_blocks; + int i; + + /* check current node segment */ + for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) { + segno = CURSEG_I(sbi, i)->segno; + left_blocks = sbi->blocks_per_seg - + get_seg_entry(sbi, segno)->ckpt_valid_blocks; + + if (node_blocks > left_blocks) + return false; + } + + /* check current data segment */ + segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno; + left_blocks = sbi->blocks_per_seg - + get_seg_entry(sbi, segno)->ckpt_valid_blocks; + if (dent_blocks > left_blocks) + return false; + return true; +} + static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed, int needed) { @@ -507,6 +534,9 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) return false; + if (free_sections(sbi) + freed == reserved_sections(sbi) + needed && + has_curseg_enough_space(sbi)) + return false; return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs + imeta_secs + reserved_sections(sbi) + needed); @@ -730,7 +760,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi, unsigned int secno) { - if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >= + if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), 
true) > sbi->fggc_threshold) return true; return false; @@ -795,8 +825,9 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force) goto wake_up; mutex_lock(&dcc->cmd_lock); - for (i = MAX_PLIST_NUM - 1; - i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) { + for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { + if (i + 1 < dcc->discard_granularity) + break; if (!list_empty(&dcc->pend_list[i])) { wakeup = true; break; diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index 5c60fc28ec75..0b5664a1a6cc 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -28,7 +28,7 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) { - long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS; + long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS; return count > 0 ? count : 0; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 482bb0333806..76e2f1518224 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -44,6 +44,8 @@ static struct kmem_cache *f2fs_inode_cachep; char *fault_name[FAULT_MAX] = { [FAULT_KMALLOC] = "kmalloc", [FAULT_PAGE_ALLOC] = "page alloc", + [FAULT_PAGE_GET] = "page get", + [FAULT_ALLOC_BIO] = "alloc bio", [FAULT_ALLOC_NID] = "alloc nid", [FAULT_ORPHAN] = "orphan", [FAULT_BLOCK] = "no more block", @@ -92,6 +94,7 @@ enum { Opt_disable_ext_identify, Opt_inline_xattr, Opt_noinline_xattr, + Opt_inline_xattr_size, Opt_inline_data, Opt_inline_dentry, Opt_noinline_dentry, @@ -141,6 +144,7 @@ static match_table_t f2fs_tokens = { {Opt_disable_ext_identify, "disable_ext_identify"}, {Opt_inline_xattr, "inline_xattr"}, {Opt_noinline_xattr, "noinline_xattr"}, + {Opt_inline_xattr_size, "inline_xattr_size=%u"}, {Opt_inline_data, "inline_data"}, {Opt_inline_dentry, "inline_dentry"}, {Opt_noinline_dentry, "noinline_dentry"}, @@ -209,6 +213,12 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, "quota options when quota turned on"); return -EINVAL; } + if (f2fs_sb_has_quota_ino(sb)) { + f2fs_msg(sb, KERN_INFO, + "QUOTA feature is enabled, so ignore qf_name"); + return 0; + } + qname = match_strdup(args); if (!qname) { f2fs_msg(sb, KERN_ERR, @@ -287,6 +297,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) return -1; } } + + if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) { + f2fs_msg(sbi->sb, KERN_INFO, + "QUOTA feature is enabled, so ignore jquota_fmt"); + sbi->s_jquota_fmt = 0; + } + if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) { + f2fs_msg(sbi->sb, KERN_INFO, + "Filesystem with quota feature cannot be mounted RDWR " + "without CONFIG_QUOTA"); + return -1; + } return 0; } #endif @@ -383,6 +405,12 @@ static int parse_options(struct super_block *sb, char *options) case Opt_noinline_xattr: clear_opt(sbi, INLINE_XATTR); break; + case Opt_inline_xattr_size: + if (args->from && match_int(args, &arg)) + return -EINVAL; + set_opt(sbi, INLINE_XATTR_SIZE); + sbi->inline_xattr_size = arg; + break; #else case Opt_user_xattr: f2fs_msg(sb, KERN_INFO, @@ -604,6 +632,24 @@ static int parse_options(struct super_block *sb, char *options) F2FS_IO_SIZE_KB(sbi)); return -EINVAL; } + + if (test_opt(sbi, INLINE_XATTR_SIZE)) { + if (!test_opt(sbi, INLINE_XATTR)) { + f2fs_msg(sb, KERN_ERR, + "inline_xattr_size option should be " + "set with inline_xattr option"); + return -EINVAL; + } + if (!sbi->inline_xattr_size || + sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE - + F2FS_TOTAL_EXTRA_ATTR_SIZE - + DEF_INLINE_RESERVED_SIZE - + DEF_MIN_INLINE_SIZE) { + 
f2fs_msg(sb, KERN_ERR, + "inline xattr size is out of range"); + return -EINVAL; + } + } return 0; } @@ -618,13 +664,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) init_once((void *) fi); /* Initialize f2fs-specific inode info */ - fi->vfs_inode.i_version = 1; atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; fi->i_advise = 0; init_rwsem(&fi->i_sem); INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->gdirty_list); + INIT_LIST_HEAD(&fi->inmem_ilist); INIT_LIST_HEAD(&fi->inmem_pages); mutex_init(&fi->inmem_lock); init_rwsem(&fi->dio_rwsem[READ]); @@ -673,7 +719,6 @@ static int f2fs_drop_inode(struct inode *inode) sb_end_intwrite(inode->i_sb); - fscrypt_put_encryption_info(inode, NULL); spin_lock(&inode->i_lock); atomic_dec(&inode->i_count); } @@ -781,6 +826,7 @@ static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); int i; + bool dropped; f2fs_quota_off_umount(sb); @@ -801,9 +847,9 @@ static void f2fs_put_super(struct super_block *sb) } /* be sure to wait for any on-going discard commands */ - f2fs_wait_discard_bios(sbi, true); + dropped = f2fs_wait_discard_bios(sbi); - if (f2fs_discard_en(sbi) && !sbi->discard_blks) { + if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) { struct cp_control cpc = { .reason = CP_UMOUNT | CP_TRIMMED, }; @@ -859,6 +905,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync) struct f2fs_sb_info *sbi = F2FS_SB(sb); int err = 0; + if (unlikely(f2fs_cp_error(sbi))) + return 0; + trace_f2fs_sync_fs(sb, sync); if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) @@ -958,7 +1007,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_blocks = total_count - start_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count; buf->f_bavail = user_block_count - valid_user_blocks(sbi) - - sbi->reserved_blocks; + sbi->current_reserved_blocks; avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; @@ -1047,6 +1096,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",inline_xattr"); else seq_puts(seq, ",noinline_xattr"); + if (test_opt(sbi, INLINE_XATTR_SIZE)) + seq_printf(seq, ",inline_xattr_size=%u", + sbi->inline_xattr_size); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) @@ -1109,6 +1161,7 @@ static void default_options(struct f2fs_sb_info *sbi) { /* init some FS parameters */ sbi->active_logs = NR_CURSEG_TYPE; + sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; set_opt(sbi, BG_GC); set_opt(sbi, INLINE_XATTR); @@ -1137,6 +1190,9 @@ static void default_options(struct f2fs_sb_info *sbi) #endif } +#ifdef CONFIG_QUOTA +static int f2fs_enable_quotas(struct super_block *sb); +#endif static int f2fs_remount(struct super_block *sb, int *flags, char *data) { struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -1203,6 +1259,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) if (f2fs_readonly(sb) && (*flags & MS_RDONLY)) goto skip; +#ifdef CONFIG_QUOTA if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) { err = dquot_suspend(sb, -1); if (err < 0) @@ -1210,9 +1267,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) } else { /* dquot_resume needs RW */ sb->s_flags &= ~MS_RDONLY; - dquot_resume(sb, -1); + if (sb_any_quota_suspended(sb)) { + dquot_resume(sb, -1); + } else if (f2fs_sb_has_quota_ino(sb)) { + err = f2fs_enable_quotas(sb); + if (err) + goto restore_opts; + } } - +#endif /* disallow enable/disable extent_cache dynamically */ if (no_extent_cache == 
!!test_opt(sbi, EXTENT_CACHE)) { err = -EINVAL; @@ -1321,8 +1384,13 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); repeat: page = read_mapping_page(mapping, blkidx, NULL); - if (IS_ERR(page)) + if (IS_ERR(page)) { + if (PTR_ERR(page) == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto repeat; + } return PTR_ERR(page); + } lock_page(page); @@ -1365,11 +1433,16 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, while (towrite > 0) { tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite); - +retry: err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, &page, NULL); - if (unlikely(err)) + if (unlikely(err)) { + if (err == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry; + } break; + } kaddr = kmap_atomic(page); memcpy(kaddr + offset, data, tocopy); @@ -1386,8 +1459,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, } if (len == towrite) - return 0; - inode->i_version++; + return err; inode->i_mtime = inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, false); return len - towrite; @@ -1409,19 +1481,91 @@ static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) sbi->s_jquota_fmt, type); } -void f2fs_enable_quota_files(struct f2fs_sb_info *sbi) +int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) { - int i, ret; + int enabled = 0; + int i, err; + + if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) { + err = f2fs_enable_quotas(sbi->sb); + if (err) { + f2fs_msg(sbi->sb, KERN_ERR, + "Cannot turn on quota_ino: %d", err); + return 0; + } + return 1; + } for (i = 0; i < MAXQUOTAS; i++) { if (sbi->s_qf_names[i]) { - ret = f2fs_quota_on_mount(sbi, i); - if (ret < 0) - f2fs_msg(sbi->sb, KERN_ERR, - "Cannot turn on journaled " - "quota: error %d", ret); + err = f2fs_quota_on_mount(sbi, i); + if (!err) { + enabled = 1; + continue; + } + f2fs_msg(sbi->sb, KERN_ERR, + "Cannot turn on quotas: %d on %d", err, i); } } + return enabled; +} + +static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, + unsigned int flags) +{ + struct inode *qf_inode; + unsigned long qf_inum; + int err; + + BUG_ON(!f2fs_sb_has_quota_ino(sb)); + + qf_inum = f2fs_qf_ino(sb, type); + if (!qf_inum) + return -EPERM; + + qf_inode = f2fs_iget(sb, qf_inum); + if (IS_ERR(qf_inode)) { + f2fs_msg(sb, KERN_ERR, + "Bad quota inode %u:%lu", type, qf_inum); + return PTR_ERR(qf_inode); + } + + /* Don't account quota for quota files to avoid recursion */ + qf_inode->i_flags |= S_NOQUOTA; + err = dquot_enable(qf_inode, type, format_id, flags); + iput(qf_inode); + return err; +} + +static int f2fs_enable_quotas(struct super_block *sb) +{ + int type, err = 0; + unsigned long qf_inum; + bool quota_mopt[MAXQUOTAS] = { + test_opt(F2FS_SB(sb), USRQUOTA), + test_opt(F2FS_SB(sb), GRPQUOTA), + test_opt(F2FS_SB(sb), PRJQUOTA), + }; + + sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; + for (type = 0; type < MAXQUOTAS; type++) { + qf_inum = f2fs_qf_ino(sb, type); + if (qf_inum) { + err = f2fs_quota_enable(sb, type, QFMT_VFS_V1, + DQUOT_USAGE_ENABLED | + (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to enable quota tracking " + "(type=%d, err=%d). 
Please run " + "fsck to fix.", type, err); + for (type--; type >= 0; type--) + dquot_quota_off(sb, type); + return err; + } + } + } + return 0; } static int f2fs_quota_sync(struct super_block *sb, int type) @@ -1492,7 +1636,7 @@ static int f2fs_quota_off(struct super_block *sb, int type) f2fs_quota_sync(sb, type); err = dquot_quota_off(sb, type); - if (err) + if (err || f2fs_sb_has_quota_ino(sb)) goto out_put; inode_lock(inode); @@ -1660,7 +1804,7 @@ static loff_t max_file_blocks(void) /* * note: previously, result is equal to (DEF_ADDRS_PER_INODE - - * F2FS_INLINE_XATTR_ADDRS), but now f2fs try to reserve more + * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more * space in inode.i_addr, it will be more safe to reassign * result as zero. */ @@ -1969,6 +2113,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi) for (j = HOT; j < NR_TEMP_TYPE; j++) mutex_init(&sbi->wio_mutex[i][j]); spin_lock_init(&sbi->cp_lock); + + sbi->dirty_device = 0; + spin_lock_init(&sbi->dev_lock); } static int init_percpu_info(struct f2fs_sb_info *sbi) @@ -2323,7 +2470,10 @@ try_onemore: #ifdef CONFIG_QUOTA sb->dq_op = &f2fs_quota_operations; - sb->s_qcop = &f2fs_quotactl_ops; + if (f2fs_sb_has_quota_ino(sb)) + sb->s_qcop = &dquot_quotactl_sysfile_ops; + else + sb->s_qcop = &f2fs_quotactl_ops; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif @@ -2419,6 +2569,7 @@ try_onemore: le64_to_cpu(sbi->ckpt->valid_block_count); sbi->last_valid_block_count = sbi->total_valid_block_count; sbi->reserved_blocks = 0; + sbi->current_reserved_blocks = 0; for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); @@ -2493,10 +2644,24 @@ try_onemore: if (err) goto free_root_inode; +#ifdef CONFIG_QUOTA + /* + * Turn on quotas which were not enabled for read-only mounts if + * filesystem has quota feature, so that they are updated correctly. + */ + if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) { + err = f2fs_enable_quotas(sb); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Cannot turn on quotas: error %d", err); + goto free_sysfs; + } + } +#endif /* if there are nt orphan nodes free them */ err = recover_orphan_inodes(sbi); if (err) - goto free_sysfs; + goto free_meta; /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { @@ -2530,7 +2695,7 @@ try_onemore: err = -EINVAL; f2fs_msg(sb, KERN_ERR, "Need to recover fsync data"); - goto free_sysfs; + goto free_meta; } } skip_recovery: @@ -2564,6 +2729,10 @@ skip_recovery: return 0; free_meta: +#ifdef CONFIG_QUOTA + if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) + f2fs_quota_off_umount(sbi->sb); +#endif f2fs_sync_inode_meta(sbi); /* * Some dirty meta pages can be produced by recover_orphan_inodes() @@ -2572,7 +2741,9 @@ free_meta: * falls into an infinite loop in sync_meta_pages(). 
*/ truncate_inode_pages_final(META_MAPPING(sbi)); +#ifdef CONFIG_QUOTA free_sysfs: +#endif f2fs_unregister_sysfs(sbi); free_root_inode: dput(sb->s_root); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e2c258f717cd..9835348b6e5d 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -30,7 +30,7 @@ enum { FAULT_INFO_RATE, /* struct f2fs_fault_info */ FAULT_INFO_TYPE, /* struct f2fs_fault_info */ #endif - RESERVED_BLOCKS, + RESERVED_BLOCKS, /* struct f2fs_sb_info */ }; struct f2fs_attr { @@ -63,6 +63,13 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) return NULL; } +static ssize_t dirty_segments_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)(dirty_segments(sbi))); +} + static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -100,10 +107,22 @@ static ssize_t features_show(struct f2fs_attr *a, if (f2fs_sb_has_inode_chksum(sb)) len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? ", " : "", "inode_checksum"); + if (f2fs_sb_has_flexible_inline_xattr(sb)) + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", + len ? ", " : "", "flexible_inline_xattr"); + if (f2fs_sb_has_quota_ino(sb)) + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", + len ? ", " : "", "quota_ino"); len += snprintf(buf + len, PAGE_SIZE - len, "\n"); return len; } +static ssize_t current_reserved_blocks_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks); +} + static ssize_t f2fs_sbi_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -143,34 +162,22 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, #endif if (a->struct_type == RESERVED_BLOCKS) { spin_lock(&sbi->stat_lock); - if ((unsigned long)sbi->total_valid_block_count + t > - (unsigned long)sbi->user_block_count) { + if (t > (unsigned long)sbi->user_block_count) { spin_unlock(&sbi->stat_lock); return -EINVAL; } *ui = t; + sbi->current_reserved_blocks = min(sbi->reserved_blocks, + sbi->user_block_count - valid_user_blocks(sbi)); spin_unlock(&sbi->stat_lock); return count; } if (!strcmp(a->attr.name, "discard_granularity")) { - struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - int i; - if (t == 0 || t > MAX_PLIST_NUM) return -EINVAL; if (t == *ui) return count; - - mutex_lock(&dcc->cmd_lock); - for (i = 0; i < MAX_PLIST_NUM; i++) { - if (i >= t - 1) - dcc->pend_list_tag[i] |= P_ACTIVE; - else - dcc->pend_list_tag[i] &= (~P_ACTIVE); - } - mutex_unlock(&dcc->cmd_lock); - *ui = t; return count; } @@ -222,6 +229,8 @@ enum feat_id { FEAT_EXTRA_ATTR, FEAT_PROJECT_QUOTA, FEAT_INODE_CHECKSUM, + FEAT_FLEXIBLE_INLINE_XATTR, + FEAT_QUOTA_INO, }; static ssize_t f2fs_feature_show(struct f2fs_attr *a, @@ -234,6 +243,8 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a, case FEAT_EXTRA_ATTR: case FEAT_PROJECT_QUOTA: case FEAT_INODE_CHECKSUM: + case FEAT_FLEXIBLE_INLINE_XATTR: + case FEAT_QUOTA_INO: return snprintf(buf, PAGE_SIZE, "supported\n"); } return 0; @@ -279,6 +290,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh); F2FS_RW_ATTR(NM_INFO, 
f2fs_nm_info, ra_nid_pages, ra_nid_pages); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio); @@ -291,8 +303,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable); F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif +F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); F2FS_GENERAL_RO_ATTR(features); +F2FS_GENERAL_RO_ATTR(current_reserved_blocks); #ifdef CONFIG_F2FS_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); @@ -304,6 +318,8 @@ F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE); F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR); F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA); F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM); +F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR); +F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO); #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { @@ -321,6 +337,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(min_ipu_util), ATTR_LIST(min_fsync_blocks), ATTR_LIST(min_hot_blocks), + ATTR_LIST(min_ssr_sections), ATTR_LIST(max_victim_search), ATTR_LIST(dir_level), ATTR_LIST(ram_thresh), @@ -333,9 +350,11 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_rate), ATTR_LIST(inject_type), #endif + ATTR_LIST(dirty_segments), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(features), ATTR_LIST(reserved_blocks), + ATTR_LIST(current_reserved_blocks), NULL, }; @@ -350,6 +369,8 @@ static struct attribute *f2fs_feat_attrs[] = { ATTR_LIST(extra_attr), ATTR_LIST(project_quota), ATTR_LIST(inode_checksum), + ATTR_LIST(flexible_inline_xattr), + ATTR_LIST(quota_ino), NULL, }; diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index ab658419552b..7acf56ebda65 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -264,12 +264,12 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index, return entry; } -static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr, - void **last_addr, int index, - size_t len, const char *name) +static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode, + void *base_addr, void **last_addr, int index, + size_t len, const char *name) { struct f2fs_xattr_entry *entry; - unsigned int inline_size = F2FS_INLINE_XATTR_ADDRS << 2; + unsigned int inline_size = inline_xattr_size(inode); list_for_each_xattr(entry, base_addr) { if ((void *)entry + sizeof(__u32) > base_addr + inline_size || @@ -288,12 +288,54 @@ static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr, return entry; } +static int read_inline_xattr(struct inode *inode, struct page *ipage, + void *txattr_addr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + unsigned int inline_size = inline_xattr_size(inode); + struct page *page = NULL; + void *inline_addr; + + if (ipage) { + inline_addr = inline_xattr_addr(inode, ipage); + } else { + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) + return PTR_ERR(page); + + inline_addr = inline_xattr_addr(inode, page); + } + memcpy(txattr_addr, inline_addr, inline_size); + f2fs_put_page(page, 1); + + return 0; +} + +static int read_xattr_block(struct inode *inode, void *txattr_addr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + nid_t xnid = F2FS_I(inode)->i_xattr_nid; + unsigned int inline_size = inline_xattr_size(inode); + struct page *xpage; + void *xattr_addr; + + /* The inode already has an extended attribute 
block. */ + xpage = get_node_page(sbi, xnid); + if (IS_ERR(xpage)) + return PTR_ERR(xpage); + + xattr_addr = page_address(xpage); + memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE); + f2fs_put_page(xpage, 1); + + return 0; +} + static int lookup_all_xattrs(struct inode *inode, struct page *ipage, unsigned int index, unsigned int len, const char *name, struct f2fs_xattr_entry **xe, void **base_addr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); void *cur_addr, *txattr_addr, *last_addr = NULL; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0; @@ -310,23 +352,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; + err = read_inline_xattr(inode, ipage, txattr_addr); + if (err) + goto out; - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out; - } - inline_addr = inline_xattr_addr(page); - } - memcpy(txattr_addr, inline_addr, inline_size); - f2fs_put_page(page, 1); - - *xe = __find_inline_xattr(txattr_addr, &last_addr, + *xe = __find_inline_xattr(inode, txattr_addr, &last_addr, index, len, name); if (*xe) goto check; @@ -334,19 +364,9 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, /* read from xattr node block */ if (xnid) { - struct page *xpage; - void *xattr_addr; - - /* The inode already has an extended attribute block. */ - xpage = get_node_page(sbi, xnid); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + err = read_xattr_block(inode, txattr_addr); + if (err) goto out; - } - - xattr_addr = page_address(xpage); - memcpy(txattr_addr + inline_size, xattr_addr, size); - f2fs_put_page(xpage, 1); } if (last_addr) @@ -371,7 +391,6 @@ out: static int read_all_xattrs(struct inode *inode, struct page *ipage, void **base_addr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_xattr_header *header; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int size = VALID_XATTR_BLOCK_SIZE; @@ -386,38 +405,16 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; - - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto fail; - } - inline_addr = inline_xattr_addr(page); - } - memcpy(txattr_addr, inline_addr, inline_size); - f2fs_put_page(page, 1); + err = read_inline_xattr(inode, ipage, txattr_addr); + if (err) + goto fail; } /* read from xattr node block */ if (xnid) { - struct page *xpage; - void *xattr_addr; - - /* The inode already has an extended attribute block. 
*/ - xpage = get_node_page(sbi, xnid); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + err = read_xattr_block(inode, txattr_addr); + if (err) goto fail; - } - - xattr_addr = page_address(xpage); - memcpy(txattr_addr + inline_size, xattr_addr, size); - f2fs_put_page(xpage, 1); } header = XATTR_HDR(txattr_addr); @@ -439,10 +436,12 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); size_t inline_size = inline_xattr_size(inode); + struct page *in_page = NULL; void *xattr_addr; + void *inline_addr = NULL; struct page *xpage; nid_t new_nid = 0; - int err; + int err = 0; if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid) if (!alloc_nid(sbi, &new_nid)) @@ -450,30 +449,30 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, /* write to inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - f2fs_wait_on_page_writeback(ipage, NODE, true); - set_page_dirty(ipage); + inline_addr = inline_xattr_addr(inode, ipage); } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { + in_page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(in_page)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(page); + return PTR_ERR(in_page); } - inline_addr = inline_xattr_addr(page); - f2fs_wait_on_page_writeback(page, NODE, true); + inline_addr = inline_xattr_addr(inode, in_page); } - memcpy(inline_addr, txattr_addr, inline_size); - f2fs_put_page(page, 1); + f2fs_wait_on_page_writeback(ipage ? ipage : in_page, + NODE, true); /* no need to use xattr node block */ if (hsize <= inline_size) { - err = truncate_xattr_node(inode, ipage); + err = truncate_xattr_node(inode); alloc_nid_failed(sbi, new_nid); - return err; + if (err) { + f2fs_put_page(in_page, 1); + return err; + } + memcpy(inline_addr, txattr_addr, inline_size); + set_page_dirty(ipage ? ipage : in_page); + goto in_page_out; } } @@ -482,7 +481,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); if (IS_ERR(xpage)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(xpage); + goto in_page_out; } f2fs_bug_on(sbi, new_nid); f2fs_wait_on_page_writeback(xpage, NODE, true); @@ -492,17 +491,24 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, xpage = new_node_page(&dn, XATTR_NODE_OFFSET); if (IS_ERR(xpage)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(xpage); + goto in_page_out; } alloc_nid_done(sbi, new_nid); } - xattr_addr = page_address(xpage); - memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE); - set_page_dirty(xpage); - f2fs_put_page(xpage, 1); - return 0; + if (inline_size) + memcpy(inline_addr, txattr_addr, inline_size); + memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE); + + if (inline_size) + set_page_dirty(ipage ? 
ipage : in_page); + set_page_dirty(xpage); + + f2fs_put_page(xpage, 1); +in_page_out: + f2fs_put_page(in_page, 1); + return err; } int f2fs_getxattr(struct inode *inode, int index, const char *name, @@ -721,6 +727,10 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; + err = dquot_initialize(inode); + if (err) + return err; + /* this case is only from init_inode_metadata */ if (ipage) return __f2fs_setxattr(inode, index, name, value, diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c2a975e4a711..fef1caeddf54 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,6 +36,8 @@ #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) #define F2FS_META_INO(sbi) (sbi->meta_ino_num) +#define F2FS_MAX_QUOTAS 3 + #define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ #define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ #define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ @@ -108,7 +110,8 @@ struct f2fs_super_block { __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ - __u8 reserved[327]; /* valid reserved region */ + __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ + __u8 reserved[315]; /* valid reserved region */ } __packed; /* @@ -184,7 +187,8 @@ struct f2fs_extent { } __packed; #define F2FS_NAME_LEN 255 -#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ +/* 200 bytes for inline xattrs by default */ +#define DEFAULT_INLINE_XATTR_ADDRS 50 #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) @@ -238,7 +242,7 @@ struct f2fs_inode { union { struct { __le16 i_extra_isize; /* extra inode attribute size */ - __le16 i_padding; /* padding */ + __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ __le32 i_projid; /* project id */ __le32 i_inode_checksum;/* inode meta checksum */ __le32 i_extra_end[0]; /* for attribute size calculation */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 7063bbcca03b..589df6f73789 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -128,6 +128,18 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); { CP_DISCARD, "Discard" }, \ { CP_UMOUNT | CP_TRIMMED, "Umount,Trimmed" }) +#define show_fsync_cpreason(type) \ + __print_symbolic(type, \ + { CP_NO_NEEDED, "no needed" }, \ + { CP_NON_REGULAR, "non regular" }, \ + { CP_HARDLINK, "hardlink" }, \ + { CP_SB_NEED_CP, "sb needs cp" }, \ + { CP_WRONG_PINO, "wrong pino" }, \ + { CP_NO_SPC_ROLL, "no space roll forward" }, \ + { CP_NODE_NEED_CP, "node needs cp" }, \ + { CP_FASTBOOT_MODE, "fastboot mode" }, \ + { CP_SPEC_LOG_NUM, "log type is 2" }) + struct victim_sel_policy; struct f2fs_map_blocks; @@ -202,14 +214,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, TRACE_EVENT(f2fs_sync_file_exit, - TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret), + TP_PROTO(struct inode *inode, int cp_reason, int datasync, int ret), - TP_ARGS(inode, need_cp, datasync, ret), + TP_ARGS(inode, cp_reason, datasync, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) - __field(int, need_cp) + __field(int, cp_reason) __field(int, datasync) __field(int, ret) ), @@ -217,15 +229,15 @@ TRACE_EVENT(f2fs_sync_file_exit, TP_fast_assign( __entry->dev = 
inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->need_cp = need_cp; + __entry->cp_reason = cp_reason; __entry->datasync = datasync; __entry->ret = ret; ), - TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, " + TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, " "datasync = %d, ret = %d", show_dev_ino(__entry), - __entry->need_cp ? "needed" : "not needed", + show_fsync_cpreason(__entry->cp_reason), __entry->datasync, __entry->ret) ); @@ -716,6 +728,91 @@ TRACE_EVENT(f2fs_get_victim, __entry->free) ); +TRACE_EVENT(f2fs_lookup_start, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(const char *, name) + __field(unsigned int, flags) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u", + show_dev_ino(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(f2fs_lookup_end, + + TP_PROTO(struct inode *dir, struct dentry *dentry, nid_t ino, + int err), + + TP_ARGS(dir, dentry, ino, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(const char *, name) + __field(nid_t, cino) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->name = dentry->d_name.name; + __entry->cino = ino; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d", + show_dev_ino(__entry), + __entry->name, + __entry->cino, + __entry->err) +); + +TRACE_EVENT(f2fs_readdir, + + TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err), + + TP_ARGS(dir, start_pos, end_pos, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, start) + __field(loff_t, end) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->start = start_pos; + __entry->end = end_pos; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d", + show_dev_ino(__entry), + __entry->start, + __entry->end, + __entry->err) +); + TRACE_EVENT(f2fs_fallocate, TP_PROTO(struct inode *inode, int mode, @@ -1274,6 +1371,13 @@ DEFINE_EVENT(f2fs_discard, f2fs_issue_discard, TP_ARGS(dev, blkstart, blklen) ); +DEFINE_EVENT(f2fs_discard, f2fs_remove_discard, + + TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), + + TP_ARGS(dev, blkstart, blklen) +); + TRACE_EVENT(f2fs_issue_reset_zone, TP_PROTO(struct block_device *dev, block_t blkstart), From 67a8ab4adc2d30e3f3442a0659b887b4da83f7c2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Oct 2017 21:23:19 +0000 Subject: [PATCH 174/312] UPSTREAM: arm64: vdso: fix clock_getres for 4GiB-aligned res (cherry pick from commit c80ed088a519da53f27b798a69748eaabc66aadf) The vdso tries to check for a NULL res pointer in __kernel_clock_getres, but only checks the lower 32 bits as is uses CBZ on the W register the res pointer is held in. Thus, if the res pointer happened to be aligned to a 4GiB boundary, we'd spuriously skip storing the timespec to it, while returning a zero error code to the caller. Prevent this by checking the whole pointer, using CBZ on the X register the res pointer is held in. 
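
To make the failure mode concrete, here is a small user-space C sketch (purely illustrative, not vDSO or kernel code) of the difference between testing only the low 32 bits of the pointer, as CBZ on a W register effectively does, and testing the full 64-bit value, as CBZ on an X register does:

  /* Illustration only: a 4GiB-aligned pointer looks NULL to a 32-bit check. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t res = 0x100000000ULL;      /* non-NULL, but 4GiB-aligned */

          if ((uint32_t)res == 0)             /* what "cbz w1" effectively tests */
                  printf("w-check: spuriously treated as NULL\n");

          if (res == 0)                       /* what "cbz x1" tests */
                  printf("x-check: treated as NULL\n");
          else
                  printf("x-check: correctly seen as non-NULL\n");

          return 0;
  }

With the 32-bit check the first branch fires, so the timespec would never be stored even though the pointer is valid, matching the behaviour described above.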
Fixes: 9031fefde6f2ac1d ("arm64: VDSO support") Signed-off-by: Mark Rutland Reported-by: Andrew Pinski Reported-by: Mark Salyzyn Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon Bug: 20045882 Bug: 63737556 Change-Id: Iab5449d8515f9d655e792e3d7ce43a8f016fa2a0 --- arch/arm64/kernel/vdso/gettimeofday.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index e00b4671bd7c..c97ce91cf023 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -310,7 +310,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: - cbz w1, 3f + cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ From 28850c79d071b4eef0001bb76af2cfb29402f4ac Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 8 Jun 2017 16:44:21 -0700 Subject: [PATCH 175/312] BACKPORT: time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting (cherry pick from commit 3d88d56c5873f6eebe23e05c3da701960146b801) Due to how the MONOTONIC_RAW accumulation logic was handled, there is the potential for a 1ns discontinuity when we do accumulations. This small discontinuity has for the most part gone un-noticed, but since ARM64 enabled CLOCK_MONOTONIC_RAW in their vDSO clock_gettime implementation, we've seen failures with the inconsistency-check test in kselftest. This patch addresses the issue by using the same sub-ns accumulation handling that CLOCK_MONOTONIC uses, which avoids the issue for in-kernel users. Since the ARM64 vDSO implementation has its own clock_gettime calculation logic, this patch reduces the frequency of errors, but failures are still seen. The ARM64 vDSO will need to be updated to include the sub-nanosecond xtime_nsec values in its calculation for this issue to be completely fixed. Signed-off-by: John Stultz Tested-by: Daniel Mentz Cc: Prarit Bhargava Cc: Kevin Brodsky Cc: Richard Cochran Cc: Stephen Boyd Cc: Will Deacon Cc: "stable #4 . 8+" Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner Bug: 20045882 Bug: 63737556 Change-Id: I6c55dd7685f6bd212c6af9d09c527528e1dd5fa1 --- include/linux/timekeeper_internal.h | 4 ++-- kernel/time/timekeeping.c | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index f0f1793cfa49..115216ec7cfe 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -56,7 +56,7 @@ struct tk_read_base { * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -97,7 +97,7 @@ struct timekeeper { cycle_t cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. 
* This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 738f3467d169..5a514e6002d2 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -277,8 +277,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = (u64) interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = - ((u64) interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { @@ -1796,7 +1795,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, unsigned int *clock_set) { cycle_t interval = tk->cycle_interval << shift; - u64 raw_nsecs; + u64 snsec_per_sec; /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) @@ -1811,14 +1810,15 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - raw_nsecs = (u64)tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; - if (raw_nsecs >= NSEC_PER_SEC) { - u64 raw_secs = raw_nsecs; - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { + tk->tkr_raw.xtime_nsec -= snsec_per_sec; + tk->raw_time.tv_sec++; } - tk->raw_time.tv_nsec = raw_nsecs; + tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; From 1d35c0438678c7ad4c367135082685d5754eed20 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 22 May 2017 17:20:20 -0700 Subject: [PATCH 176/312] BACKPORT: time: Clean up CLOCK_MONOTONIC_RAW time handling (cherry pick from commit fc6eead7c1e2e5376c25d2795d4539fdacbc0648) Now that we fixed the sub-ns handling for CLOCK_MONOTONIC_RAW, remove the duplicitive tk->raw_time.tv_nsec, which can be stored in tk->tkr_raw.xtime_nsec (similarly to how its handled for monotonic time). 
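
As a rough sketch of the resulting representation (simplified, assumed types only, not the kernel's timekeeper structures): whole seconds live in raw_sec, the fractional part is kept as shift-scaled nanoseconds in tkr_raw.xtime_nsec, and a raw timestamp is recovered roughly like this:

  /* Sketch under assumed, simplified types -- not the kernel's timekeeper. */
  #include <stdint.h>

  struct raw_clock {
          uint64_t raw_sec;       /* whole seconds of CLOCK_MONOTONIC_RAW */
          uint64_t xtime_nsec;    /* nanoseconds << shift, keeps the sub-ns remainder */
          uint32_t shift;         /* clocksource shift */
  };

  static void raw_read(const struct raw_clock *rc, uint64_t *sec, uint64_t *nsec)
  {
          *sec  = rc->raw_sec;
          *nsec = rc->xtime_nsec >> rc->shift;  /* sub-ns remainder stays behind */
  }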
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Cc: Kevin Brodsky Cc: Will Deacon Cc: Daniel Mentz Tested-by: Daniel Mentz Signed-off-by: John Stultz Bug: 20045882 Bug: 63737556 Change-Id: I243827d21b08703a09d2d2fe738a9258be224582 --- arch/arm64/kernel/vdso.c | 4 +-- include/linux/timekeeper_internal.h | 4 +-- kernel/time/timekeeping.c | 45 ++++++++++++++++------------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index b7730ba782dd..7e9dd94452bb 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -218,8 +218,8 @@ void update_vsyscall(struct timekeeper *tk) if (!use_syscall) { /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; - vdso_data->raw_time_sec = tk->raw_time.tv_sec; - vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; + vdso_data->raw_time_sec = tk->raw_sec; + vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; /* tkr_raw.xtime_nsec == 0 */ diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 115216ec7cfe..3a5af09af18b 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -50,7 +50,7 @@ struct tk_read_base { * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second - * @raw_time: Monotonic raw base time in timespec64 format + * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. 
@@ -91,7 +91,7 @@ struct timekeeper { s32 tai_offset; unsigned int clock_was_set_seq; ktime_t next_leap_ktime; - struct timespec64 raw_time; + u64 raw_sec; /* The following members are for timekeeping internal use */ cycle_t cycle_interval; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5a514e6002d2..fc86fdcce932 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -70,6 +70,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk) tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; tk->xtime_sec++; } + while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { + tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + tk->raw_sec++; + } } static inline struct timespec64 tk_xtime(struct timekeeper *tk) @@ -282,12 +286,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { int shift_change = clock->shift - old_clock->shift; - if (shift_change < 0) + if (shift_change < 0) { tk->tkr_mono.xtime_nsec >>= -shift_change; - else + tk->tkr_raw.xtime_nsec >>= -shift_change; + } else { tk->tkr_mono.xtime_nsec <<= shift_change; + tk->tkr_raw.xtime_nsec <<= shift_change; + } } - tk->tkr_raw.xtime_nsec = 0; tk->tkr_mono.shift = clock->shift; tk->tkr_raw.shift = clock->shift; @@ -616,9 +622,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk) nsec = (u32) tk->wall_to_monotonic.tv_nsec; tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); - /* Update the monotonic raw base */ - tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time); - /* * The sum of the nanoseconds portions of xtime and * wall_to_monotonic can be greater/equal one second. 
Take @@ -628,6 +631,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk) if (nsec >= NSEC_PER_SEC) seconds++; tk->ktime_sec = seconds; + + /* Update the monotonic raw base */ + seconds = tk->raw_sec; + nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift); + tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); } /* must hold timekeeper_lock */ @@ -669,7 +677,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) static void timekeeping_forward_now(struct timekeeper *tk) { cycle_t cycle_now, delta; - s64 nsec; cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); @@ -681,10 +688,13 @@ static void timekeeping_forward_now(struct timekeeper *tk) /* If arch requires, add in get_arch_timeoffset() */ tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift; - tk_normalize_xtime(tk); - nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift); - timespec64_add_ns(&tk->raw_time, nsec); + tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult; + + /* If arch requires, add in get_arch_timeoffset() */ + tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift; + + tk_normalize_xtime(tk); } /** @@ -1178,19 +1188,18 @@ int timekeeping_notify(struct clocksource *clock) void getrawmonotonic64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - struct timespec64 ts64; unsigned long seq; s64 nsecs; do { seq = read_seqcount_begin(&tk_core.seq); + ts->tv_sec = tk->raw_sec; nsecs = timekeeping_get_ns(&tk->tkr_raw); - ts64 = tk->raw_time; } while (read_seqcount_retry(&tk_core.seq, seq)); - timespec64_add_ns(&ts64, nsecs); - *ts = ts64; + ts->tv_nsec = 0; + timespec64_add_ns(ts, nsecs); } EXPORT_SYMBOL(getrawmonotonic64); @@ -1314,8 +1323,7 @@ void __init timekeeping_init(void) tk_setup_internals(tk, clock); tk_set_xtime(tk, &now); - tk->raw_time.tv_sec = 0; - tk->raw_time.tv_nsec = 0; + tk->raw_sec = 0; if (boot.tv_sec == 0 && boot.tv_nsec == 0) boot = tk_xtime(tk); @@ -1810,15 +1818,12 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { tk->tkr_raw.xtime_nsec -= snsec_per_sec; - tk->raw_time.tv_sec++; + tk->raw_sec++; } - tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; - tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; From 7ddbe701076d4ff2ba15c1a111bd41681594819e Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Nov 2017 12:29:34 +0100 Subject: [PATCH 177/312] s390: fix transactional execution control register handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a1c5befc1c24eb9c1ee83f711e0f21ee79cbb556 upstream. 
Dan Horák reported the following crash related to transactional execution: User process fault: interruption code 0013 ilc:3 in libpthread-2.26.so[3ff93c00000+1b000] CPU: 2 PID: 1 Comm: /init Not tainted 4.13.4-300.fc27.s390x #1 Hardware name: IBM 2827 H43 400 (z/VM 6.4.0) task: 00000000fafc8000 task.stack: 00000000fafc4000 User PSW : 0705200180000000 000003ff93c14e70 R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:2 PM:0 RI:0 EA:3 User GPRS: 0000000000000077 000003ff00000000 000003ff93144d48 000003ff93144d5e 0000000000000000 0000000000000002 0000000000000000 000003ff00000000 0000000000000000 0000000000000418 0000000000000000 000003ffcc9fe770 000003ff93d28f50 000003ff9310acf0 000003ff92b0319a 000003ffcc9fe6d0 User Code: 000003ff93c14e62: 60e0b030 std %f14,48(%r11) 000003ff93c14e66: 60f0b038 std %f15,56(%r11) #000003ff93c14e6a: e5600000ff0e tbegin 0,65294 >000003ff93c14e70: a7740006 brc 7,3ff93c14e7c 000003ff93c14e74: a7080000 lhi %r0,0 000003ff93c14e78: a7f40023 brc 15,3ff93c14ebe 000003ff93c14e7c: b2220000 ipm %r0 000003ff93c14e80: 8800001c srl %r0,28 There are several bugs with control register handling with respect to transactional execution: - on task switch update_per_regs() is only called if the next task has an mm (is not a kernel thread). This however is incorrect. This breaks e.g. for user mode helper handling, where the kernel creates a kernel thread and then execve's a user space program. Control register contents related to transactional execution won't be updated on execve. If the previous task ran with transactional execution disabled then the new task will also run with transactional execution disabled, which is incorrect. Therefore call update_per_regs() unconditionally within switch_to(). - on startup the transactional execution facility is not enabled for the idle thread. This is not really a bug, but an inconsistency to other facilities. Therefore enable the facility if it is available. - on fork the new thread's per_flags field is not cleared. This means that a child process inherits the PER_FLAG_NO_TE flag. This flag can be set with a ptrace request to disable transactional execution for the current process. It should not be inherited by new child processes in order to be consistent with the handling of all other PER related debugging options. Therefore clear the per_flags field in copy_thread_tls(). 
Reported-and-tested-by: Dan Horák Fixes: d35339a42dd1 ("s390: add support for transactional memory") Cc: Martin Schwidefsky Reviewed-by: Christian Borntraeger Reviewed-by: Hendrik Brueckner Signed-off-by: Heiko Carstens Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/switch_to.h | 2 +- arch/s390/kernel/early.c | 4 +++- arch/s390/kernel/process.c | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 12d45f0cfdd9..dde6b52359c5 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -34,8 +34,8 @@ static inline void restore_access_regs(unsigned int *acrs) save_access_regs(&prev->thread.acrs[0]); \ save_ri_cb(prev->thread.ri_cb); \ } \ + update_cr_regs(next); \ if (next->mm) { \ - update_cr_regs(next); \ set_cpu_flag(CIF_FPU); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 3c31609df959..ee7b8e7ca4f8 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -325,8 +325,10 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; - if (test_facility(50) && test_facility(73)) + if (test_facility(50) && test_facility(73)) { S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + __ctl_set_bit(0, 55); + } if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) { diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 114ee8b96f17..efa035a31b98 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -137,6 +137,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + p->thread.per_flags = 0; /* Initialize per thread user and system timer values */ ti = task_thread_info(p); ti->user_timer = 0; From 04bc7a273264ce3a528b97af72a6473bc4a13fd7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Sep 2017 11:24:22 +0200 Subject: [PATCH 178/312] s390/runtime instrumention: fix possible memory corruption commit d6e646ad7cfa7034d280459b2b2546288f247144 upstream. For PREEMPT enabled kernels the runtime instrumentation (RI) code contains a possible use-after-free bug. If a task that makes use of RI exits, it will execute do_exit() while still enabled for preemption. That function will call exit_thread_runtime_instr() via exit_thread(). If exit_thread_runtime_instr() gets preempted after the RI control block of the task has been freed but before the pointer to it is set to NULL, then save_ri_cb(), called from switch_to(), will write to already freed memory. Avoid this and simply disable preemption while freeing the control block and setting the pointer to NULL. 
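As a rough sketch of the window this change closes (illustrative only, not code from the patch), the unpatched teardown on a PREEMPT kernel looks like:

	kfree(task->thread.ri_cb);
	/* preemption here lets switch_to() call save_ri_cb() on the stale
	 * pointer and store into the just-freed control block */
	task->thread.ri_cb = NULL;

Holding preempt_disable()/preempt_enable() around both statements removes the window, which is why the preemption handling moves from the syscall path into exit_thread_runtime_instr() itself.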
Fixes: e4b8b3f33fca ("s390: add support for runtime instrumentation") Reviewed-by: Christian Borntraeger Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/runtime_instr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index fffa0e5462af..70cdb03d4acd 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -47,11 +47,13 @@ void exit_thread_runtime_instr(void) { struct task_struct *task = current; + preempt_disable(); if (!task->thread.ri_cb) return; disable_runtime_instr(); kfree(task->thread.ri_cb); task->thread.ri_cb = NULL; + preempt_enable(); } SYSCALL_DEFINE1(s390_runtime_instr, int, command) @@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - preempt_disable(); exit_thread_runtime_instr(); - preempt_enable(); return 0; } From 4337fa2425f6b00ba0977444a12dc5439858cb38 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Sep 2017 09:16:48 +0200 Subject: [PATCH 179/312] s390/disassembler: add missing end marker for e7 table commit 5c50538752af7968f53924b22dede8ed4ce4cb3b upstream. The e7 opcode table does not have an end marker. Hence when trying to find an unknown e7 instruction the code will access memory behind the table until it finds something that matches the opcode, or the kernel crashes, whatever comes first. This affects not only the in-kernel disassembler but also uprobes and kprobes which refuse to set a probe on unknown instructions, and therefore search the opcode tables to figure out if instructions are known or not. Fixes: 3585cb0280654 ("s390/disassembler: add vector instructions") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/dis.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 6e72961608f0..33357e959fed 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1549,6 +1549,7 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, + { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_eb[] = { From 5c2607d3e7cd8d1e4ee96fdc8d18d49a57f9500f Mon Sep 17 00:00:00 2001 From: Vasily Gorbik Date: Wed, 15 Nov 2017 14:15:36 +0100 Subject: [PATCH 180/312] s390/disassembler: increase show_code buffer size commit b192571d1ae375e0bbe0aa3ccfa1a3c3704454b9 upstream. Current buffer size of 64 is too small. objdump shows that there are instructions which would require up to 75 bytes buffer (with current formating). 128 bytes "ought to be enough for anybody". Also replaces 8 spaces with a single tab to reduce the memory footprint. 
Fixes the following KASAN finding: BUG: KASAN: stack-out-of-bounds in number+0x3fe/0x538 Write of size 1 at addr 000000005a4a75a0 by task bash/1282 CPU: 1 PID: 1282 Comm: bash Not tainted 4.14.0+ #215 Hardware name: IBM 2964 N96 702 (z/VM 6.4.0) Call Trace: ([<000000000011eeb6>] show_stack+0x56/0x88) [<0000000000e1ce1a>] dump_stack+0x15a/0x1b0 [<00000000004e2994>] print_address_description+0xf4/0x288 [<00000000004e2cf2>] kasan_report+0x13a/0x230 [<0000000000e38ae6>] number+0x3fe/0x538 [<0000000000e3dfe4>] vsnprintf+0x194/0x948 [<0000000000e3ea42>] sprintf+0xa2/0xb8 [<00000000001198dc>] print_insn+0x374/0x500 [<0000000000119346>] show_code+0x4ee/0x538 [<000000000011f234>] show_registers+0x34c/0x388 [<000000000011f2ae>] show_regs+0x3e/0xa8 [<000000000011f502>] die+0x1ea/0x2e8 [<0000000000138f0e>] do_no_context+0x106/0x168 [<0000000000139a1a>] do_protection_exception+0x4da/0x7d0 [<0000000000e55914>] pgm_check_handler+0x16c/0x1c0 [<000000000090639e>] sysrq_handle_crash+0x46/0x58 ([<0000000000000007>] 0x7) [<00000000009073fa>] __handle_sysrq+0x102/0x218 [<0000000000907c06>] write_sysrq_trigger+0xd6/0x100 [<000000000061d67a>] proc_reg_write+0xb2/0x128 [<0000000000520be6>] __vfs_write+0xee/0x368 [<0000000000521222>] vfs_write+0x21a/0x278 [<000000000052156a>] SyS_write+0xda/0x178 [<0000000000e555cc>] system_call+0xc4/0x270 The buggy address belongs to the page: page:000003d1016929c0 count:0 mapcount:0 mapping: (null) index:0x0 flags: 0x0() raw: 0000000000000000 0000000000000000 0000000000000000 ffffffff00000000 raw: 0000000000000100 0000000000000200 0000000000000000 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: 000000005a4a7480: 00 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 000000005a4a7500: 00 00 00 00 00 00 00 00 f2 f2 f2 f2 00 00 00 00 >000000005a4a7580: 00 00 00 00 f3 f3 f3 f3 00 00 00 00 00 00 00 00 ^ 000000005a4a7600: 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 f8 f8 000000005a4a7680: f2 f2 f2 f2 f2 f2 f8 f8 f2 f2 f3 f3 f3 f3 00 00 ================================================================== Signed-off-by: Vasily Gorbik Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/dis.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 33357e959fed..07477ba392b7 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1962,7 +1962,7 @@ void show_code(struct pt_regs *regs) { char *mode = user_mode(regs) ? "User" : "Krnl"; unsigned char code[64]; - char buffer[64], *ptr; + char buffer[128], *ptr; mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; @@ -2025,7 +2025,7 @@ void show_code(struct pt_regs *regs) start += opsize; printk(buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } printk("\n"); From 2417da3f4d6bc4fc6c77f613f0e2264090892aa5 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 20 Jun 2017 11:42:27 -0700 Subject: [PATCH 181/312] ipv6: only call ip6_route_dev_notify() once for NETDEV_UNREGISTER commit 76da0704507bbc51875013f6557877ab308cfd0a upstream. In commit 242d3a49a2a1 ("ipv6: reorder ip6_route_dev_notifier after ipv6_dev_notf") I assumed NETDEV_REGISTER and NETDEV_UNREGISTER are paired, unfortunately, as reported by jeffy, netdev_wait_allrefs() could rebroadcast NETDEV_UNREGISTER event until all refs are gone. We have to add an additional check to avoid this corner case. 
For netdev_wait_allrefs() dev->reg_state is NETREG_UNREGISTERED, for dev_change_net_namespace(), dev->reg_state is NETREG_REGISTERED. So check for dev->reg_state != NETREG_UNREGISTERED. Fixes: 242d3a49a2a1 ("ipv6: reorder ip6_route_dev_notifier after ipv6_dev_notf") Reported-by: jeffy Cc: David Ahern Signed-off-by: Cong Wang Acked-by: David Ahern Signed-off-by: David S. Miller Cc: Konstantin Khlebnikov Signed-off-by: Greg Kroah-Hartman --- net/ipv6/route.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 48917437550e..7336a7311038 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3378,7 +3378,11 @@ static int ip6_route_dev_notify(struct notifier_block *this, net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); #endif - } else if (event == NETDEV_UNREGISTER) { + } else if (event == NETDEV_UNREGISTER && + dev->reg_state != NETREG_UNREGISTERED) { + /* NETDEV_UNREGISTER could be fired for multiple times by + * netdev_wait_allrefs(). Make sure we only call this once. + */ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); From df24d6c224602ec23ad2626b07e13c40d018cbbc Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Tue, 22 Mar 2016 17:05:52 +0100 Subject: [PATCH 182/312] AF_VSOCK: Shrink the area influenced by prepare_to_wait commit f7f9b5e7f8eccfd68ffa7b8d74b07c478bb9e7f0 upstream. When a thread is prepared for waiting by calling prepare_to_wait, sleeping is not allowed until either the wait has taken place or finish_wait has been called. The existing code in af_vsock imposed unnecessary no-sleep assumptions to a broad list of backend functions. This patch shrinks the influence of prepare_to_wait to the area where it is strictly needed, therefore relaxing the no-sleep restriction there. Signed-off-by: Claudio Imbrenda Signed-off-by: David S. Miller Cc: "Jorgen S. 
Hansen" Signed-off-by: Greg Kroah-Hartman --- net/vmw_vsock/af_vsock.c | 158 +++++++++++++++++++++------------------ 1 file changed, 85 insertions(+), 73 deletions(-) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 9b5bd6d142dc..b5f1221f48d4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1209,10 +1209,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait_error; + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + goto out_wait; } else if (timeout == 0) { err = -ETIMEDOUT; - goto out_wait_error; + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + goto out_wait; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); @@ -1220,20 +1224,17 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, if (sk->sk_err) { err = -sk->sk_err; - goto out_wait_error; - } else + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + } else { err = 0; + } out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; - -out_wait_error: - sk->sk_state = SS_UNCONNECTED; - sock->state = SS_UNCONNECTED; - goto out_wait; } static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) @@ -1270,18 +1271,20 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) listener->sk_err == 0) { release_sock(listener); timeout = schedule_timeout(timeout); + finish_wait(sk_sleep(listener), &wait); lock_sock(listener); if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait; + goto out; } else if (timeout == 0) { err = -EAGAIN; - goto out_wait; + goto out; } prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); } + finish_wait(sk_sleep(listener), &wait); if (listener->sk_err) err = -listener->sk_err; @@ -1301,19 +1304,15 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) */ if (err) { vconnected->rejected = true; - release_sock(connected); - sock_put(connected); - goto out_wait; + } else { + newsock->state = SS_CONNECTED; + sock_graft(connected, newsock); } - newsock->state = SS_CONNECTED; - sock_graft(connected, newsock); release_sock(connected); sock_put(connected); } -out_wait: - finish_wait(sk_sleep(listener), &wait); out: release_sock(listener); return err; @@ -1557,11 +1556,11 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, if (err < 0) goto out; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (total_written < len) { ssize_t written; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && @@ -1570,27 +1569,33 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, /* Don't wait for non-blocking sockets. 
*/ if (timeout == 0) { err = -EAGAIN; - goto out_wait; + finish_wait(sk_sleep(sk), &wait); + goto out_err; } err = transport->notify_send_pre_block(vsk, &send_data); - if (err < 0) - goto out_wait; + if (err < 0) { + finish_wait(sk_sleep(sk), &wait); + goto out_err; + } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait; + finish_wait(sk_sleep(sk), &wait); + goto out_err; } else if (timeout == 0) { err = -EAGAIN; - goto out_wait; + finish_wait(sk_sleep(sk), &wait); + goto out_err; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } + finish_wait(sk_sleep(sk), &wait); /* These checks occur both as part of and after the loop * conditional since we need to check before and after @@ -1598,16 +1603,16 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, */ if (sk->sk_err) { err = -sk->sk_err; - goto out_wait; + goto out_err; } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || (vsk->peer_shutdown & RCV_SHUTDOWN)) { err = -EPIPE; - goto out_wait; + goto out_err; } err = transport->notify_send_pre_enqueue(vsk, &send_data); if (err < 0) - goto out_wait; + goto out_err; /* Note that enqueue will only write as many bytes as are free * in the produce queue, so we don't need to ensure len is @@ -1620,7 +1625,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, len - total_written); if (written < 0) { err = -ENOMEM; - goto out_wait; + goto out_err; } total_written += written; @@ -1628,14 +1633,13 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, err = transport->notify_send_post_enqueue( vsk, written, &send_data); if (err < 0) - goto out_wait; + goto out_err; } -out_wait: +out_err: if (total_written > 0) err = total_written; - finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; @@ -1716,21 +1720,61 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (err < 0) goto out; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (1) { - s64 ready = vsock_stream_has_data(vsk); + s64 ready; - if (ready < 0) { - /* Invalid queue pair content. XXX This should be - * changed to a connection reset in a later change. - */ + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + ready = vsock_stream_has_data(vsk); - err = -ENOMEM; - goto out_wait; - } else if (ready > 0) { + if (ready == 0) { + if (sk->sk_err != 0 || + (sk->sk_shutdown & RCV_SHUTDOWN) || + (vsk->peer_shutdown & SEND_SHUTDOWN)) { + finish_wait(sk_sleep(sk), &wait); + break; + } + /* Don't wait for non-blocking sockets. */ + if (timeout == 0) { + err = -EAGAIN; + finish_wait(sk_sleep(sk), &wait); + break; + } + + err = transport->notify_recv_pre_block( + vsk, target, &recv_data); + if (err < 0) { + finish_wait(sk_sleep(sk), &wait); + break; + } + release_sock(sk); + timeout = schedule_timeout(timeout); + lock_sock(sk); + + if (signal_pending(current)) { + err = sock_intr_errno(timeout); + finish_wait(sk_sleep(sk), &wait); + break; + } else if (timeout == 0) { + err = -EAGAIN; + finish_wait(sk_sleep(sk), &wait); + break; + } + } else { ssize_t read; + finish_wait(sk_sleep(sk), &wait); + + if (ready < 0) { + /* Invalid queue pair content. XXX This should + * be changed to a connection reset in a later + * change. 
+ */ + + err = -ENOMEM; + goto out; + } + err = transport->notify_recv_pre_dequeue( vsk, target, &recv_data); if (err < 0) @@ -1750,42 +1794,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, vsk, target, read, !(flags & MSG_PEEK), &recv_data); if (err < 0) - goto out_wait; + goto out; if (read >= target || flags & MSG_PEEK) break; target -= read; - } else { - if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) - || (vsk->peer_shutdown & SEND_SHUTDOWN)) { - break; - } - /* Don't wait for non-blocking sockets. */ - if (timeout == 0) { - err = -EAGAIN; - break; - } - - err = transport->notify_recv_pre_block( - vsk, target, &recv_data); - if (err < 0) - break; - - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - - if (signal_pending(current)) { - err = sock_intr_errno(timeout); - break; - } else if (timeout == 0) { - err = -EAGAIN; - break; - } - - prepare_to_wait(sk_sleep(sk), &wait, - TASK_INTERRUPTIBLE); } } @@ -1797,8 +1811,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (copied > 0) err = copied; -out_wait: - finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; From 3223ea129170a7799b92ee24862275aa7f6b9d1b Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Fri, 19 May 2017 11:21:59 -0700 Subject: [PATCH 183/312] vsock: use new wait API for vsock_stream_sendmsg() commit 499fde662f1957e3cb8d192a94a099ebe19c714b upstream. As reported by Michal, vsock_stream_sendmsg() could still sleep at vsock_stream_has_space() after prepare_to_wait(): vsock_stream_has_space vmci_transport_stream_has_space vmci_qpair_produce_free_space qp_lock qp_acquire_queue_mutex mutex_lock Just switch to the new wait API like we did for commit d9dc8b0f8b4e ("net: fix sleeping for sk_wait_event()"). Reported-by: Michal Kubecek Cc: Stefan Hajnoczi Cc: Jorgen Hansen Cc: "Michael S. Tsirkin" Cc: Claudio Imbrenda Signed-off-by: Cong Wang Reviewed-by: Stefan Hajnoczi Signed-off-by: David S. Miller Cc: "Jorgen S. Hansen" Signed-off-by: Greg Kroah-Hartman --- net/vmw_vsock/af_vsock.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index b5f1221f48d4..60324f7c72bd 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1512,8 +1512,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, long timeout; int err; struct vsock_transport_send_notify_data send_data; - - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); sk = sock->sk; vsk = vsock_sk(sk); @@ -1556,11 +1555,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, if (err < 0) goto out; - while (total_written < len) { ssize_t written; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && @@ -1569,33 +1567,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, /* Don't wait for non-blocking sockets. 
*/ if (timeout == 0) { err = -EAGAIN; - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } err = transport->notify_send_pre_block(vsk, &send_data); if (err < 0) { - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } release_sock(sk); - timeout = schedule_timeout(timeout); + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } else if (timeout == 0) { err = -EAGAIN; - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } - - prepare_to_wait(sk_sleep(sk), &wait, - TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); /* These checks occur both as part of and after the loop * conditional since we need to check before and after From 8ff3471878f3f8161bff92f73b3ecb35d6c397dc Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 18 Sep 2017 08:54:40 -0700 Subject: [PATCH 184/312] sched: Make resched_cpu() unconditional commit 7c2102e56a3f7d85b5d8f33efbd7aecc1f36fdd8 upstream. The current implementation of synchronize_sched_expedited() incorrectly assumes that resched_cpu() is unconditional, which it is not. This means that synchronize_sched_expedited() can hang when resched_cpu()'s trylock fails as follows (analysis by Neeraj Upadhyay): o CPU1 is waiting for expedited wait to complete: sync_rcu_exp_select_cpus rdp->exp_dynticks_snap & 0x1 // returns 1 for CPU5 IPI sent to CPU5 synchronize_sched_expedited_wait ret = swait_event_timeout(rsp->expedited_wq, sync_rcu_preempt_exp_done(rnp_root), jiffies_stall); expmask = 0x20, CPU 5 in idle path (in cpuidle_enter()) o CPU5 handles IPI and fails to acquire rq lock. Handles IPI sync_sched_exp_handler resched_cpu returns while failing to try lock acquire rq->lock need_resched is not set o CPU5 calls rcu_idle_enter() and as need_resched is not set, goes to idle (schedule() is not called). o CPU 1 reports RCU stall. Given that resched_cpu() is now used only by RCU, this commit fixes the assumption by making resched_cpu() unconditional. Reported-by: Neeraj Upadhyay Suggested-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney Acked-by: Steven Rostedt (VMware) Acked-by: Peter Zijlstra (Intel) Signed-off-by: Greg Kroah-Hartman --- kernel/sched/core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b5d372083624..a99c32946e49 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -600,8 +600,7 @@ void resched_cpu(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - if (!raw_spin_trylock_irqsave(&rq->lock, flags)) - return; + raw_spin_lock_irqsave(&rq->lock, flags); resched_curr(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); } From 4fdb1637b2083824403de119b07c6bfe6560915d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 7 Nov 2017 14:15:27 -0800 Subject: [PATCH 185/312] lib/mpi: call cond_resched() from mpi_powm() loop commit 1d9ddde12e3c9bab7f3d3484eb9446315e3571ca upstream. On a non-preemptible kernel, if KEYCTL_DH_COMPUTE is called with the largest permitted inputs (16384 bits), the kernel spends 10+ seconds doing modular exponentiation in mpi_powm() without rescheduling. If all threads do it, it locks up the system. Moreover, it can cause rcu_sched-stall warnings. 
Notwithstanding the insanity of doing this calculation in kernel mode rather than in userspace, fix it by calling cond_resched() as each bit from the exponent is processed. It's still noninterruptible, but at least it's preemptible now. Do the cond_resched() once per bit rather than once per MPI limb because each limb might still easily take 100+ milliseconds on slow CPUs. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- lib/mpi/mpi-pow.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index e24388a863a7..468fb7cd1221 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -26,6 +26,7 @@ * however I decided to publish this code under the plain GPL. */ +#include #include #include "mpi-internal.h" #include "longlong.h" @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) } e <<= 1; c--; + cond_resched(); } i--; From 29c4b6b4f46d4cc53b4f8c2b45e218ccfa1eea68 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 24 Nov 2017 13:56:30 +0900 Subject: [PATCH 186/312] x86/decoder: Add new TEST instruction pattern commit 12a78d43de767eaf8fb272facb7a7b6f2dc6a9df upstream. The kbuild test robot reported this build warning: Warning: arch/x86/tools/test_get_len found difference at :ffffffff8103dd2c Warning: ffffffff8103dd82: f6 09 d8 testb $0xd8,(%rcx) Warning: objdump says 3 bytes, but insn_get_length() says 2 Warning: decoded and checked 1569014 instructions with 1 warnings This sequence seems to be a new instruction not in the opcode map in the Intel SDM. The instruction sequence is "F6 09 d8", means Group3(F6), MOD(00)REG(001)RM(001), and 0xd8. Intel SDM vol2 A.4 Table A-6 said the table index in the group is "Encoding of Bits 5,4,3 of the ModR/M Byte (bits 2,1,0 in parenthesis)" In that table, opcodes listed by the index REG bits as: 000 001 010 011 100 101 110 111 TEST Ib/Iz,(undefined),NOT,NEG,MUL AL/rAX,IMUL AL/rAX,DIV AL/rAX,IDIV AL/rAX So, it seems TEST Ib is assigned to 001. Add the new pattern. Reported-by: kbuild test robot Signed-off-by: Masami Hiramatsu Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/x86-opcode-map.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index d388de72eaca..ec039f2a0c13 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -833,7 +833,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb From 5f8046f7c9e3049000bbe610c15390bf698d681e Mon Sep 17 00:00:00 2001 From: Philip Derrin Date: Tue, 14 Nov 2017 00:55:25 +0100 Subject: [PATCH 187/312] ARM: 8722/1: mm: make STRICT_KERNEL_RWX effective for LPAE commit 400eeffaffc7232c0ae1134fe04e14ae4fb48d8c upstream. Currently, for ARM kernels with CONFIG_ARM_LPAE and CONFIG_STRICT_KERNEL_RWX enabled, the 2MiB pages mapping the kernel code and rodata are writable. They are marked read-only in a software bit (L_PMD_SECT_RDONLY) but the hardware read-only bit is not set (PMD_SECT_AP2). For user mappings, the logic that propagates the software bit to the hardware bit is in set_pmd_at(); but for the kernel, section_update() writes the PMDs directly, skipping this logic. The fix is to set PMD_SECT_AP2 for read-only sections in section_update(), at the same time as L_PMD_SECT_RDONLY. 
Fixes: 1e3479225acb ("ARM: 8275/1: mm: fix PMD_SECT_RDONLY undeclared compile error") Signed-off-by: Philip Derrin Reported-by: Neil Dick Tested-by: Neil Dick Tested-by: Laura Abbott Reviewed-by: Kees Cook Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7f8cd1b3557f..c29ad610311b 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -611,8 +611,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~L_PMD_SECT_RDONLY, - .prot = L_PMD_SECT_RDONLY, + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2), + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, From 36a082ce590f4e2625453d24dfcf98f74d2fe8c7 Mon Sep 17 00:00:00 2001 From: Philip Derrin Date: Tue, 14 Nov 2017 00:55:26 +0100 Subject: [PATCH 188/312] ARM: 8721/1: mm: dump: check hardware RO bit for LPAE commit 3b0c0c922ff4be275a8beb87ce5657d16f355b54 upstream. When CONFIG_ARM_LPAE is set, the PMD dump relies on the software read-only bit to determine whether a page is writable. This concealed a bug which left the kernel text section writable (AP2=0) while marked read-only in the software bit. In a kernel with the AP2 bug, the dump looks like this: ---[ Kernel Mapping ]--- 0xc0000000-0xc0200000 2M RW NX SHD 0xc0200000-0xc0600000 4M ro x SHD 0xc0600000-0xc0800000 2M ro NX SHD 0xc0800000-0xc4800000 64M RW NX SHD The fix is to check that the software and hardware bits are both set before displaying "ro". The dump then shows the true perms: ---[ Kernel Mapping ]--- 0xc0000000-0xc0200000 2M RW NX SHD 0xc0200000-0xc0600000 4M RW x SHD 0xc0600000-0xc0800000 2M RW NX SHD 0xc0800000-0xc4800000 64M RW NX SHD Fixes: ded947798469 ("ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE") Signed-off-by: Philip Derrin Tested-by: Neil Dick Reviewed-by: Kees Cook Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/dump.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 9fe8e241335c..e1f6f0daa847 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = { .val = PMD_SECT_USER, .set = "USR", }, { - .mask = L_PMD_SECT_RDONLY, - .val = L_PMD_SECT_RDONLY, + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2, + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, .set = "ro", .clear = "RW", #elif __LINUX_ARM_ARCH__ >= 6 From 84c785ed786a61dfd35ea3e818ead7dc0adddfd2 Mon Sep 17 00:00:00 2001 From: Mathias Kresin Date: Thu, 11 May 2017 08:11:14 +0200 Subject: [PATCH 189/312] MIPS: ralink: Fix MT7628 pinmux commit 8ef4b43cd3794d63052d85898e42424fd3b14d24 upstream. According to the datasheet the REFCLK pin is shared with GPIO#37 and the PERST pin is shared with GPIO#36. 
Fixes: 53263a1c6852 ("MIPS: ralink: add mt7628an support") Signed-off-by: Mathias Kresin Acked-by: John Crispin Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16046/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ralink/mt7620.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 48d6349fd9d7..9fd401c304b1 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 37, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; From 0c1faf9df0c840df75a4e1ee6a1c967f6ff10428 Mon Sep 17 00:00:00 2001 From: Mathias Kresin Date: Thu, 11 May 2017 08:11:15 +0200 Subject: [PATCH 190/312] MIPS: ralink: Fix typo in mt7628 pinmux function commit 05a67cc258e75ac9758e6f13d26337b8be51162a upstream. There is a typo inside the pinmux setup code. The function is called refclk and not reclk. Fixes: 53263a1c6852 ("MIPS: ralink: add mt7628an support") Signed-off-by: Mathias Kresin Acked-by: John Crispin Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16047/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ralink/mt7620.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 9fd401c304b1..c5f45fc96c74 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -141,7 +141,7 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; From a9f066404fd096351320a16660dd47a8a868a220 Mon Sep 17 00:00:00 2001 From: Vijendar Mukunda Date: Thu, 23 Nov 2017 20:07:00 +0530 Subject: [PATCH 191/312] ALSA: hda: Add Raven PCI ID commit 9ceace3c9c18c67676e75141032a65a8e01f9a7a upstream. 
This commit adds PCI ID for Raven platform Signed-off-by: Vijendar Mukunda Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/hda_intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e6de496bffbe..e2e08fc73b50 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2316,6 +2316,9 @@ static const struct pci_device_id azx_ids[] = { /* AMD Hudson */ { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* AMD Raven */ + { PCI_DEVICE(0x1022, 0x15e3), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x0002), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, From 36c4819abc92e6cb22de81568407a92cdb8e9363 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 15 Nov 2017 16:38:09 -0800 Subject: [PATCH 192/312] dm bufio: fix integer overflow when limiting maximum cache size commit 74d4108d9e681dbbe4a2940ed8fdff1f6868184c upstream. The default max_cache_size_bytes for dm-bufio is meant to be the lesser of 25% of the size of the vmalloc area and 2% of the size of lowmem. However, on 32-bit systems the intermediate result in the expression (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100 overflows, causing the wrong result to be computed. For example, on a 32-bit system where the vmalloc area is 520093696 bytes, the result is 1174405 rather than the expected 130023424, which makes the maximum cache size much too small (far less than 2% of lowmem). This causes severe performance problems for dm-verity users on affected systems. Fix this by using mult_frac() to correctly multiply by a percentage. Do this for all places in dm-bufio that multiply by a percentage. Also replace (VMALLOC_END - VMALLOC_START) with VMALLOC_TOTAL, which contrary to the comment is now defined in include/linux/vmalloc.h. 
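For a concrete feel of the failure, a minimal sketch using the 520093696-byte vmalloc area quoted above (assuming 32-bit unsigned long arithmetic; not part of the patch):

	unsigned long vmalloc_size = 520093696UL;	/* ~496 MiB */
	/* 520093696 * 25 = 13002342400, which wraps mod 2^32 to 117440512 */
	unsigned long wrong = vmalloc_size * 25 / 100;	/* 1174405 */
	/* mult_frac() divides before multiplying, so there is no
	 * intermediate wrap for these values */
	unsigned long right = mult_frac(vmalloc_size, 25, 100);	/* 130023424 */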
Depends-on: 9993bc635 ("sched/x86: Fix overflow in cyc2ns_offset") Fixes: 95d402f057f2 ("dm: add bufio") Signed-off-by: Eric Biggers Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-bufio.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cdceefd0e57d..2ec7f90e3455 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -928,7 +928,8 @@ static void __get_memory_limit(struct dm_bufio_client *c, buffers = c->minimum_buffers; *limit_buffers = buffers; - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; + *threshold_buffers = mult_frac(buffers, + DM_BUFIO_WRITEBACK_PERCENT, 100); } /* @@ -1829,19 +1830,15 @@ static int __init dm_bufio_init(void) memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); - mem = (__u64)((totalram_pages - totalhigh_pages) * - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; if (mem > ULONG_MAX) mem = ULONG_MAX; #ifdef CONFIG_MMU - /* - * Get the size of vmalloc space the same way as VMALLOC_TOTAL - * in fs/proc/internal.h - */ - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); #endif dm_bufio_default_cache_size = mem; From 4e82464aa4a398207e2ecbc4877c82319ecdbafa Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 1 Nov 2017 15:42:36 +0800 Subject: [PATCH 193/312] dm: fix race between dm_get_from_kobject() and __dm_destroy() commit b9a41d21dceadf8104812626ef85dc56ee8a60ed upstream. The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [] dm_get_from_kobject+0x34/0x3a [] dm_attr_show+0x2b/0x5e [] ? mutex_lock+0x26/0x44 [] sysfs_kf_seq_show+0x83/0xcf [] kernfs_seq_show+0x23/0x25 [] seq_read+0x16f/0x325 [] kernfs_fop_read+0x3a/0x13f [] __vfs_read+0x26/0x9d [] ? security_file_permission+0x3c/0x44 [] ? rw_verify_area+0x83/0xd9 [] vfs_read+0x8f/0xcf [] ? __fdget_pos+0x12/0x41 [] SyS_read+0x4b/0x76 [] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. 
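The window is easiest to see side by side (illustrative sketch, not from the patch):

	/* dm_get_from_kobject()                 __dm_destroy()               */
	test_bit(DMF_FREEING, &md->flags) /* 0 */
	                                         set_bit(DMF_FREEING, &md->flags);
	                                         /* proceeds to tear down md  */
	dm_get(md);   /* BUG_ON(test_bit(DMF_FREEING, &md->flags)) fires      */

Taking _minor_lock around both the flag test and dm_get() makes the check-then-get atomic, assuming DMF_FREEING is set under the same lock in the destroy path.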
Signed-off-by: Hou Tao Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 320eb3c4bb6b..9ec6948e3b8b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -3507,11 +3507,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) md = container_of(kobj, struct mapped_device, kobj_holder.kobj); - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) - return NULL; - + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { + md = NULL; + goto out; + } dm_get(md); +out: + spin_unlock(&_minor_lock); + return md; } From 00fd53bc378fdeec3caece086de4eadd798b2346 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Tue, 7 Nov 2017 19:09:20 +0000 Subject: [PATCH 194/312] MIPS: Fix an n32 core file generation regset support regression commit 547da673173de51f73887377eb275304775064ad upstream. Fix a commit 7aeb753b5353 ("MIPS: Implement task_user_regset_view.") regression, then activated by commit 6a9c001b7ec3 ("MIPS: Switch ELF core dumper to use regsets.)", that caused n32 processes to dump o32 core files by failing to set the EF_MIPS_ABI2 flag in the ELF core file header's `e_flags' member: $ file tls-core tls-core: ELF 32-bit MSB executable, MIPS, N32 MIPS64 rel2 version 1 (SYSV), [...] $ ./tls-core Aborted (core dumped) $ file core core: ELF 32-bit MSB core file MIPS, MIPS-I version 1 (SYSV), SVR4-style $ Previously the flag was set as the result of a: statement placed in arch/mips/kernel/binfmt_elfn32.c, however in the regset case, i.e. when CORE_DUMP_USE_REGSET is set, ELF_CORE_EFLAGS is no longer used by `fill_note_info' in fs/binfmt_elf.c, and instead the `->e_flags' member of the regset view chosen is. We have the views defined in arch/mips/kernel/ptrace.c, however only an o32 and an n64 one, and the latter is used for n32 as well. Consequently an o32 core file is incorrectly dumped from n32 processes (the ELF32 vs ELF64 class is chosen elsewhere, and the 32-bit one is correctly selected for n32). Correct the issue then by defining an n32 regset view and using it as appropriate. Issue discovered in GDB testing. Fixes: 7aeb753b5353 ("MIPS: Implement task_user_regset_view.") Signed-off-by: Maciej W. 
Rozycki Cc: Ralf Baechle Cc: Djordje Todorovic Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17617/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 24c115a0721a..a3f38e6b7ea1 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -650,6 +650,19 @@ static const struct user_regset_view user_mips64_view = { .n = ARRAY_SIZE(mips64_regsets), }; +#ifdef CONFIG_MIPS32_N32 + +static const struct user_regset_view user_mipsn32_view = { + .name = "mipsn32", + .e_flags = EF_MIPS_ABI2, + .e_machine = ELF_ARCH, + .ei_osabi = ELF_OSABI, + .regsets = mips64_regsets, + .n = ARRAY_SIZE(mips64_regsets), +}; + +#endif /* CONFIG_MIPS32_N32 */ + #endif /* CONFIG_64BIT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) @@ -660,6 +673,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) #ifdef CONFIG_MIPS32_O32 if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return &user_mips_view; +#endif +#ifdef CONFIG_MIPS32_N32 + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) + return &user_mipsn32_view; #endif return &user_mips64_view; #endif From 153142963ca1fa2b2f3bf3f687e00ef1a63bff5f Mon Sep 17 00:00:00 2001 From: Mirko Parthey Date: Thu, 18 May 2017 21:30:03 +0200 Subject: [PATCH 195/312] MIPS: BCM47XX: Fix LED inversion for WRT54GSv1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 56a46acf62af5ba44fca2f3f1c7c25a2d5385b19 upstream. The WLAN LED on the Linksys WRT54GSv1 is active low, but the software treats it as active high. Fix the inverted logic. Fixes: 7bb26b169116 ("MIPS: BCM47xx: Fix LEDs on WRT54GS V1.0") Signed-off-by: Mirko Parthey Looks-ok-by: Rafał Miłecki Cc: Hauke Mehrtens Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16071/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/bcm47xx/leds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d20ae63eb3c2..46abe9e4e0e0 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { /* Verified on: WRT54GS V1.0 */ static const struct gpio_led bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), }; From 9a4e08c634cebe28bfb96ec9482ed309f7c9b679 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 17 Nov 2017 15:29:13 -0800 Subject: [PATCH 196/312] autofs: don't fail mount for transient error commit ecc0c469f27765ed1e2b967be0aa17cee1a60b76 upstream. Currently if the autofs kernel module gets an error when writing to the pipe which links to the daemon, then it marks the whole moutpoint as catatonic, and it will stop working. It is possible that the error is transient. This can happen if the daemon is slow and more than 16 requests queue up. If a subsequent process tries to queue a request, and is then signalled, the write to the pipe will return -ERESTARTSYS and autofs will take that as total failure. 
So change the code to assess -ERESTARTSYS and -ENOMEM as transient failures which only abort the current request, not the whole mountpoint. It isn't a crash or a data corruption, but having autofs mountpoints suddenly stop working is rather inconvenient. Ian said: : And given the problems with a half dozen (or so) user space applications : consuming large amounts of CPU under heavy mount and umount activity this : could happen more easily than we expect. Link: http://lkml.kernel.org/r/87y3norvgp.fsf@notabene.neil.brown.name Signed-off-by: NeilBrown Acked-by: Ian Kent Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/autofs4/waitq.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 35b755e79c2d..fe6e7050fe50 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi, spin_unlock_irqrestore(¤t->sighand->siglock, flags); } - return (bytes > 0); + /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ + return bytes == 0 ? 0 : wr < 0 ? wr : -EIO; } static void autofs4_notify_daemon(struct autofs_sb_info *sbi, @@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, } pkt; struct file *pipe = NULL; size_t pktsz; + int ret; DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); @@ -173,7 +175,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, mutex_unlock(&sbi->wq_mutex); if (autofs4_write(sbi, pipe, &pkt, pktsz)) + switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { + case 0: + break; + case -ENOMEM: + case -ERESTARTSYS: + /* Just fail this one */ + autofs4_wait_release(sbi, wq->wait_queue_token, ret); + break; + default: autofs4_catatonic_mode(sbi); + break; + } fput(pipe); } From 7d7b05e4ffd5ad7cf54b57dfc2f377d9a543c8c6 Mon Sep 17 00:00:00 2001 From: Andreas Rohner Date: Fri, 17 Nov 2017 15:29:35 -0800 Subject: [PATCH 197/312] nilfs2: fix race condition that causes file system corruption commit 31ccb1f7ba3cfe29631587d451cf5bb8ab593550 upstream. There is a race condition between nilfs_dirty_inode() and nilfs_set_file_dirty(). When a file is opened, nilfs_dirty_inode() is called to update the access timestamp in the inode. It calls __nilfs_mark_inode_dirty() in a separate transaction. __nilfs_mark_inode_dirty() caches the ifile buffer_head in the i_bh field of the inode info structure and marks it as dirty. After some data was written to the file in another transaction, the function nilfs_set_file_dirty() is called, which adds the inode to the ns_dirty_files list. Then the segment construction calls nilfs_segctor_collect_dirty_files(), which goes through the ns_dirty_files list and checks the i_bh field. If there is a cached buffer_head in i_bh it is not marked as dirty again. Since nilfs_dirty_inode() and nilfs_set_file_dirty() use separate transactions, it is possible that a segment construction that writes out the ifile occurs in-between the two. If this happens the inode is not on the ns_dirty_files list, but its ifile block is still marked as dirty and written out. In the next segment construction, the data for the file is written out and nilfs_bmap_propagate() updates the b-tree. Eventually the bmap root is written into the i_bh block, which is not dirty, because it was written out in another segment construction. 
As a result the bmap update can be lost, which leads to file system corruption. Either the virtual block address points to an unallocated DAT block, or the DAT entry will be reused for something different. The error can remain undetected for a long time. A typical error message would be one of the "bad btree" errors or a warning that a DAT entry could not be found. This bug can be reproduced reliably by a simple benchmark that creates and overwrites millions of 4k files. Link: http://lkml.kernel.org/r/1509367935-3086-2-git-send-email-konishi.ryusuke@lab.ntt.co.jp Signed-off-by: Andreas Rohner Signed-off-by: Ryusuke Konishi Tested-by: Andreas Rohner Tested-by: Ryusuke Konishi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segment.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 2f27c935bd57..34c22fe4eca0 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1945,8 +1945,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, "failed to get inode block.\n"); return err; } - mark_buffer_dirty(ibh); - nilfs_mdt_mark_dirty(ifile); spin_lock(&nilfs->ns_inode_lock); if (likely(!ii->i_bh)) ii->i_bh = ibh; @@ -1955,6 +1953,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, goto retry; } + // Always redirty the buffer to avoid race condition + mark_buffer_dirty(ii->i_bh); + nilfs_mdt_mark_dirty(ifile); + clear_bit(NILFS_I_QUEUED, &ii->i_state); set_bit(NILFS_I_BUSY, &ii->i_state); list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); From 9c093a258350e86f781f23fbbef4e3d761c6b1e3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 22 Aug 2017 23:41:28 +0300 Subject: [PATCH 198/312] eCryptfs: use after free in ecryptfs_release_messaging() commit db86be3a12d0b6e5c5b51c2ab2a48f06329cb590 upstream. We're freeing the list iterator so we should be using the _safe() version of hlist_for_each_entry(). Fixes: 88b4a07e6610 ("[PATCH] eCryptfs: Public key transport mechanism") Signed-off-by: Dan Carpenter Signed-off-by: Tyler Hicks Signed-off-by: Greg Kroah-Hartman --- fs/ecryptfs/messaging.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 286f10b0363b..4f457d5c4933 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void) } if (ecryptfs_daemon_hash) { struct ecryptfs_daemon *daemon; + struct hlist_node *n; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; - hlist_for_each_entry(daemon, - &ecryptfs_daemon_hash[i], - euid_chain) { + hlist_for_each_entry_safe(daemon, n, + &ecryptfs_daemon_hash[i], + euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " From 85c79043808d400ace74c99526fb9f4f67253102 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Fri, 13 Oct 2017 16:35:29 -0700 Subject: [PATCH 199/312] bcache: check ca->alloc_thread initialized before wake up it commit 91af8300d9c1d7c6b6a2fd754109e08d4798b8d8 upstream. In bcache code, sysfs entries are created before all resources get allocated, e.g. allocation thread of a cache set. There is posibility for NULL pointer deference if a resource is accessed but which is not initialized yet. Indeed Jorg Bornschein catches one on cache set allocation thread and gets a kernel oops. 
The reason for this bug is, when bch_bucket_alloc() is called during cache set registration and attaching, ca->alloc_thread is not properly allocated and initialized yet, call wake_up_process() on ca->alloc_thread triggers NULL pointer deference failure. A simple and fast fix is, before waking up ca->alloc_thread, checking whether it is allocated, and only wake up ca->alloc_thread when it is not NULL. Signed-off-by: Coly Li Reported-by: Jorg Bornschein Cc: Kent Overstreet Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8eeab72b93e2..ea47980949ef 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -406,7 +406,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) finish_wait(&ca->set->bucket_wait, &w); out: - wake_up_process(ca->alloc_thread); + if (ca->alloc_thread) + wake_up_process(ca->alloc_thread); trace_bcache_alloc(ca, reserve); From 4e23be6169767d21f95d5bfd5df8a2c6274ee2d7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 19 Oct 2017 16:47:48 +0200 Subject: [PATCH 200/312] isofs: fix timestamps beyond 2027 commit 34be4dbf87fc3e474a842305394534216d428f5d upstream. isofs uses a 'char' variable to load the number of years since 1900 for an inode timestamp. On architectures that use a signed char type by default, this results in an invalid date for anything beyond 2027. This changes the function argument to a 'u8' array, which is defined the same way on all architectures, and unambiguously lets us use years until 2155. This should be backported to all kernels that might still be in use by that date. Signed-off-by: Arnd Bergmann Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/isofs/isofs.h | 2 +- fs/isofs/rock.h | 2 +- fs/isofs/util.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 0ac4c1f73fbd..25177e6bd603 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p) /* Ignore bigendian datum due to broken mastering programs */ return get_unaligned_le32(p); } -extern int iso_date(char *, int); +extern int iso_date(u8 *, int); struct inode; /* To make gcc happy */ diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h index ed09e2b08637..f835976ce033 100644 --- a/fs/isofs/rock.h +++ b/fs/isofs/rock.h @@ -65,7 +65,7 @@ struct RR_PL_s { }; struct stamp { - char time[7]; + __u8 time[7]; /* actually 6 unsigned, 1 signed */ } __attribute__ ((packed)); struct RR_TF_s { diff --git a/fs/isofs/util.c b/fs/isofs/util.c index 005a15cfd30a..37860fea364d 100644 --- a/fs/isofs/util.c +++ b/fs/isofs/util.c @@ -15,7 +15,7 @@ * to GMT. Thus we should always be correct. */ -int iso_date(char * p, int flag) +int iso_date(u8 *p, int flag) { int year, month, day, hour, minute, second, tz; int crtime; From ab33df42eb3c53711c487c839d36379ffca3aeb9 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Tue, 7 Nov 2017 16:25:47 -0600 Subject: [PATCH 201/312] NFS: Fix typo in nomigration mount option commit f02fee227e5f21981152850744a6084ff3fa94ee upstream. The option was incorrectly masking off all other options. 
Signed-off-by: Joshua Watt Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f1268280244e..3149f7e58d6f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1322,7 +1322,7 @@ static int nfs_parse_mount_options(char *raw, mnt->options |= NFS_OPTION_MIGRATION; break; case Opt_nomigration: - mnt->options &= NFS_OPTION_MIGRATION; + mnt->options &= ~NFS_OPTION_MIGRATION; break; /* From 2a2d4b41472c73439adc6b8b55fed212d85f4faa Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 5 Nov 2017 15:45:22 -0500 Subject: [PATCH 202/312] nfs: Fix ugly referral attributes commit c05cefcc72416a37eba5a2b35f0704ed758a9145 upstream. Before traversing a referral and performing a mount, the mounted-on directory looks strange: dr-xr-xr-x. 2 4294967294 4294967294 0 Dec 31 1969 dir.0 nfs4_get_referral is wiping out any cached attributes with what was returned via GETATTR(fs_locations), but the bit mask for that operation does not request any file attributes. Retrieve owner and timestamp information so that the memcpy in nfs4_get_referral fills in more attributes. Changes since v1: - Don't request attributes that the client unconditionally replaces - Request only MOUNTED_ON_FILEID or FILEID attribute, not both - encode_fs_locations() doesn't use the third bitmask word Fixes: 6b97fd3da1ea ("NFSv4: Follow a referral") Suggested-by: Pradeep Thomas Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4proc.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8e425f2c5ddd..6fef53f18dcf 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -242,15 +242,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE }; const u32 nfs4_fs_locations_bitmap[3] = { - FATTR4_WORD0_TYPE - | FATTR4_WORD0_CHANGE + FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID | FATTR4_WORD0_FS_LOCATIONS, - FATTR4_WORD1_MODE - | FATTR4_WORD1_NUMLINKS - | FATTR4_WORD1_OWNER + FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED @@ -6351,9 +6348,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, struct page *page) { struct nfs_server *server = NFS_SERVER(dir); - u32 bitmask[3] = { - [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, - }; + u32 bitmask[3]; struct nfs4_fs_locations_arg args = { .dir_fh = NFS_FH(dir), .name = name, @@ -6372,12 +6367,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, dprintk("%s: start\n", __func__); + bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; + bitmask[1] = nfs4_fattr_bitmap[1]; + /* Ask for the fileid of the absent filesystem if mounted_on_fileid * is not supported */ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) - bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; + bitmask[0] &= ~FATTR4_WORD0_FILEID; else - bitmask[0] |= FATTR4_WORD0_FILEID; + bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; From a8b8ab79ca4fd69182f5864ddcd59994717e0186 Mon Sep 17 00:00:00 2001 From: Andrew Elble Date: Fri, 3 Nov 2017 14:06:31 -0400 Subject: [PATCH 203/312] nfsd: deal with revoked delegations appropriately commit 95da1b3a5aded124dd1bda1e3cdb876184813140 upstream. 
If a delegation has been revoked by the server, operations using that delegation should error out with NFS4ERR_DELEG_REVOKED in the >4.1 case, and NFS4ERR_BAD_STATEID otherwise. The server needs NFSv4.1 clients to explicitly free revoked delegations. If the server returns NFS4ERR_DELEG_REVOKED, the client will do that; otherwise it may just forget about the delegation and be unable to recover when it later sees SEQ4_STATUS_RECALLABLE_STATE_REVOKED set on a SEQUENCE reply. That can cause the Linux 4.1 client to loop in its stage manager. Signed-off-by: Andrew Elble Reviewed-by: Trond Myklebust Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ca9ebc3242d3..421935f3d909 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3829,7 +3829,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei { struct nfs4_stid *ret; - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); + ret = find_stateid_by_type(cl, s, + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); @@ -3852,6 +3853,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { + nfs4_put_stid(&deleg->dl_stid); + if (cl->cl_minorversion) + status = nfserr_deleg_revoked; + goto out; + } flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { @@ -4696,6 +4703,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; + bool return_revoked = false; + + /* + * only return revoked delegations if explicitly asked. + * otherwise we report revoked or bad_stateid status. + */ + if (typemask & NFS4_REVOKED_DELEG_STID) + return_revoked = true; + else if (typemask & NFS4_DELEG_STID) + typemask |= NFS4_REVOKED_DELEG_STID; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; @@ -4710,6 +4727,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, *s = find_stateid_by_type(cstate->clp, stateid, typemask); if (!*s) return nfserr_bad_stateid; + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { + nfs4_put_stid(*s); + if (cstate->minorversion) + return nfserr_deleg_revoked; + return nfserr_bad_stateid; + } return nfs_ok; } From 3c260c60d20c51283904462015069063f78b065d Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 14 Sep 2017 13:17:44 -0500 Subject: [PATCH 204/312] rtlwifi: rtl8192ee: Fix memory leak when loading firmware commit 519ce2f933fa14acf69d5c8cabcc18711943d629 upstream. In routine rtl92ee_set_fw_rsvdpagepkt(), the driver allocates an skb, but never calls rtl_cmd_send_packet(), which will free the buffer. All other rtlwifi drivers perform this operation correctly. This problem has been in the driver since it was included in the kernel. Fortunately, each firmware load only leaks 4 buffers, which likely explains why it has not previously been detected. 
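The leak follows from the ownership rule of the send helper: rtl_cmd_send_packet() consumes the skb, so a path that allocates the buffer but never hands it off keeps it forever. A rough user-space sketch of that consume-on-send contract (generic names and plain malloc/free, not the driver API):

    #include <stdlib.h>
    #include <string.h>

    /* The send helper takes ownership and frees the buffer, much like the
     * driver's command-send path described above. */
    static int send_packet(void *buf, size_t len)
    {
            int ok = (len > 0);     /* stand-in for the real transmit */

            free(buf);
            return ok;
    }

    static int upload_page(const void *page, size_t len)
    {
            void *buf = malloc(len);

            if (!buf)
                    return -1;
            memcpy(buf, page, len);
            /* Returning here without calling send_packet() would leak
             * buf once per call, which is the bug described above. */
            return send_packet(buf, len) ? 0 : -1;
    }

    int main(void)
    {
            char page[64] = { 0 };

            return upload_page(page, sizeof(page));
    }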
Signed-off-by: Larry Finger Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 0708eedd9671..1c69e8140d9d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - + bool rtstatus; u32 totalpacketlen; u8 u1rsvdpageloc[5] = { 0 }; bool b_dlok = false; @@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - b_dlok = true; + rtstatus = rtl_cmd_send_packet(hw, skb); + if (rtstatus) + b_dlok = true; if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , From f1be21021099046fbe288ae957cd75ab1c7a0daf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Nov 2017 14:55:35 +0100 Subject: [PATCH 205/312] rtlwifi: fix uninitialized rtlhal->last_suspend_sec time commit 3f2a162fab15aee243178b5308bb5d1206fc4043 upstream. We set rtlhal->last_suspend_sec to an uninitialized stack variable, but unfortunately gcc never warned about this, I only found it while working on another patch. I opened a gcc bug for this. Presumably the value of rtlhal->last_suspend_sec is not all that important, but it does get used, so we probably want the patch backported to stable kernels. Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82839 Signed-off-by: Arnd Bergmann Acked-by: Larry Finger Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index bbb789f8990b..738d541a2255 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1377,6 +1377,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) ppsc->wakeup_reason = 0; + do_gettimeofday(&ts); rtlhal->last_suspend_sec = ts.tv_sec; switch (fw_reason) { From 189bc689547a57100cb0c5d0e689a8555e98b547 Mon Sep 17 00:00:00 2001 From: Rameshwar Prasad Sahu Date: Thu, 2 Nov 2017 16:31:07 +0530 Subject: [PATCH 206/312] ata: fixes kernel crash while tracing ata_eh_link_autopsy event commit f1601113ddc0339a745e702f4fb1ca37d4875e65 upstream. When tracing ata link error event, the kernel crashes when the disk is removed due to NULL pointer access by trace_ata_eh_link_autopsy API. This occurs as the dev is NULL when the disk disappeared. This patch fixes this crash by calling trace_ata_eh_link_autopsy only if "dev" is not NULL. v2 changes: Removed direct passing "link" pointer instead of "dev" in trace API. 
Signed-off-by: Rameshwar Prasad Sahu Signed-off-by: Tejun Heo Fixes: 255c03d15a29 ("libata: Add tracepoints") Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-eh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 91a9e6af2ec4..75cced210b2a 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2245,8 +2245,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); DPRINTK("EXIT\n"); } From db12d9b5a18143712362b5ccc4b077bf57b040ec Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 6 Oct 2017 23:09:55 -0400 Subject: [PATCH 207/312] ext4: fix interaction between i_size, fallocate, and delalloc after a crash commit 51e3ae81ec58e95f10a98ef3dd6d7bce5d8e35a2 upstream. If there are pending writes subject to delayed allocation, then i_size will show size after the writes have completed, while i_disksize contains the value of i_size on the disk (since the writes have not been persisted to disk). If fallocate(2) is called with the FALLOC_FL_KEEP_SIZE flag, either with or without the FALLOC_FL_ZERO_RANGE flag set, and the new size after the fallocate(2) is between i_size and i_disksize, then after a crash, if a journal commit has resulted in the changes made by the fallocate() call to be persisted after a crash, but the delayed allocation write has not resolved itself, i_size would not be updated, and this would cause the following e2fsck complaint: Inode 12, end of extent exceeds allowed value (logical block 33, physical block 33441, len 7) This can only take place on a sparse file, where the fallocate(2) call is allocating blocks in a range which is before a pending delayed allocation write which is extending i_size. Since this situation is quite rare, and the window in which the crash must take place is typically < 30 seconds, in practice this condition will rarely happen. Nevertheless, it can be triggered in testing, and in particular by xfstests generic/456. Signed-off-by: Theodore Ts'o Reported-by: Amir Goldstein Signed-off-by: Greg Kroah-Hartman --- fs/ext4/extents.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 61d5bfc7318c..31a3e480d484 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4818,7 +4818,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) @@ -4994,7 +4995,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) From b71cf750ddd3ab64e8596b9b07ff78a8a669ef30 Mon Sep 17 00:00:00 2001 From: Henrik Eriksson Date: Tue, 21 Nov 2017 09:29:28 +0100 Subject: [PATCH 208/312] ALSA: pcm: update tstamp only if audio_tstamp changed commit 20e3f985bb875fea4f86b04eba4b6cc29bfd6b71 upstream. 
commit 3179f6200188 ("ALSA: core: add .get_time_info") had a side effect of changing the behaviour of the PCM runtime tstamp. Prior to this change tstamp was not updated by snd_pcm_update_hw_ptr0() unless the hw_ptr had moved, after this change tstamp was always updated. For an application using alsa-lib, doing snd_pcm_readi() followed by snd_pcm_status() to estimate the age of the read samples by subtracting status->avail * [sample rate] from status->tstamp this change degraded the accuracy of the estimate on devices where the pcm hw does not provide a granular hw_ptr, e.g., devices using soc-generic-dmaengine-pcm.c and a dma-engine with residue_granularity DMA_RESIDUE_GRANULARITY_DESCRIPTOR. The accuracy of the estimate depended on the latency between the PCM hw completing a period and the driver called snd_pcm_period_elapsed() to notify ALSA core, typically determined by interrupt handling latency. After the change the accuracy of the estimate depended on the latency between the PCM hw completing a period and the application calling snd_pcm_status(), determined by the scheduling of the application process. The maximum error of the estimate is one period length in both cases, but the error average and variance is smaller when it depends on interrupt latency. Instead of always updating tstamp, update it only if audio_tstamp changed. Fixes: 3179f6200188 ("ALSA: core: add .get_time_info") Suggested-by: Pierre-Louis Bossart Signed-off-by: Henrik Eriksson Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_lib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 0aca39762ed0..cd20f91326fe 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -264,8 +264,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream, runtime->rate); *audio_tstamp = ns_to_timespec(audio_nsecs); } - runtime->status->audio_tstamp = *audio_tstamp; - runtime->status->tstamp = *curr_tstamp; + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) { + runtime->status->audio_tstamp = *audio_tstamp; + runtime->status->tstamp = *curr_tstamp; + } /* * re-take a driver timestamp to let apps detect if the reference tstamp From d1316b9d83de1f0ce969206aaa5d3a1a60dc5c37 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 16:55:51 +0100 Subject: [PATCH 209/312] ALSA: usb-audio: Add sanity checks to FE parser commit d937cd6790a2bef2d07b500487646bd794c039bb upstream. When the usb-audio descriptor contains the malformed feature unit description with a too short length, the driver may access out-of-bounds. Add a sanity check of the header size at the beginning of parse_audio_feature_unit(). 
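The defence is the usual one for device-supplied descriptors: validate the declared length before touching any field beyond the fixed header. A small user-space sketch of that check (struct layout loosely modelled on a UAC1 feature unit; the 7-byte minimum mirrors the check described above):

    #include <stdint.h>
    #include <stddef.h>

    struct feature_unit_hdr {
            uint8_t bLength;            /* total length, set by the device */
            uint8_t bDescriptorType;
            uint8_t bDescriptorSubtype;
            uint8_t bUnitID;
            uint8_t bSourceID;
            uint8_t bControlSize;
    };

    /* Return bControlSize, or -1 if the descriptor is too short for the
     * field to exist; device-supplied lengths are never trusted. */
    static int get_control_size(const uint8_t *desc, size_t avail)
    {
            const struct feature_unit_hdr *hdr = (const void *)desc;

            if (avail < sizeof(*hdr) || hdr->bLength < 7 || hdr->bLength > avail)
                    return -1;
            return hdr->bControlSize;
    }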
Fixes: 23caaf19b11e ("ALSA: usb-mixer: Add support for Audio Class v2.0") Reported-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index a23efc8671d6..7008e74f8235 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1397,6 +1397,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { + if (hdr->bLength < 7) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = hdr->bControlSize; if (!csize) { usb_audio_dbg(state->chip, @@ -1414,6 +1420,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; + if (hdr->bLength < 6) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; From 0b6cede2e45500452c5c49d9cfb7d10cb1aca437 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 17:00:32 +0100 Subject: [PATCH 210/312] ALSA: usb-audio: Fix potential out-of-bound access at parsing SU commit f658f17b5e0e339935dca23e77e0f3cad591926b upstream. The usb-audio driver may trigger an out-of-bound access at parsing a malformed selector unit, as it checks the header length only after evaluating bNrInPins field, which can be already above the given length. Fix it by adding the length check beforehand. Fixes: 99fc86450c43 ("ALSA: usb-mixer: parse descriptors with structs") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 7008e74f8235..1050008d7719 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2026,7 +2026,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, const struct usbmix_name_map *map; char **namelist; - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { + if (desc->bLength < 5 || !desc->bNrInPins || + desc->bLength < 5 + desc->bNrInPins) { usb_audio_err(state->chip, "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; From 3532750d20f56f0165bd196aee3c6bb9fb5c1fdb Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 17:28:06 +0100 Subject: [PATCH 211/312] ALSA: usb-audio: Add sanity checks in v2 clock parsers commit 0a62d6c966956d77397c32836a5bbfe3af786fc1 upstream. The helper functions to parse and look for the clock source, selector and multiplier unit may return the descriptor with a too short length than required, while there is no sanity check in the caller side. Add some sanity checks in the parsers, at least, to guarantee the given descriptor size, for avoiding the potential crashes. 
Fixes: 79f920fbff56 ("ALSA: usb-audio: parse clock topology of UAC2 devices") Reported-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/clock.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 7ccbcaf6a147..66294eb64501 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SOURCE))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SELECTOR))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) { + if (cs->bLength < 5 + cs->bNrInPins) + return NULL; return cs; + } } return NULL; @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_MULTIPLIER))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } From ef67455316481c4a418542495636505a0ab8c88e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 16:36:11 +0100 Subject: [PATCH 212/312] ALSA: timer: Remove kernel warning at compat ioctl error paths commit 3d4e8303f2c747c8540a0a0126d0151514f6468b upstream. Some timer compat ioctls have NULL checks of timer instance with snd_BUG_ON() that bring up WARN_ON() when the debug option is set. Actually the condition can be met in the normal situation and it's confusing and bad to spew kernel warnings with stack trace there. Let's remove snd_BUG_ON() invocation and replace with the simple checks. Also, correct the error code to EBADFD to follow the native ioctl error handling. Reported-by: syzbot Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/timer_compat.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c index 0b4b028e8e98..de9155eed727 100644 --- a/sound/core/timer_compat.c +++ b/sound/core/timer_compat.c @@ -40,11 +40,11 @@ static int snd_timer_user_info_compat(struct file *file, struct snd_timer *t; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; t = tu->timeri->timer; - if (snd_BUG_ON(!t)) - return -ENXIO; + if (!t) + return -EBADFD; memset(&info, 0, sizeof(info)); info.card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) @@ -73,8 +73,8 @@ static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 status; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp.tv_sec = tu->tstamp.tv_sec; status.tstamp.tv_nsec = tu->tstamp.tv_nsec; From 509ab500a240f42fd35a696e735ca8e787668791 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Wed, 22 Nov 2017 15:21:32 +0800 Subject: [PATCH 213/312] ALSA: hda/realtek - Fix ALC700 family no sound issue commit 2d7fe6185722b0817bb345f62ab06b76a7b26542 upstream. It maybe the typo for ALC700 support patch. To fix the bit value on this patch. 
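alc_update_coef_idx() is a read-modify-write helper: the third argument is the mask of bits to clear and the fourth is the bits to set, so the new value is (old & ~mask) | set. With that convention, (0, 1 << 15) sets bit 15 while (1 << 15, 0) clears it, which is the whole difference the fix makes. A tiny stand-alone sketch of the same convention on a plain variable:

    #include <stdio.h>

    /* new = (old & ~mask) | set, the same convention as the driver helper */
    static unsigned int update_coef(unsigned int old,
                                    unsigned int mask, unsigned int set)
    {
            return (old & ~mask) | set;
    }

    int main(void)
    {
            printf("set bit 15:   0x%04x\n", update_coef(0x0000, 0, 1u << 15));
            printf("clear bit 15: 0x%04x\n", update_coef(0x8000, 1u << 15, 0));
            return 0;
    }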
Fixes: 6fbae35a3170 ("ALSA: hda/realtek - Add support for new codecs ALC700/ALC701/ALC703") Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index af0962307b7f..64085a512dbf 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6254,7 +6254,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0703: spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ break; } From 1b11593eb742ab6b69b1c23110fba024c5257101 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 24 Sep 2017 18:36:44 -0400 Subject: [PATCH 214/312] fix a page leak in vhost_scsi_iov_to_sgl() error recovery commit 11d49e9d089ccec81be87c2386dfdd010d7f7f6e upstream. we are advancing sg as we go, so the pages we need to drop in case of error are *before* the current sg. Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/scsi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e4110d6de0b5..da6cc25baaef 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -703,6 +703,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; + struct scatterlist *p = sg; int i, ret; for (i = 0; i < iter->nr_segs; i++) { @@ -711,8 +712,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); if (ret < 0) { - for (i = 0; i < sg_count; i++) { - struct page *page = sg_page(&sg[i]); + while (p < sg) { + struct page *page = sg_page(p++); if (page) put_page(page); } From 8709c5386109557e6cb9e3c38f011ce627fe3ce8 Mon Sep 17 00:00:00 2001 From: Tuomas Tynkkynen Date: Wed, 6 Sep 2017 17:59:07 +0300 Subject: [PATCH 215/312] fs/9p: Compare qid.path in v9fs_test_inode commit 8ee031631546cf2f7859cc69593bd60bbdd70b46 upstream. Commit fd2421f54423 ("fs/9p: When doing inode lookup compare qid details and inode mode bits.") transformed v9fs_qid_iget() to use iget5_locked() instead of iget_locked(). However, the test() callback is not checking fid.path at all, which means that a lookup in the inode cache can now accidentally locate a completely wrong inode from the same inode hash bucket if the other fields (qid.type and qid.version) match. 
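The same pitfall applies to any hashed lookup whose test() callback compares only part of the identity: two objects in the same bucket that differ only in the omitted field are wrongly treated as equal. A minimal sketch of a complete comparison (a generic qid-like key, not the v9fs structures):

    #include <stdbool.h>
    #include <stdint.h>

    struct qid {
            uint8_t  type;
            uint32_t version;
            uint64_t path;    /* the field the incomplete test ignored */
    };

    /* An identity test must compare every field of the key. */
    static bool qid_equal(const struct qid *a, const struct qid *b)
    {
            return a->type == b->type &&
                   a->version == b->version &&
                   a->path == b->path;
    }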
Fixes: fd2421f54423 ("fs/9p: When doing inode lookup compare qid details and inode mode bits.") Reviewed-by: Latchesar Ionkov Signed-off-by: Tuomas Tynkkynen Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/9p/vfs_inode.c | 3 +++ fs/9p/vfs_inode_dotl.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 511078586fa1..73f1d1b3a51c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index cb899af1babc..0b88744c6446 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } From 63bfc4c90a878df5fcb4a80a06390393dbee1519 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 27 Oct 2017 20:52:56 -0700 Subject: [PATCH 216/312] iscsi-target: Fix non-immediate TMR reference leak commit 3fc9fb13a4b2576aeab86c62fd64eb29ab68659c upstream. This patch fixes a se_cmd->cmd_kref reference leak that can occur when a non immediate TMR is proceeded our of command sequence number order, and CMDSN_LOWER_THAN_EXP is returned by iscsit_sequence_cmd(). To address this bug, call target_put_sess_cmd() during this special case following what iscsit_process_scsi_cmd() does upon CMDSN_LOWER_THAN_EXP. Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index fd493412b172..bb73401f5761 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1923,12 +1923,14 @@ attach: if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { return -1; + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); From 4e426ed57cd24b8d6621a4cf60d715d7a7da9a76 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 22 Sep 2017 16:48:28 -0700 Subject: [PATCH 217/312] target: Fix QUEUE_FULL + SCSI task attribute handling commit 1c79df1f349fb6050016cea4ef1dfbc3853a5685 upstream. This patch fixes a bug during QUEUE_FULL where transport_complete_qf() calls transport_complete_task_attr() after it's already been invoked by target_complete_ok_work() or transport_generic_request_failure() during initial completion, preceeding QUEUE_FULL. This will result in se_device->simple_cmds, se_device->dev_cur_ordered_id and/or se_device->dev_ordered_sync being updated multiple times for a single se_cmd. To address this bug, clear SCF_TASK_ATTR_SET after the first call to transport_complete_task_attr(), and avoid updating SCSI task attribute related counters for any subsequent calls. 
Also, when a se_cmd is deferred due to ordered tags and executed via target_restart_delayed_cmds(), set CMD_T_SENT before execution matching what target_execute_cmd() does. Cc: Michael Cyr Cc: Bryant G. Ly Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_transport.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a42054edd427..37abf881ca75 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1970,6 +1970,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); + cmd->transport_state |= CMD_T_SENT; + __target_execute_cmd(cmd, true); if (cmd->sam_task_attr == TCM_ORDERED_TAG) @@ -2007,6 +2009,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + restart: target_restart_delayed_cmds(dev); } From 8293dc75de475ae9b6b0071f8dcf3585cb8be42a Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Wed, 11 Oct 2017 16:54:42 +0200 Subject: [PATCH 218/312] KVM: nVMX: set IDTR and GDTR limits when loading L1 host state commit 21f2d551183847bc7fbe8d866151d00cdad18752 upstream. Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers: "The GDTR and IDTR limits are each set to FFFFH." Signed-off-by: Ladi Prosek Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9114588e3e61..67ba0d8f87c7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10394,6 +10394,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) From a694b1f85aeafb674ad3c47e2efa3c3de69abf5e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Oct 2017 09:13:27 +0200 Subject: [PATCH 219/312] KVM: SVM: obey guest PAT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 15038e14724799b8c205beb5f20f9e54896013c3 upstream. For many years some users of assigned devices have reported worse performance on AMD processors with NPT than on AMD without NPT, Intel or bare metal. The reason turned out to be that SVM is discarding the guest PAT setting and uses the default (PA0=PA4=WB, PA1=PA5=WT, PA2=PA6=UC-, PA3=UC). The guest might be using a different setting, and especially might want write combining but isn't getting it (instead getting slow UC or UC- accesses). Thanks a lot to geoff@hostfission.com for noticing the relation to the g_pat setting. The patch has been tested also by a bunch of people on VFIO users forums. 
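For reference, the IA32_PAT MSR holds eight entries, one byte each, with the memory type in the low three bits (per the Intel SDM: UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7). A short sketch that decodes the power-on default quoted above:

    #include <stdio.h>
    #include <stdint.h>

    static const char *pat_type(unsigned int t)
    {
            switch (t) {
            case 0: return "UC";
            case 1: return "WC";
            case 4: return "WT";
            case 5: return "WP";
            case 6: return "WB";
            case 7: return "UC-";
            default: return "reserved";
            }
    }

    int main(void)
    {
            uint64_t pat = 0x0007040600070406ULL;   /* power-on default */
            int i;

            for (i = 0; i < 8; i++)
                    printf("PA%d = %s\n", i, pat_type((pat >> (i * 8)) & 0x7));
            return 0;
    }

A guest that expects write-combining from one of its own PAT entries instead gets whatever the default holds at that index, matching the slow accesses described above.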
Fixes: 709ddebf81cb40e3c36c6109a7892e8b93a09464 Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=196409 Signed-off-by: Paolo Bonzini Reviewed-by: David Hildenbrand Tested-by: Nick Sarnie Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 899c40f826dd..4e1b254c3695 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3114,6 +3114,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_IA32_CR_PAT: + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm->vmcb->save.g_pat = data; + mark_dirty(svm->vmcb, VMCB_NPT); + break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; From 36d2f19430e2711a1b327ac1734de4a52c67abd2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Oct 2017 17:31:42 -0400 Subject: [PATCH 220/312] SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status commit e9d4bf219c83d09579bc62512fea2ca10f025d93 upstream. There is no guarantee that either the request or the svc_xprt exist by the time we get round to printing the trace message. Signed-off-by: Trond Myklebust Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- include/trace/events/sunrpc.h | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 5664ca07c9c7..a01a076ea060 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = status > 0 ? rqst->rq_xid : 0; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), - TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr, + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", + (struct sockaddr *)__get_dynamic_array(addr), be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); @@ -480,22 +482,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) - __field(int, dropme) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = rqst->rq_xid; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s", - __entry->addr, be32_to_cpu(__entry->xid), + (struct sockaddr *)__get_dynamic_array(addr), + be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); From eca460c485f93de1474352d1d509e1a3e9f1d407 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 11 Mar 2016 16:13:32 +0200 Subject: [PATCH 221/312] clk: ti: dra7-atl-clock: Fix of_node reference counting commit 660e1551939931657808d47838a3f443c0e83fd0 upstream. of_find_node_by_name() will call of_node_put() on the node so we need to get it first to avoid warnings. 
The cfg_node needs to be put after we have finished processing the properties. Signed-off-by: Peter Ujfalusi Tested-by: Nishanth Menon Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/ti/clk-dra7-atl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 2e14dfb588f4..c77333230bdf 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -265,6 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); + of_node_get(node); cfg_node = of_find_node_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", @@ -278,6 +279,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i), cdesc->aws); } + of_node_put(cfg_node); } cdesc->probed = true; From 037646313522559a65e91952c2e1ec623abf5147 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 17:29:29 +0100 Subject: [PATCH 222/312] clk: ti: dra7-atl-clock: fix child-node lookups commit 33ec6dbc5a02677509d97fe36cd2105753f0f0ea upstream. Fix child node-lookup during probe, which ended up searching the whole device tree depth-first starting at parent rather than just matching on its children. Note that the original premature free of the parent node has already been fixed separately, but that fix was apparently never backported to stable. Fixes: 9ac33b0ce81f ("CLK: TI: Driver for DRA7 ATL (Audio Tracking Logic)") Fixes: 660e15519399 ("clk: ti: dra7-atl-clock: Fix of_node reference counting") Cc: Peter Ujfalusi Signed-off-by: Johan Hovold Acked-by: Peter Ujfalusi Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/ti/clk-dra7-atl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index c77333230bdf..7d060ffe8975 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -265,8 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); - of_node_get(node); - cfg_node = of_find_node_by_name(node, prop); + cfg_node = of_get_child_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); From 4dae2f771fa72083cf247ea44bcdb7096409527d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 11:41:28 -0700 Subject: [PATCH 223/312] libnvdimm, namespace: fix label initialization to use valid seq numbers commit b18d4b8a25af6fe83d7692191d6ff962ea611c4f upstream. The set of valid sequence numbers is {1,2,3}. The specification indicates that an implementation should consider 0 a sign of a critical error: UEFI 2.7: 13.19 NVDIMM Label Protocol Software never writes the sequence number 00, so a correctly check-summed Index Block with this sequence number probably indicates a critical error. When software discovers this case it treats it as an invalid Index Block indication. While the expectation is that the invalid block is just thrown away, the Robustness Principle says we should fix this to make both sequence numbers valid. 
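The sequence number therefore lives in a tiny cyclic space, 1 -> 2 -> 3 -> 1, with 0 reserved as the corruption marker. A small sketch of the validity and increment rules (it mirrors the idea, not the exact libnvdimm code):

    #include <stdio.h>
    #include <stdbool.h>

    static bool seq_valid(unsigned int seq)
    {
            return seq >= 1 && seq <= 3;    /* 0 flags a bad index block */
    }

    /* Advance within 1 -> 2 -> 3 -> 1; an invalid input stays invalid. */
    static unsigned int inc_seq(unsigned int seq)
    {
            static const unsigned int next[] = { 0, 2, 3, 1 };

            return next[seq & 3];
    }

    int main(void)
    {
            unsigned int s;

            for (s = 0; s <= 3; s++)
                    printf("seq %u: valid=%d next=%u\n",
                           s, seq_valid(s), inc_seq(s));
            return 0;
    }

Writing both initial index blocks with sequence numbers drawn from this set, rather than letting one of them start at 0, is what the one-line change below achieves.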
Fixes: f524bf271a5c ("libnvdimm: write pmem label set") Reported-by: Juston Li Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 96526dcfdd37..ff7b9632ad61 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -823,7 +823,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) nsindex = to_namespace_index(ndd, 0); memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT); if (rc) return rc; From 54a8d930b93f171acaffbac83a1d0f595fa277bd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 11:21:24 -0700 Subject: [PATCH 224/312] libnvdimm, namespace: make 'resource' attribute only readable by root commit c1fb3542074fd0c4d901d778bd52455111e4eb6f upstream. For the same reason that /proc/iomem returns 0's for non-root readers and acpi tables are root-only, make the 'resource' attribute for namespace devices only readable by root. Otherwise we disclose physical address information. Fixes: bf9bccc14c05 ("libnvdimm: pmem label sets and namespace instantiation") Reported-by: Dave Hansen Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/namespace_devs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index aae7379af4e4..c2184104b789 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1305,7 +1305,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (a == &dev_attr_resource.attr) { if (is_namespace_blk(dev)) return 0; - return a->mode; + return 0400; } if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { From 3e32b40435b9d5e073c788dcfebb679c8a6ace1e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:27:22 -0700 Subject: [PATCH 225/312] IB/srpt: Do not accept invalid initiator port names commit c70ca38960399a63d5c048b7b700612ea321d17e upstream. Make srpt_parse_i_port_id() return a negative value if hex2bin() fails. 
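The broader rule is that a parser of untrusted text must propagate the conversion failure rather than discard it. A compact user-space sketch of a hex2bin-style helper whose caller can simply return the error (simplified, not the srpt code):

    #include <stddef.h>

    static int hex_val(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    /* Convert 2*count hex digits into count bytes; 0 on success, -1 on
     * the first non-hex character, so callers cannot ignore bad input. */
    static int hex2bin_simple(unsigned char *dst, const char *src, size_t count)
    {
            while (count--) {
                    int hi = hex_val(*src++);
                    int lo = hex_val(*src++);

                    if (hi < 0 || lo < 0)
                            return -1;
                    *dst++ = (unsigned char)(hi << 4 | lo);
            }
            return 0;
    }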
Fixes: commit a42d985bd5b2 ("ib_srpt: Initial SRP Target merge for v3.3-rc1") Signed-off-by: Bart Van Assche Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srpt/ib_srpt.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index eaabf3125846..c52131233ba7 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3425,7 +3425,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) { const char *p; unsigned len, count, leading_zero_bytes; - int ret, rc; + int ret; p = name; if (strncasecmp(p, "0x", 2) == 0) @@ -3437,10 +3437,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) count = min(len / 2, 16U); leading_zero_bytes = 16 - count; memset(i_port_id, 0, leading_zero_bytes); - rc = hex2bin(i_port_id + leading_zero_bytes, p, count); - if (rc < 0) - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); - ret = 0; + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (ret < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret); out: return ret; } From ecc5e8914766d6d2c14f06e5dac0bfd173505d85 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:27:26 -0700 Subject: [PATCH 226/312] IB/srp: Avoid that a cable pull can trigger a kernel crash commit 8a0d18c62121d3c554a83eb96e2752861d84d937 upstream. This patch fixes the following kernel crash: general protection fault: 0000 [#1] PREEMPT SMP Workqueue: ib_mad2 timeout_sends [ib_core] Call Trace: ib_sa_path_rec_callback+0x1c4/0x1d0 [ib_core] send_handler+0xb2/0xd0 [ib_core] timeout_sends+0x14d/0x220 [ib_core] process_one_work+0x200/0x630 worker_thread+0x4e/0x3b0 kthread+0x113/0x150 Fixes: commit aef9ec39c47f ("IB: Add SCSI RDMA Protocol (SRP) initiator") Signed-off-by: Bart Van Assche Reviewed-by: Sagi Grimberg Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srp/ib_srp.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e397f1b0af09..9a99cee2665a 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -670,12 +670,19 @@ static void srp_path_rec_completion(int status, static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; - int ret; + int ret = -ENODEV; ch->path.numb_path = 1; init_completion(&ch->done); + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before srp_path_rec_completion() is called. 
+ */ + if (!scsi_host_get(target->scsi_host)) + goto out; + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, @@ -689,18 +696,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch) GFP_KERNEL, srp_path_rec_completion, ch, &ch->path_query); - if (ch->path_query_id < 0) - return ch->path_query_id; + ret = ch->path_query_id; + if (ret < 0) + goto put; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto put; - if (ch->status < 0) + ret = ch->status; + if (ret < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return ch->status; +put: + scsi_host_put(target->scsi_host); + +out: + return ret; } static int srp_send_req(struct srp_rdma_ch *ch, bool multich) From 76c389ca1331aeec038d7a69cb1b8a0f3719dd72 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sun, 9 Jul 2017 13:08:58 +0200 Subject: [PATCH 227/312] NFC: fix device-allocation error return commit c45e3e4c5b134b081e8af362109905427967eb19 upstream. A recent change fixing NFC device allocation itself introduced an error-handling bug by returning an error pointer in case device-id allocation failed. This is clearly broken as the callers still expected NULL to be returned on errors as detected by Dan's static checker. Fix this up by returning NULL in the event that we've run out of memory when allocating a new device id. Note that the offending commit is marked for stable (3.8) so this fix needs to be backported along with it. Fixes: 20777bc57c34 ("NFC: fix broken device allocation") Reported-by: Dan Carpenter Signed-off-by: Johan Hovold Signed-off-by: Samuel Ortiz Signed-off-by: Greg Kroah-Hartman --- net/nfc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/nfc/core.c b/net/nfc/core.c index c5a2c7e733b3..1471e4b0aa2c 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1093,7 +1093,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, err_free_dev: kfree(dev); - return ERR_PTR(rc); + return NULL; } EXPORT_SYMBOL(nfc_allocate_device); From 146d533713859f7990703021e33ab63c8f7bcb1d Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:44 -0600 Subject: [PATCH 228/312] i40e: Use smp_rmb rather than read_barrier_depends commit 52c6912fde0133981ee50ba08808f257829c4c93 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with i40e as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4edbab6ca7ef..b5b228c9a030 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3595,7 +3595,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 26c55bba4bf3..6dcc3854844d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -663,7 +663,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) From 391cdaaaa9d9ce6b4cdc659d94ae59f9384f5e37 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:47 -0600 Subject: [PATCH 229/312] igb: Use smp_rmb rather than read_barrier_depends commit c4cb99185b4cc96c0a1c70104dc21ae14d7e7f28 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with igb as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ff6e57d788eb..c55552c3d2f9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6433,7 +6433,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) From 2f7de4d5f525fc39c83e6c423e12f1954106bf38 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:46 -0600 Subject: [PATCH 230/312] igbvf: Use smp_rmb rather than read_barrier_depends commit 1e1f9ca546556e508d021545861f6b5fc75a95fe upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with igbvf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igbvf/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 297af801f051..519b72c41888 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -809,7 +809,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) From a8e699dfca04191e209a63d484e347aa921422e2 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:45 -0600 Subject: [PATCH 231/312] ixgbevf: Use smp_rmb rather than read_barrier_depends commit ae0c585d93dfaf923d2c7eb44b2c3ab92854ea9b upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with ixgbevf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 592ff237d692..50bbad37d640 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -312,7 +312,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) From b4ca98b7a8030a154478cfdf48fadb6ddbb2b04b Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:49 -0600 Subject: [PATCH 232/312] i40evf: Use smp_rmb rather than read_barrier_depends commit f72271e2a0ae4277d53c4053f5eed8bb346ba38a upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with i40evf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 39db70a597ed..1ed27fcd5031 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -172,7 +172,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) From bb923a81c33e543ead08420ac03e7d704243d6b0 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:48 -0600 Subject: [PATCH 233/312] fm10k: Use smp_rmb rather than read_barrier_depends commit 7b8edcc685b5e2c3c37aa13dc50a88e84a5bfef8 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with fm10k as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 09281558bfbc..c21fa56afd7c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1226,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) From aef7cdb6a6ffa07da7f713d9164d50f954df10b6 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:43 -0600 Subject: [PATCH 234/312] ixgbe: Fix skb list corruption on Power systems commit 0a9a17e3bb4564caf4bfe2a6783ae1287667d188 upstream. This patch fixes an issue seen on Power systems with ixgbe which results in skb list corruption and an eventual kernel oops. The following is what was observed: CPU 1 CPU2 ============================ ============================ 1: ixgbe_xmit_frame_ring ixgbe_clean_tx_irq 2: first->skb = skb eop_desc = tx_buffer->next_to_watch 3: ixgbe_tx_map read_barrier_depends() 4: wmb check adapter written status bit 5: first->next_to_watch = tx_desc napi_consume_skb(tx_buffer->skb ..); 6: writel(i, tx_ring->tail); The read_barrier_depends is insufficient to ensure that tx_buffer->skb does not get loaded prior to tx_buffer->next_to_watch, which then results in loading a stale skb pointer. This patch replaces the read_barrier_depends with smp_rmb to ensure loads are ordered with respect to the load of tx_buffer->next_to_watch. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 83645d8503d4..a5b443171b8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1114,7 +1114,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) From 937a91cd399220c11f513e899218b26b226ee334 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Sat, 11 Nov 2017 17:11:16 -0500 Subject: [PATCH 235/312] parisc: Fix validity check of pointer size argument in new CAS implementation commit 05f016d2ca7a4fab99d5d5472168506ddf95e74f upstream. As noted by Christoph Biedl, passing a pointer size of 4 in the new CAS implementation causes a kernel crash. The attached patch corrects the off by one error in the argument validity check. In reviewing the code, I noticed that we only perform word operations with the pointer size argument. The subi instruction intentionally uses a word condition on 64-bit kernels. Nullification was used instead of a cmpib instruction as the branch should never be taken. The shlw pseudo-operation generates a depw,z instruction and it clears the target before doing a shift left word deposit. Thus, we don't need to clip the upper 32 bits of this argument on 64-bit kernels. Tested with a gcc testsuite run with a 64-bit kernel. The gcc atomic code in libgcc is the only direct user of the new CAS implementation that I am aware of. Signed-off-by: John David Anglin Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/kernel/syscall.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index c6b855f7892c..9f22195b90ed 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -688,15 +688,15 @@ cas_action: /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers */ + /* Clip the input registers. We don't need to clip %r23 as we + only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 - depdi 0, 31, 32, %r23 #endif /* Check the validity of the size pointer */ - subi,>>= 4, %r23, %r0 + subi,>>= 3, %r23, %r0 b,n lws_exit_nosys /* Jump to the functions which will load the old and new values into From a2943ce580f57d75df763c37846e9e478861e0fd Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 31 Aug 2017 21:55:57 +0530 Subject: [PATCH 236/312] powerpc/signal: Properly handle return value from uprobe_deny_signal() commit 46725b17f1c6c815a41429259b3f070c01e71bc1 upstream. When a uprobe is installed on an instruction that we currently do not emulate, we copy the instruction into a xol buffer and single step that instruction. If that instruction generates a fault, we abort the single stepping before invoking the signal handler. Once the signal handler is done, the uprobe trap is hit again since the instruction is retried and the process repeats. We use uprobe_deny_signal() to detect if the xol instruction triggered a signal. 
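All of these conversions implement the same publish/consume shape: the transmit path writes the descriptor fields and then publishes next_to_watch, and the clean-up path must order its load of next_to_watch before the loads that depend on it. In user-space C11 atomics the pattern looks roughly like this (smp_wmb()/smp_rmb() play the role of the release and acquire fences; the structure and names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stddef.h>

    struct tx_buffer {
            void *skb;                        /* written by the producer */
            _Atomic(void *) next_to_watch;    /* publication marker */
    };

    /* Producer: fill in the payload, then publish it. */
    static void publish(struct tx_buffer *b, void *skb, void *desc)
    {
            b->skb = skb;
            atomic_store_explicit(&b->next_to_watch, desc,
                                  memory_order_release);
    }

    /* Consumer: load the marker with acquire ordering first, so the later
     * read of b->skb cannot observe a stale value. */
    static void *consume(struct tx_buffer *b)
    {
            void *desc = atomic_load_explicit(&b->next_to_watch,
                                              memory_order_acquire);

            if (!desc)
                    return NULL;    /* not published yet */
            return b->skb;
    }

read_barrier_depends() only orders loads that carry an address dependency on the published pointer; the load of tx_buffer->skb has no such dependency on next_to_watch, which is why smp_rmb(), a load/load barrier, is the right primitive here.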
If so, we clear TIF_SIGPENDING and set TIF_UPROBE so that the signal is not handled until after the single stepping is aborted. In this case, uprobe_deny_signal() returns true and get_signal() ends up returning 0. However, in do_signal(), we are not looking at the return value, but depending on ksig.sig for further action, all with an uninitialized ksig that is not touched in this scenario. Fix the same by initializing ksig.sig to 0. Fixes: 129b69df9c90 ("powerpc: Use get_signal() signal_setup_done()") Reported-by: Anton Blanchard Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index cf8c7e4e0b21..984a54c85952 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, static void do_signal(struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); - struct ksignal ksig; + struct ksignal ksig = { .sig = 0 }; int ret; int is32 = is_32bit_task(); From 878c0f9a7c693b238b30a85f870e5334a66a3935 Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Mon, 6 Nov 2017 08:50:22 -0500 Subject: [PATCH 237/312] media: Don't do DMA on stack for firmware upload in the AS102 driver commit b3120d2cc447ee77b9d69bf4ad7b452c9adb4d39 upstream. Firmware load on AS102 is using the stack which is not allowed any longer. We currently fail with: kernel: transfer buffer not dma capable kernel: ------------[ cut here ]------------ kernel: WARNING: CPU: 0 PID: 598 at drivers/usb/core/hcd.c:1595 usb_hcd_map_urb_for_dma+0x41d/0x620 kernel: Modules linked in: amd64_edac_mod(-) edac_mce_amd as102_fe dvb_as102(+) kvm_amd kvm snd_hda_codec_realtek dvb_core snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec irqbypass crct10dif_pclmul crc32_pclmul snd_hda_core snd_hwdep snd_seq ghash_clmulni_intel sp5100_tco fam15h_power wmi k10temp i2c_piix4 snd_seq_device snd_pcm snd_timer parport_pc parport tpm_infineon snd tpm_tis soundcore tpm_tis_core tpm shpchp acpi_cpufreq xfs libcrc32c amdgpu amdkfd amd_iommu_v2 radeon hid_logitech_hidpp i2c_algo_bit drm_kms_helper crc32c_intel ttm drm r8169 mii hid_logitech_dj kernel: CPU: 0 PID: 598 Comm: systemd-udevd Not tainted 4.13.10-200.fc26.x86_64 #1 kernel: Hardware name: ASUS All Series/AM1I-A, BIOS 0505 03/13/2014 kernel: task: ffff979933b24c80 task.stack: ffffaf83413a4000 kernel: RIP: 0010:usb_hcd_map_urb_for_dma+0x41d/0x620 systemd-fsck[659]: /dev/sda2: clean, 49/128016 files, 268609/512000 blocks kernel: RSP: 0018:ffffaf83413a7728 EFLAGS: 00010282 systemd-udevd[604]: link_config: autonegotiation is unset or enabled, the speed and duplex are not writable. kernel: RAX: 000000000000001f RBX: ffff979930bce780 RCX: 0000000000000000 kernel: RDX: 0000000000000000 RSI: ffff97993ec0e118 RDI: ffff97993ec0e118 kernel: RBP: ffffaf83413a7768 R08: 000000000000039a R09: 0000000000000000 kernel: R10: 0000000000000001 R11: 00000000ffffffff R12: 00000000fffffff5 kernel: R13: 0000000001400000 R14: 0000000000000001 R15: ffff979930806800 kernel: FS: 00007effaca5c8c0(0000) GS:ffff97993ec00000(0000) knlGS:0000000000000000 kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 kernel: CR2: 00007effa9fca962 CR3: 0000000233089000 CR4: 00000000000406f0 kernel: Call Trace: kernel: usb_hcd_submit_urb+0x493/0xb40 kernel: ? page_cache_tree_insert+0x100/0x100 kernel: ? 
xfs_iunlock+0xd5/0x100 [xfs] kernel: ? xfs_file_buffered_aio_read+0x57/0xc0 [xfs] kernel: usb_submit_urb+0x22d/0x560 kernel: usb_start_wait_urb+0x6e/0x180 kernel: usb_bulk_msg+0xb8/0x160 kernel: as102_send_ep1+0x49/0xe0 [dvb_as102] kernel: ? devres_add+0x3f/0x50 kernel: as102_firmware_upload.isra.0+0x1dc/0x210 [dvb_as102] kernel: as102_fw_upload+0xb6/0x1f0 [dvb_as102] kernel: as102_dvb_register+0x2af/0x2d0 [dvb_as102] kernel: as102_usb_probe+0x1f3/0x260 [dvb_as102] kernel: usb_probe_interface+0x124/0x300 kernel: driver_probe_device+0x2ff/0x450 kernel: __driver_attach+0xa4/0xe0 kernel: ? driver_probe_device+0x450/0x450 kernel: bus_for_each_dev+0x6e/0xb0 kernel: driver_attach+0x1e/0x20 kernel: bus_add_driver+0x1c7/0x270 kernel: driver_register+0x60/0xe0 kernel: usb_register_driver+0x81/0x150 kernel: ? 0xffffffffc0807000 kernel: as102_usb_driver_init+0x1e/0x1000 [dvb_as102] kernel: do_one_initcall+0x50/0x190 kernel: ? __vunmap+0x81/0xb0 kernel: ? kfree+0x154/0x170 kernel: ? kmem_cache_alloc_trace+0x15f/0x1c0 kernel: ? do_init_module+0x27/0x1e9 kernel: do_init_module+0x5f/0x1e9 kernel: load_module+0x2602/0x2c30 kernel: SYSC_init_module+0x170/0x1a0 kernel: ? SYSC_init_module+0x170/0x1a0 kernel: SyS_init_module+0xe/0x10 kernel: do_syscall_64+0x67/0x140 kernel: entry_SYSCALL64_slow_path+0x25/0x25 kernel: RIP: 0033:0x7effab6cf3ea kernel: RSP: 002b:00007fff5cfcbbc8 EFLAGS: 00000246 ORIG_RAX: 00000000000000af kernel: RAX: ffffffffffffffda RBX: 00005569e0b83760 RCX: 00007effab6cf3ea kernel: RDX: 00007effac2099c5 RSI: 0000000000009a13 RDI: 00005569e0b98c50 kernel: RBP: 00007effac2099c5 R08: 00005569e0b83ed0 R09: 0000000000001d80 kernel: R10: 00007effab98db00 R11: 0000000000000246 R12: 00005569e0b98c50 kernel: R13: 00005569e0b81c60 R14: 0000000000020000 R15: 00005569dfadfdf7 kernel: Code: 48 39 c8 73 30 80 3d 59 60 9d 00 00 41 bc f5 ff ff ff 0f 85 26 ff ff ff 48 c7 c7 b8 6b d0 92 c6 05 3f 60 9d 00 01 e8 24 3d ad ff <0f> ff 8b 53 64 e9 09 ff ff ff 65 48 8b 0c 25 00 d3 00 00 48 8b kernel: ---[ end trace c4cae366180e70ec ]--- kernel: as10x_usb: error during firmware upload part1 Let's allocate the the structure dynamically so we can get the firmware loaded correctly: [ 14.243057] as10x_usb: firmware: as102_data1_st.hex loaded with success [ 14.500777] as10x_usb: firmware: as102_data2_st.hex loaded with success Signed-off-by: Michele Baldessari Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/as102/as102_fw.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 07d08c49f4d4..b2e16bb67572 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { - struct as10x_fw_pkt_t fw_pkt; + struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); + if (!fw_pkt) + return -ENOMEM; + + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), - fw_pkt.raw.address, - fw_pkt.raw.data, + fw_pkt->raw.address, + fw_pkt->raw.data, &data_len, &addr_has_changed); @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, /* 
detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x03; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, 2, 0); + fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x01; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x01; - data_len += sizeof(fw_pkt.u.request); - data_len += sizeof(fw_pkt.raw.address); + data_len += sizeof(fw_pkt->u.request); + data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, + fw_pkt, data_len, 0); if (errno < 0) @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, } } error: + kfree(fw_pkt); return (errno == 0) ? total_read_bytes : errno; } From d758f4d8bf20e74f2afc6dfd0120d332d0fb9217 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sun, 8 Oct 2017 14:18:52 -0400 Subject: [PATCH 238/312] media: rc: check for integer overflow commit 3e45067f94bbd61dec0619b1c32744eb0de480c8 upstream. The ioctl LIRC_SET_REC_TIMEOUT would set a timeout of 704ns if called with a timeout of 4294968us. Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/ir-lirc-codec.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index efc21b1da211..ca107033e429 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->max_timeout) return -ENOSYS; + /* Check for multiply overflow */ + if (val > U32_MAX / 1000) + return -EINVAL; + tmp = val * 1000; - if (tmp < dev->min_timeout || - tmp > dev->max_timeout) - return -EINVAL; + if (tmp < dev->min_timeout || tmp > dev->max_timeout) + return -EINVAL; dev->timeout = tmp; break; From 0870fb4c3566088dc222e582e43edbc9ececbce4 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 21 Sep 2017 05:40:18 -0300 Subject: [PATCH 239/312] cx231xx-cards: fix NULL-deref on missing association descriptor commit 6c3b047fa2d2286d5e438bcb470c7b1a49f415f6 upstream. Make sure to check that we actually have an Interface Association Descriptor before dereferencing it during probe to avoid dereferencing a NULL-pointer. 
Fixes: e0d3bafd0258 ("V4L/DVB (10954): Add cx231xx USB driver") Reported-by: Andrey Konovalov Signed-off-by: Johan Hovold Tested-by: Andrey Konovalov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/cx231xx/cx231xx-cards.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 2c5f76d588ac..04ae21278440 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1672,7 +1672,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; - if (assoc_desc->bFirstInterface != ifnum) { + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; From 5a11b8458b35da1ad797973aa3d81120dd42cce8 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Tue, 17 Oct 2017 11:48:50 -0400 Subject: [PATCH 240/312] media: v4l2-ctrl: Fix flags field on Control events commit 9cac9d2fb2fe0e0cadacdb94415b3fe49e3f724f upstream. VIDIOC_DQEVENT and VIDIOC_QUERY_EXT_CTRL should give the same output for the control flags field. This patch creates a new function user_flags(), that calculates the user exported flags value (which is different than the kernel internal flags structure). This function is then used by all the code that exports the internal flags to userspace. Reported-by: Dimitrios Katsaros Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-ctrls.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 4a1d9fdd14bb..523758e71fe6 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1200,6 +1200,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1207,7 +1217,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2536,10 +2546,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; From cb1831a83e54cd3269a2420fce81c4fd8ae6f667 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 6 Oct 2017 14:05:04 -0400 Subject: [PATCH 241/312] sched/rt: Simplify the IPI based RT balancing logic commit 4bdced5c9a2922521e325896a7bbbf0132c94e56 upstream. 
When a CPU lowers its priority (schedules out a high priority task for a lower priority one), a check is made to see if any other CPU has overloaded RT tasks (more than one). It checks the rto_mask to determine this and if so it will request to pull one of those tasks to itself if the non running RT task is of higher priority than the new priority of the next task to run on the current CPU. When we deal with large number of CPUs, the original pull logic suffered from large lock contention on a single CPU run queue, which caused a huge latency across all CPUs. This was caused by only having one CPU having overloaded RT tasks and a bunch of other CPUs lowering their priority. To solve this issue, commit: b6366f048e0c ("sched/rt: Use IPI to trigger RT task push migration instead of pulling") changed the way to request a pull. Instead of grabbing the lock of the overloaded CPU's runqueue, it simply sent an IPI to that CPU to do the work. Although the IPI logic worked very well in removing the large latency build up, it still could suffer from a large number of IPIs being sent to a single CPU. On a 80 CPU box, I measured over 200us of processing IPIs. Worse yet, when I tested this on a 120 CPU box, with a stress test that had lots of RT tasks scheduling on all CPUs, it actually triggered the hard lockup detector! One CPU had so many IPIs sent to it, and due to the restart mechanism that is triggered when the source run queue has a priority status change, the CPU spent minutes! processing the IPIs. Thinking about this further, I realized there's no reason for each run queue to send its own IPI. As all CPUs with overloaded tasks must be scanned regardless if there's one or many CPUs lowering their priority, because there's no current way to find the CPU with the highest priority task that can schedule to one of these CPUs, there really only needs to be one IPI being sent around at a time. This greatly simplifies the code! The new approach is to have each root domain have its own irq work, as the rto_mask is per root domain. The root domain has the following fields attached to it: rto_push_work - the irq work to process each CPU set in rto_mask rto_lock - the lock to protect some of the other rto fields rto_loop_start - an atomic that keeps contention down on rto_lock the first CPU scheduling in a lower priority task is the one to kick off the process. rto_loop_next - an atomic that gets incremented for each CPU that schedules in a lower priority task. rto_loop - a variable protected by rto_lock that is used to compare against rto_loop_next rto_cpu - The cpu to send the next IPI to, also protected by the rto_lock. When a CPU schedules in a lower priority task and wants to make sure overloaded CPUs know about it. It increments the rto_loop_next. Then it atomically sets rto_loop_start with a cmpxchg. If the old value is not "0", then it is done, as another CPU is kicking off the IPI loop. If the old value is "0", then it will take the rto_lock to synchronize with a possible IPI being sent around to the overloaded CPUs. If rto_cpu is greater than or equal to nr_cpu_ids, then there's either no IPI being sent around, or one is about to finish. Then rto_cpu is set to the first CPU in rto_mask and an IPI is sent to that CPU. If there's no CPUs set in rto_mask, then there's nothing to be done. When the CPU receives the IPI, it will first try to push any RT tasks that is queued on the CPU but can't run because a higher priority RT task is currently running on that CPU. 
Then it takes the rto_lock and looks for the next CPU in the rto_mask. If it finds one, it simply sends an IPI to that CPU and the process continues. If there's no more CPUs in the rto_mask, then rto_loop is compared with rto_loop_next. If they match, everything is done and the process is over. If they do not match, then a CPU scheduled in a lower priority task as the IPI was being passed around, and the process needs to start again. The first CPU in rto_mask is sent the IPI. This change removes this duplication of work in the IPI logic, and greatly lowers the latency caused by the IPIs. This removed the lockup happening on the 120 CPU machine. It also simplifies the code tremendously. What else could anyone ask for? Thanks to Peter Zijlstra for simplifying the rto_loop_start atomic logic and supplying me with the rto_start_trylock() and rto_start_unlock() helper functions. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Clark Williams Cc: Daniel Bristot de Oliveira Cc: John Kacur Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Scott Wood Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170424114732.1aac6dc4@gandalf.local.home Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/core.c | 6 ++ kernel/sched/rt.c | 237 +++++++++++++++++++++---------------------- kernel/sched/sched.h | 24 +++-- 3 files changed, 139 insertions(+), 128 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a99c32946e49..15874a85ebcf 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5907,6 +5907,12 @@ static int init_rootdomain(struct root_domain *rd) if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) goto free_dlo_mask; +#ifdef HAVE_RT_PUSH_IPI + rd->rto_cpu = -1; + raw_spin_lock_init(&rd->rto_lock); + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); +#endif + init_dl_bw(&rd->dl_bw); if (cpudl_init(&rd->cpudl) != 0) goto free_dlo_mask; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 78ae5c1d9412..faa75afcb7fe 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -64,10 +64,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } -#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI) -static void push_irq_work_func(struct irq_work *work); -#endif - void init_rt_rq(struct rt_rq *rt_rq) { struct rt_prio_array *array; @@ -87,13 +83,6 @@ void init_rt_rq(struct rt_rq *rt_rq) rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init(&rt_rq->pushable_tasks); - -#ifdef HAVE_RT_PUSH_IPI - rt_rq->push_flags = 0; - rt_rq->push_cpu = nr_cpu_ids; - raw_spin_lock_init(&rt_rq->push_lock); - init_irq_work(&rt_rq->push_work, push_irq_work_func); -#endif #endif /* CONFIG_SMP */ /* We start is dequeued state, because no RT tasks are queued */ rt_rq->rt_queued = 0; @@ -1802,160 +1791,166 @@ static void push_rt_tasks(struct rq *rq) } #ifdef HAVE_RT_PUSH_IPI + /* - * The search for the next cpu always starts at rq->cpu and ends - * when we reach rq->cpu again. It will never return rq->cpu. - * This returns the next cpu to check, or nr_cpu_ids if the loop - * is complete. + * When a high priority task schedules out from a CPU and a lower priority + * task is scheduled in, a check is made to see if there's any RT tasks + * on other CPUs that are waiting to run because a higher priority RT task + * is currently running on its CPU. 
In this case, the CPU with multiple RT + * tasks queued on it (overloaded) needs to be notified that a CPU has opened + * up that may be able to run one of its non-running queued RT tasks. + * + * All CPUs with overloaded RT tasks need to be notified as there is currently + * no way to know which of these CPUs have the highest priority task waiting + * to run. Instead of trying to take a spinlock on each of these CPUs, + * which has shown to cause large latency when done on machines with many + * CPUs, sending an IPI to the CPUs to have them push off the overloaded + * RT tasks waiting to run. + * + * Just sending an IPI to each of the CPUs is also an issue, as on large + * count CPU machines, this can cause an IPI storm on a CPU, especially + * if its the only CPU with multiple RT tasks queued, and a large number + * of CPUs scheduling a lower priority task at the same time. + * + * Each root domain has its own irq work function that can iterate over + * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT + * tassk must be checked if there's one or many CPUs that are lowering + * their priority, there's a single irq work iterator that will try to + * push off RT tasks that are waiting to run. + * + * When a CPU schedules a lower priority task, it will kick off the + * irq work iterator that will jump to each CPU with overloaded RT tasks. + * As it only takes the first CPU that schedules a lower priority task + * to start the process, the rto_start variable is incremented and if + * the atomic result is one, then that CPU will try to take the rto_lock. + * This prevents high contention on the lock as the process handles all + * CPUs scheduling lower priority tasks. + * + * All CPUs that are scheduling a lower priority task will increment the + * rt_loop_next variable. This will make sure that the irq work iterator + * checks all RT overloaded CPUs whenever a CPU schedules a new lower + * priority task, even if the iterator is in the middle of a scan. Incrementing + * the rt_loop_next will cause the iterator to perform another scan. * - * rq->rt.push_cpu holds the last cpu returned by this function, - * or if this is the first instance, it must hold rq->cpu. */ static int rto_next_cpu(struct rq *rq) { - int prev_cpu = rq->rt.push_cpu; + struct root_domain *rd = rq->rd; + int next; int cpu; - cpu = cpumask_next(prev_cpu, rq->rd->rto_mask); - /* - * If the previous cpu is less than the rq's CPU, then it already - * passed the end of the mask, and has started from the beginning. - * We end if the next CPU is greater or equal to rq's CPU. + * When starting the IPI RT pushing, the rto_cpu is set to -1, + * rt_next_cpu() will simply return the first CPU found in + * the rto_mask. + * + * If rto_next_cpu() is called with rto_cpu is a valid cpu, it + * will return the next CPU found in the rto_mask. + * + * If there are no more CPUs left in the rto_mask, then a check is made + * against rto_loop and rto_loop_next. rto_loop is only updated with + * the rto_lock held, but any CPU may increment the rto_loop_next + * without any locking. */ - if (prev_cpu < rq->cpu) { - if (cpu >= rq->cpu) - return nr_cpu_ids; + for (;;) { + + /* When rto_cpu is -1 this acts like cpumask_first() */ + cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); + + rd->rto_cpu = cpu; + + if (cpu < nr_cpu_ids) + return cpu; + + rd->rto_cpu = -1; - } else if (cpu >= nr_cpu_ids) { /* - * We passed the end of the mask, start at the beginning. 
- * If the result is greater or equal to the rq's CPU, then - * the loop is finished. + * ACQUIRE ensures we see the @rto_mask changes + * made prior to the @next value observed. + * + * Matches WMB in rt_set_overload(). */ - cpu = cpumask_first(rq->rd->rto_mask); - if (cpu >= rq->cpu) - return nr_cpu_ids; - } - rq->rt.push_cpu = cpu; + next = atomic_read_acquire(&rd->rto_loop_next); - /* Return cpu to let the caller know if the loop is finished or not */ - return cpu; + if (rd->rto_loop == next) + break; + + rd->rto_loop = next; + } + + return -1; } -static int find_next_push_cpu(struct rq *rq) +static inline bool rto_start_trylock(atomic_t *v) { - struct rq *next_rq; - int cpu; - - while (1) { - cpu = rto_next_cpu(rq); - if (cpu >= nr_cpu_ids) - break; - next_rq = cpu_rq(cpu); - - /* Make sure the next rq can push to this rq */ - if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) - break; - } - - return cpu; + return !atomic_cmpxchg_acquire(v, 0, 1); } -#define RT_PUSH_IPI_EXECUTING 1 -#define RT_PUSH_IPI_RESTART 2 +static inline void rto_start_unlock(atomic_t *v) +{ + atomic_set_release(v, 0); +} static void tell_cpu_to_push(struct rq *rq) { - int cpu; + int cpu = -1; - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - raw_spin_lock(&rq->rt.push_lock); - /* Make sure it's still executing */ - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - /* - * Tell the IPI to restart the loop as things have - * changed since it started. - */ - rq->rt.push_flags |= RT_PUSH_IPI_RESTART; - raw_spin_unlock(&rq->rt.push_lock); - return; - } - raw_spin_unlock(&rq->rt.push_lock); - } + /* Keep the loop going if the IPI is currently active */ + atomic_inc(&rq->rd->rto_loop_next); - /* When here, there's no IPI going around */ - - rq->rt.push_cpu = rq->cpu; - cpu = find_next_push_cpu(rq); - if (cpu >= nr_cpu_ids) + /* Only one CPU can initiate a loop at a time */ + if (!rto_start_trylock(&rq->rd->rto_loop_start)) return; - rq->rt.push_flags = RT_PUSH_IPI_EXECUTING; + raw_spin_lock(&rq->rd->rto_lock); - irq_work_queue_on(&rq->rt.push_work, cpu); + /* + * The rto_cpu is updated under the lock, if it has a valid cpu + * then the IPI is still running and will continue due to the + * update to loop_next, and nothing needs to be done here. + * Otherwise it is finishing up and an ipi needs to be sent. + */ + if (rq->rd->rto_cpu < 0) + cpu = rto_next_cpu(rq); + + raw_spin_unlock(&rq->rd->rto_lock); + + rto_start_unlock(&rq->rd->rto_loop_start); + + if (cpu >= 0) + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } /* Called from hardirq context */ -static void try_to_push_tasks(void *arg) +void rto_push_irq_work_func(struct irq_work *work) { - struct rt_rq *rt_rq = arg; - struct rq *rq, *src_rq; - int this_cpu; + struct rq *rq; int cpu; - this_cpu = rt_rq->push_cpu; + rq = this_rq(); - /* Paranoid check */ - BUG_ON(this_cpu != smp_processor_id()); - - rq = cpu_rq(this_cpu); - src_rq = rq_of_rt_rq(rt_rq); - -again: + /* + * We do not need to grab the lock to check for has_pushable_tasks. + * When it gets updated, a check is made if a push is possible. + */ if (has_pushable_tasks(rq)) { raw_spin_lock(&rq->lock); - push_rt_task(rq); + push_rt_tasks(rq); raw_spin_unlock(&rq->lock); } + raw_spin_lock(&rq->rd->rto_lock); + /* Pass the IPI to the next rt overloaded queue */ - raw_spin_lock(&rt_rq->push_lock); - /* - * If the source queue changed since the IPI went out, - * we need to restart the search from that CPU again. 
- */ - if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) { - rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART; - rt_rq->push_cpu = src_rq->cpu; - } + cpu = rto_next_cpu(rq); - cpu = find_next_push_cpu(src_rq); + raw_spin_unlock(&rq->rd->rto_lock); - if (cpu >= nr_cpu_ids) - rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING; - raw_spin_unlock(&rt_rq->push_lock); - - if (cpu >= nr_cpu_ids) + if (cpu < 0) return; - /* - * It is possible that a restart caused this CPU to be - * chosen again. Don't bother with an IPI, just see if we - * have more to push. - */ - if (unlikely(cpu == rq->cpu)) - goto again; - /* Try the next RT overloaded CPU */ - irq_work_queue_on(&rt_rq->push_work, cpu); -} - -static void push_irq_work_func(struct irq_work *work) -{ - struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work); - - try_to_push_tasks(rt_rq); + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } #endif /* HAVE_RT_PUSH_IPI */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 55d92a1ca070..448a8266ceea 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -429,7 +429,7 @@ static inline int rt_bandwidth_enabled(void) } /* RT IPI pull logic requires IRQ_WORK */ -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) # define HAVE_RT_PUSH_IPI #endif @@ -450,12 +450,6 @@ struct rt_rq { unsigned long rt_nr_total; int overloaded; struct plist_head pushable_tasks; -#ifdef HAVE_RT_PUSH_IPI - int push_flags; - int push_cpu; - struct irq_work push_work; - raw_spinlock_t push_lock; -#endif #endif /* CONFIG_SMP */ int rt_queued; @@ -537,6 +531,19 @@ struct root_domain { struct dl_bw dl_bw; struct cpudl cpudl; +#ifdef HAVE_RT_PUSH_IPI + /* + * For IPI pull requests, loop across the rto_mask. + */ + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + /* These are only updated and read within rto_lock */ + int rto_loop; + int rto_cpu; + /* These atomics are updated outside of a lock */ + atomic_t rto_loop_next; + atomic_t rto_loop_start; +#endif /* * The "RT overload" flag: it gets set if a CPU has more than * one runnable RT task. @@ -547,6 +554,9 @@ struct root_domain { extern struct root_domain def_root_domain; +#ifdef HAVE_RT_PUSH_IPI +extern void rto_push_irq_work_func(struct irq_work *work); +#endif #endif /* CONFIG_SMP */ /* From 91bd72dd8c72c603132bbbfd348a4216b8c83f58 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 29 Oct 2017 06:30:19 -0400 Subject: [PATCH 242/312] fscrypt: lock mutex before checking for bounce page pool commit a0b3bc855374c50b5ea85273553485af48caf2f7 upstream. fscrypt_initialize(), which allocates the global bounce page pool when an encrypted file is first accessed, uses "double-checked locking" to try to avoid locking fscrypt_init_mutex. However, it doesn't use any memory barriers, so it's theoretically possible for a thread to observe a bounce page pool which has not been fully initialized. This is a classic bug with "double-checked locking". While "only a theoretical issue" in the latest kernel, in pre-4.8 kernels the pointer that was checked was not even the last to be initialized, so it was easily possible for a crash (NULL pointer dereference) to happen. This was changed only incidentally by the large refactor to use fs/crypto/. Solve both problems in a trivial way that can easily be backported: just always take the mutex. It's theoretically less efficient, but it shouldn't be noticeable in practice as the mutex is only acquired very briefly once per encrypted file. Later I'd like to make this use a helper macro like DO_ONCE(). 
However, DO_ONCE() runs in atomic context, so we'd need to add a new macro that allows blocking. Signed-off-by: Eric Biggers Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/crypto_key.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c index 9a1bc638abce..9308fe4b66e6 100644 --- a/fs/ext4/crypto_key.c +++ b/fs/ext4/crypto_key.c @@ -129,11 +129,9 @@ int ext4_get_encryption_info(struct inode *inode) if (ei->i_crypt_info) return 0; - if (!ext4_read_workqueue) { - res = ext4_init_crypto(); - if (res) - return res; - } + res = ext4_init_crypto(); + if (res) + return res; res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, From fd3c395d4d466a8d76c6afd01c9a0e718daf6ca4 Mon Sep 17 00:00:00 2001 From: Tuomas Tynkkynen Date: Wed, 6 Sep 2017 17:59:08 +0300 Subject: [PATCH 243/312] net/9p: Switch to wait_event_killable() commit 9523feac272ccad2ad8186ba4fcc89103754de52 upstream. Because userspace gets Very Unhappy when calls like stat() and execve() return -EINTR on 9p filesystem mounts. For instance, when bash is looking in PATH for things to execute and some SIGCHLD interrupts stat(), bash can throw a spurious 'command not found' since it doesn't retry the stat(). In practice, hitting the problem is rare and needs a really slow/bogged down 9p server. Signed-off-by: Tuomas Tynkkynen Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- net/9p/client.c | 3 +-- net/9p/trans_virtio.c | 13 ++++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/net/9p/client.c b/net/9p/client.c index f5feac4ff4ec..3ff26eb1ea20 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
} again: /* Wait for the response */ - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Make sure our req is coherent with regard to updates in other diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 6e70ddb158b4..2ddeecca5b12 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -290,8 +290,8 @@ req_retry: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) return err; @@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, * Other zc request to finish here */ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { - err = wait_event_interruptible(vp_wq, + err = wait_event_killable(vp_wq, (atomic_read(&vp_pinned) < chan->p9_max_pages)); if (err == -ERESTARTSYS) return err; @@ -475,8 +475,8 @@ req_retry_pinned: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) goto err_out; @@ -493,8 +493,7 @@ req_retry_pinned: virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Non kernel buffers are pinned, unpin them */ From d6968bc56e4469324482b31fa3304fff0e196c46 Mon Sep 17 00:00:00 2001 From: Tobias Jordan Date: Wed, 4 Oct 2017 11:35:03 +0530 Subject: [PATCH 244/312] PM / OPP: Add missing of_node_put(np) commit 7978db344719dab1e56d05e6fc04aaaddcde0a5e upstream. The for_each_available_child_of_node() loop in _of_add_opp_table_v2() doesn't drop the reference to "np" on errors. Fix that. Fixes: 274659029c9d (PM / OPP: Add support to parse "operating-points-v2" bindings) Signed-off-by: Tobias Jordan [ VK: Improved commit log. ] Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/opp/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index f8580900c273..db6e7e57081c 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1205,6 +1205,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) if (ret) { dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); + of_node_put(np); goto free_table; } } From fbb2d8000a8f2a257f33ecb2d1fc346dcfe2e09f Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:23 -0700 Subject: [PATCH 245/312] e1000e: Fix error path in link detection commit c4c40e51f9c32c6dd8adf606624c930a1c4d9bbb upstream. In case of error from e1e_rphy(), the loop will exit early and "success" will be set to true erroneously. 
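The flaw is easier to see in isolation. Below is a simplified, self-contained sketch (not the driver code; the read_status() helper stands in for e1e_rphy() and bit 0x4 stands in for BMSR_LSTATUS) of why deriving *success from the loop counter is wrong, and what the fix does instead:

#include <stdbool.h>

bool poll_link(int iterations, int (*read_status)(unsigned int *status),
	       bool *success)
{
	unsigned int status;
	int i, err = 0;

	*success = false;                    /* fix: assume no link up front */
	for (i = 0; i < iterations; i++) {
		err = read_status(&status);
		if (err)
			break;               /* early exit on error is not success */
		if (status & 0x4) {
			*success = true;     /* fix: set only when link is really seen */
			break;
		}
	}
	/*
	 * The old logic did "*success = (i < iterations);" after the loop,
	 * which is also true when the loop broke out early on a read error.
	 */
	return err == 0;
}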
Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/phy.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index de13aeacae97..8e674a0988b0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } From 5df4097cfc7049f64cc0896ede59e94087abaefe Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:25 -0700 Subject: [PATCH 246/312] e1000e: Fix return value test commit d3509f8bc7b0560044c15f0e3ecfde1d9af757a6 upstream. All the helpers return -E1000_ERR_PHY. Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 80ec587d510e..3ecbc00a633a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5035,7 +5035,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); From bbb1fc744894bdba5a94b46b3e178cfb2330831d Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:26 -0700 Subject: [PATCH 247/312] e1000e: Separate signaling for link check/link up commit 19110cfbb34d4af0cdfe14cd243f3b09dc95b013 upstream. Lennart reported the following race condition: \ e1000_watchdog_task \ e1000e_has_link \ hw->mac.ops.check_for_link() === e1000e_check_for_copper_link /* link is up */ mac->get_link_status = false; /* interrupt */ \ e1000_msix_other hw->mac.get_link_status = true; link_active = !hw->mac.get_link_status /* link_active is false, wrongly */ This problem arises because the single flag get_link_status is used to signal two different states: link status needs checking and link status is down. Avoid the problem by using the return value of .check_for_link to signal the link status to e1000e_has_link(). 
Reported-by: Lennart Sorensen Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/mac.c | 11 ++++++++--- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index e59d7c283cd4..645ace74429e 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3ecbc00a633a..5205f1ebe381 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5017,7 +5017,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } From 57d2ce1603101ce3f30d0ccdc35b98af08d2ed88 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 4 Jul 2016 17:04:37 -0700 Subject: [PATCH 248/312] RDS: RDMA: return appropriate error on rdma map failures [ Upstream commit 584a8279a44a800dea5a5c1e9d53a002e03016b4 ] The first message to a remote node should prompt a new connection even if it is RDMA operation. For RDMA operation the MR mapping can fail because connections is not yet up. Since the connection establishment is asynchronous, we make sure the map failure because of unavailable connection reach to the user by appropriate error code. Before returning to the user, lets trigger the connection so that its ready for the next retry. Signed-off-by: Santosh Shilimkar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/rds/send.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/net/rds/send.c b/net/rds/send.c index 6815f03324d7..1a3c6acdd3f8 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -959,6 +959,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, ret = rds_cmsg_rdma_map(rs, rm, cmsg); if (!ret) *allocated_mr = 1; + else if (ret == -ENODEV) + /* Accommodate the get_mr() case which can fail + * if connection isn't established yet. 
+ */ + ret = -EAGAIN; break; case RDS_CMSG_ATOMIC_CSWP: case RDS_CMSG_ATOMIC_FADD: @@ -1072,8 +1077,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) /* Parse any control messages the user may have included. */ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); - if (ret) + if (ret) { + /* Trigger connection so that its ready for the next retry */ + if (ret == -EAGAIN) + rds_conn_connect_if_down(conn); goto out; + } if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", From 37a48e6d83f513c0edabe4f12a80cb9fc4b8286b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 2 Jan 2017 14:04:24 -0600 Subject: [PATCH 249/312] PCI: Apply _HPX settings only to relevant devices [ Upstream commit 977509f7c5c6fb992ffcdf4291051af343b91645 ] Previously we didn't check the type of device before trying to apply Type 1 (PCI-X) or Type 2 (PCIe) Setting Records from _HPX. We don't support PCI-X Setting Records, so this was harmless, but the warning was useless. We do support PCIe Setting Records, and we didn't check whether a device was PCIe before applying settings. I don't think anything bad happened on non-PCIe devices because pcie_capability_clear_and_set_word(), pcie_cap_has_lnkctl(), etc., would fail before doing any harm. But it's ugly to depend on those internals. Check the device type before attempting to apply Type 1 and Type 2 Setting Records (Type 0 records are applicable to PCI, PCI-X, and PCIe devices). A side benefit is that this prevents useless "not supported" warnings when a BIOS supplies a Type 1 (PCI-X) Setting Record and we try to apply it to every single device: pci 0000:00:00.0: PCI-X settings not supported After this patch, we'll get the warning only when a BIOS supplies a Type 1 record and we have a PCI-X device to which it should be applied. Link: https://bugzilla.kernel.org/show_bug.cgi?id=187731 Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/probe.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b83df942794f..193ac13de49b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1414,8 +1414,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) { - if (hpp) - dev_warn(&dev->dev, "PCI-X settings not supported\n"); + int pos; + + if (!hpp) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!pos) + return; + + dev_warn(&dev->dev, "PCI-X settings not supported\n"); } static bool pcie_root_rcb_set(struct pci_dev *dev) @@ -1441,6 +1449,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) if (!hpp) return; + if (!pci_is_pcie(dev)) + return; + if (hpp->revision > 1) { dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", hpp->revision); From 778395506a7f5744dce0cc14876c2f45ac779dea Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 15 Dec 2016 22:03:36 +0800 Subject: [PATCH 250/312] dmaengine: zx: set DMA_CYCLIC cap_mask bit [ Upstream commit fc318d64f3d91e15babac00e08354b1beb650b57 ] The zx_dma driver supports cyclic transfer mode. Let's set DMA_CYCLIC cap_mask bit to make that clear, and avoid unnecessary failure when clients request channel via dma_request_chan_by_mask() with DMA_CYCLIC bit set in mask. 
Signed-off-by: Shawn Guo Reviewed-by: Jun Nie Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/dma/zx296702_dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 245d759d5ffc..6059d81e701a 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op) INIT_LIST_HEAD(&d->slave.channels); dma_cap_set(DMA_SLAVE, d->slave.cap_mask); dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); dma_cap_set(DMA_PRIVATE, d->slave.cap_mask); d->slave.dev = &op->dev; d->slave.device_free_chan_resources = zx_dma_free_chan_resources; From d7c3d5333e39a3089ad259222b767a965963a693 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 29 Dec 2016 15:39:37 -0800 Subject: [PATCH 251/312] net: Allow IP_MULTICAST_IF to set index to L3 slave [ Upstream commit 7bb387c5ab12aeac3d5eea28686489ff46b53ca9 ] IP_MULTICAST_IF fails if sk_bound_dev_if is already set and the new index does not match it. e.g., ntpd[15381]: setsockopt IP_MULTICAST_IF 192.168.1.23 fails: Invalid argument Relax the check in setsockopt to allow setting mc_index to an L3 slave if sk_bound_dev_if points to an L3 master. Make a similar change for IPv6. In this case change the device lookup to take the rcu_read_lock avoiding a refcnt. The rcu lock is also needed for the lookup of a potential L3 master device. This really only silences a setsockopt failure since uses of mc_index are secondary to sk_bound_dev_if if it is set. In both cases, if either index is an L3 slave or master, lookups are directed to the same FIB table so relaxing the check at setsockopt time causes no harm. Patch is based on a suggested change by Darwin for a problem noted in their code base. Suggested-by: Darwin Dingel Signed-off-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ip_sockglue.c | 7 ++++++- net/ipv6/ipv6_sockglue.c | 16 ++++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f300d1cbfa91..097a1243c16c 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -808,6 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, { struct ip_mreqn mreq; struct net_device *dev = NULL; + int midx; if (sk->sk_type == SOCK_STREAM) goto e_inval; @@ -852,11 +853,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EADDRNOTAVAIL; if (!dev) break; + + midx = l3mdev_master_ifindex(dev); + dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if && - mreq.imr_ifindex != sk->sk_bound_dev_if) + mreq.imr_ifindex != sk->sk_bound_dev_if && + (!midx || midx != sk->sk_bound_dev_if)) break; inet->mc_index = mreq.imr_ifindex; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4449ad1f8114..a4a30d2ca66f 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -583,16 +583,24 @@ done: if (val) { struct net_device *dev; + int midx; - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) - goto e_inval; + rcu_read_lock(); - dev = dev_get_by_index(net, val); + dev = dev_get_by_index_rcu(net, val); if (!dev) { + rcu_read_unlock(); retv = -ENODEV; break; } - dev_put(dev); + midx = l3mdev_master_ifindex_rcu(dev); + + rcu_read_unlock(); + + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != val && + (!midx || midx != sk->sk_bound_dev_if)) + goto e_inval; } np->mcast_oif = val; retv = 0; From 609797646f9b1fbfa6817aec31ca1d0ea64efa19 Mon Sep 17 00:00:00 2001 From: Thomas Preisner Date: Fri, 30 Dec 2016 03:37:54 +0100 Subject: [PATCH 252/312] net: 3com: typhoon: typhoon_init_one: make return values more specific [ Upstream commit 6b6bbb5922a4b1d4b58125a572da91010295fba3 ] In some cases the return value of a failing function is not being used and the function typhoon_init_one() returns another negative error code instead. Signed-off-by: Thomas Preisner Signed-off-by: Milan Stephan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/3com/typhoon.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 8f8418d2ac4a..6d0f0eb2ee2f 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * 4) Get the hardware address. * 5) Put the card to sleep. 
*/ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err = typhoon_reset(ioaddr, WaitSleep); + if (err < 0) { err_msg = "could not reset 3XP"; - err = -EIO; goto error_out_dma; } @@ -2382,16 +2382,16 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) typhoon_init_interface(tp); typhoon_init_rings(tp); - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST); + if (err < 0) { err_msg = "cannot boot 3XP sleep image"; - err = -EIO; goto error_out_reset; } INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp); + if (err < 0) { err_msg = "cannot read MAC address"; - err = -EIO; goto error_out_reset; } @@ -2424,9 +2424,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if(xp_resp[0].numDesc != 0) tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err = typhoon_sleep(tp, PCI_D3hot, 0); + if (err < 0) { err_msg = "cannot put adapter to sleep"; - err = -EIO; goto error_out_reset; } From 7f69dc100400482425c108a0dc0102101e192a21 Mon Sep 17 00:00:00 2001 From: Thomas Preisner Date: Fri, 30 Dec 2016 03:37:53 +0100 Subject: [PATCH 253/312] net: 3com: typhoon: typhoon_init_one: fix incorrect return values [ Upstream commit 107fded7bf616ad6f46823d98b8ed6405d7adf2d ] In a few cases the err-variable is not set to a negative error code if a function call in typhoon_init_one() fails and thus 0 is returned instead. It may be better to set err to the appropriate negative error code before returning. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188841 Reported-by: Pan Bian Signed-off-by: Thomas Preisner Signed-off-by: Milan Stephan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/3com/typhoon.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 6d0f0eb2ee2f..a0012c3cb4f6 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2398,8 +2398,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - if(!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { err_msg = "Could not obtain valid ethernet address, aborting"; + err = -EIO; goto error_out_reset; } @@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * later when we print out the version reported. 
*/ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp); + if (err < 0) { err_msg = "Could not get Sleep Image version"; goto error_out_reset; } @@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - if(register_netdev(dev) < 0) { + err = register_netdev(dev); + if (err < 0) { err_msg = "unable to register netdev"; goto error_out_reset; } From c4cf731af838706523c3746fafe49afaa323887c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 30 Dec 2016 17:38:52 +0100 Subject: [PATCH 254/312] drm/armada: Fix compile fail [ Upstream commit 7357f89954b6d005df6ab8929759e78d7d9a80f9 ] I reported the include issue for tracepoints a while ago, but nothing seems to have happened. Now it bit us, since the drm_mm_print conversion was broken for armada. Fix it, so I can re-enable armada in the drm-misc build configs. v2: Rebase just the compile fix on top of Chris' build fix. Cc: Russell King Cc: Chris Wilson Acked: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1483115932-19584-1-git-send-email-daniel.vetter@ffwll.ch Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/armada/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile index ffd673615772..26412d2f8c98 100644 --- a/drivers/gpu/drm/armada/Makefile +++ b/drivers/gpu/drm/armada/Makefile @@ -4,3 +4,5 @@ armada-y += armada_510.o armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o obj-$(CONFIG_DRM_ARMADA) := armada.o + +CFLAGS_armada_trace.o := -I$(src) From 64b22ee723eed2e35265b9431db0e7587c53141f Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Tue, 13 Dec 2016 14:55:19 -0800 Subject: [PATCH 255/312] ath10k: fix incorrect txpower set by P2P_DEVICE interface [ Upstream commit 88407beb1b1462f706a1950a355fd086e1c450b6 ] Ath10k reports the phy capability that supports the P2P_DEVICE interface. When we use a wpa_supplicant that supports P2P to start a connection, it'll create two interfaces: wlan0 (vdev_id=0) and the P2P_DEVICE p2p-dev-wlan0, which is used for the p2p control channel (vdev_id=1). ath10k_pci mac vdev create 0 (add interface) type 2 subtype 0 ath10k_add_interface: vdev_id: 0, txpower: 0, bss_power: 0 ... ath10k_pci mac vdev create 1 (add interface) type 2 subtype 1 ath10k_add_interface: vdev_id: 1, txpower: 0, bss_power: 0 The txpower in the per-vif bss_conf will only be set to a valid tx power when the interface is assigned a channel_ctx. But this P2P_DEVICE interface will never be used for any connection, so the uninitialized bss_conf.txpower=0 is assigned to arvif->txpower when the interface is created. Since the firmware applies the txpower configuration per physical interface, the smallest txpower of all vifs ends up limiting the tx power of the physical device, causing the low txpower issue on the other active interfaces. wlan0: Limiting TX power to 21 (24 - 3) dBm ath10k_pci mac vdev_id 0 txpower 21 ath10k_mac_txpower_recalc: vdev_id: 1, txpower: 0 ath10k_mac_txpower_recalc: vdev_id: 0, txpower: 21 ath10k_pci mac txpower 0 This issue only happens when we use a wpa_supplicant that supports P2P or if we use the iw tool to create the control P2P_DEVICE interface.

Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/mac.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6decf4a95ce1..cf3baf3185e8 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4180,7 +4180,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - WARN_ON(arvif->txpower < 0); + if (arvif->txpower <= 0) + continue; if (txpower == -1) txpower = arvif->txpower; @@ -4188,8 +4189,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) txpower = min(txpower, arvif->txpower); } - if (WARN_ON(txpower == -1)) - return -EINVAL; + if (txpower == -1) + return 0; ret = ath10k_mac_txpower_setup(ar, txpower); if (ret) { From b24769300a00453fb4d12a319c70516fe030354e Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 22 Dec 2016 15:02:37 -0800 Subject: [PATCH 256/312] ath10k: ignore configuring the incorrect board_id [ Upstream commit d2e202c06ca42d353d95df12437740921a6d05b5 ] With command to get board_id from otp, in the case of following boot get otp board id result 0x00000000 board_id 0 chip_id 0 boot using board name 'bus=pci,bmi-chip-id=0,bmi-board-id=0" ... failed to fetch board data for bus=pci,bmi-chip-id=0,bmi-board-id=0 from ath10k/QCA6174/hw3.0/board-2.bin The invalid board_id=0 will be used as index to search in the board-2.bin. Ignore the case with board_id=0, as it means the otp is not carrying the board id information. Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 05de75360fa4..ee638cb8b48f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -548,8 +548,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) "boot get otp board id result 0x%08x board_id %d chip_id %d\n", result, board_id, chip_id); - if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0) + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || + (board_id == 0)) { + ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); return -EOPNOTSUPP; + } ar->id.bmi_ids_valid = true; ar->id.bmi_board_id = board_id; From ef751ca54602d380873b33580eb1f1da19c6b592 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 29 Dec 2016 16:12:09 +0200 Subject: [PATCH 257/312] ath10k: fix potential memory leak in ath10k_wmi_tlv_op_pull_fw_stats() [ Upstream commit 097e46d2ae90265d1afe141ba6208ba598b79e01 ] ath10k_wmi_tlv_op_pull_fw_stats() uses tb = ath10k_wmi_tlv_parse_alloc(...) function, which allocates memory. If any of the three error-paths are taken, this tb needs to be freed. 
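As a side note on the pattern (a kernel-style sketch, not the ath10k code; the function name and the MIN_LEN constant are made up for illustration): an alternative to calling kfree() on every early return is to funnel all error paths through a single exit label, which makes it harder to forget the free when a new check is added later.

static int parse_stats(const void *data, size_t len)
{
	void *tb;
	int ret = 0;

	tb = kmalloc(len, GFP_KERNEL);   /* plays the role of the parsed tlv table */
	if (!tb)
		return -ENOMEM;

	if (len < MIN_LEN) {             /* first validation step */
		ret = -EPROTO;
		goto out_free;
	}

	/* ... further checks, each failing path does "goto out_free" ... */

out_free:
	kfree(tb);                       /* single place that releases the buffer */
	return ret;
}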
Signed-off-by: Christian Lamparter Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 6fbd17b69469..02eea3c3b5d3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_pdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_vdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_peer *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); From 7e920566c352c9434010cbaf6a5ccd82c3ce73cf Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Thu, 15 Dec 2016 11:23:24 +0200 Subject: [PATCH 258/312] ath10k: set CTS protection VDEV param only if VDEV is up [ Upstream commit 7cfe0455ee1218add152e986b89b4bb8dbeafcdd ] The cts protection vdev parameter, in new QCA9377 TF2.0 firmware, requires bss peer to be created for the STATION vdev type. bss peer is being allocated by the firmware after vdev_start/_up commands. mac80211 may call the cts protection setup at any time, so the we needs to track the situation and defer the cts configuration to prevent firmware asserts, like below: [00]: 0x05020001 0x000015B3 0x0099ACE2 0x00955B31 [04]: 0x0099ACE2 0x00060730 0x00000004 0x00000000 [08]: 0x0044C754 0x00412C10 0x00000000 0x00409C54 [12]: 0x00000009 0x00000000 0x00952F6C 0x00952F77 [16]: 0x00952CC4 0x00910712 0x00000000 0x00000000 [20]: 0x4099ACE2 0x0040E858 0x00421254 0x004127F4 [24]: 0x8099B9B2 0x0040E8B8 0x00000000 0xC099ACE2 [28]: 0x800B75CB 0x0040E8F8 0x00000007 0x00005008 [32]: 0x809B048A 0x0040E958 0x00000010 0x00433B10 [36]: 0x809AFBBC 0x0040E9A8 0x0042BB74 0x0042BBBC [40]: 0x8091D252 0x0040E9C8 0x0042BBBC 0x00000001 [44]: 0x809FFA45 0x0040EA78 0x0043D3E4 0x0042C2C8 [48]: 0x809FCEF4 0x0040EA98 0x0043D3E4 0x00000001 [52]: 0x80911210 0x0040EAE8 0x00000010 0x004041D0 [56]: 0x80911154 0x0040EB28 0x00400000 0x00000000 Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/mac.c | 51 ++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index cf3baf3185e8..bed8d89fe3a0 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1127,6 +1127,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar) return ath10k_monitor_stop(ar); } +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (!arvif->is_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); + return false; + } + + return true; 
+} + +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->protection_mode; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", + arvif->vdev_id, arvif->use_cts_prot); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->use_cts_prot ? 1 : 0); +} + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -4788,20 +4818,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", - arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); - vdev_param = ar->wmi.vdev_param->protection_mode; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, - info->use_cts_prot ? 1 : 0); - if (ret) - ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", - info->use_cts_prot, arvif->vdev_id, ret); + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -6713,6 +6741,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_up = true; } + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); return 0; From 31447ebb1a863b611676e2787db7160e19bd7e3c Mon Sep 17 00:00:00 2001 From: Gabriele Mazzotta Date: Sat, 24 Dec 2016 19:50:00 +0100 Subject: [PATCH 259/312] ALSA: hda - Apply ALC269_FIXUP_NO_SHUTUP on HDA_FIXUP_ACT_PROBE [ Upstream commit 972aa2c708703c21f14eb958b37e82aae2530e44 ] Setting shutup when the action is HDA_FIXUP_ACT_PRE_PROBE might not have the desired effect since it could be overridden by another more generic shutup function. Prevent this by setting the more specific shutup function on HDA_FIXUP_ACT_PROBE. Signed-off-by: Gabriele Mazzotta Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 64085a512dbf..e5730a7d0480 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4404,7 +4404,7 @@ static void alc_no_shutup(struct hda_codec *codec) static void alc_fixup_no_shutup(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - if (action == HDA_FIXUP_ACT_PRE_PROBE) { + if (action == HDA_FIXUP_ACT_PROBE) { struct alc_spec *spec = codec->spec; spec->shutup = alc_no_shutup; } From f0d8fb74479b25ad56d25530ea5efc2881b02ad9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Dec 2016 08:36:38 +0000 Subject: [PATCH 260/312] drm: Apply range restriction after color adjustment when allocation [ Upstream commit 3db93756b501e5f0a3951c79cfa9ed43c26d3455 ] mm->color_adjust() compares the hole with its neighbouring nodes. They only abutt before we restrict the hole, so we have to apply color_adjust before we apply the range restriction. 
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161222083641.2691-36-chris@chris-wilson.co.uk Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_mm.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04de6fd88f8c..521b5bddb99c 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -262,14 +262,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, BUG_ON(!hole_node->hole_follows || node->allocated); - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - if (mm->color_adjust) mm->color_adjust(hole_node, color, &adj_start, &adj_end); + adj_start = max(adj_start, start); + adj_end = min(adj_end, end); + if (flags & DRM_MM_CREATE_TOP) adj_start = adj_end - size; @@ -475,17 +473,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ flags & DRM_MM_SEARCH_BELOW) { u64 hole_size = adj_end - adj_start; - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - if (mm->color_adjust) { mm->color_adjust(entry, color, &adj_start, &adj_end); if (adj_end <= adj_start) continue; } + adj_start = max(adj_start, start); + adj_end = min(adj_end, end); + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; From 9825826ed7bd7ab22e835b88508f00057b2848c0 Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Thu, 8 Dec 2016 10:15:50 +0900 Subject: [PATCH 261/312] mac80211: Remove invalid flag operations in mesh TSF synchronization [ Upstream commit 76f43b4c0a9337af22827d78de4f2b8fd5328489 ] mesh_sync_offset_adjust_tbtt() implements Extensible synchronization framework ([1] 13.13.2 Extensible synchronization framework). It shall not operate the flag "TBTT Adjusting subfield" ([1] 8.4.2.100.8 Mesh Capability), since it is used only for MBCA ([1] 13.13.4 Mesh beacon collision avoidance, see 13.13.4.4.3 TBTT scanning and adjustment procedures for detail). So this patch remove the flag operations. [1] IEEE Std 802.11 2012 Signed-off-by: Masashi Honma [remove adjusting_tbtt entirely, since it's now unused] Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/ieee80211_i.h | 1 - net/mac80211/mesh.c | 3 --- net/mac80211/mesh_sync.c | 11 ----------- 3 files changed, 15 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6837a46ca4a2..7b271f3ded6b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -682,7 +682,6 @@ struct ieee80211_if_mesh { const struct ieee80211_mesh_sync_ops *sync_ops; s64 sync_offset_clockdrift_max; spinlock_t sync_offset_lock; - bool adjusting_tbtt; /* mesh power save */ enum nl80211_mesh_power_mode nonpeer_pm; int ps_peers_light_sleep; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 9063e8e736ad..9e1ded80a992 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -295,8 +295,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ *pos |= ifmsh->ps_peers_deep_sleep ? IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; - *pos++ |= ifmsh->adjusting_tbtt ? 
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; *pos++ = 0x00; return 0; @@ -866,7 +864,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ifmsh->mesh_cc_id = 0; /* Disabled */ /* register sync ops from extensible synchronization framework */ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); - ifmsh->adjusting_tbtt = false; ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index 64bc22ad9496..16ed43fe4841 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -119,7 +119,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, */ if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { - clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr); goto no_sync; @@ -168,11 +167,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - u8 cap; WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); WARN_ON(!rcu_read_lock_held()); - cap = beacon->meshconf->meshconf_cap; spin_lock_bh(&ifmsh->sync_offset_lock); @@ -186,21 +183,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n", ifmsh->sync_offset_clockdrift_max); set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); - - ifmsh->adjusting_tbtt = true; } else { msync_dbg(sdata, "TBTT : max clockdrift=%lld; too small to adjust\n", (long long)ifmsh->sync_offset_clockdrift_max); ifmsh->sync_offset_clockdrift_max = 0; - - ifmsh->adjusting_tbtt = false; } spin_unlock_bh(&ifmsh->sync_offset_lock); - - beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ? - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap : - ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap; } static const struct sync_method sync_methods[] = { From 7e927748d4db4d248e919e44f4dd4891ba6b4d42 Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Wed, 30 Nov 2016 09:06:04 +0900 Subject: [PATCH 262/312] mac80211: Suppress NEW_PEER_CANDIDATE event if no room [ Upstream commit 11197d006bcfabf0173a7820a163fcaac420d10e ] Previously, kernel sends NEW_PEER_CANDIDATE event to user land even if the found peer does not have any room to accept other peer. This causes continuous connection trials. 
Signed-off-by: Masashi Honma Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/mesh_plink.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index bd3d55eb21d4..9f02e54ad2a5 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -495,12 +495,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr, /* Userspace handles station allocation */ if (sdata->u.mesh.user_mpm || - sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) - cfg80211_notify_new_peer_candidate(sdata->dev, addr, - elems->ie_start, - elems->total_len, - GFP_KERNEL); - else + sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + if (mesh_peer_accepts_plinks(elems) && + mesh_plink_availables(sdata)) + cfg80211_notify_new_peer_candidate(sdata->dev, addr, + elems->ie_start, + elems->total_len, + GFP_KERNEL); + } else sta = __mesh_sta_info_alloc(sdata, addr); return sta; From 266913b390c38f101971dfc6c1a5084e37fd41a7 Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sat, 3 Dec 2016 17:24:17 +0800 Subject: [PATCH 263/312] iio: light: fix improper return value [ Upstream commit db4e5376d058af8924fafd0520a0942d92538d0e ] In function cm3232_reg_init(), it returns 0 even if the last call to i2c_smbus_write_byte_data() returns a negative value (indicates error). As a result, the return value may be inconsistent with the execution status, and the caller of cm3232_reg_init() will not be able to detect the error. This patch fixes the bug. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188641 Signed-off-by: Pan Bian Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iio/light/cm3232.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c index fe89b6823217..263e97235ea0 100644 --- a/drivers/iio/light/cm3232.c +++ b/drivers/iio/light/cm3232.c @@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip) if (ret < 0) dev_err(&chip->client->dev, "Error writing reg_cmd\n"); - return 0; + return ret; } /** From 8537e96e4351dc80efe1b713c058da488b3afd29 Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sat, 3 Dec 2016 21:44:30 +0800 Subject: [PATCH 264/312] staging: iio: cdc: fix improper return value [ Upstream commit 91ca1a8c584f55857b1f6ab20a1d3a1ce7a559bb ] At the end of function ad7150_write_event_config(), directly returns 0. As a result, the errors will be ignored by the callers. It may be better to return variable "ret". 
Signed-off-by: Pan Bian Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/iio/cdc/ad7150.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index e8d0ff2d5c9b..808d6ebf6c94 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -272,7 +272,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev, error_ret: mutex_unlock(&chip->state_lock); - return 0; + return ret; } static int ad7150_read_event_value(struct iio_dev *indio_dev, From 85f286d6f8cddb199d40fbf6a0dfba2eb4f94bd5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 14 Dec 2016 13:28:05 +0100 Subject: [PATCH 265/312] spi: SPI_FSL_DSPI should depend on HAS_DMA [ Upstream commit dadab2d4e3cf708ceba22ecddd94aedfecb39199 ] If NO_DMA=y: ERROR: "bad_dma_ops" [drivers/spi/spi-fsl-dspi.ko] undefined! Add a dependency on HAS_DMA to fix this. Signed-off-by: Geert Uytterhoeven Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/spi/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 8b9c2a38d1cc..b0a24dedd1ed 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -315,6 +315,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO + depends on HAS_DMA depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST help This enables support for the Freescale DSPI controller in master From a1e4f6a151035a549d1192c03ff82c3d81da18ce Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 11 Dec 2016 20:46:51 +0100 Subject: [PATCH 266/312] netfilter: nft_queue: use raw_smp_processor_id() [ Upstream commit c2e756ff9e699865d294cdc112acfc36419cf5cc ] Using smp_processor_id() causes splats with PREEMPT_RCU: [19379.552780] BUG: using smp_processor_id() in preemptible [00000000] code: ping/32389 [19379.552793] caller is debug_smp_processor_id+0x17/0x19 [...] [19379.552823] Call Trace: [19379.552832] [] dump_stack+0x67/0x90 [19379.552837] [] check_preemption_disabled+0xe5/0xf5 [19379.552842] [] debug_smp_processor_id+0x17/0x19 [19379.552849] [] nft_queue_eval+0x35/0x20c [nft_queue] No need to disable preemption since we only fetch the numeric value, so let's use raw_smp_processor_id() instead. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nft_queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index 61d216eb7917..5d189c11d208 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c @@ -37,7 +37,7 @@ static void nft_queue_eval(const struct nft_expr *expr, if (priv->queues_total > 1) { if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) { - int cpu = smp_processor_id(); + int cpu = raw_smp_processor_id(); queue = priv->queuenum + cpu % priv->queues_total; } else { From 47b99a3306d08c03467b0efe13fc1bc1721afa15 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 13 Dec 2016 13:59:33 +0100 Subject: [PATCH 267/312] netfilter: nf_tables: fix oob access [ Upstream commit 3e38df136e453aa69eb4472108ebce2fb00b1ba6 ] BUG: KASAN: slab-out-of-bounds in nf_tables_rule_destroy+0xf1/0x130 at addr ffff88006a4c35c8 Read of size 8 by task nft/1607 When we've destroyed last valid expr, nft_expr_next() returns an invalid expr. 
We must not dereference it unless it passes != nft_expr_last() check. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_tables_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2cb429d34c03..120e9ae04db3 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1996,7 +1996,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, * is called on error from nf_tables_newrule(). */ expr = nft_expr_first(rule); - while (expr->ops && expr != nft_expr_last(rule)) { + while (expr != nft_expr_last(rule) && expr->ops) { nf_tables_expr_destroy(ctx, expr); expr = nft_expr_next(expr); } From e88f3fb0e34059c6d697802dc385c756129c17a1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 8 Dec 2016 13:05:43 +0000 Subject: [PATCH 268/312] ASoC: rsnd: don't double free kctrl [ Upstream commit 0ea617a298dcdc2251b4e10f83ac3f3e627b66e3 ] On an error, snd_ctl_add already free's kctrl, so calling snd_ctl_free_one to free it again leads to a double free error. Fix this by removing the extraneous snd_ctl_free_one call. Issue found using static analysis with CoverityScan, CID 1372908 Signed-off-by: Colin Ian King Acked-by: Kuninori Morimoto Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/sh/rcar/core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 362446c36c9e..e00dfbec22c5 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1049,10 +1049,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod, return -ENOMEM; ret = snd_ctl_add(card, kctrl); - if (ret < 0) { - snd_ctl_free_one(kctrl); + if (ret < 0) return ret; - } cfg->update = update; cfg->card = card; From b63209c78f9ec90b1cb43e458918b89f53e0db1b Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sun, 4 Dec 2016 12:51:53 +0800 Subject: [PATCH 269/312] btrfs: return the actual error value from from btrfs_uuid_tree_iterate [ Upstream commit 73ba39ab9307340dc98ec3622891314bbc09cc2e ] In function btrfs_uuid_tree_iterate(), errno is assigned to variable ret on errors. However, it directly returns 0. It may be better to return ret. This patch also removes the warning, because the caller already prints a warning. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188731 Signed-off-by: Pan Bian Reviewed-by: Omar Sandoval [ edited subject ] Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/uuid-tree.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 778282944530..837a9a8d579e 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -348,7 +348,5 @@ skip: out: btrfs_free_path(path); - if (ret) - btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret); - return 0; + return ret; } From 681b2239862dd88abf28b73e17e4f48af443f72e Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Tue, 20 Dec 2016 10:29:12 +0000 Subject: [PATCH 270/312] ASoC: wm_adsp: Don't overrun firmware file buffer when reading region data [ Upstream commit 1cab2a84f470e15ecc8e5143bfe9398c6e888032 ] Protect against corrupt firmware files by ensuring that the length we get for the data in a region actually lies within the available firmware file data buffer. 
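The invariant being enforced is that a region header plus its payload must end inside the firmware image. A hypothetical overflow-safe helper illustrating the same check (sketch only, not part of this patch):

    static bool region_fits(size_t pos, size_t hdr_len, u32 payload_len,
                            size_t fw_size)
    {
        if (pos > fw_size || hdr_len > fw_size - pos)
            return false;
        return payload_len <= fw_size - pos - hdr_len;
    }

The checks added below use the additive form, which is how the fix was written; the helper above is just one way to express the same comparison so it cannot wrap around.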
Signed-off-by: Richard Fitzgerald Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/wm_adsp.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 0bb415a28723..f1f990b325ad 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1060,7 +1060,7 @@ static int wm_adsp_load(struct wm_adsp *dsp) const struct wmfw_region *region; const struct wm_adsp_region *mem; const char *region_name; - char *file, *text; + char *file, *text = NULL; struct wm_adsp_buf *buf; unsigned int reg; int regions = 0; @@ -1221,10 +1221,21 @@ static int wm_adsp_load(struct wm_adsp *dsp) regions, le32_to_cpu(region->len), offset, region_name); + if ((pos + le32_to_cpu(region->len) + sizeof(*region)) > + firmware->size) { + adsp_err(dsp, + "%s.%d: %s region len %d bytes exceeds file length %zu\n", + file, regions, region_name, + le32_to_cpu(region->len), firmware->size); + ret = -EINVAL; + goto out_fw; + } + if (text) { memcpy(text, region->data, le32_to_cpu(region->len)); adsp_info(dsp, "%s: %s\n", file, text); kfree(text); + text = NULL; } if (reg) { @@ -1269,6 +1280,7 @@ out_fw: regmap_async_complete(regmap); wm_adsp_buf_free(&buf_list); release_firmware(firmware); + kfree(text); out: kfree(file); @@ -1730,6 +1742,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) } if (reg) { + if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) > + firmware->size) { + adsp_err(dsp, + "%s.%d: %s region len %d bytes exceeds file length %zu\n", + file, blocks, region_name, + le32_to_cpu(blk->len), + firmware->size); + ret = -EINVAL; + goto out_fw; + } + buf = wm_adsp_buf_alloc(blk->data, le32_to_cpu(blk->len), &buf_list); From eedd29f51078395ce8db909cf1130bd468d70ec8 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 20 Dec 2016 12:58:10 +0100 Subject: [PATCH 271/312] s390/kbuild: enable modversions for symbols exported from asm [ Upstream commit cabab3f9f5ca077535080b3252e6168935b914af ] s390 version of commit 334bb7738764 ("x86/kbuild: enable modversions for symbols exported from asm") so we get also rid of all these warnings: WARNING: EXPORT symbol "_mcount" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "memcpy" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "memmove" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "memset" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "save_fpu_regs" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "sie64a" [vmlinux] version generation failed, symbol will not be versioned. WARNING: EXPORT symbol "sie_exit" [vmlinux] version generation failed, symbol will not be versioned. 
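The mechanism mirrors the x86 change referenced above: asm/asm-prototypes.h pulls in headers that provide C prototypes for the symbols exported from assembly, so genksyms can parse them and attach CRCs. A generic sketch of such a header (the include list is an illustration, see the new s390 file in the diff below for the real one):

    /* arch/<arch>/include/asm/asm-prototypes.h, sketch only */
    #include <linux/ftrace.h>   /* e.g. _mcount */
    #include <asm/fpu/api.h>    /* e.g. save_fpu_regs */

No code changes are needed beyond adding the header; the kbuild machinery introduced by the x86 commit runs genksyms over it when versioning symbols exported from .S files.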
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/asm-prototypes.h | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 arch/s390/include/asm/asm-prototypes.h diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..2c3413b0ca52 --- /dev/null +++ b/arch/s390/include/asm/asm-prototypes.h @@ -0,0 +1,8 @@ +#ifndef _ASM_S390_PROTOTYPES_H + +#include +#include +#include +#include + +#endif /* _ASM_S390_PROTOTYPES_H */ From c73eb1e0cc5675ebb584b3824b71f5c20ca33fc9 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 22 Dec 2016 08:19:46 +0100 Subject: [PATCH 272/312] xen: xenbus driver must not accept invalid transaction ids [ Upstream commit 639b08810d6ad74ded2c5f6e233c4fcb9d147168 ] When accessing Xenstore in a transaction the user is specifying a transaction id which he normally obtained from Xenstore when starting the transaction. Xenstore is validating a transaction id against all known transaction ids of the connection the request came in. As all requests of a domain not being the one where Xenstore lives share one connection, validation of transaction ids of different users of Xenstore in that domain should be done by the kernel of that domain being the multiplexer between the Xenstore users in that domain and Xenstore. In order to prohibit one Xenstore user "hijacking" a transaction from another user the xenbus driver has to verify a given transaction id against all known transaction ids of the user before forwarding it to Xenstore. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/xen/xenbus/xenbus_dev_frontend.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0e0eb10f82a0..816a0e08ef10 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } - } else if (msg_type == XS_TRANSACTION_END) { + } else if (u->u.msg.tx_id != 0) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; From 3d7214a338d72a66739e83b32fe6e78688051ecc Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 28 Nov 2017 12:29:45 +0100 Subject: [PATCH 273/312] Revert "sctp: do not peel off an assoc from one netns to another one" This reverts commit 2a0e60907e54dad75e9b3568d02bec11d6e74f6b which is commit df80cd9b28b9ebaa284a41df611dbf3a2d05ca74 upstream as I messed up by applying it to the tree twice. Reported-by: Michal Kubecek Cc: ChunYu Wang Cc: Xin Long Cc: Marcelo Ricardo Leitner Cc: Neil Horman Cc: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 272edd7748a0..7f0f689b8d2b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4453,10 +4453,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) struct socket *sock; int err = 0; - /* Do not peel off from one netns to another one. */ - if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) - return -EINVAL; - /* Do not peel off from one netns to another one. 
*/ if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) return -EINVAL; From 08c15ad2e6278a5fe1b209e8fcdbd2d235c48f34 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 30 Nov 2017 08:37:28 +0000 Subject: [PATCH 274/312] Linux 4.4.103 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9e036fac9c04..f5a51cd7ca49 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 102 +SUBLEVEL = 103 EXTRAVERSION = NAME = Blurry Fish Butt From 84baff98495f8e1910890d70e564a497b130669e Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Mon, 20 Nov 2017 19:26:02 +0900 Subject: [PATCH 275/312] UPSTREAM: net: xfrm: allow clearing socket xfrm policies. Currently it is possible to add or update socket policies, but not clear them. Therefore, once a socket policy has been applied, the socket cannot be used for unencrypted traffic. This patch allows (privileged) users to clear socket policies by passing in a NULL pointer and zero length argument to the {IP,IPV6}_{IPSEC,XFRM}_POLICY setsockopts. This results in both the incoming and outgoing policies being cleared. The simple approach taken in this patch cannot clear socket policies in only one direction. If desired this could be added in the future, for example by continuing to pass in a length of zero (which currently is guaranteed to return EMSGSIZE) and making the policy be a pointer to an integer that contains one of the XFRM_POLICY_{IN,OUT} enum values. An alternative would have been to interpret the length as a signed integer and use XFRM_POLICY_IN (i.e., 0) to clear the input policy and -XFRM_POLICY_OUT (i.e., -1) to clear the output policy. Bug: 65857891 Tested: https://android-review.googlesource.com/539816 Signed-off-by: Lorenzo Colitti Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 2 +- net/xfrm/xfrm_state.c | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4096f699ba00..5b3e5f54c79e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1307,7 +1307,7 @@ EXPORT_SYMBOL(xfrm_policy_delete); int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) { - struct net *net = xp_net(pol); + struct net *net = sock_net(sk); struct xfrm_policy *old_pol; #ifdef CONFIG_XFRM_SUB_POLICY diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c0aa1b18fe8f..7944daeb7378 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1845,6 +1845,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; + if (!optval && !optlen) { + xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); + xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); + __sk_dst_reset(sk); + return 0; + } + if (optlen <= 0 || optlen > PAGE_SIZE) return -EMSGSIZE; From 6d7ed3d3cf8119dc3547c8f8cfd1af2c3083ed86 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:37:32 +0100 Subject: [PATCH 276/312] ARM: 8478/2: arm/arm64: add arm-smccc Adds helpers to do SMC and HVC based on ARM SMC Calling Convention. CONFIG_HAVE_ARM_SMCCC is enabled for architectures that may support the SMC or HVC instruction. It's the responsibility of the caller to know if the SMC instruction is supported by the platform. This patch doesn't provide an implementation of the declared functions. Later patches will bring in implementations and set CONFIG_HAVE_ARM_SMCCC for ARM and ARM64 respectively. 
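For reference, a caller is expected to use the helpers roughly as follows once an implementation is in place (illustrative sketch; the SiP function ID below is made up, not a real firmware interface):

    #include <linux/arm-smccc.h>

    #define EXAMPLE_SIP_GET_VERSION					\
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,	\
                           ARM_SMCCC_OWNER_SIP, 0x1)

    static unsigned long example_sip_get_version(void)
    {
        struct arm_smccc_res res;

        arm_smccc_smc(EXAMPLE_SIP_GET_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
        return res.a0;	/* result registers come back in res.a0..a3 */
    }

As noted above, it remains the caller's job to know whether SMC or HVC is the right conduit on a given platform.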
Change-Id: I36d7927543c5bbb5313417e93e45fec1fb9997af Reviewed-by: Lorenzo Pieralisi Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit 98dd64f34f47ce19b388d9015f767f48393a81eb) Signed-off-by: Victor Chong --- drivers/firmware/Kconfig | 3 ++ include/linux/arm-smccc.h | 104 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 include/linux/arm-smccc.h diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index cf478fe6b335..49a3a1185bb6 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -173,6 +173,9 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config HAVE_ARM_SMCCC + bool + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 000000000000..b5abfda80465 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include +#include + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make SMC calls following SMC Calling Convention. 
+ * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. + */ +asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +/** + * arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. + */ +asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +#endif /*__LINUX_ARM_SMCCC_H*/ From e9f7e56e94604d8ff2647c4a79ca751b67b1608d Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:42:55 +0100 Subject: [PATCH 277/312] ARM: 8479/2: add implementation for arm-smccc Adds implementation for arm-smccc and enables CONFIG_HAVE_SMCCC for architectures that may support arm-smccc. It's the responsibility of the caller to know if the SMC instruction is supported by the platform. Change-Id: I11bacaf436a1178fee13169f8f65214011044783 Reviewed-by: Lars Persson Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit b329f95d70f3f955093e9a2b18ac1ed3587a8f73) Signed-off-by: Victor Chong --- arch/arm/Kconfig | 1 + arch/arm/kernel/Makefile | 2 ++ arch/arm/kernel/armksyms.c | 6 ++++ arch/arm/kernel/smccc-call.S | 62 ++++++++++++++++++++++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100644 arch/arm/kernel/smccc-call.S diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 586134e01928..e4bde66b34e8 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -39,6 +39,7 @@ config ARM select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK + select HAVE_ARM_SMCCC if CPU_V7 select HAVE_BPF_JIT select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 3c789496297f..599c950468fc 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -91,4 +91,6 @@ obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif +obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f89811fb9a55..7e45f69a0ddc 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc); EXPORT_SYMBOL(__pv_phys_pfn_offset); EXPORT_SYMBOL(__pv_offset); #endif + +#ifdef CONFIG_HAVE_ARM_SMCCC +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); +#endif diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S new file mode 100644 index 000000000000..2e48b674aab1 --- /dev/null +++ b/arch/arm/kernel/smccc-call.S @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the 
Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#include +#include +#include + + /* + * Wrap c macros in asm macros to delay expansion until after the + * SMCCC asm macro is expanded. + */ + .macro SMCCC_SMC + __SMC(0) + .endm + + .macro SMCCC_HVC + __HVC(0) + .endm + + .macro SMCCC instr +UNWIND( .fnstart) + mov r12, sp + push {r4-r7} +UNWIND( .save {r4-r7}) + ldm r12, {r4-r7} + \instr + pop {r4-r7} + ldr r12, [sp, #(4 * 4)] + stm r12, {r0-r3} + bx lr +UNWIND( .fnend) + .endm + +/* + * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC SMCCC_SMC +ENDPROC(arm_smccc_smc) + +/* + * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC SMCCC_HVC +ENDPROC(arm_smccc_hvc) From a794759d492ef0970327134785895942cd2eff92 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:44:32 +0100 Subject: [PATCH 278/312] ARM: 8480/2: arm64: add implementation for arm-smccc Adds implementation for arm-smccc and enables CONFIG_HAVE_SMCCC. Change-Id: Iee20985ae688c68c5f3d6494545718c70057661b Acked-by: Will Deacon Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit 14457459f9ca2ff8521686168ea179edc3a56a44) Signed-off-by: Victor Chong --- arch/arm64/Kconfig | 1 + arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/arm64ksyms.c | 5 ++++ arch/arm64/kernel/asm-offsets.c | 3 +++ arch/arm64/kernel/smccc-call.S | 43 +++++++++++++++++++++++++++++++++ 5 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kernel/smccc-call.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index da007c26de52..1f7ba1848c64 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -99,6 +99,7 @@ config ARM64 select SYSCTL_EXCEPTION_TRACE select HAVE_CONTEXT_TRACKING select THREAD_INFO_IN_TASK + select HAVE_ARM_SMCCC help ARM 64-bit (AArch64) Linux support. 
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index b570e9e36812..08aebac80e3c 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ - smp.o smp_spin_table.o topology.o + smp.o smp_spin_table.o topology.o smccc-call.o extra-$(CONFIG_EFI) := efi-entry.o diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index c654df05b7d7..2dc44406a7ad 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -68,3 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif + + /* arm-smccc */ +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 24e65f0897ee..3288c0cae9e6 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -28,6 +28,7 @@ #include #include #include +#include int main(void) { @@ -179,5 +180,7 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); + DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); return 0; } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S new file mode 100644 index 000000000000..ae0496fa4235 --- /dev/null +++ b/arch/arm64/kernel/smccc-call.S @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License Version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include + + .macro SMCCC instr + .cfi_startproc + \instr #0 + ldr x4, [sp] + stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] + stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] + ret + .cfi_endproc + .endm + +/* + * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC smc +ENDPROC(arm_smccc_smc) + +/* + * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC hvc +ENDPROC(arm_smccc_hvc) From 11b4c995ebcb1a3b92602ad25a65c21d842b3c5e Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:46:47 +0100 Subject: [PATCH 279/312] ARM: 8481/2: drivers: psci: replace psci firmware calls Switch to use a generic interface for issuing SMC/HVC based on ARM SMC Calling Convention. Removes now the now unused psci-call.S. 
Change-Id: Ic3be85884c9d4726211346c6ecfff60ca51b08b7 Acked-by: Will Deacon Reviewed-by: Mark Rutland Tested-by: Mark Rutland Acked-by: Lorenzo Pieralisi Tested-by: Lorenzo Pieralisi Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit e679660dbb8347f275fe5d83a5dd59c1fb6c8e63) Signed-off-by: Victor Chong --- arch/arm/Kconfig | 2 +- arch/arm/kernel/Makefile | 1 - arch/arm/kernel/psci-call.S | 31 ------------------------------- arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/psci-call.S | 28 ---------------------------- drivers/firmware/psci.c | 23 +++++++++++++++++++++-- 6 files changed, 23 insertions(+), 64 deletions(-) delete mode 100644 arch/arm/kernel/psci-call.S delete mode 100644 arch/arm64/kernel/psci-call.S diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e4bde66b34e8..07ac8f7df8bb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1492,7 +1492,7 @@ config HOTPLUG_CPU config ARM_PSCI bool "Support for the ARM Power State Coordination Interface (PSCI)" - depends on CPU_V7 + depends on HAVE_ARM_SMCCC select ARM_PSCI_FW help Say Y here if you want Linux to communicate with system firmware diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 599c950468fc..82bdac0f2804 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -87,7 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S deleted file mode 100644 index a78e9e1e206d..000000000000 --- a/arch/arm/kernel/psci-call.S +++ /dev/null @@ -1,31 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2015 ARM Limited - * - * Author: Mark Rutland - */ - -#include - -#include -#include - -/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - __HVC(0) - bx lr -ENDPROC(__invoke_psci_fn_hvc) - -/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_smc) - __SMC(0) - bx lr -ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 08aebac80e3c..d10034327423 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -14,7 +14,7 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S deleted file mode 100644 index cf83e61cd3b5..000000000000 --- a/arch/arm64/kernel/psci-call.S +++ /dev/null @@ -1,28 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2015 ARM Limited - * - * Author: Will Deacon - */ - -#include - -/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - hvc #0 - ret -ENDPROC(__invoke_psci_fn_hvc) - -/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_smc) - smc #0 - ret -ENDPROC(__invoke_psci_fn_smc) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index ae70d2485ca1..b38305ba0965 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "psci: " fmt +#include #include #include #include @@ -58,8 +59,6 @@ struct psci_operations psci_ops; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); -asmlinkage psci_fn __invoke_psci_fn_hvc; -asmlinkage psci_fn __invoke_psci_fn_smc; static psci_fn *invoke_psci_fn; enum psci_function { @@ -107,6 +106,26 @@ bool psci_power_state_is_valid(u32 state) return !(state & ~valid_mask); } +static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + static int psci_to_linux_errno(int errno) { switch (errno) { From c51866baa4fba000c0075f8625139014a6c248f5 Mon Sep 17 00:00:00 2001 From: Gustavo Padovan Date: Tue, 26 Apr 2016 12:32:27 -0300 Subject: [PATCH 280/312] kernel.h: add u64_to_user_ptr() This function had copies in 3 different files. Unify them in kernel.h. 
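Typical use is turning a 64-bit pointer field of an ioctl payload into a __user pointer; a hedged example (the struct and function names here are hypothetical, not taken from the drivers touched below):

    struct mydrv_submit {
        __u64 data_ptr;		/* user buffer address carried as u64 */
        __u64 size;
    };

    static int mydrv_copy_in(void *dst, const struct mydrv_submit *args)
    {
        if (copy_from_user(dst, u64_to_user_ptr(args->data_ptr), args->size))
            return -EFAULT;
        return 0;
    }

Because the macro wraps typecheck(u64, x), passing anything other than a u64 fails at build time, which is the point of having one shared definition.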
Change-Id: Iff8fbe002d3ceb81068e9cbd0acdf5fd5be14783 Cc: Joe Perches Cc: Andrew Morton Cc: David Airlie Cc: Daniel Vetter Cc: Rob Clark Signed-off-by: Gustavo Padovan Acked-by: Daniel Vetter [drm/i915/] Acked-by: Rob Clark [drm/msm/] Acked-by: Lucas Stach [drm/etinav/] Acked-by: Maarten Lankhorst Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 3ed605bc8a0a688d8750a1e2eff39c854418c5cf) Signed-off-by: Victor Chong --- drivers/gpu/drm/i915/i915_drv.h | 5 ----- drivers/gpu/drm/i915/i915_gem.c | 14 +++++++------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 14 +++++++------- drivers/gpu/drm/msm/msm_gem_submit.c | 11 +++-------- include/linux/kernel.h | 7 +++++++ 5 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5044f2257e89..6fca39e1c419 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3475,11 +3475,6 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) return VGACNTRL; } -static inline void __user *to_user_ptr(u64 address) -{ - return (void __user *)(uintptr_t)address; -} - static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) { unsigned long j = msecs_to_jiffies(m); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f56af0aaafde..659b90657f36 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); + char __user *user_data = u64_to_user_ptr(args->data_ptr); int ret = 0; /* We manually control the domain here and pretend that it @@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_WRITE, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; @@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if (ret) goto out_unpin; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; offset = i915_gem_obj_ggtt_offset(obj) + args->offset; @@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int needs_clflush_before = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_READ, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; if (likely(!i915.prefault_disable)) { - ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr), args->size); if (ret) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6ed7d63a0688..c66307c58a7f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -492,7 
+492,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int remain, ret; - user_relocs = to_user_ptr(entry->relocs_ptr); + user_relocs = u64_to_user_ptr(entry->relocs_ptr); remain = entry->relocation_count; while (remain) { @@ -833,7 +833,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, u64 invalid_offset = (u64)-1; int j; - user_relocs = to_user_ptr(exec[i].relocs_ptr); + user_relocs = u64_to_user_ptr(exec[i].relocs_ptr); if (copy_from_user(reloc+total, user_relocs, exec[i].relocation_count * sizeof(*reloc))) { @@ -977,7 +977,7 @@ validate_exec_list(struct drm_device *dev, invalid_flags |= EXEC_OBJECT_NEEDS_GTT; for (i = 0; i < count; i++) { - char __user *ptr = to_user_ptr(exec[i].relocs_ptr); + char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr); int length; /* limited by fault_in_pages_readable() */ if (exec[i].flags & invalid_flags) @@ -1635,7 +1635,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1671,7 +1671,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { struct drm_i915_gem_exec_object __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) { @@ -1723,7 +1723,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec2_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec2_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1736,7 +1736,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ struct drm_i915_gem_exec_object2 __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); int i; for (i = 0; i < args->buffer_count; i++) { diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index f4eaccb191d4..2422be9a6828 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -28,11 +28,6 @@ #define BO_LOCKED 0x4000 #define BO_PINNED 0x2000 -static inline void __user *to_user_ptr(u64 address) -{ - return (void __user *)(uintptr_t)address; -} - static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, uint32_t nr) { @@ -80,7 +75,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_gem_object *obj; struct msm_gem_object *msm_obj; void __user *userptr = - to_user_ptr(args->bos + (i * sizeof(submit_bo))); + u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); if (unlikely(ret)) { @@ -278,7 +273,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob for (i = 0; i < nr_relocs; i++) { struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = - to_user_ptr(relocs + (i * sizeof(submit_reloc))); + u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); uint32_t iova, off; bool valid; @@ -378,7 +373,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, for (i = 0; i < args->nr_cmds; i++) { struct drm_msm_gem_submit_cmd submit_cmd; void __user *userptr = - to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint32_t iova; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 50220cab738c..05b63a1e9f84 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -53,6 +53,13 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ +} \ +) + /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be From 9aad2ea1a413dc97fadb344763cc80862734dd3a Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 21 May 2015 07:47:09 +0200 Subject: [PATCH 281/312] dt/bindings: add bindings for optee Introduces linaro prefix and adds bindings for ARM TrustZone based OP-TEE implementation. Change-Id: If4b626d0e8a14e2f1cdeac8aa0837e8ddc4dcbdd Acked-by: Rob Herring Signed-off-by: Jens Wiklander (cherry picked from commit c8bfafb1594435889b571b79325011e8b7fd087b) Signed-off-by: Victor Chong --- .../bindings/arm/firmware/linaro,optee-tz.txt | 31 +++++++++++++++++++ .../devicetree/bindings/vendor-prefixes.txt | 1 + 2 files changed, 32 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt new file mode 100644 index 000000000000..d38834c67dff --- /dev/null +++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt @@ -0,0 +1,31 @@ +OP-TEE Device Tree Bindings + +OP-TEE is a piece of software using hardware features to provide a Trusted +Execution Environment. The security can be provided with ARM TrustZone, but +also by virtualization or a separate chip. 
+ +We're using "linaro" as the first part of the compatible property for +the reference implementation maintained by Linaro. + +* OP-TEE based on ARM TrustZone required properties: + +- compatible : should contain "linaro,optee-tz" + +- method : The method of calling the OP-TEE Trusted OS. Permitted + values are: + + "smc" : SMC #0, with the register assignments specified + in drivers/tee/optee/optee_smc.h + + "hvc" : HVC #0, with the register assignments specified + in drivers/tee/optee/optee_smc.h + + + +Example: + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 98dc17507a84..7004fa94c368 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -128,6 +128,7 @@ lacie LaCie lantiq Lantiq Semiconductor lenovo Lenovo Group Ltd. lg LG Corporation +linaro Linaro Limited linux Linux-specific binding lsi LSI Corp. (LSI Logic) lltc Linear Technology Corporation From 048370cd16c142d16171e1ad3f91ee5f80d3b6dc Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Wed, 11 Mar 2015 14:39:39 +0100 Subject: [PATCH 282/312] tee: generic TEE subsystem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initial patch for generic TEE subsystem. This subsystem provides: * Registration/un-registration of TEE drivers. * Shared memory between normal world and secure world. * Ioctl interface for interaction with user space. * Sysfs implementation_id of TEE driver A TEE (Trusted Execution Environment) driver is a driver that interfaces with a trusted OS running in some secure environment, for example, TrustZone on ARM cpus, or a separate secure co-processor etc. The TEE subsystem can serve a TEE driver for a Global Platform compliant TEE, but it's not limited to only Global Platform TEEs. 
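For illustration only (not part of this patch), a minimal user-space sketch of the ioctl interface described above could look as follows. It assumes the uapi header added by this patch is installed as <linux/tee.h> and that udev exposes the first client device as /dev/tee0; both the device node name and this usage flow are assumptions, not something defined by the patch itself:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/tee.h>		/* uapi header introduced by this patch */

	int main(void)
	{
		struct tee_ioctl_version_data vers;
		int fd = open("/dev/tee0", O_RDWR);	/* node name is an assumption */

		if (fd < 0) {
			perror("open /dev/tee0");
			return 1;
		}
		/* TEE_IOC_VERSION fills in impl_id, impl_caps and gen_caps */
		if (ioctl(fd, TEE_IOC_VERSION, &vers) < 0) {
			perror("TEE_IOC_VERSION");
			close(fd);
			return 1;
		}
		printf("impl_id=%u impl_caps=0x%x gen_caps=0x%x\n",
		       vers.impl_id, vers.impl_caps, vers.gen_caps);
		close(fd);
		return 0;
	}
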
This patch builds on other similar implementations trying to solve the same problem: * "optee_linuxdriver" by among others Jean-michel DELORME and Emmanuel MICHEL * "Generic TrustZone Driver" by Javier González Change-Id: I35b763e23b706383df5013c429c510c68d7f4176 Acked-by: Andreas Dannenberg Tested-by: Jerome Forissier (HiKey) Tested-by: Volodymyr Babchuk (RCAR H3) Tested-by: Scott Branden Reviewed-by: Javier González Signed-off-by: Jens Wiklander (cherry picked from commit 967c9cca2cc50569efc65945325c173cecba83bd) Signed-off-by: Victor Chong --- Documentation/ioctl/ioctl-number.txt | 1 + MAINTAINERS | 7 + drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/tee/Kconfig | 8 + drivers/tee/Makefile | 4 + drivers/tee/tee_core.c | 893 +++++++++++++++++++++++++++ drivers/tee/tee_private.h | 129 ++++ drivers/tee/tee_shm.c | 358 +++++++++++ drivers/tee/tee_shm_pool.c | 156 +++++ include/linux/tee_drv.h | 277 +++++++++ include/uapi/linux/tee.h | 346 +++++++++++ 12 files changed, 2182 insertions(+) create mode 100644 drivers/tee/Kconfig create mode 100644 drivers/tee/Makefile create mode 100644 drivers/tee/tee_core.c create mode 100644 drivers/tee/tee_private.h create mode 100644 drivers/tee/tee_shm.c create mode 100644 drivers/tee/tee_shm_pool.c create mode 100644 include/linux/tee_drv.h create mode 100644 include/uapi/linux/tee.h diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 91261a32a573..b5ce7b6c3576 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -307,6 +307,7 @@ Code Seq#(hex) Include File Comments 0xA3 80-8F Port ACL in development: 0xA3 90-9F linux/dtlk.h +0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem 0xAA 00-3F linux/uapi/linux/userfaultfd.h 0xAB 00-1F linux/nbd.h 0xAC 00-1F linux/raw.h diff --git a/MAINTAINERS b/MAINTAINERS index ab65bbecb159..ef689b0afc53 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9361,6 +9361,13 @@ F: drivers/hwtracing/stm/ F: include/linux/stm.h F: include/uapi/linux/stm.h +TEE SUBSYSTEM +M: Jens Wiklander +S: Maintained +F: include/linux/tee_drv.h +F: include/uapi/linux/tee.h +F: drivers/tee/ + THUNDERBOLT DRIVER M: Andreas Noever S: Maintained diff --git a/drivers/Kconfig b/drivers/Kconfig index d2ac339de85f..63baceb6c118 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -198,4 +198,6 @@ source "drivers/hwtracing/intel_th/Kconfig" source "drivers/fpga/Kconfig" +source "drivers/tee/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 098997f2cc3a..a2a1fbd53bbe 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -173,3 +173,4 @@ obj-$(CONFIG_STM) += hwtracing/stm/ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_NVMEM) += nvmem/ obj-$(CONFIG_FPGA) += fpga/ +obj-$(CONFIG_TEE) += tee/ diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig new file mode 100644 index 000000000000..50c244ead46d --- /dev/null +++ b/drivers/tee/Kconfig @@ -0,0 +1,8 @@ +# Generic Trusted Execution Environment Configuration +config TEE + tristate "Trusted Execution Environment support" + select DMA_SHARED_BUFFER + select GENERIC_ALLOCATOR + help + This implements a generic interface towards a Trusted Execution + Environment (TEE). 
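As a further sketch (again not part of the patch), a defconfig fragment enabling the new subsystem might contain the lines below; CONFIG_OPTEE names the OP-TEE back-end driver added later in this series and is an assumption at this point in the series:

	# Generic TEE subsystem (char device, ioctl interface, shared memory)
	CONFIG_TEE=y
	# OP-TEE back-end driver (added by a later patch in this series)
	CONFIG_OPTEE=y
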
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile new file mode 100644 index 000000000000..ec64047a86e2 --- /dev/null +++ b/drivers/tee/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_TEE) += tee.o +tee-objs += tee_core.o +tee-objs += tee_shm.o +tee-objs += tee_shm_pool.o diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c new file mode 100644 index 000000000000..5c60bf4423e6 --- /dev/null +++ b/drivers/tee/tee_core.c @@ -0,0 +1,893 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "tee_private.h" + +#define TEE_NUM_DEVICES 32 + +#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) + +/* + * Unprivileged devices in the lower half range and privileged devices in + * the upper half range. + */ +static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES); +static DEFINE_SPINLOCK(driver_lock); + +static struct class *tee_class; +static dev_t tee_devt; + +static int tee_open(struct inode *inode, struct file *filp) +{ + int rc; + struct tee_device *teedev; + struct tee_context *ctx; + + teedev = container_of(inode->i_cdev, struct tee_device, cdev); + if (!tee_device_get(teedev)) + return -EINVAL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto err; + } + + ctx->teedev = teedev; + INIT_LIST_HEAD(&ctx->list_shm); + filp->private_data = ctx; + rc = teedev->desc->ops->open(ctx); + if (rc) + goto err; + + return 0; +err: + kfree(ctx); + tee_device_put(teedev); + return rc; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + struct tee_context *ctx = filp->private_data; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + + ctx->teedev->desc->ops->release(ctx); + mutex_lock(&ctx->teedev->mutex); + list_for_each_entry(shm, &ctx->list_shm, link) + shm->ctx = NULL; + mutex_unlock(&ctx->teedev->mutex); + kfree(ctx); + tee_device_put(teedev); + return 0; +} + +static int tee_ioctl_version(struct tee_context *ctx, + struct tee_ioctl_version_data __user *uvers) +{ + struct tee_ioctl_version_data vers; + + ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + if (copy_to_user(uvers, &vers, sizeof(vers))) + return -EFAULT; + return 0; +} + +static int tee_ioctl_shm_alloc(struct tee_context *ctx, + struct tee_ioctl_shm_alloc_data __user *udata) +{ + long ret; + struct tee_ioctl_shm_alloc_data data; + struct tee_shm *shm; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + data.id = -1; + + shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed 
immediately. + */ + tee_shm_put(shm); + return ret; +} + +static int params_from_user(struct tee_context *ctx, struct tee_param *params, + size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + params[n].attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + params[n].u.value.a = ip.a; + params[n].u.value.b = ip.b; + params[n].u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * If we fail to get a pointer to a shared memory + * object (and increase the ref count) from an + * identifier we return an error. All pointers that + * has been added in params have an increased ref + * count. It's the callers responibility to do + * tee_shm_put() on all resolved pointers. + */ + shm = tee_shm_get_from_id(ctx, ip.c); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + params[n].u.memref.shm_offs = ip.a; + params[n].u.memref.size = ip.b; + params[n].u.memref.shm = shm; + break; + default: + /* Unknown attribute */ + return -EINVAL; + } + } + return 0; +} + +static int params_to_user(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param __user *up = uparams + n; + struct tee_param *p = params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + if (put_user(p->u.value.a, &up->a) || + put_user(p->u.value.b, &up->b) || + put_user(p->u.value.c, &up->c)) + return -EFAULT; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (put_user((u64)p->u.memref.size, &up->b)) + return -EFAULT; + default: + break; + } + } + return 0; +} + +static bool param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +static int tee_ioctl_open_session(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_open_session_arg __user *uarg; + struct tee_ioctl_open_session_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + bool have_session = false; + + if (!ctx->teedev->desc->ops->open_session) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_open_session_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + 
rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params); + if (rc) + goto out; + have_session = true; + + if (put_user(arg.session, &uarg->session) || + put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + /* + * If we've succeeded to open the session but failed to communicate + * it back to user space, close the session again to avoid leakage. + */ + if (rc && have_session && ctx->teedev->desc->ops->close_session) + ctx->teedev->desc->ops->close_session(ctx, arg.session); + + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + + return rc; +} + +static int tee_ioctl_invoke(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_invoke_arg __user *uarg; + struct tee_ioctl_invoke_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + + if (!ctx->teedev->desc->ops->invoke_func) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_invoke_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params); + if (rc) + goto out; + + if (put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + return rc; +} + +static int tee_ioctl_cancel(struct tee_context *ctx, + struct tee_ioctl_cancel_arg __user *uarg) +{ + struct tee_ioctl_cancel_arg arg; + + if (!ctx->teedev->desc->ops->cancel_req) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id, + arg.session); +} + +static int +tee_ioctl_close_session(struct tee_context *ctx, + struct tee_ioctl_close_session_arg __user *uarg) +{ + struct tee_ioctl_close_session_arg arg; + + if (!ctx->teedev->desc->ops->close_session) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->close_session(ctx, arg.session); +} + +static int params_to_supp(struct tee_context *ctx, + struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param ip; + struct tee_param *p = params + n; + + ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + switch 
(p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + ip.a = p->u.value.a; + ip.b = p->u.value.b; + ip.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + ip.b = p->u.memref.size; + if (!p->u.memref.shm) { + ip.a = 0; + ip.c = (u64)-1; /* invalid shm id */ + break; + } + ip.a = p->u.memref.shm_offs; + ip.c = p->u.memref.shm->id; + break; + default: + ip.a = 0; + ip.b = 0; + ip.c = 0; + break; + } + + if (copy_to_user(uparams + n, &ip, sizeof(ip))) + return -EFAULT; + } + + return 0; +} + +static int tee_ioctl_supp_recv(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_recv_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 func; + + if (!ctx->teedev->desc->ops->supp_recv) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); + if (rc) + goto out; + + if (put_user(func, &uarg->func) || + put_user(num_params, &uarg->num_params)) { + rc = -EFAULT; + goto out; + } + + rc = params_to_supp(ctx, uarg->params, num_params, params); +out: + kfree(params); + return rc; +} + +static int params_from_supp(struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + p->attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + /* Only out and in/out values can be updated */ + p->u.value.a = ip.a; + p->u.value.b = ip.b; + p->u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * Only the size of the memref can be updated. + * Since we don't have access to the original + * parameters here, only store the supplied size. + * The driver will copy the updated size into the + * original parameters. 
+ */ + p->u.memref.shm = NULL; + p->u.memref.shm_offs = 0; + p->u.memref.size = ip.b; + break; + default: + memset(&p->u, 0, sizeof(p->u)); + break; + } + } + return 0; +} + +static int tee_ioctl_supp_send(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + long rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_send_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 ret; + + /* Not valid for this driver */ + if (!ctx->teedev->desc->ops->supp_send) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_send_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(ret, &uarg->ret) || + get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = params_from_supp(params, num_params, uarg->params); + if (rc) + goto out; + + rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params); +out: + kfree(params); + return rc; +} + +static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct tee_context *ctx = filp->private_data; + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case TEE_IOC_VERSION: + return tee_ioctl_version(ctx, uarg); + case TEE_IOC_SHM_ALLOC: + return tee_ioctl_shm_alloc(ctx, uarg); + case TEE_IOC_OPEN_SESSION: + return tee_ioctl_open_session(ctx, uarg); + case TEE_IOC_INVOKE: + return tee_ioctl_invoke(ctx, uarg); + case TEE_IOC_CANCEL: + return tee_ioctl_cancel(ctx, uarg); + case TEE_IOC_CLOSE_SESSION: + return tee_ioctl_close_session(ctx, uarg); + case TEE_IOC_SUPPL_RECV: + return tee_ioctl_supp_recv(ctx, uarg); + case TEE_IOC_SUPPL_SEND: + return tee_ioctl_supp_send(ctx, uarg); + default: + return -EINVAL; + } +} + +static const struct file_operations tee_fops = { + .owner = THIS_MODULE, + .open = tee_open, + .release = tee_release, + .unlocked_ioctl = tee_ioctl, + .compat_ioctl = tee_ioctl, +}; + +static void tee_release_device(struct device *dev) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + mutex_destroy(&teedev->mutex); + idr_destroy(&teedev->idr); + kfree(teedev); +} + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). 
+ * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data) +{ + struct tee_device *teedev; + void *ret; + int rc; + int offs = 0; + + if (!teedesc || !teedesc->name || !teedesc->ops || + !teedesc->ops->get_version || !teedesc->ops->open || + !teedesc->ops->release || !pool) + return ERR_PTR(-EINVAL); + + teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); + if (!teedev) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + if (teedesc->flags & TEE_DESC_PRIVILEGED) + offs = TEE_NUM_DEVICES / 2; + + spin_lock(&driver_lock); + teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); + if (teedev->id < TEE_NUM_DEVICES) + set_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + + if (teedev->id >= TEE_NUM_DEVICES) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + snprintf(teedev->name, sizeof(teedev->name), "tee%s%d", + teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "", + teedev->id - offs); + + teedev->dev.class = tee_class; + teedev->dev.release = tee_release_device; + teedev->dev.parent = dev; + + teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id); + + rc = dev_set_name(&teedev->dev, "%s", teedev->name); + if (rc) { + ret = ERR_PTR(rc); + goto err_devt; + } + + cdev_init(&teedev->cdev, &tee_fops); + teedev->cdev.owner = teedesc->owner; + teedev->cdev.kobj.parent = &teedev->dev.kobj; + + dev_set_drvdata(&teedev->dev, driver_data); + device_initialize(&teedev->dev); + + /* 1 as tee_device_unregister() does one final tee_device_put() */ + teedev->num_users = 1; + init_completion(&teedev->c_no_users); + mutex_init(&teedev->mutex); + idr_init(&teedev->idr); + + teedev->desc = teedesc; + teedev->pool = pool; + + return teedev; +err_devt: + unregister_chrdev_region(teedev->dev.devt, 1); +err: + pr_err("could not register %s driver\n", + teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client"); + if (teedev && teedev->id < TEE_NUM_DEVICES) { + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + } + kfree(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_device_alloc); + +static ssize_t implementation_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + struct tee_ioctl_version_data vers; + + teedev->desc->ops->get_version(teedev, &vers); + return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); +} +static DEVICE_ATTR_RO(implementation_id); + +static struct attribute *tee_dev_attrs[] = { + &dev_attr_implementation_id.attr, + NULL +}; + +static const struct attribute_group tee_dev_group = { + .attrs = tee_dev_attrs, +}; + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. 
+ * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev) +{ + int rc; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + dev_err(&teedev->dev, "attempt to register twice\n"); + return -EINVAL; + } + + rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1); + if (rc) { + dev_err(&teedev->dev, + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + return rc; + } + + rc = device_add(&teedev->dev); + if (rc) { + dev_err(&teedev->dev, + "unable to device_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + goto err_device_add; + } + + rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group); + if (rc) { + dev_err(&teedev->dev, + "failed to create sysfs attributes, err=%d\n", rc); + goto err_sysfs_create_group; + } + + teedev->flags |= TEE_DEVICE_FLAG_REGISTERED; + return 0; + +err_sysfs_create_group: + device_del(&teedev->dev); +err_device_add: + cdev_del(&teedev->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_device_register); + +void tee_device_put(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + /* Shouldn't put in this state */ + if (!WARN_ON(!teedev->desc)) { + teedev->num_users--; + if (!teedev->num_users) { + teedev->desc = NULL; + complete(&teedev->c_no_users); + } + } + mutex_unlock(&teedev->mutex); +} + +bool tee_device_get(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + if (!teedev->desc) { + mutex_unlock(&teedev->mutex); + return false; + } + teedev->num_users++; + mutex_unlock(&teedev->mutex); + return true; +} + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev) +{ + if (!teedev) + return; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group); + cdev_del(&teedev->cdev); + device_del(&teedev->dev); + } + + tee_device_put(teedev); + wait_for_completion(&teedev->c_no_users); + + /* + * No need to take a mutex any longer now since teedev->desc was + * set to NULL before teedev->c_no_users was completed. + */ + + teedev->pool = NULL; + + put_device(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_device_unregister); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @teedev: Device containing the driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). 
+ */ +void *tee_get_drvdata(struct tee_device *teedev) +{ + return dev_get_drvdata(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_get_drvdata); + +static int __init tee_init(void) +{ + int rc; + + tee_class = class_create(THIS_MODULE, "tee"); + if (IS_ERR(tee_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(tee_class); + } + + rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee"); + if (rc) { + pr_err("failed to allocate char dev region\n"); + class_destroy(tee_class); + tee_class = NULL; + } + + return rc; +} + +static void __exit tee_exit(void) +{ + class_destroy(tee_class); + tee_class = NULL; + unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); +} + +subsys_initcall(tee_init); +module_exit(tee_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("TEE Driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h new file mode 100644 index 000000000000..21cb6be8bce9 --- /dev/null +++ b/drivers/tee/tee_private.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef TEE_PRIVATE_H +#define TEE_PRIVATE_H + +#include +#include +#include +#include +#include +#include + +struct tee_device; + +/** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +struct tee_shm_pool_mgr; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool - shared memory pool + * @private_mgr: pool manager for shared memory only between kernel + * and secure world + * @dma_buf_mgr: pool manager for shared memory exported to user space + * @destroy: called when destroying the pool + * @private_data: private data for the pool + */ +struct tee_shm_pool { + struct tee_shm_pool_mgr private_mgr; + struct tee_shm_pool_mgr dma_buf_mgr; + void (*destroy)(struct tee_shm_pool *pool); + void *private_data; +}; + +#define TEE_DEVICE_FLAG_REGISTERED 0x1 +#define 
TEE_MAX_DEV_NAME_LEN 32 + +/** + * struct tee_device - TEE Device representation + * @name: name of device + * @desc: description of device + * @id: unique id of device + * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above + * @dev: embedded basic device structure + * @cdev: embedded cdev + * @num_users: number of active users of this device + * @c_no_user: completion used when unregistering the device + * @mutex: mutex protecting @num_users and @idr + * @idr: register of shared memory object allocated on this device + * @pool: shared memory pool + */ +struct tee_device { + char name[TEE_MAX_DEV_NAME_LEN]; + const struct tee_desc *desc; + int id; + unsigned int flags; + + struct device dev; + struct cdev cdev; + + size_t num_users; + struct completion c_no_users; + struct mutex mutex; /* protects num_users and idr */ + + struct idr idr; + struct tee_shm_pool *pool; +}; + +int tee_shm_init(void); + +int tee_shm_get_fd(struct tee_shm *shm); + +bool tee_device_get(struct tee_device *teedev); +void tee_device_put(struct tee_device *teedev); + +#endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c new file mode 100644 index 000000000000..0be1e3e93bee --- /dev/null +++ b/drivers/tee/tee_shm.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include "tee_private.h" + +static void tee_shm_release(struct tee_shm *shm) +{ + struct tee_device *teedev = shm->teedev; + struct tee_shm_pool_mgr *poolm; + + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + if (shm->ctx) + list_del(&shm->link); + mutex_unlock(&teedev->mutex); + + if (shm->flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + poolm->ops->free(poolm, shm); + kfree(shm); + + tee_device_put(teedev); +} + +static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment + *attach, enum dma_data_direction dir) +{ + return NULL; +} + +static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ +} + +static void tee_shm_op_release(struct dma_buf *dmabuf) +{ + struct tee_shm *shm = dmabuf->priv; + + tee_shm_release(shm); +} + +static void *tee_shm_op_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static void *tee_shm_op_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct tee_shm *shm = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, + size, vma->vm_page_prot); +} + +static struct dma_buf_ops tee_shm_dma_buf_ops = { + .map_dma_buf = tee_shm_op_map_dma_buf, + .unmap_dma_buf = tee_shm_op_unmap_dma_buf, + .release = tee_shm_op_release, + .kmap_atomic = tee_shm_op_kmap_atomic, + .kmap = tee_shm_op_kmap, + .mmap = tee_shm_op_mmap, +}; + +/** + * tee_shm_alloc() - Allocate shared memory + 
* @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* in . TEE_SHM_MAPPED must currently always be + * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and + * associated with a dma-buf handle, else driver private memory. + */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +{ + struct tee_device *teedev = ctx->teedev; + struct tee_shm_pool_mgr *poolm = NULL; + struct tee_shm *shm; + void *ret; + int rc; + + if (!(flags & TEE_SHM_MAPPED)) { + dev_err(teedev->dev.parent, + "only mapped allocations supported\n"); + return ERR_PTR(-EINVAL); + } + + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) { + dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); + return ERR_PTR(-EINVAL); + } + + if (!tee_device_get(teedev)) + return ERR_PTR(-EINVAL); + + if (!teedev->pool) { + /* teedev has been detached from driver */ + ret = ERR_PTR(-EINVAL); + goto err_dev_put; + } + + shm = kzalloc(sizeof(*shm), GFP_KERNEL); + if (!shm) { + ret = ERR_PTR(-ENOMEM); + goto err_dev_put; + } + + shm->flags = flags; + shm->teedev = teedev; + shm->ctx = ctx; + if (flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + rc = poolm->ops->alloc(poolm, shm, size); + if (rc) { + ret = ERR_PTR(rc); + goto err_kfree; + } + + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; + } + + if (flags & TEE_SHM_DMA_BUF) { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = shm->size; + exp_info.flags = O_RDWR; + exp_info.priv = shm; + + shm->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(shm->dmabuf)) { + ret = ERR_CAST(shm->dmabuf); + goto err_rem; + } + } + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + + return shm; +err_rem: + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); +err_pool_free: + poolm->ops->free(poolm, shm); +err_kfree: + kfree(shm); +err_dev_put: + tee_device_put(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_shm_alloc); + +/** + * tee_shm_get_fd() - Increase reference count and return file descriptor + * @shm: Shared memory handle + * @returns user space file descriptor to shared memory + */ +int tee_shm_get_fd(struct tee_shm *shm) +{ + u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF; + int fd; + + if ((shm->flags & req_flags) != req_flags) + return -EINVAL; + + fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); + if (fd >= 0) + get_dma_buf(shm->dmabuf); + return fd; +} + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm) +{ + /* + * dma_buf_put() decreases the dmabuf reference counter and will + * call tee_shm_release() when the last reference is gone. + * + * In the case of driver private memory we call tee_shm_release + * directly instead as it doesn't have a reference counter. 
+ */ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); + else + tee_shm_release(shm); +} +EXPORT_SYMBOL_GPL(tee_shm_free); + +/** + * tee_shm_va2pa() - Get physical address of a virtual address + * @shm: Shared memory handle + * @va: Virtual address to tranlsate + * @pa: Returned physical address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) +{ + /* Check that we're in the range of the shm */ + if ((char *)va < (char *)shm->kaddr) + return -EINVAL; + if ((char *)va >= ((char *)shm->kaddr + shm->size)) + return -EINVAL; + + return tee_shm_get_pa( + shm, (unsigned long)va - (unsigned long)shm->kaddr, pa); +} +EXPORT_SYMBOL_GPL(tee_shm_va2pa); + +/** + * tee_shm_pa2va() - Get virtual address of a physical address + * @shm: Shared memory handle + * @pa: Physical address to tranlsate + * @va: Returned virtual address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) +{ + /* Check that we're in the range of the shm */ + if (pa < shm->paddr) + return -EINVAL; + if (pa >= (shm->paddr + shm->size)) + return -EINVAL; + + if (va) { + void *v = tee_shm_get_va(shm, pa - shm->paddr); + + if (IS_ERR(v)) + return PTR_ERR(v); + *va = v; + } + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_pa2va); + +/** + * tee_shm_get_va() - Get virtual address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @returns virtual address of the shared memory + offs if offs is within + * the bounds of this shared memory, else an ERR_PTR + */ +void *tee_shm_get_va(struct tee_shm *shm, size_t offs) +{ + if (offs >= shm->size) + return ERR_PTR(-EINVAL); + return (char *)shm->kaddr + offs; +} +EXPORT_SYMBOL_GPL(tee_shm_get_va); + +/** + * tee_shm_get_pa() - Get physical address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @pa: Physical address to return + * @returns 0 if offs is within the bounds of this shared memory, else an + * error code. 
+ */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa) +{ + if (offs >= shm->size) + return -EINVAL; + if (pa) + *pa = shm->paddr + offs; + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_get_pa); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) +{ + struct tee_device *teedev; + struct tee_shm *shm; + + if (!ctx) + return ERR_PTR(-EINVAL); + + teedev = ctx->teedev; + mutex_lock(&teedev->mutex); + shm = idr_find(&teedev->idr, id); + if (!shm || shm->ctx != ctx) + shm = ERR_PTR(-EINVAL); + else if (shm->flags & TEE_SHM_DMA_BUF) + get_dma_buf(shm->dmabuf); + mutex_unlock(&teedev->mutex); + return shm; +} +EXPORT_SYMBOL_GPL(tee_shm_get_from_id); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} +EXPORT_SYMBOL_GPL(tee_shm_get_id); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm) +{ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); +} +EXPORT_SYMBOL_GPL(tee_shm_put); diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c new file mode 100644 index 000000000000..fb4f8522a526 --- /dev/null +++ b/drivers/tee/tee_shm_pool.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include "tee_private.h" + +static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned long va; + struct gen_pool *genpool = poolm->private_data; + size_t s = roundup(size, 1 << genpool->min_alloc_order); + + va = gen_pool_alloc(genpool, s); + if (!va) + return -ENOMEM; + + memset((void *)va, 0, s); + shm->kaddr = (void *)va; + shm->paddr = gen_pool_virt_to_phys(genpool, va); + shm->size = s; + return 0; +} + +static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + shm->size); + shm->kaddr = NULL; +} + +static const struct tee_shm_pool_mgr_ops pool_ops_generic = { + .alloc = pool_op_gen_alloc, + .free = pool_op_gen_free, +}; + +static void pool_res_mem_destroy(struct tee_shm_pool *pool) +{ + gen_pool_destroy(pool->private_mgr.private_data); + gen_pool_destroy(pool->dma_buf_mgr.private_data); +} + +static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, + struct tee_shm_pool_mem_info *info, + int min_alloc_order) +{ + size_t page_mask = PAGE_SIZE - 1; + struct gen_pool *genpool = NULL; + int rc; + + /* + * Start and end must be page aligned + */ + if ((info->vaddr & page_mask) || (info->paddr & page_mask) || + (info->size & page_mask)) + return -EINVAL; + + genpool = gen_pool_create(min_alloc_order, -1); + if (!genpool) + return -ENOMEM; + + gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); + rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, + -1); + if (rc) { + gen_pool_destroy(genpool); + return rc; + } + + mgr->private_data = genpool; + mgr->ops = &pool_ops_generic; + return 0; +} + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info) +{ + struct tee_shm_pool *pool = NULL; + int ret; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + ret = -ENOMEM; + goto err; + } + + /* + * Create the pool for driver private shared memory + */ + ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, + 3 /* 8 byte aligned */); + if (ret) + goto err; + + /* + * Create the pool for dma_buf shared memory + */ + ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, + PAGE_SHIFT); + if (ret) + goto err; + + pool->destroy = pool_res_mem_destroy; + return pool; +err: + if (ret == -ENOMEM) + pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__); + if (pool && pool->private_mgr.private_data) + gen_pool_destroy(pool->private_mgr.private_data); + kfree(pool); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * There must be no remaining shared memory allocated from this pool when + * this function is called. 
+ */ +void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->destroy(pool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_free); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h new file mode 100644 index 000000000000..0f175b8f6456 --- /dev/null +++ b/include/linux/tee_drv.h @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __TEE_DRV_H +#define __TEE_DRV_H + +#include +#include +#include +#include + +/* + * The file describes the API provided by the generic TEE driver to the + * specific TEE driver. + */ + +#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ +#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ + +struct tee_device; +struct tee_shm; +struct tee_shm_pool; + +/** + * struct tee_context - driver specific context on file pointer data + * @teedev: pointer to this drivers struct tee_device + * @list_shm: List of shared memory object owned by this context + * @data: driver specific context data, managed by the driver + */ +struct tee_context { + struct tee_device *teedev; + struct list_head list_shm; + void *data; +}; + +struct tee_param_memref { + size_t shm_offs; + size_t size; + struct tee_shm *shm; +}; + +struct tee_param_value { + u64 a; + u64 b; + u64 c; +}; + +struct tee_param { + u64 attr; + union { + struct tee_param_memref memref; + struct tee_param_value value; + } u; +}; + +/** + * struct tee_driver_ops - driver operations vtable + * @get_version: returns version of driver + * @open: called when the device file is opened + * @release: release this open file + * @open_session: open a new session + * @close_session: close a session + * @invoke_func: invoke a trusted function + * @cancel_req: request cancel of an ongoing invoke or open + * @supp_revc: called for supplicant to get a command + * @supp_send: called for supplicant to send a response + */ +struct tee_driver_ops { + void (*get_version)(struct tee_device *teedev, + struct tee_ioctl_version_data *vers); + int (*open)(struct tee_context *ctx); + void (*release)(struct tee_context *ctx); + int (*open_session)(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); + int (*close_session)(struct tee_context *ctx, u32 session); + int (*invoke_func)(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); + int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); + int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); +}; + +/** + * struct tee_desc - Describes the TEE driver to the subsystem + * @name: name of driver + * @ops: driver operations vtable + * @owner: module providing the driver + * @flags: Extra properties of driver, defined by TEE_DESC_* below + */ +#define TEE_DESC_PRIVILEGED 0x1 +struct tee_desc { + const char *name; + const struct tee_driver_ops *ops; + struct module *owner; + u32 flags; +}; + +/** + * tee_device_alloc() - Allocate a new struct 
tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). + * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data); + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. + * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev); + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev); + +/** + * struct tee_shm_pool_mem_info - holds information needed to create a shared + * memory pool + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + */ +struct tee_shm_pool_mem_info { + unsigned long vaddr; + phys_addr_t paddr; + size_t size; +}; + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * The must be no remaining shared memory allocated from this pool when + * this function is called. + */ +void tee_shm_pool_free(struct tee_shm_pool *pool); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). + */ +void *tee_get_drvdata(struct tee_device *teedev); + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If + * TEE_SHM_DMA_BUF global shared memory will be allocated and associated + * with a dma-buf handle, else driver private memory. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm); + +/** + * tee_shm_va2pa() - Get physical address of a virtual address + * @shm: Shared memory handle + * @va: Virtual address to tranlsate + * @pa: Returned physical address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa); + +/** + * tee_shm_pa2va() - Get virtual address of a physical address + * @shm: Shared memory handle + * @pa: Physical address to tranlsate + * @va: Returned virtual address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va); + +/** + * tee_shm_get_va() - Get virtual address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @returns virtual address of the shared memory + offs if offs is within + * the bounds of this shared memory, else an ERR_PTR + */ +void *tee_shm_get_va(struct tee_shm *shm, size_t offs); + +/** + * tee_shm_get_pa() - Get physical address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @pa: Physical address to return + * @returns 0 if offs is within the bounds of this shared memory, else an + * error code. + */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); + +#endif /*__TEE_DRV_H*/ diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h new file mode 100644 index 000000000000..370d8845ab21 --- /dev/null +++ b/include/uapi/linux/tee.h @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __TEE_H +#define __TEE_H + +#include +#include + +/* + * This file describes the API provided by a TEE driver to user space. + * + * Each TEE driver defines a TEE specific protocol which is used for the + * data passed back and forth using TEE_IOC_CMD. + */ + +/* Helpers to make the ioctl defines */ +#define TEE_IOC_MAGIC 0xa4 +#define TEE_IOC_BASE 0 + +/* Flags relating to shared memory */ +#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */ +#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */ + +#define TEE_MAX_ARG_SIZE 1024 + +#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ + +/* + * TEE Implementation ID + */ +#define TEE_IMPL_ID_OPTEE 1 + +/* + * OP-TEE specific capabilities + */ +#define TEE_OPTEE_CAP_TZ (1 << 0) + +/** + * struct tee_ioctl_version_data - TEE version + * @impl_id: [out] TEE implementation id + * @impl_caps: [out] Implementation specific capabilities + * @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAPS_* above + * + * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above. + * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_* + * is valid when @impl_id == TEE_IMPL_ID_OPTEE. + */ +struct tee_ioctl_version_data { + __u32 impl_id; + __u32 impl_caps; + __u32 gen_caps; +}; + +/** + * TEE_IOC_VERSION - query version of TEE + * + * Takes a tee_ioctl_version_data struct and returns with the TEE version + * data filled in. + */ +#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \ + struct tee_ioctl_version_data) + +/** + * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument + * @size: [in/out] Size of shared memory to allocate + * @flags: [in/out] Flags to/from allocation. + * @id: [out] Identifier of the shared memory + * + * The flags field should currently be zero as input. Updated by the call + * with actual flags as defined by TEE_IOCTL_SHM_* above. + * This structure is used as argument for TEE_IOC_SHM_ALLOC below. + */ +struct tee_ioctl_shm_alloc_data { + __u64 size; + __u32 flags; + __s32 id; +}; + +/** + * TEE_IOC_SHM_ALLOC - allocate shared memory + * + * Allocates shared memory between the user space process and secure OS. + * + * Returns a file descriptor on success or < 0 on failure + * + * The returned file descriptor is used to map the shared memory into user + * space. The shared memory is freed when the descriptor is closed and the + * memory is unmapped. + */ +#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \ + struct tee_ioctl_shm_alloc_data) + +/** + * struct tee_ioctl_buf_data - Variable sized buffer + * @buf_ptr: [in] A __user pointer to a buffer + * @buf_len: [in] Length of the buffer above + * + * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE, + * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below. 
+ */ +struct tee_ioctl_buf_data { + __u64 buf_ptr; + __u64 buf_len; +}; + +/* + * Attributes for struct tee_ioctl_param, selects field in the union + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */ + +/* + * These defines value parameters (struct tee_ioctl_param_value) + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1 +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2 +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */ + +/* + * These defines shared memory reference parameters (struct + * tee_ioctl_param_memref) + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5 +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6 +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */ + +/* + * Mask for the type part of the attribute, leaves room for more types + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff + +/* + * Matches TEEC_LOGIN_* in GP TEE Client API + * Are only defined for GP compliant TEEs + */ +#define TEE_IOCTL_LOGIN_PUBLIC 0 +#define TEE_IOCTL_LOGIN_USER 1 +#define TEE_IOCTL_LOGIN_GROUP 2 +#define TEE_IOCTL_LOGIN_APPLICATION 4 +#define TEE_IOCTL_LOGIN_USER_APPLICATION 5 +#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6 + +/** + * struct tee_ioctl_param - parameter + * @attr: attributes + * @a: if a memref, offset into the shared memory object, else a value parameter + * @b: if a memref, size of the buffer, else a value parameter + * @c: if a memref, shared memory identifier, else a value parameter + * + * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in + * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and + * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE + * indicates that none of the members are used. + * + * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an + * identifier representing the shared memory object. A memref can reference + * a part of a shared memory by specifying an offset (@a) and size (@b) of + * the object. To supply the entire shared memory object set the offset + * (@a) to 0 and size (@b) to the previously returned size of the object. 
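+ *
+ * For example, passing the first 256 bytes of the shared memory object
+ * with id 4 as an input buffer would use
+ * attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT, @a = 0, @b = 256 and
+ * @c = 4.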
+ */ +struct tee_ioctl_param { + __u64 attr; + __u64 a; + __u64 b; + __u64 c; +}; + +#define TEE_IOCTL_UUID_LEN 16 + +/** + * struct tee_ioctl_open_session_arg - Open session argument + * @uuid: [in] UUID of the Trusted Application + * @clnt_uuid: [in] UUID of client + * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @session: [out] Session id + * @ret: [out] return value + * @ret_origin [out] origin of the return value + * @num_params [in] number of parameters following this struct + */ +struct tee_ioctl_open_session_arg { + __u8 uuid[TEE_IOCTL_UUID_LEN]; + __u8 clnt_uuid[TEE_IOCTL_UUID_LEN]; + __u32 clnt_login; + __u32 cancel_id; + __u32 session; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_ioctl_open_session_arg followed by any array of struct + * tee_ioctl_param + */ +#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \ + struct tee_ioctl_buf_data) + +/** + * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted + * Application + * @func: [in] Trusted Application function, specific to the TA + * @session: [in] Session id + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @ret: [out] return value + * @ret_origin [out] origin of the return value + * @num_params [in] number of parameters following this struct + */ +struct tee_ioctl_invoke_arg { + __u32 func; + __u32 session; + __u32 cancel_id; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_INVOKE - Invokes a function in a Trusted Application + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_invoke_func_arg followed by any array of struct tee_param + */ +#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \ + struct tee_ioctl_buf_data) + +/** + * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @session: [in] Session id, if the session is opened, else set to 0 + */ +struct tee_ioctl_cancel_arg { + __u32 cancel_id; + __u32 session; +}; + +/** + * TEE_IOC_CANCEL - Cancels an open session or invoke + */ +#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \ + struct tee_ioctl_cancel_arg) + +/** + * struct tee_ioctl_close_session_arg - Closes an open session + * @session: [in] Session id + */ +struct tee_ioctl_close_session_arg { + __u32 session; +}; + +/** + * TEE_IOC_CLOSE_SESSION - Closes a session + */ +#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \ + struct tee_ioctl_close_session_arg) + +/** + * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function + * @func: [in] supplicant function + * @num_params [in/out] number of parameters following this struct + * + * @num_params is the number of params that tee-supplicant has room to + * receive when input, @num_params is the number of actual params + * tee-supplicant receives when output. 
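+ *
+ * A supplicant typically loops: TEE_IOC_SUPPL_RECV to pick up the next
+ * request (@func plus its parameters), service it in user space, then
+ * TEE_IOC_SUPPL_SEND with the result.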
+ */ +struct tee_iocl_supp_recv_arg { + __u32 func; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_iocl_supp_recv_arg followed by any array of struct tee_param + */ +#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \ + struct tee_ioctl_buf_data) + +/** + * struct tee_iocl_supp_send_arg - Send a response to a received request + * @ret: [out] return value + * @num_params [in] number of parameters following this struct + */ +struct tee_iocl_supp_send_arg { + __u32 ret; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_iocl_supp_send_arg followed by any array of struct tee_param + */ +#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \ + struct tee_ioctl_buf_data) + +/* + * Five syscalls are used when communicating with the TEE driver. + * open(): opens the device associated with the driver + * ioctl(): as described above operating on the file descriptor from open() + * close(): two cases + * - closes the device file descriptor + * - closes a file descriptor connected to allocated shared memory + * mmap(): maps shared memory into user space using information from struct + * tee_ioctl_shm_alloc_data + * munmap(): unmaps previously shared memory + */ + +#endif /*__TEE_H*/ From 09da41c575ea9acbd184653853dc3fe1567c4e9a Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Tue, 14 Apr 2015 14:33:20 +0200 Subject: [PATCH 283/312] tee: add OP-TEE driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a OP-TEE driver which also can be compiled as a loadable module. 
* Targets ARM and ARM64 * Supports using reserved memory from OP-TEE as shared memory * Probes OP-TEE version using SMCs * Accepts requests on privileged and unprivileged device * Uses OPTEE message protocol version 2 to communicate with secure world Change-Id: Iffaf30a91fff2d29dd87e61173c564271bcc7776 Acked-by: Andreas Dannenberg Tested-by: Jerome Forissier (HiKey) Tested-by: Volodymyr Babchuk (RCAR H3) Tested-by: Scott Branden Reviewed-by: Javier González Signed-off-by: Jens Wiklander (cherry picked from commit 4fb0a5eb364d239722e745c02aef0dbd4e0f1ad2) Signed-off-by: Victor Chong --- MAINTAINERS | 5 + drivers/tee/Kconfig | 10 + drivers/tee/Makefile | 1 + drivers/tee/optee/Kconfig | 7 + drivers/tee/optee/Makefile | 5 + drivers/tee/optee/call.c | 444 +++++++++++++++++++++ drivers/tee/optee/core.c | 622 ++++++++++++++++++++++++++++++ drivers/tee/optee/optee_msg.h | 418 ++++++++++++++++++++ drivers/tee/optee/optee_private.h | 183 +++++++++ drivers/tee/optee/optee_smc.h | 450 +++++++++++++++++++++ drivers/tee/optee/rpc.c | 396 +++++++++++++++++++ drivers/tee/optee/supp.c | 273 +++++++++++++ 12 files changed, 2814 insertions(+) create mode 100644 drivers/tee/optee/Kconfig create mode 100644 drivers/tee/optee/Makefile create mode 100644 drivers/tee/optee/call.c create mode 100644 drivers/tee/optee/core.c create mode 100644 drivers/tee/optee/optee_msg.h create mode 100644 drivers/tee/optee/optee_private.h create mode 100644 drivers/tee/optee/optee_smc.h create mode 100644 drivers/tee/optee/rpc.c create mode 100644 drivers/tee/optee/supp.c diff --git a/MAINTAINERS b/MAINTAINERS index ef689b0afc53..6e070bed5f58 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7935,6 +7935,11 @@ F: arch/*/oprofile/ F: drivers/oprofile/ F: include/linux/oprofile.h +OP-TEE DRIVER +M: Jens Wiklander +S: Maintained +F: drivers/tee/optee/ + ORACLE CLUSTER FILESYSTEM 2 (OCFS2) M: Mark Fasheh M: Joel Becker diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig index 50c244ead46d..2330a4eb4e8b 100644 --- a/drivers/tee/Kconfig +++ b/drivers/tee/Kconfig @@ -6,3 +6,13 @@ config TEE help This implements a generic interface towards a Trusted Execution Environment (TEE). + +if TEE + +menu "TEE drivers" + +source "drivers/tee/optee/Kconfig" + +endmenu + +endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile index ec64047a86e2..7a4e4a1ac39c 100644 --- a/drivers/tee/Makefile +++ b/drivers/tee/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_TEE) += tee.o tee-objs += tee_core.o tee-objs += tee_shm.o tee-objs += tee_shm_pool.o +obj-$(CONFIG_OPTEE) += optee/ diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig new file mode 100644 index 000000000000..0126de898036 --- /dev/null +++ b/drivers/tee/optee/Kconfig @@ -0,0 +1,7 @@ +# OP-TEE Trusted Execution Environment Configuration +config OPTEE + tristate "OP-TEE" + depends on HAVE_ARM_SMCCC + help + This implements the OP-TEE Trusted Execution Environment (TEE) + driver. 
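As a quick illustration of the user-space side this driver serves, the sketch below queries the TEE version through the interface added earlier in the series. It is illustrative only: the /dev/tee0 node name depends on how the generic TEE core registered the device, and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_version_data vers;
	int fd;

	/* Device node name is an assumption; it depends on the TEE core. */
	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/tee0");
		return 1;
	}

	/* Ask which TEE implementation answers on this device. */
	if (ioctl(fd, TEE_IOC_VERSION, &vers)) {
		perror("TEE_IOC_VERSION");
		close(fd);
		return 1;
	}

	/* For this driver, impl_id is TEE_IMPL_ID_OPTEE and gen_caps
	 * includes TEE_GEN_CAP_GP (see optee_get_version() below). */
	printf("impl_id %u, gen_caps %#x\n", vers.impl_id, vers.gen_caps);

	close(fd);
	return 0;
}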
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile new file mode 100644 index 000000000000..92fe5789bcce --- /dev/null +++ b/drivers/tee/optee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_OPTEE) += optee.o +optee-objs += core.o +optee-objs += call.o +optee-objs += rpc.o +optee-objs += supp.o diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c new file mode 100644 index 000000000000..f7b7b404c990 --- /dev/null +++ b/drivers/tee/optee/call.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "optee_private.h" +#include "optee_smc.h" + +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +static void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. + */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +static void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. The thread in secure + * world that was used for this call is now available for some + * other task to use. + */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. 
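+	 *
+	 * (Overall flow: a caller registers as a waiter in
+	 * optee_cq_wait_init(), parks in optee_cq_wait_for_completion()
+	 * whenever secure world returns OPTEE_SMC_RETURN_ETHREAD_LIMIT, and
+	 * hands its thread back here, passing any surplus wakeup on to the
+	 * next waiter.)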
+ */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} + +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) +{ + struct optee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; + + return NULL; +} + +/** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Does and SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. + * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + u32 ret; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, + param.a4, param.a5, param.a6, param.a7, + &res); + + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { + /* + * Out of threads in secure world, wait for a thread + * become available. + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; + param.a3 = res.a3; + optee_handle_rpc(ctx, ¶m); + } else { + ret = res.a0; + break; + } + } + + /* + * We're done with our thread in secure world, if there's any + * thread waiters wake up one. + */ + optee_cq_wait_final(&optee->call_queue, &w); + + return ret; +} + +static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg, + phys_addr_t *msg_parg) +{ + int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + + shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), + TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { + rc = PTR_ERR(ma); + goto out; + } + + rc = tee_shm_get_pa(shm, 0, msg_parg); + if (rc) + goto out; + + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +out: + if (rc) { + tee_shm_free(shm); + return ERR_PTR(rc); + } + + return shm; +} + +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. 
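+	 *
+	 * Per OPTEE_MSG_CMD_OPEN_SESSION, params[0] carries the UUID of the
+	 * Trusted Application and params[1] the client UUID plus login
+	 * class, both tagged OPTEE_MSG_ATTR_META so they are absorbed by
+	 * secure world rather than passed on to the TA.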
+ */ + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) + goto out; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + rc = -ENOMEM; + goto out; + } + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (msg_arg->ret == TEEC_SUCCESS) { + /* A new session has been created, add it to the list. */ + sess->session_id = msg_arg->session; + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + } else { + kfree(sess); + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ + optee_close_session(ctx, msg_arg->session); + } else { + arg->session = msg_arg->session; + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; + } +out: + tee_shm_free(shm); + + return rc; +} + +int optee_close_session(struct tee_context *ctx, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + if (sess) + list_del(&sess->list_node); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + kfree(sess); + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, arg->session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + msg_arg->func = arg->func; + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); + if (rc) + goto out; + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; +out: + tee_shm_free(shm); + return rc; +} + +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + 
phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +/** + * optee_enable_shm_cache() - Enables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_enable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res); + if (res.a0 == OPTEE_SMC_RETURN_OK) + break; + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * optee_disable_shm_cache() - Disables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + union { + struct arm_smccc_res smccc; + struct optee_smc_disable_shm_cache_result result; + } res; + + optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res.smccc); + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) + break; /* All shm's freed */ + if (res.result.status == OPTEE_SMC_RETURN_OK) { + struct tee_shm *shm; + + shm = reg_pair_to_ptr(res.result.shm_upper32, + res.result.shm_lower32); + tee_shm_free(shm); + } else { + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + } + optee_cq_wait_final(&optee->call_queue, &w); +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c new file mode 100644 index 000000000000..58169e519422 --- /dev/null +++ b/drivers/tee/optee/core.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "optee_private.h" +#include "optee_smc.h" + +#define DRIVER_NAME "optee" + +#define OPTEE_SHM_NUM_PRIV_PAGES 1 + +/** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params) +{ + int rc; + size_t n; + struct tee_shm *shm; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + const struct optee_msg_param *mp = msg_params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NONE: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&p->u, 0, sizeof(p->u)); + break; + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + p->u.value.a = mp->u.value.a; + p->u.value.b = mp->u.value.b; + p->u.value.c = mp->u.value.c; + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + p->u.memref.size = mp->u.tmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.tmem.shm_ref; + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + rc = tee_shm_get_pa(shm, 0, &pa); + if (rc) + return rc; + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; + p->u.memref.shm = shm; + + /* Check that the memref is covered by the shm object */ + if (p->u.memref.size) { + size_t o = p->u.memref.shm_offs + + p->u.memref.size - 1; + + rc = tee_shm_get_pa(shm, o, NULL); + if (rc) + return rc; + } + break; + default: + return -EINVAL; + } + } + return 0; +} + +/** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem itnernal parameter representation + * Returns 0 on success or <0 on failure + */ +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params) +{ + int rc; + size_t n; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + const struct tee_param *p = params + n; + struct optee_msg_param *mp = msg_params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&mp->u, 0, sizeof(mp->u)); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + mp->u.value.a = p->u.value.a; + mp->u.value.b = p->u.value.b; + mp->u.value.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + mp->u.tmem.shm_ref = 
(unsigned long)p->u.memref.shm; + mp->u.tmem.size = p->u.memref.size; + if (!p->u.memref.shm) { + mp->u.tmem.buf_ptr = 0; + break; + } + rc = tee_shm_get_pa(p->u.memref.shm, + p->u.memref.shm_offs, &pa); + if (rc) + return rc; + mp->u.tmem.buf_ptr = pa; + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << + OPTEE_MSG_ATTR_CACHE_SHIFT; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static void optee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_OPTEE, + .impl_caps = TEE_OPTEE_CAP_TZ, + .gen_caps = TEE_GEN_CAP_GP, + }; + *vers = v; +} + +static int optee_open(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + if (teedev == optee->supp_teedev) { + bool busy = true; + + mutex_lock(&optee->supp.ctx_mutex); + if (!optee->supp.ctx) { + busy = false; + optee->supp.ctx = ctx; + } + mutex_unlock(&optee->supp.ctx_mutex); + if (busy) { + kfree(ctxdata); + return -EBUSY; + } + } + + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + + ctx->data = ctxdata; + return 0; +} + +static void optee_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + struct optee_msg_arg *arg = NULL; + phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + if (!IS_ERR(shm)) { + arg = tee_shm_get_va(shm, 0); + /* + * If va2pa fails for some reason, we can't call + * optee_close_session(), only free the memory. Secure OS + * will leak sessions and finally refuse more sessions, but + * we will at least let normal world reclaim its memory. 
+ */ + if (!IS_ERR(arg)) + tee_shm_va2pa(shm, arg, &parg); + } + + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); + if (!IS_ERR_OR_NULL(arg)) { + memset(arg, 0, sizeof(*arg)); + arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + arg->session = sess->session_id; + optee_do_call_with_arg(ctx, parg); + } + kfree(sess); + } + kfree(ctxdata); + + if (!IS_ERR(shm)) + tee_shm_free(shm); + + ctx->data = NULL; + + if (teedev == optee->supp_teedev) { + mutex_lock(&optee->supp.ctx_mutex); + optee->supp.ctx = NULL; + mutex_unlock(&optee->supp.ctx_mutex); + } +} + +static struct tee_driver_ops optee_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .open_session = optee_open_session, + .close_session = optee_close_session, + .invoke_func = optee_invoke_func, + .cancel_req = optee_cancel_req, +}; + +static struct tee_desc optee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &optee_ops, + .owner = THIS_MODULE, +}; + +static struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, +}; + +static struct tee_desc optee_supp_desc = { + .name = DRIVER_NAME "-supp", + .ops = &optee_supp_ops, + .owner = THIS_MODULE, + .flags = TEE_DESC_PRIVILEGED, +}; + +static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) +{ + struct arm_smccc_res res; + + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) + return true; + return false; +} + +static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_calls_revision_result result; + } res; + + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) + return true; + return false; +} + +static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, + u32 *sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_exchange_capabilities_result result; + } res; + u32 a1 = 0; + + /* + * TODO This isn't enough to tell if it's UP system (from kernel + * point of view) or not, is_smp() returns the the information + * needed, but can't be called directly from here. 
+ */ + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; + + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.status != OPTEE_SMC_RETURN_OK) + return false; + + *sec_caps = res.result.capabilities; + return true; +} + +static struct tee_shm_pool * +optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_get_shm_config_result result; + } res; + struct tee_shm_pool *pool; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + struct tee_shm_pool_mem_info priv_info; + struct tee_shm_pool_mem_info dmabuf_info; + + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) { + pr_info("shm service not available\n"); + return ERR_PTR(-ENOENT); + } + + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { + pr_err("only normal cached shared memory supported\n"); + return ERR_PTR(-EINVAL); + } + + begin = roundup(res.result.start, PAGE_SIZE); + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); + paddr = begin; + size = end - begin; + + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { + pr_err("too small shared memory area\n"); + return ERR_PTR(-EINVAL); + } + + va = memremap(paddr, size, MEMREMAP_WB); + if (!va) { + pr_err("shared memory ioremap failed\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + + priv_info.vaddr = vaddr; + priv_info.paddr = paddr; + priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + + pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info); + if (IS_ERR(pool)) { + memunmap(va); + goto out; + } + + *memremaped_shm = va; +out: + return pool; +} + +/* Simple wrapper functions to be able to use a function pointer */ +static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static optee_invoke_fn *get_invoke_func(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp("hvc", method)) + return optee_smccc_hvc; + else if (!strcmp("smc", method)) + return optee_smccc_smc; + + pr_warn("invalid \"method\" property: %s\n", method); + return ERR_PTR(-EINVAL); +} + +static struct optee *optee_probe(struct device_node *np) +{ + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; + struct optee *optee = NULL; + void *memremaped_shm = NULL; + struct tee_device *teedev; + u32 sec_caps; + int rc; + + invoke_fn = get_invoke_func(np); + if (IS_ERR(invoke_fn)) + return (void *)invoke_fn; + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); + return ERR_PTR(-EINVAL); 
+ } + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); + return ERR_PTR(-EINVAL); + } + + /* + * We have no other option for shared memory, if secure world + * doesn't have any reserved memory we can use we can't continue. + */ + if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + return ERR_PTR(-EINVAL); + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); + if (IS_ERR(pool)) + return (void *)pool; + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { + rc = -ENOMEM; + goto err; + } + + optee->invoke_fn = invoke_fn; + + teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->teedev = teedev; + + teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->supp_teedev = teedev; + + rc = tee_device_register(optee->teedev); + if (rc) + goto err; + + rc = tee_device_register(optee->supp_teedev); + if (rc) + goto err; + + mutex_init(&optee->call_queue.mutex); + INIT_LIST_HEAD(&optee->call_queue.waiters); + optee_wait_queue_init(&optee->wait_queue); + optee_supp_init(&optee->supp); + optee->memremaped_shm = memremaped_shm; + optee->pool = pool; + + optee_enable_shm_cache(optee); + + pr_info("initialized driver\n"); + return optee; +err: + if (optee) { + /* + * tee_device_unregister() is safe to call even if the + * devices hasn't been registered with + * tee_device_register() yet. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + kfree(optee); + } + if (pool) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); + return ERR_PTR(rc); +} + +static void optee_remove(struct optee *optee) +{ + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices has to be unregistered before we can free the + * other resources. 
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); +} + +static const struct of_device_id optee_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, +}; + +static struct optee *optee_svc; + +static int __init optee_driver_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + struct optee *optee; + + /* Node is supposed to be below /firmware */ + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, optee_match); + of_node_put(fw_np); + if (!np) + return -ENODEV; + + optee = optee_probe(np); + of_node_put(np); + + if (IS_ERR(optee)) + return PTR_ERR(optee); + + optee_svc = optee; + + return 0; +} +module_init(optee_driver_init); + +static void __exit optee_driver_exit(void) +{ + struct optee *optee = optee_svc; + + optee_svc = NULL; + if (optee) + optee_remove(optee); +} +module_exit(optee_driver_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("OP-TEE driver"); +MODULE_SUPPORTED_DEVICE(""); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h new file mode 100644 index 000000000000..dd7a06ee0462 --- /dev/null +++ b/drivers/tee/optee/optee_msg.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include +#include + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into three sections. + * 1. Formatting of messages. + * 2. Requests from normal world + * 3. Requests from secure world, Remote Procedure Call (RPC), handled by + * tee-supplicant. 
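+ *
+ * In practice a request is made by filling in a struct optee_msg_arg
+ * (part 1) in shared memory and passing its physical address to secure
+ * world; when SMCs are the transport, optee_smc.h defines how that
+ * address and the return values travel in registers.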
+ */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8) + +/* + * The temporary shared memory object is not physically contigous and this + * temp memref is followed by another fragment until the last temp memref + * that doesn't have this bit set. + */ +#define OPTEE_MSG_ATTR_FRAGMENT BIT(9) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. + */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + u64 buf_ptr; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + u64 offs; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_value - opaque value parameter + * + * Value parameters are passed unchecked between normal and secure world. 
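+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION, for example, uses two such value parameters
+ * tagged as meta to carry the Trusted Application UUID and the client
+ * identity (see part 2 below).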
+ */ +struct optee_msg_param_value { + u64 a; + u64 b; + u64 c; +}; + +/** + * struct optee_msg_param - parameter used together with struct optee_msg_arg + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + u64 attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these field holds it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta has to come first. + * + * Temp memref parameters can be fragmented if supported by the Trusted OS + * (when optee_smc.h is bearer of this protocol this is indicated with + * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is + * fragmented then has all but the last fragment the + * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented + * it will still be presented as a single logical memref to the Trusted + * Application. + */ +struct optee_msg_arg { + u32 cmd; + u32 func; + u32 session; + u32 cancel_id; + u32 pad; + u32 ret; + u32 ret_origin; + u32 num_params; + + /* num_params tells the actual number of element in params */ + struct optee_msg_param params[0]; +}; + +/** + * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg + * + * @num_params: Number of parameters embedded in the struct optee_msg_arg + * + * Returns the size of the struct optee_msg_arg together with the number + * of embedded parameters. + */ +#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ + (sizeof(struct optee_msg_arg) + \ + sizeof(struct optee_msg_param) * (num_params)) + +/***************************************************************************** + * Part 2 - requests from normal world + *****************************************************************************/ + +/* + * Return the following UID if using API specified in this file without + * further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, + * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. 
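+ *
+ * A caller recognizes OP-TEE by comparing a0..a3 of the SMC result with
+ * these four words; optee_msg_api_uid_is_optee_api() in core.c does
+ * exactly that.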
+ */ +#define OPTEE_MSG_UID_0 0x384fb3e0 +#define OPTEE_MSG_UID_1 0xe7f811e3 +#define OPTEE_MSG_UID_2 0xaf630002 +#define OPTEE_MSG_UID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 + +/* + * Returns 2.0 if using API specified in this file without further + * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR + * and OPTEE_MSG_REVISION_MINOR + */ +#define OPTEE_MSG_REVISION_MAJOR 2 +#define OPTEE_MSG_REVISION_MINOR 0 +#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in 4 32-bit words in the same way as + * OPTEE_MSG_FUNCID_CALLS_UID described above. + */ +#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 +#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 +#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 +#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in 2 32-bit words in the same way as + * OPTEE_MSG_CALLS_REVISION described above. + */ +#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 + +/* + * Do a secure call with struct optee_msg_arg as argument + * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd + * + * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. + * The first two parameters are tagged as meta, holding two value + * parameters to pass the following information: + * param[0].u.value.a-b uuid of Trusted Application + * param[1].u.value.a-b uuid of Client + * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* + * + * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened + * session to a Trusted Application. struct optee_msg_arg::func is Trusted + * Application function, specific to the Trusted Application. + * + * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to + * Trusted Application. + * + * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. + * + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The + * information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + * [| OPTEE_MSG_ATTR_FRAGMENT] + * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [in] param[0].u.tmem.size size (of first fragment) + * [in] param[0].u.tmem.shm_ref holds shared memory reference + * ... + * The shared memory can optionally be fragmented, temp memrefs can follow + * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set. + * + * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared + * memory reference. 
The information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + * [in] param[0].u.rmem.shm_ref holds shared memory reference + * [in] param[0].u.rmem.offs 0 + * [in] param[0].u.rmem.size 0 + */ +#define OPTEE_MSG_CMD_OPEN_SESSION 0 +#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 +#define OPTEE_MSG_CMD_CLOSE_SESSION 2 +#define OPTEE_MSG_CMD_CANCEL 3 +#define OPTEE_MSG_CMD_REGISTER_SHM 4 +#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 + +/***************************************************************************** + * Part 3 - Requests from secure world, RPC + *****************************************************************************/ + +/* + * All RPC is done with a struct optee_msg_arg as bearer of information, + * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below + * + * RPC communication with tee-supplicant is reversed compared to normal + * client communication desribed above. The supplicant receives requests + * and sends responses. + */ + +/* + * Load a TA into memory, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_LOAD_TA 0 + +/* + * Reserved + */ +#define OPTEE_MSG_RPC_CMD_RPMB 1 + +/* + * File system access, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_FS 2 + +/* + * Get time + * + * Returns number of seconds and nano seconds since the Epoch, + * 1970-01-01 00:00:00 +0000 (UTC). + * + * [out] param[0].u.value.a Number of seconds + * [out] param[0].u.value.b Number of nano seconds. + */ +#define OPTEE_MSG_RPC_CMD_GET_TIME 3 + +/* + * Wait queue primitive, helper for secure world to implement a wait queue. + * + * If secure world need to wait for a secure world mutex it issues a sleep + * request instead of spinning in secure world. Conversely is a wakeup + * request issued when a secure world mutex with a thread waiting thread is + * unlocked. + * + * Waiting on a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP + * [in] param[0].u.value.b wait key + * + * Waking up a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP + * [in] param[0].u.value.b wakeup key + */ +#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] param[0].value .a number of milliseconds to suspend + */ +#define OPTEE_MSG_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * Shared memory can optionally be fragmented, to support that additional + * spare param entries are allocated to make room for eventual fragments. + * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when + * unused. All returned temp memrefs except the last should have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* below + * [in] param[0].u.value.b requested size + * [in] param[0].u.value.c required alignment + * + * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [out] param[0].u.tmem.size size (of first fragment) + * [out] param[0].u.tmem.shm_ref shared memory reference + * ... 
+ * [out] param[n].u.tmem.buf_ptr physical address + * [out] param[n].u.tmem.size size + * [out] param[n].u.tmem.shm_ref shared memory reference (same value + * as in param[n-1].u.tmem.shm_ref) + */ +#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 + +/* + * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* above + * [in] param[0].u.value.b value of shared memory reference + * returned in param[0].u.tmem.shm_ref + * above + */ +#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 + +#endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h new file mode 100644 index 000000000000..c374cd594314 --- /dev/null +++ b/drivers/tee/optee/optee_private.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef OPTEE_PRIVATE_H +#define OPTEE_PRIVATE_H + +#include +#include +#include +#include +#include "optee_msg.h" + +#define OPTEE_MAX_ARG_SIZE 1024 + +/* Some Global Platform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E +#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C + +#define TEEC_ORIGIN_COMMS 0x00000002 + +typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); + +struct optee_call_queue { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head waiters; +}; + +struct optee_wait_queue { + /* Serializes access to this struct */ + struct mutex mu; + struct list_head db; +}; + +/** + * struct optee_supp - supplicant synchronization struct + * @ctx the context of current connected supplicant. 
+ * if !NULL the supplicant device is available for use, + * else busy + * @ctx_mutex: held while accessing @ctx + * @func: supplicant function id to call + * @ret: call return value + * @num_params: number of elements in @param + * @param: parameters for @func + * @req_posted: if true, a request has been posted to the supplicant + * @supp_next_send: if true, next step is for supplicant to send response + * @thrd_mutex: held by the thread doing a request to supplicant + * @supp_mutex: held by supplicant while operating on this struct + * @data_to_supp: supplicant is waiting on this for next request + * @data_from_supp: requesting thread is waiting on this to get the result + */ +struct optee_supp { + struct tee_context *ctx; + /* Serializes access of ctx */ + struct mutex ctx_mutex; + + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + bool req_posted; + bool supp_next_send; + /* Serializes access to this struct for requesting thread */ + struct mutex thrd_mutex; + /* Serializes access to this struct for supplicant threads */ + struct mutex supp_mutex; + struct completion data_to_supp; + struct completion data_from_supp; +}; + +/** + * struct optee - main service struct + * @supp_teedev: supplicant device + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn + * @wait_queue: queue of threads from secure world waiting for a + * secure world sync object + * @supp: supplicant synchronization struct for RPC to supplicant + * @pool: shared memory pool + * @memremaped_shm virtual address of memory in shared memory pool + */ +struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; +}; + +struct optee_session { + struct list_head list_node; + u32 session_id; +}; + +struct optee_context_data { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head sess_list; +}; + +struct optee_rpc_param { + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; +}; + +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param); + +void optee_wait_queue_init(struct optee_wait_queue *wq); +void optee_wait_queue_exit(struct optee_wait_queue *wq); + +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param); + +int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); +int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); +void optee_supp_init(struct optee_supp *supp); +void optee_supp_uninit(struct optee_supp *supp); + +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +int optee_close_session(struct tee_context *ctx, u32 session); +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +void optee_enable_shm_cache(struct optee *optee); +void optee_disable_shm_cache(struct optee *optee); + +int 
optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params); +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params); + +/* + * Small helpers + */ + +static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1) +{ + return (void *)(unsigned long)(((u64)reg0 << 32) | reg1); +} + +static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val) +{ + *reg0 = val >> 32; + *reg1 = val; +} + +#endif /*OPTEE_PRIVATE_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h new file mode 100644 index 000000000000..13b7c98cdf25 --- /dev/null +++ b/drivers/tee/optee/optee_smc.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +#include +#include + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(OPTEE_SMC_FAST_CALL, SMCCC_SMC_32, \ + SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. 
+ */ + +/* + * Function specified by SMC Calling convention + * + * Return one of the following UIDs if using API specified in this file + * without further extentions: + * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 + * see also OPTEE_SMC_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extentions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +struct optee_smc_calls_revision_result { + unsigned long major; + unsigned long minor; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION +#define OPTEE_SMC_CALL_GET_OS_REVISION \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) + +/* + * Call with struct optee_msg_arg as argument + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 Return value, OPTEE_SMC_RETURN_* + * a1-3 Not used + * a4-7 Preserved + * + * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage: + * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT + * a1-3 Preserved + * a4-7 Preserved + * + * RPC return register usage: + * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val) + * a1-2 RPC parameters + * a3-7 Resume information, must be preserved + * + * Possible return values: + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Call completed, result updated in + * the previously supplied struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded, + * try again later. + * OPTEE_SMC_RETURN_EBADADDR Bad physcial pointer to struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg + * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal + * world. 
+ */ +#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG +#define OPTEE_SMC_CALL_WITH_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) + +/* + * Get Shared Memory Config + * + * Returns the Secure/Non-secure shared memory config. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of SHM + * a2 Size of of SHM + * a3 Cache settings of memory, as defined by the + * OPTEE_SMC_SHM_* values above + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7 +#define OPTEE_SMC_GET_SHM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG) + +struct optee_smc_get_shm_config_result { + unsigned long status; + unsigned long start; + unsigned long size; + unsigned long settings; +}; + +/* + * Exchanges capabilities between normal world and secure world + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES + * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_* + * a2-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + * + * Error return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + */ +/* Normal world works as a uniprocessor system */ +#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0) +/* Secure world has reserved shared memory for normal world to use */ +#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) +/* Secure world can communicate via previously unregistered shared memory */ +#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) +#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 +#define OPTEE_SMC_EXCHANGE_CAPABILITIES \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) + +struct optee_smc_exchange_capabilities_result { + unsigned long status; + unsigned long capabilities; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Disable and empties cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns one shared memory reference to free. To disable the + * cache and free all cached objects this function has to be called until + * it returns OPTEE_SMC_RETURN_ENOTAVAIL. 
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Upper 32bit of a 64bit Shared memory cookie + * a2 Lower 32bit of a 64bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10 +#define OPTEE_SMC_DISABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE) + +struct optee_smc_disable_shm_cache_result { + unsigned long status; + unsigned long shm_upper32; + unsigned long shm_lower32; + unsigned long reserved0; +}; + +/* + * Enable cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If + * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11 +#define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + +/* + * Resume from RPC (for example after processing an IRQ) + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC + * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned + * OPTEE_SMC_RETURN_RPC in a0 + * + * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above. + * + * Possible return values + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Original call completed, result + * updated in the previously supplied. + * struct optee_msg_arg + * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal + * world. + * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume + * information was corrupt. + */ +#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3 +#define OPTEE_SMC_CALL_RETURN_FROM_RPC \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC) + +#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF + +#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \ + ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK) + +#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX) + +/* + * Allocate memory for RPC parameter passing. The memory is used to hold a + * struct optee_msg_arg. + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. 
+ * a2 Lower 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver an IRQ in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_IRQ + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_IRQ 4 +#define OPTEE_SMC_RETURN_RPC_IRQ \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_CMD 5 +#define OPTEE_SMC_RETURN_RPC_CMD \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD) + +/* Returned in a0 */ +#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF + +/* Returned in a0 only from Trusted OS functions */ +#define OPTEE_SMC_RETURN_OK 0x0 +#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1 +#define OPTEE_SMC_RETURN_EBUSY 0x2 +#define OPTEE_SMC_RETURN_ERESUME 0x3 +#define OPTEE_SMC_RETURN_EBADADDR 0x4 +#define OPTEE_SMC_RETURN_EBADCMD 0x5 +#define OPTEE_SMC_RETURN_ENOMEM 0x6 +#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7 +#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret)) + +static inline bool __optee_smc_return_is_rpc(u32 ret) +{ + return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION && + (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == + OPTEE_SMC_RETURN_RPC_PREFIX; +} + +#endif /* OPTEE_SMC_H */ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c new file mode 100644 index 000000000000..8814eca06021 --- /dev/null +++ b/drivers/tee/optee/rpc.c @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include "optee_private.h" +#include "optee_smc.h" + +struct wq_entry { + struct list_head link; + struct completion c; + u32 key; +}; + +void optee_wait_queue_init(struct optee_wait_queue *priv) +{ + mutex_init(&priv->mu); + INIT_LIST_HEAD(&priv->db); +} + +void optee_wait_queue_exit(struct optee_wait_queue *priv) +{ + mutex_destroy(&priv->mu); +} + +static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) +{ + struct timespec64 ts; + + if (arg->num_params != 1) + goto bad; + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) + goto bad; + + getnstimeofday64(&ts); + arg->params[0].u.value.a = ts.tv_sec; + arg->params[0].u.value.b = ts.tv_nsec; + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w; + + mutex_lock(&wq->mu); + + list_for_each_entry(w, &wq->db, link) + if (w->key == key) + goto out; + + w = kmalloc(sizeof(*w), GFP_KERNEL); + if (w) { + init_completion(&w->c); + w->key = key; + list_add_tail(&w->link, &wq->db); + } +out: + mutex_unlock(&wq->mu); + return w; +} + +static void wq_sleep(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) { + wait_for_completion(&w->c); + mutex_lock(&wq->mu); + list_del(&w->link); + mutex_unlock(&wq->mu); + kfree(w); + } +} + +static void wq_wakeup(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) + complete(&w->c); +} + +static void handle_rpc_func_cmd_wq(struct optee *optee, + struct optee_msg_arg *arg) +{ + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + switch (arg->params[0].u.value.a) { + case 
OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; + case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) +{ + u32 msec_to_wait; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + msec_to_wait = arg->params[0].u.value.a; + + /* set task's state to interruptible sleep */ + set_current_state(TASK_INTERRUPTIBLE); + + /* take a nap */ + msleep(msec_to_wait); + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_supp_cmd(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_param *params; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + params = kmalloc_array(arg->num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (optee_from_msg_param(params, arg->num_params, arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + + if (optee_to_msg_param(arg->params, arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +out: + kfree(params); +} + +static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) +{ + u32 ret; + struct tee_param param; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + + ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + + mutex_lock(&optee->supp.ctx_mutex); + /* Increases count as secure world doesn't have a reference */ + shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); + mutex_unlock(&optee->supp.ctx_mutex); + return shm; +} + +static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + phys_addr_t pa; + struct tee_shm *shm; + size_t sz; + size_t n; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + for (n = 1; n < arg->num_params; n++) { + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + } + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + if (IS_ERR(shm)) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (tee_shm_get_pa(shm, 0, &pa)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto bad; + } + + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + arg->params[0].u.tmem.buf_ptr = pa; + arg->params[0].u.tmem.size = sz; + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + arg->ret = TEEC_SUCCESS; + return; +bad: + tee_shm_free(shm); +} + +static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_param param; + + param.attr = 
TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + + /* + * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure + * world has released its reference. + * + * It's better to do this before sending the request to supplicant + * as we'd like to let the process doing the initial allocation to + * do release the last reference too in order to avoid stacking + * many pending fput() on the client process. This could otherwise + * happen if secure world does many allocate and free in a single + * invoke. + */ + tee_shm_put(shm); + + optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); +} + +static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_shm *shm; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->num_params != 1 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + arg->ret = TEEC_SUCCESS; +} + +static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + struct tee_shm *shm) +{ + struct optee_msg_arg *arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); + return; + } + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; + case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; + case OPTEE_MSG_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + handle_rpc_func_cmd_shm_alloc(ctx, arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; + default: + handle_rpc_supp_cmd(ctx, arg); + } +} + +/** + * optee_handle_rpc() - handle RPC from secure world + * @ctx: context doing the RPC + * @param: value of registers for the RPC + * + * Result of RPC is written back into @param. + */ +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + phys_addr_t pa; + + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { + case OPTEE_SMC_RPC_FUNC_ALLOC: + shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { + reg_pair_from_64(¶m->a1, ¶m->a2, pa); + reg_pair_from_64(¶m->a4, ¶m->a5, + (unsigned long)shm); + } else { + param->a1 = 0; + param->a2 = 0; + param->a4 = 0; + param->a5 = 0; + } + break; + case OPTEE_SMC_RPC_FUNC_FREE: + shm = reg_pair_to_ptr(param->a1, param->a2); + tee_shm_free(shm); + break; + case OPTEE_SMC_RPC_FUNC_IRQ: + /* + * An IRQ was raised while secure world was executing, + * since all IRQs are handled in Linux a dummy RPC is + * performed to let Linux take the IRQ through the normal + * vector. 
+ */ + break; + case OPTEE_SMC_RPC_FUNC_CMD: + shm = reg_pair_to_ptr(param->a1, param->a2); + handle_rpc_func_cmd(ctx, optee, shm); + break; + default: + pr_warn("Unknown RPC func 0x%x\n", + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); + break; + } + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; +} diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c new file mode 100644 index 000000000000..b4ea0678a436 --- /dev/null +++ b/drivers/tee/optee/supp.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include "optee_private.h" + +void optee_supp_init(struct optee_supp *supp) +{ + memset(supp, 0, sizeof(*supp)); + mutex_init(&supp->ctx_mutex); + mutex_init(&supp->thrd_mutex); + mutex_init(&supp->supp_mutex); + init_completion(&supp->data_to_supp); + init_completion(&supp->data_from_supp); +} + +void optee_supp_uninit(struct optee_supp *supp) +{ + mutex_destroy(&supp->ctx_mutex); + mutex_destroy(&supp->thrd_mutex); + mutex_destroy(&supp->supp_mutex); +} + +/** + * optee_supp_thrd_req() - request service from supplicant + * @ctx: context doing the request + * @func: function requested + * @num_params: number of elements in @param array + * @param: parameters for function + * + * Returns result of operation to be passed to secure world + */ +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param) +{ + bool interruptable; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_supp *supp = &optee->supp; + u32 ret; + + /* + * Other threads blocks here until we've copied our answer from + * supplicant. + */ + while (mutex_lock_interruptible(&supp->thrd_mutex)) { + /* See comment below on when the RPC can be interrupted. */ + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + return TEEC_ERROR_COMMUNICATION; + } + + /* + * We have exclusive access now since the supplicant at this + * point is either doing a + * wait_for_completion_interruptible(&supp->data_to_supp) or is in + * userspace still about to do the ioctl() to enter + * optee_supp_recv() below. + */ + + supp->func = func; + supp->num_params = num_params; + supp->param = param; + supp->req_posted = true; + + /* Let supplicant get the data */ + complete(&supp->data_to_supp); + + /* + * Wait for supplicant to process and return result, once we've + * returned from wait_for_completion(data_from_supp) we have + * exclusive access again. + */ + while (wait_for_completion_interruptible(&supp->data_from_supp)) { + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + if (interruptable) { + /* + * There's no supplicant available and since the + * supp->ctx_mutex currently is held none can + * become available until the mutex released + * again. + * + * Interrupting an RPC to supplicant is only + * allowed as a way of slightly improving the user + * experience in case the supplicant hasn't been + * started yet. 
During normal operation the supplicant + * will serve all requests in a timely manner and + * interrupting then wouldn't make sense. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + init_completion(&supp->data_to_supp); + } + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + break; + } + + ret = supp->ret; + supp->param = NULL; + supp->req_posted = false; + + /* We're done, let someone else talk to the supplicant now. */ + mutex_unlock(&supp->thrd_mutex); + + return ret; +} + +/** + * optee_supp_recv() - receive request for supplicant + * @ctx: context receiving the request + * @func: requested function in supplicant + * @num_params: number of elements allocated in @param, updated with number + * used elements + * @param: space for parameters for @func + * + * Returns 0 on success or <0 on failure + */ +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + int rc; + + /* + * In case two threads in one supplicant is calling this function + * simultaneously we need to protect the data with a mutex which + * we'll release before returning. + */ + mutex_lock(&supp->supp_mutex); + + if (supp->supp_next_send) { + /* + * optee_supp_recv() has been called again without + * a optee_supp_send() in between. Supplicant has + * probably been restarted before it was able to + * write back last result. Abort last request and + * wait for a new. + */ + if (supp->req_posted) { + supp->ret = TEEC_ERROR_COMMUNICATION; + supp->supp_next_send = false; + complete(&supp->data_from_supp); + } + } + + /* + * This is where supplicant will be hanging most of the + * time, let's make this interruptable so we can easily + * restart supplicant if needed. + */ + if (wait_for_completion_interruptible(&supp->data_to_supp)) { + rc = -ERESTARTSYS; + goto out; + } + + /* We have exlusive access to the data */ + + if (*num_params < supp->num_params) { + /* + * Not enough room for parameters, tell supplicant + * it failed and abort last request. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + rc = -EINVAL; + complete(&supp->data_from_supp); + goto out; + } + + *func = supp->func; + *num_params = supp->num_params; + memcpy(param, supp->param, + sizeof(struct tee_param) * supp->num_params); + + /* Allow optee_supp_send() below to do its work */ + supp->supp_next_send = true; + + rc = 0; +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} + +/** + * optee_supp_send() - send result of request from supplicant + * @ctx: context sending result + * @ret: return value of request + * @num_params: number of parameters returned + * @param: returned parameters + * + * Returns 0 on success or <0 on failure. + */ +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + size_t n; + int rc = 0; + + /* + * We still have exclusive access to the data since that's how we + * left it when returning from optee_supp_read(). + */ + + /* See comment on mutex in optee_supp_read() above */ + mutex_lock(&supp->supp_mutex); + + if (!supp->supp_next_send) { + /* + * Something strange is going on, supplicant shouldn't + * enter optee_supp_send() in this state + */ + rc = -ENOENT; + goto out; + } + + if (num_params != supp->num_params) { + /* + * Something is wrong, let supplicant restart. 
Next call to + * optee_supp_recv() will give an error to the requesting + * thread and release it. + */ + rc = -EINVAL; + goto out; + } + + /* Update out and in/out parameters */ + for (n = 0; n < num_params; n++) { + struct tee_param *p = supp->param + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + p->u.value.a = param[n].u.value.a; + p->u.value.b = param[n].u.value.b; + p->u.value.c = param[n].u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + p->u.memref.size = param[n].u.memref.size; + break; + default: + break; + } + } + supp->ret = ret; + + /* Allow optee_supp_recv() above to do its work */ + supp->supp_next_send = false; + + /* Let the requesting thread continue */ + complete(&supp->data_from_supp); +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} From 9329ea145a6e1c142b244243199df26faccd6517 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 1 Jun 2015 16:15:25 +0200 Subject: [PATCH 284/312] Documentation: tee subsystem and op-tee driver Change-Id: Ib5a195b501598bd6c7b869849938ab61797309ee Acked-by: Andreas Dannenberg Signed-off-by: Jens Wiklander (cherry picked from commit 6a6e77006fcdba89708214556c6d560323e850fc) Signed-off-by: Victor Chong --- Documentation/00-INDEX | 2 + Documentation/tee.txt | 118 +++++++++++++++++++++++++++++++++++++++++ MAINTAINERS | 1 + 3 files changed, 121 insertions(+) create mode 100644 Documentation/tee.txt diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index cd077ca0e1b8..bd3f803a4e06 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -435,6 +435,8 @@ sysrq.txt - info on the magic SysRq key. target/ - directory with info on generating TCM v4 fabric .ko modules +tee.txt + - info on the TEE subsystem and drivers this_cpu_ops.txt - List rationale behind and the way to use this_cpu operations. thermal/ diff --git a/Documentation/tee.txt b/Documentation/tee.txt new file mode 100644 index 000000000000..718599357596 --- /dev/null +++ b/Documentation/tee.txt @@ -0,0 +1,118 @@ +TEE subsystem +This document describes the TEE subsystem in Linux. + +A TEE (Trusted Execution Environment) is a trusted OS running in some +secure environment, for example, TrustZone on ARM CPUs, or a separate +secure co-processor etc. A TEE driver handles the details needed to +communicate with the TEE. + +This subsystem deals with: + +- Registration of TEE drivers + +- Managing shared memory between Linux and the TEE + +- Providing a generic API to the TEE + +The TEE interface +================= + +include/uapi/linux/tee.h defines the generic interface to a TEE. + +User space (the client) connects to the driver by opening /dev/tee[0-9]* or +/dev/teepriv[0-9]*. + +- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor + which user space can mmap. When user space doesn't need the file + descriptor any more, it should be closed. When shared memory isn't needed + any longer it should be unmapped with munmap() to allow the reuse of + memory. + +- TEE_IOC_VERSION lets user space know which TEE this driver handles and + the its capabilities. + +- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. + +- TEE_IOC_INVOKE invokes a function in a Trusted Application. + +- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. + +- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. 
+ +There are two classes of clients, normal clients and supplicants. The latter is +a helper process for the TEE to access resources in Linux, for example file +system access. A normal client opens /dev/tee[0-9]* and a supplicant opens +/dev/teepriv[0-9]. + +Much of the communication between clients and the TEE is opaque to the +driver. The main job for the driver is to receive requests from the +clients, forward them to the TEE and send back the results. In the case of +supplicants the communication goes in the other direction, the TEE sends +requests to the supplicant which then sends back the result. + +OP-TEE driver +============= + +The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM +TrustZone based OP-TEE solution that is supported. + +Lowest level of communication with OP-TEE builds on ARM SMC Calling +Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface +[3] used internally by the driver. Stacked on top of that is OP-TEE Message +Protocol [4]. + +OP-TEE SMC interface provides the basic functions required by SMCCC and some +additional functions specific for OP-TEE. The most interesting functions are: + +- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information + which is then returned by TEE_IOC_VERSION + +- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used + to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a + separate secure co-processor. + +- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol + +- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory + range to used for shared memory between Linux and OP-TEE. + +The GlobalPlatform TEE Client API [5] is implemented on top of the generic +TEE API. + +Picture of the relationship between the different components in the +OP-TEE architecture. + + User space Kernel Secure world + ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ + +--------+ +-------------+ + | Client | | Trusted | + +--------+ | Application | + /\ +-------------+ + || +----------+ /\ + || |tee- | || + || |supplicant| \/ + || +----------+ +-------------+ + \/ /\ | TEE Internal| + +-------+ || | API | + + TEE | || +--------+--------+ +-------------+ + | Client| || | TEE | OP-TEE | | OP-TEE | + | API | \/ | subsys | driver | | Trusted OS | + +-------+----------------+----+-------+----+-----------+-------------+ + | Generic TEE API | | OP-TEE MSG | + | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | + +-----------------------------+ +------------------------------+ + +RPC (Remote Procedure Call) are requests from secure world to kernel driver +or tee-supplicant. An RPC is identified by a special range of SMCCC return +values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the +kernel are handled by the kernel driver. Other RPC messages will be forwarded to +tee-supplicant without further involvement of the driver, except switching +shared memory buffer representation. + +References: +[1] https://github.com/OP-TEE/optee_os +[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html +[3] drivers/tee/optee/optee_smc.h +[4] drivers/tee/optee/optee_msg.h +[5] http://www.globalplatform.org/specificationsdevice.asp look for + "TEE Client API Specification v1.0" and click download. 
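
To make the layering concrete, a minimal client written against the
GlobalPlatform TEE Client API [5] might look like the sketch below. It assumes
the tee_client_api.h header and libteec shipped with the OP-TEE client
library; the Trusted Application UUID and the command ID are placeholders and
not defined by this patch:

    /*
     * Sketch of a GP TEE Client API user. The TEEC_* calls are expected to
     * map onto the generic TEE_IOC_* ioctls described earlier.
     */
    #include <stdio.h>
    #include <tee_client_api.h>     /* from the OP-TEE client library */

    /* Placeholder UUID of some Trusted Application. */
    static const TEEC_UUID ta_uuid = {
            0x8aaaf200, 0x2450, 0x11e4,
            { 0xab, 0xe2, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b }
    };

    int main(void)
    {
            TEEC_Context ctx;
            TEEC_Session sess;
            TEEC_Operation op = { 0 };
            uint32_t origin;
            TEEC_Result res;

            /* Opens one of the /dev/tee[0-9]* nodes underneath */
            res = TEEC_InitializeContext(NULL, &ctx);
            if (res != TEEC_SUCCESS)
                    return 1;

            /* Corresponds to TEE_IOC_OPEN_SESSION */
            res = TEEC_OpenSession(&ctx, &sess, &ta_uuid, TEEC_LOGIN_PUBLIC,
                                   NULL, NULL, &origin);
            if (res == TEEC_SUCCESS) {
                    op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_INOUT,
                                                     TEEC_NONE, TEEC_NONE,
                                                     TEEC_NONE);
                    op.params[0].value.a = 42;
                    /* Command ID 0 is a placeholder understood by the TA */
                    res = TEEC_InvokeCommand(&sess, 0, &op, &origin);
                    printf("res 0x%x, value.a %u\n",
                           res, op.params[0].value.a);
                    TEEC_CloseSession(&sess);   /* TEE_IOC_CLOSE_SESSION */
            }

            TEEC_FinalizeContext(&ctx);
            return 0;
    }

Each TEEC_* call is served by the TEE subsystem and the OP-TEE driver through
the IOCTL interface shown in the picture above, with the library doing the
parameter marshalling.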
diff --git a/MAINTAINERS b/MAINTAINERS index 6e070bed5f58..e1e620dd3f78 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9372,6 +9372,7 @@ S: Maintained F: include/linux/tee_drv.h F: include/uapi/linux/tee.h F: drivers/tee/ +F: Documentation/tee.txt THUNDERBOLT DRIVER M: Andreas Noever From 27248d2fa77f028f639f2cbac621447cc94dbbb5 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 15 Dec 2015 15:41:37 -0800 Subject: [PATCH 285/312] netlink: add a start callback for starting a netlink dump commit fc9e50f5a5a4e1fa9ba2756f745a13e693cf6a06 upstream. The start callback allows the caller to set up a context for the dump callbacks. Presumably, the context can then be destroyed in the done callback. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- include/linux/netlink.h | 2 ++ include/net/genetlink.h | 2 ++ net/netlink/af_netlink.c | 4 ++++ net/netlink/genetlink.c | 16 ++++++++++++++++ 4 files changed, 24 insertions(+) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 639e9b8b0e4d..0b41959aab9f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); @@ -153,6 +154,7 @@ struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); struct netlink_dump_control { + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *skb, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 1b6b6dcb018d..43c0e771f417 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) * @flags: flags * @policy: attribute validation policy * @doit: standard command callback + * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps * @ops_list: operations list @@ -122,6 +123,7 @@ struct genl_ops { const struct nla_policy *policy; int (*doit)(struct sk_buff *skb, struct genl_info *info); + int (*start)(struct netlink_callback *cb); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9ecdd61c6463..a87afc4f3c91 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2203,6 +2203,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb = &nlk->cb; memset(cb, 0, sizeof(*cb)); + cb->start = control->start; cb->dump = control->dump; cb->done = control->done; cb->nlh = nlh; @@ -2216,6 +2217,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, mutex_unlock(nlk->cb_mutex); + if (cb->start) + cb->start(cb); + ret = netlink_dump(sk); sock_put(sk); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index bc0e504f33a6..8e63662c6fb0 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -513,6 +513,20 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, } EXPORT_SYMBOL(genlmsg_put); +static int genl_lock_start(struct netlink_callback *cb) +{ + /* our ops are always const - netlink API doesn't propagate that */ + const struct genl_ops *ops = 
cb->data; + int rc = 0; + + if (ops->start) { + genl_lock(); + rc = ops->start(cb); + genl_unlock(); + } + return rc; +} + static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { /* our ops are always const - netlink API doesn't propagate that */ @@ -577,6 +591,7 @@ static int genl_family_rcv_msg(struct genl_family *family, .module = family->module, /* we have const, but the netlink API doesn't */ .data = (void *)ops, + .start = genl_lock_start, .dump = genl_lock_dumpit, .done = genl_lock_done, }; @@ -588,6 +603,7 @@ static int genl_family_rcv_msg(struct genl_family *family, } else { struct netlink_dump_control c = { .module = family->module, + .start = ops->start, .dump = ops->dumpit, .done = ops->done, }; From b377c453b3631531679cbe594b7d28c5ecd82cea Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 19 Oct 2017 20:51:10 +0800 Subject: [PATCH 286/312] ipsec: Fix aborted xfrm policy dump crash commit 1137b5e2529a8f5ca8ee709288ecba3e68044df2 upstream. An independent security researcher, Mohamed Ghannam, has reported this vulnerability to Beyond Security's SecuriTeam Secure Disclosure program. The xfrm_dump_policy_done function expects xfrm_dump_policy to have been called at least once or it will crash. This can be triggered if a dump fails because the target socket's receive buffer is full. This patch fixes it by using the cb->start mechanism to ensure that the initialisation is always done regardless of the buffer situation. Fixes: 12a169e7d8f4 ("ipsec: Put dumpers on the dump list") Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_user.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7a5a64e70b4d..76944a4839a5 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1652,32 +1652,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr static int xfrm_dump_policy_done(struct netlink_callback *cb) { - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct net *net = sock_net(cb->skb->sk); xfrm_policy_walk_done(walk, net); return 0; } +static int xfrm_dump_policy_start(struct netlink_callback *cb) +{ + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; + + BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); + + xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); + return 0; +} + static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct xfrm_dump_info info; - BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > - sizeof(cb->args) - sizeof(cb->args[0])); - info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; - if (!cb->args[0]) { - cb->args[0] = 1; - xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); - } - (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); return skb->len; @@ -2415,6 +2417,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { static const struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct 
netlink_callback *); const struct nla_policy *nla_pol; @@ -2428,6 +2431,7 @@ static const struct xfrm_link { [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, + .start = xfrm_dump_policy_start, .dump = xfrm_dump_policy, .done = xfrm_dump_policy_done }, [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, @@ -2479,6 +2483,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct netlink_dump_control c = { + .start = link->start, .dump = link->dump, .done = link->done, }; From 87e2bd898d3a79a8c609f183180adac47879a2a4 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 27 Nov 2015 21:09:31 +0000 Subject: [PATCH 287/312] x86/mm/pat: Ensure cpa->pfn only contains page frame numbers commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream. The x86 pageattr code is confused about the data that is stored in cpa->pfn, sometimes it's treated as a page frame number, sometimes it's treated as an unshifted physical address, and in one place it's treated as a pte. The result of this is that the mapping functions do not map the intended physical address. This isn't a problem in practice because most of the addresses we're mapping in the EFI code paths are already mapped in 'trampoline_pgd' and so the pageattr mapping functions don't actually do anything in this case. But when we move to using a separate page table for the EFI runtime this will be an issue. Signed-off-by: Matt Fleming Reviewed-by: Borislav Petkov Acked-by: Borislav Petkov Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sai Praneeth Prakhya Cc: Thomas Gleixner Cc: Toshi Kani Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1448658575-17029-3-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar Cc: "Ghannam, Yazen" Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pageattr.c | 17 ++++++----------- arch/x86/platform/efi/efi_64.c | 16 ++++++++++------ 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index b599a780a5a9..a0fe62e3f4a3 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data *cpa, pte = pte_offset_kernel(pmd, start); while (num_pages-- && start < end) { - - /* deal with the NX bit */ - if (!(pgprot_val(pgprot) & _PAGE_NX)) - cpa->pfn &= ~_PAGE_NX; - - set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); + set_pte(pte, pfn_pte(cpa->pfn, pgprot)); start += PAGE_SIZE; - cpa->pfn += PAGE_SIZE; + cpa->pfn++; pte++; } } @@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data *cpa, pmd = pmd_offset(pud, start); - set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | + set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | massage_pgprot(pmd_pgprot))); start += PMD_SIZE; - cpa->pfn += PMD_SIZE; + cpa->pfn += PMD_SIZE >> PAGE_SHIFT; cur_pages += PMD_SIZE >> PAGE_SHIFT; } @@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, * Map everything starting from the Gb boundary, possibly with 1G pages */ while (end - start >= PUD_SIZE) { - set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | + set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | massage_pgprot(pud_pgprot))); start += PUD_SIZE; - cpa->pfn += PUD_SIZE; + cpa->pfn += 
PUD_SIZE >> PAGE_SHIFT; cur_pages += PUD_SIZE >> PAGE_SHIFT; pud++; } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a0ac0f9c307f..5aa186db59e3 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void) int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) { - unsigned long text; + unsigned long pfn, text; struct page *page; unsigned npages; pgd_t *pgd; @@ -160,7 +160,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) * and ident-map those pages containing the map before calling * phys_efi_set_virtual_address_map(). */ - if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) { + pfn = pa_memmap >> PAGE_SHIFT; + if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) { pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap); return 1; } @@ -185,8 +186,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) npages = (_end - _text) >> PAGE_SHIFT; text = __pa(_text); + pfn = text >> PAGE_SHIFT; - if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) { + if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) { pr_err("Failed to map kernel text 1:1\n"); return 1; } @@ -204,12 +206,14 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) static void __init __map_region(efi_memory_desc_t *md, u64 va) { pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - unsigned long pf = 0; + unsigned long flags = 0; + unsigned long pfn; if (!(md->attribute & EFI_MEMORY_WB)) - pf |= _PAGE_PCD; + flags |= _PAGE_PCD; - if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) + pfn = md->phys_addr >> PAGE_SHIFT; + if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags)) pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", md->phys_addr, va); } From b73adb60852034d84092d123b323196ca42529cd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 27 Nov 2015 21:09:33 +0000 Subject: [PATCH 288/312] x86/efi: Hoist page table switching code into efi_call_virt() commit c9f2a9a65e4855b74d92cdad688f6ee4a1a323ff upstream. This change is a prerequisite for pending patches that switch to a dedicated EFI page table, instead of using 'trampoline_pgd' which shares PGD entries with 'swapper_pg_dir'. The pending patches make it impossible to dereference the runtime service function pointer without first switching %cr3. It's true that we now have duplicated switching code in efi_call_virt() and efi_call_phys_{prolog,epilog}() but we are sacrificing code duplication for a little more clarity and the ease of writing the page table switching code in C instead of asm. Signed-off-by: Matt Fleming Reviewed-by: Borislav Petkov Acked-by: Borislav Petkov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Jones Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sai Praneeth Prakhya Cc: Stephen Smalley Cc: Thomas Gleixner Cc: Toshi Kani Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1448658575-17029-5-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar Cc: "Ghannam, Yazen" Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/efi.h | 25 +++++++++++++++++ arch/x86/platform/efi/efi_64.c | 24 ++++++++-------- arch/x86/platform/efi/efi_stub_64.S | 43 ----------------------------- 3 files changed, 36 insertions(+), 56 deletions(-) diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 0010c78c4998..347eeacb06a8 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -3,6 +3,7 @@ #include #include +#include /* * We map the EFI regions needed for runtime services non-contiguously, @@ -64,6 +65,17 @@ extern u64 asmlinkage efi_call(void *fp, ...); #define efi_call_phys(f, args...) efi_call((f), args) +/* + * Scratch space used for switching the pagetable in the EFI stub + */ +struct efi_scratch { + u64 r15; + u64 prev_cr3; + pgd_t *efi_pgt; + bool use_pgd; + u64 phys_stack; +} __packed; + #define efi_call_virt(f, ...) \ ({ \ efi_status_t __s; \ @@ -71,7 +83,20 @@ extern u64 asmlinkage efi_call(void *fp, ...); efi_sync_low_kernel_mappings(); \ preempt_disable(); \ __kernel_fpu_begin(); \ + \ + if (efi_scratch.use_pgd) { \ + efi_scratch.prev_cr3 = read_cr3(); \ + write_cr3((unsigned long)efi_scratch.efi_pgt); \ + __flush_tlb_all(); \ + } \ + \ __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \ + \ + if (efi_scratch.use_pgd) { \ + write_cr3(efi_scratch.prev_cr3); \ + __flush_tlb_all(); \ + } \ + \ __kernel_fpu_end(); \ preempt_enable(); \ __s; \ diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 5aa186db59e3..be8a32416e12 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -47,16 +47,7 @@ */ static u64 efi_va = EFI_VA_START; -/* - * Scratch space used for switching the pagetable in the EFI stub - */ -struct efi_scratch { - u64 r15; - u64 prev_cr3; - pgd_t *efi_pgt; - bool use_pgd; - u64 phys_stack; -} __packed; +struct efi_scratch efi_scratch; static void __init early_code_mapping_set_exec(int executable) { @@ -83,8 +74,11 @@ pgd_t * __init efi_call_phys_prolog(void) int pgd; int n_pgds; - if (!efi_enabled(EFI_OLD_MEMMAP)) - return NULL; + if (!efi_enabled(EFI_OLD_MEMMAP)) { + save_pgd = (pgd_t *)read_cr3(); + write_cr3((unsigned long)efi_scratch.efi_pgt); + goto out; + } early_code_mapping_set_exec(1); @@ -96,6 +90,7 @@ pgd_t * __init efi_call_phys_prolog(void) vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); } +out: __flush_tlb_all(); return save_pgd; @@ -109,8 +104,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) int pgd_idx; int nr_pgds; - if (!save_pgd) + if (!efi_enabled(EFI_OLD_MEMMAP)) { + write_cr3((unsigned long)save_pgd); + __flush_tlb_all(); return; + } nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 86d0f9e08dd9..32020cb8bb08 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -38,41 +38,6 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp - /* stolen from gcc */ - .macro FLUSH_TLB_ALL - movq %r15, efi_scratch(%rip) - movq %r14, efi_scratch+8(%rip) - movq %cr4, %r15 - movq %r15, %r14 - andb $0x7f, %r14b - movq %r14, %cr4 - movq %r15, %cr4 - 
movq efi_scratch+8(%rip), %r14 - movq efi_scratch(%rip), %r15 - .endm - - .macro SWITCH_PGT - cmpb $0, efi_scratch+24(%rip) - je 1f - movq %r15, efi_scratch(%rip) # r15 - # save previous CR3 - movq %cr3, %r15 - movq %r15, efi_scratch+8(%rip) # prev_cr3 - movq efi_scratch+16(%rip), %r15 # EFI pgt - movq %r15, %cr3 - 1: - .endm - - .macro RESTORE_PGT - cmpb $0, efi_scratch+24(%rip) - je 2f - movq efi_scratch+8(%rip), %r15 - movq %r15, %cr3 - movq efi_scratch(%rip), %r15 - FLUSH_TLB_ALL - 2: - .endm - ENTRY(efi_call) SAVE_XMM mov (%rsp), %rax @@ -83,16 +48,8 @@ ENTRY(efi_call) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx - SWITCH_PGT call *%rdi - RESTORE_PGT addq $48, %rsp RESTORE_XMM ret ENDPROC(efi_call) - - .data -ENTRY(efi_scratch) - .fill 3,8,0 - .byte 0 - .quad 0 From 36e0f05afd4e1d09fd47936761a502aedbc50649 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 27 Nov 2015 21:09:34 +0000 Subject: [PATCH 289/312] x86/efi: Build our own page table structures commit 67a9108ed4313b85a9c53406d80dc1ae3f8c3e36 upstream. With commit e1a58320a38d ("x86/mm: Warn on W^X mappings") all users booting on 64-bit UEFI machines see the following warning, ------------[ cut here ]------------ WARNING: CPU: 7 PID: 1 at arch/x86/mm/dump_pagetables.c:225 note_page+0x5dc/0x780() x86/mm: Found insecure W+X mapping at address ffff88000005f000/0xffff88000005f000 ... x86/mm: Checked W+X mappings: FAILED, 165660 W+X pages found. ... This is caused by mapping EFI regions with RWX permissions. There isn't much we can do to restrict the permissions for these regions due to the way the firmware toolchains mix code and data, but we can at least isolate these mappings so that they do not appear in the regular kernel page tables. In commit d2f7cbe7b26a ("x86/efi: Runtime services virtual mapping") we started using 'trampoline_pgd' to map the EFI regions because there was an existing identity mapping there which we use during the SetVirtualAddressMap() call and for broken firmware that accesses those addresses. But 'trampoline_pgd' shares some PGD entries with 'swapper_pg_dir' and does not provide the isolation we require. Notably the virtual address for __START_KERNEL_map and MODULES_START are mapped by the same PGD entry so we need to be more careful when copying changes over in efi_sync_low_kernel_mappings(). This patch doesn't go the full mile, we still want to share some PGD entries with 'swapper_pg_dir'. Having completely separate page tables brings its own issues such as synchronising new mappings after memory hotplug and module loading. Sharing also keeps memory usage down. Signed-off-by: Matt Fleming Reviewed-by: Borislav Petkov Acked-by: Borislav Petkov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Jones Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sai Praneeth Prakhya Cc: Stephen Smalley Cc: Thomas Gleixner Cc: Toshi Kani Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1448658575-17029-6-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar Cc: "Ghannam, Yazen" Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/efi.h | 1 + arch/x86/platform/efi/efi.c | 39 +++++--------- arch/x86/platform/efi/efi_32.c | 5 ++ arch/x86/platform/efi/efi_64.c | 97 ++++++++++++++++++++++++++++------ 4 files changed, 102 insertions(+), 40 deletions(-) diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 347eeacb06a8..8fd9e637629a 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -136,6 +136,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size); extern void __init efi_map_region(efi_memory_desc_t *md); extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); +extern int __init efi_alloc_page_tables(void); extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages); extern void __init old_map_region(efi_memory_desc_t *md); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ad285404ea7f..3c1f3cd7b2ba 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -869,7 +869,7 @@ static void __init kexec_enter_virtual_mode(void) * This function will switch the EFI runtime services to virtual mode. * Essentially, we look through the EFI memmap and map every region that * has the runtime attribute bit set in its memory descriptor into the - * ->trampoline_pgd page table using a top-down VA allocation scheme. + * efi_pgd page table. * * The old method which used to update that memory descriptor with the * virtual address obtained from ioremap() is still supported when the @@ -879,8 +879,8 @@ static void __init kexec_enter_virtual_mode(void) * * The new method does a pagetable switch in a preemption-safe manner * so that we're in a different address space when calling a runtime - * function. For function arguments passing we do copy the PGDs of the - * kernel page table into ->trampoline_pgd prior to each call. + * function. For function arguments passing we do copy the PUDs of the + * kernel page table into efi_pgd prior to each call. * * Specially for kexec boot, efi runtime maps in previous kernel should * be passed in via setup_data. In that case runtime ranges will be mapped @@ -895,6 +895,12 @@ static void __init __efi_enter_virtual_mode(void) efi.systab = NULL; + if (efi_alloc_page_tables()) { + pr_err("Failed to allocate EFI page tables\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return; + } + efi_merge_regions(); new_memmap = efi_map_regions(&count, &pg_shift); if (!new_memmap) { @@ -954,28 +960,11 @@ static void __init __efi_enter_virtual_mode(void) efi_runtime_mkexec(); /* - * We mapped the descriptor array into the EFI pagetable above but we're - * not unmapping it here. Here's why: - * - * We're copying select PGDs from the kernel page table to the EFI page - * table and when we do so and make changes to those PGDs like unmapping - * stuff from them, those changes appear in the kernel page table and we - * go boom. - * - * From setup_real_mode(): - * - * ... 
- * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd; - * - * In this particular case, our allocation is in PGD 0 of the EFI page - * table but we've copied that PGD from PGD[272] of the EFI page table: - * - * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272 - * - * where the direct memory mapping in kernel space is. - * - * new_memmap's VA comes from that direct mapping and thus clearing it, - * it would get cleared in the kernel page table too. + * We mapped the descriptor array into the EFI pagetable above + * but we're not unmapping it here because if we're running in + * EFI mixed mode we need all of memory to be accessible when + * we pass parameters to the EFI runtime services in the + * thunking code. * * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift); */ diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index ed5b67338294..58d669bc8250 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -38,6 +38,11 @@ * say 0 - 3G. */ +int __init efi_alloc_page_tables(void) +{ + return 0; +} + void efi_sync_low_kernel_mappings(void) {} void __init efi_dump_pagetable(void) {} int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index be8a32416e12..18dfaad71c99 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -40,6 +40,7 @@ #include #include #include +#include /* * We allocate runtime services regions bottom-up, starting from -4G, i.e. @@ -121,22 +122,92 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } +static pgd_t *efi_pgd; + +/* + * We need our own copy of the higher levels of the page tables + * because we want to avoid inserting EFI region mappings (EFI_VA_END + * to EFI_VA_START) into the standard kernel page tables. Everything + * else can be shared, see efi_sync_low_kernel_mappings(). + */ +int __init efi_alloc_page_tables(void) +{ + pgd_t *pgd; + pud_t *pud; + gfp_t gfp_mask; + + if (efi_enabled(EFI_OLD_MEMMAP)) + return 0; + + gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO; + efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + if (!efi_pgd) + return -ENOMEM; + + pgd = efi_pgd + pgd_index(EFI_VA_END); + + pud = pud_alloc_one(NULL, 0); + if (!pud) { + free_page((unsigned long)efi_pgd); + return -ENOMEM; + } + + pgd_populate(NULL, pgd, pud); + + return 0; +} + /* * Add low kernel mappings for passing arguments to EFI functions. */ void efi_sync_low_kernel_mappings(void) { - unsigned num_pgds; - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + unsigned num_entries; + pgd_t *pgd_k, *pgd_efi; + pud_t *pud_k, *pud_efi; if (efi_enabled(EFI_OLD_MEMMAP)) return; - num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); + /* + * We can share all PGD entries apart from the one entry that + * covers the EFI runtime mapping space. + * + * Make sure the EFI runtime region mappings are guaranteed to + * only span a single PGD entry and that the entry also maps + * other important kernel regions. 
+ */ + BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END)); + BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) != + (EFI_VA_END & PGDIR_MASK)); - memcpy(pgd + pgd_index(PAGE_OFFSET), - init_mm.pgd + pgd_index(PAGE_OFFSET), - sizeof(pgd_t) * num_pgds); + pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET); + pgd_k = pgd_offset_k(PAGE_OFFSET); + + num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET); + memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries); + + /* + * We share all the PUD entries apart from those that map the + * EFI regions. Copy around them. + */ + BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0); + BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0); + + pgd_efi = efi_pgd + pgd_index(EFI_VA_END); + pud_efi = pud_offset(pgd_efi, 0); + + pgd_k = pgd_offset_k(EFI_VA_END); + pud_k = pud_offset(pgd_k, 0); + + num_entries = pud_index(EFI_VA_END); + memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); + + pud_efi = pud_offset(pgd_efi, EFI_VA_START); + pud_k = pud_offset(pgd_k, EFI_VA_START); + + num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START); + memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); } int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) @@ -149,8 +220,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; - pgd = __va(efi_scratch.efi_pgt); + efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd); + pgd = efi_pgd; /* * It can happen that the physical address of new_memmap lands in memory @@ -196,16 +267,14 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) { - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - - kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages); + kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages); } static void __init __map_region(efi_memory_desc_t *md, u64 va) { - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); unsigned long flags = 0; unsigned long pfn; + pgd_t *pgd = efi_pgd; if (!(md->attribute & EFI_MEMORY_WB)) flags |= _PAGE_PCD; @@ -314,9 +383,7 @@ void __init efi_runtime_mkexec(void) void __init efi_dump_pagetable(void) { #ifdef CONFIG_EFI_PGT_DUMP - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - - ptdump_walk_pgd_level(NULL, pgd); + ptdump_walk_pgd_level(NULL, efi_pgd); #endif } From 44ff3af83ee1ebca7b1fc35c8471880628d31dbb Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Thu, 17 Aug 2017 06:01:28 -0500 Subject: [PATCH 290/312] ARM: dts: omap3: logicpd-torpedo-37xx-devkit: Fix MMC1 cd-gpio commit b7ace5ed8867ca54503727988adec6b20af54eeb upstream. Fixes commit 687c27676151 ("ARM: dts: Add minimal support for LogicPD Torpedo DM3730 devkit") This patch corrects an issue where the cd-gpios was improperly setup using IRQ_TYPE_LEVEL_LOW instead of GPIO_ACTIVE_LOW. 
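As an aside on why the flag namespace matters on the consumer side, here is an illustrative sketch (not the omap_hsmmc driver; the "cd" con_id and the helper name are assumptions for the example): when the cd-gpios cell carries GPIO_ACTIVE_LOW, gpiolib performs the polarity inversion, so the driver sees a logical "card present" value regardless of how the detect switch is wired.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int card_present_sketch(struct device *dev)
{
        /* Looks up the "cd-gpios" property; the ACTIVE_LOW flag in the DT
         * cell makes gpiolib return 1 for "card inserted" on this board. */
        struct gpio_desc *cd = devm_gpiod_get(dev, "cd", GPIOD_IN);

        if (IS_ERR(cd))
                return PTR_ERR(cd);

        return gpiod_get_value_cansleep(cd);
}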
Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 5b0430041ec6..fec92cd36ae3 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -88,7 +88,7 @@ interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins &mmc1_cd>; - cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */ + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio127 */ vmmc-supply = <&vmmc1>; bus-width = <4>; cap-power-off-card; From e85c6907b2b46b516950a801ea0648fec8f59ba9 Mon Sep 17 00:00:00 2001 From: Sai Praneeth Date: Wed, 9 Dec 2015 15:41:08 -0800 Subject: [PATCH 291/312] x86/efi-bgrt: Fix kernel panic when mapping BGRT data commit 50a0cb565246f20d59cdb161778531e4b19d35ac upstream. Starting with this commit 35eb8b81edd4 ("x86/efi: Build our own page table structures") efi regions have a separate page directory called "efi_pgd". In order to access any efi region we have to first shift %cr3 to this page table. In the bgrt code we are trying to copy bgrt_header and image, but these regions fall under "EFI_BOOT_SERVICES_DATA" and to access these regions we have to shift %cr3 to efi_pgd and not doing so will cause page fault as shown below. [ 0.251599] Last level dTLB entries: 4KB 64, 2MB 0, 4MB 0, 1GB 4 [ 0.259126] Freeing SMP alternatives memory: 32K (ffffffff8230e000 - ffffffff82316000) [ 0.271803] BUG: unable to handle kernel paging request at fffffffefce35002 [ 0.279740] IP: [] efi_bgrt_init+0x144/0x1fd [ 0.286383] PGD 300f067 PUD 0 [ 0.289879] Oops: 0000 [#1] SMP [ 0.293566] Modules linked in: [ 0.297039] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.0-rc1-eywa-eywa-built-in-47041+ #2 [ 0.306619] Hardware name: Intel Corporation Skylake Client platform/Skylake Y LPDDR3 RVP3, BIOS SKLSE2R1.R00.B104.B01.1511110114 11/11/2015 [ 0.320925] task: ffffffff820134c0 ti: ffffffff82000000 task.ti: ffffffff82000000 [ 0.329420] RIP: 0010:[] [] efi_bgrt_init+0x144/0x1fd [ 0.338821] RSP: 0000:ffffffff82003f18 EFLAGS: 00010246 [ 0.344852] RAX: fffffffefce35000 RBX: fffffffefce35000 RCX: fffffffefce2b000 [ 0.352952] RDX: 000000008a82b000 RSI: ffffffff8235bb80 RDI: 000000008a835000 [ 0.361050] RBP: ffffffff82003f30 R08: 000000008a865000 R09: ffffffffff202850 [ 0.369149] R10: ffffffff811ad62f R11: 0000000000000000 R12: 0000000000000000 [ 0.377248] R13: ffff88016dbaea40 R14: ffffffff822622c0 R15: ffffffff82003fb0 [ 0.385348] FS: 0000000000000000(0000) GS:ffff88016d800000(0000) knlGS:0000000000000000 [ 0.394533] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 0.401054] CR2: fffffffefce35002 CR3: 000000000300c000 CR4: 00000000003406f0 [ 0.409153] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 0.417252] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 0.425350] Stack: [ 0.427638] ffffffffffffffff ffffffff82256900 ffff88016dbaea40 ffffffff82003f40 [ 0.436086] ffffffff821bbce0 ffffffff82003f88 ffffffff8219c0c2 0000000000000000 [ 0.444533] ffffffff8219ba4a ffffffff822622c0 0000000000083000 00000000ffffffff [ 0.452978] Call Trace: [ 0.455763] [] efi_late_init+0x9/0xb [ 0.461697] [] start_kernel+0x463/0x47f [ 0.467928] [] ? set_init_arg+0x55/0x55 [ 0.474159] [] ? 
early_idt_handler_array+0x120/0x120 [ 0.481669] [] x86_64_start_reservations+0x2a/0x2c [ 0.488982] [] x86_64_start_kernel+0x13d/0x14c [ 0.495897] Code: 00 41 b4 01 48 8b 78 28 e8 09 36 01 00 48 85 c0 48 89 c3 75 13 48 c7 c7 f8 ac d3 81 31 c0 e8 d7 3b fb fe e9 b5 00 00 00 45 84 e4 <44> 8b 6b 02 74 0d be 06 00 00 00 48 89 df e8 ae 34 0$ [ 0.518151] RIP [] efi_bgrt_init+0x144/0x1fd [ 0.524888] RSP [ 0.528851] CR2: fffffffefce35002 [ 0.532615] ---[ end trace 7b06521e6ebf2aea ]--- [ 0.537852] Kernel panic - not syncing: Attempted to kill the idle task! As said above one way to fix this bug is to shift %cr3 to efi_pgd but we are not doing that way because it leaks inner details of how we switch to EFI page tables into a new call site and it also adds duplicate code. Instead, we remove the call to efi_lookup_mapped_addr() and always perform early_mem*() instead of early_io*() because we want to remap RAM regions and not I/O regions. We also delete efi_lookup_mapped_addr() because we are no longer using it. Signed-off-by: Sai Praneeth Prakhya Reported-by: Wendy Wang Cc: Borislav Petkov Cc: Josh Triplett Cc: Ricardo Neri Cc: Ravi Shankar Signed-off-by: Matt Fleming Cc: "Ghannam, Yazen" Signed-off-by: Greg Kroah-Hartman --- arch/x86/platform/efi/efi-bgrt.c | 39 ++++++++++++-------------------- drivers/firmware/efi/efi.c | 32 -------------------------- 2 files changed, 14 insertions(+), 57 deletions(-) diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index ea48449b2e63..81c5bcd06b26 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -28,8 +28,7 @@ struct bmp_header { void __init efi_bgrt_init(void) { acpi_status status; - void __iomem *image; - bool ioremapped = false; + void *image; struct bmp_header bmp_header; if (acpi_disabled) @@ -70,20 +69,14 @@ void __init efi_bgrt_init(void) return; } - image = efi_lookup_mapped_addr(bgrt_tab->image_address); + image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header)); if (!image) { - image = early_ioremap(bgrt_tab->image_address, - sizeof(bmp_header)); - ioremapped = true; - if (!image) { - pr_err("Ignoring BGRT: failed to map image header memory\n"); - return; - } + pr_err("Ignoring BGRT: failed to map image header memory\n"); + return; } - memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); - if (ioremapped) - early_iounmap(image, sizeof(bmp_header)); + memcpy(&bmp_header, image, sizeof(bmp_header)); + early_memunmap(image, sizeof(bmp_header)); bgrt_image_size = bmp_header.size; bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); @@ -93,18 +86,14 @@ void __init efi_bgrt_init(void) return; } - if (ioremapped) { - image = early_ioremap(bgrt_tab->image_address, - bmp_header.size); - if (!image) { - pr_err("Ignoring BGRT: failed to map image memory\n"); - kfree(bgrt_image); - bgrt_image = NULL; - return; - } + image = early_memremap(bgrt_tab->image_address, bmp_header.size); + if (!image) { + pr_err("Ignoring BGRT: failed to map image memory\n"); + kfree(bgrt_image); + bgrt_image = NULL; + return; } - memcpy_fromio(bgrt_image, image, bgrt_image_size); - if (ioremapped) - early_iounmap(image, bmp_header.size); + memcpy(bgrt_image, image, bgrt_image_size); + early_memunmap(image, bmp_header.size); } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 3b52677f459a..0cd8f039602e 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -325,38 +325,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md) return end; } -/* - * We 
can't ioremap data in EFI boot services RAM, because we've already mapped - * it as RAM. So, look it up in the existing EFI memory map instead. Only - * callable after efi_enter_virtual_mode and before efi_free_boot_services. - */ -void __iomem *efi_lookup_mapped_addr(u64 phys_addr) -{ - struct efi_memory_map *map; - void *p; - map = efi.memmap; - if (!map) - return NULL; - if (WARN_ON(!map->map)) - return NULL; - for (p = map->map; p < map->map_end; p += map->desc_size) { - efi_memory_desc_t *md = p; - u64 size = md->num_pages << EFI_PAGE_SHIFT; - u64 end = md->phys_addr + size; - if (!(md->attribute & EFI_MEMORY_RUNTIME) && - md->type != EFI_BOOT_SERVICES_CODE && - md->type != EFI_BOOT_SERVICES_DATA) - continue; - if (!md->virt_addr) - continue; - if (phys_addr >= md->phys_addr && phys_addr < end) { - phys_addr += md->virt_addr - md->phys_addr; - return (__force void __iomem *)(unsigned long)phys_addr; - } - } - return NULL; -} - static __initdata efi_config_table_type_t common_tables[] = { {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20}, {ACPI_TABLE_GUID, "ACPI", &efi.acpi}, From f97fc9ab1ce23a8666cfe460264a6a45b7e0b335 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 21 Dec 2015 14:12:52 +0000 Subject: [PATCH 292/312] x86/efi-bgrt: Replace early_memremap() with memremap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e2c90dd7e11e3025b46719a79fb4bb1e7a5cef9f upstream. Môshe reported the following warning triggered on his machine since commit 50a0cb565246 ("x86/efi-bgrt: Fix kernel panic when mapping BGRT data"), [ 0.026936] ------------[ cut here ]------------ [ 0.026941] WARNING: CPU: 0 PID: 0 at mm/early_ioremap.c:137 __early_ioremap+0x102/0x1bb() [ 0.026941] Modules linked in: [ 0.026944] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.0-rc1 #2 [ 0.026945] Hardware name: Dell Inc. XPS 13 9343/09K8G1, BIOS A05 07/14/2015 [ 0.026946] 0000000000000000 900f03d5a116524d ffffffff81c03e60 ffffffff813a3fff [ 0.026948] 0000000000000000 ffffffff81c03e98 ffffffff810a0852 00000000d7b76000 [ 0.026949] 0000000000000000 0000000000000001 0000000000000001 000000000000017c [ 0.026951] Call Trace: [ 0.026955] [] dump_stack+0x44/0x55 [ 0.026958] [] warn_slowpath_common+0x82/0xc0 [ 0.026959] [] warn_slowpath_null+0x1a/0x20 [ 0.026961] [] __early_ioremap+0x102/0x1bb [ 0.026962] [] early_memremap+0x13/0x15 [ 0.026964] [] efi_bgrt_init+0x162/0x1ad [ 0.026966] [] efi_late_init+0x9/0xb [ 0.026968] [] start_kernel+0x46f/0x49f [ 0.026970] [] ? early_idt_handler_array+0x120/0x120 [ 0.026972] [] x86_64_start_reservations+0x2a/0x2c [ 0.026974] [] x86_64_start_kernel+0x14a/0x16d [ 0.026977] ---[ end trace f9b3812eb8e24c58 ]--- [ 0.026978] efi_bgrt: Ignoring BGRT: failed to map image memory early_memremap() has an upper limit on the size of mapping it can handle which is ~200KB. Clearly the BGRT image on Môshe's machine is much larger than that. There's actually no reason to restrict ourselves to using the early_* version of memremap() - the ACPI BGRT driver is invoked late enough in boot that we can use the standard version, with the benefit that the late version allows mappings of arbitrary size. 
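To make the distinction concrete, here is a minimal sketch of the late-mapping pattern the change switches to; it is illustrative only, and bgrt_image_phys, bgrt_image_size and dst are stand-ins for the values the real code takes from the BGRT table:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static int copy_bgrt_image_sketch(phys_addr_t bgrt_image_phys,
                                  size_t bgrt_image_size, void *dst)
{
        void *image;

        /* memremap() is usable this late in boot and, unlike early_memremap(),
         * has no small upper bound on the size of the mapping. */
        image = memremap(bgrt_image_phys, bgrt_image_size, MEMREMAP_WB);
        if (!image)
                return -ENOMEM;

        memcpy(dst, image, bgrt_image_size);
        memunmap(image);

        return 0;
}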
Reported-by: Môshe van der Sterre Tested-by: Môshe van der Sterre Signed-off-by: Matt Fleming Cc: Josh Triplett Cc: Sai Praneeth Prakhya Cc: Borislav Petkov Link: http://lkml.kernel.org/r/1450707172-12561-1-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Thomas Gleixner Cc: "Ghannam, Yazen" Signed-off-by: Greg Kroah-Hartman --- arch/x86/platform/efi/efi-bgrt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index 81c5bcd06b26..64fbc7e33226 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -69,14 +69,14 @@ void __init efi_bgrt_init(void) return; } - image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header)); + image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); if (!image) { pr_err("Ignoring BGRT: failed to map image header memory\n"); return; } memcpy(&bmp_header, image, sizeof(bmp_header)); - early_memunmap(image, sizeof(bmp_header)); + memunmap(image); bgrt_image_size = bmp_header.size; bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); @@ -86,7 +86,7 @@ void __init efi_bgrt_init(void) return; } - image = early_memremap(bgrt_tab->image_address, bmp_header.size); + image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); if (!image) { pr_err("Ignoring BGRT: failed to map image memory\n"); kfree(bgrt_image); @@ -95,5 +95,5 @@ void __init efi_bgrt_init(void) } memcpy(bgrt_image, image, bgrt_image_size); - early_memunmap(image, bmp_header.size); + memunmap(image); } From 2b7ef6bdd28610f2907cd766362cf95d0d023801 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 27 Nov 2017 06:21:25 +0300 Subject: [PATCH 293/312] mm, thp: Do not make page table dirty unconditionally in touch_p[mu]d() commit a8f97366452ed491d13cf1e44241bc0b5740b1f0 upstream. Currently, we unconditionally make page table dirty in touch_pmd(). It may result in false-positive can_follow_write_pmd(). We may avoid the situation, if we would only make the page table entry dirty if caller asks for write access -- FOLL_WRITE. The patch also changes touch_pud() in the same way. Signed-off-by: Kirill A. Shutemov Cc: Michal Hocko Cc: Hugh Dickins Signed-off-by: Linus Torvalds [Salvatore Bonaccorso: backport for 3.16: - Adjust context - Drop specific part for PUD-sized transparent hugepages. Support for PUD-sized transparent hugepages was added in v4.11-rc1 ] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6c6f5ccfcda1..8f3769ec8575 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1304,17 +1304,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, VM_BUG_ON_PAGE(!PageHead(page), page); if (flags & FOLL_TOUCH) { pmd_t _pmd; - /* - * We should set the dirty bit only for FOLL_WRITE but - * for now the dirty bit in the pmd is meaningless. - * And if the dirty bit will become meaningful and - * we'll only set it with FOLL_WRITE, an atomic - * set_bit will be required on the pmd to set the - * young bit, instead of the current set_pmd_at. 
- */ - _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); + _pmd = pmd_mkyoung(*pmd); + if (flags & FOLL_WRITE) + _pmd = pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, - pmd, _pmd, 1)) + pmd, _pmd, flags & FOLL_WRITE)) update_mmu_cache_pmd(vma, addr, pmd); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { From 0d05a5593f6309cb0df0df474a807a3220855703 Mon Sep 17 00:00:00 2001 From: chenjie Date: Wed, 29 Nov 2017 16:10:54 -0800 Subject: [PATCH 294/312] mm/madvise.c: fix madvise() infinite loop under special circumstances commit 6ea8d958a2c95a1d514015d4e29ba21a8c0a1a91 upstream. MADVISE_WILLNEED has always been a noop for DAX (formerly XIP) mappings. Unfortunately madvise_willneed() doesn't communicate this information properly to the generic madvise syscall implementation. The calling convention is quite subtle there. madvise_vma() is supposed to either return an error or update &prev otherwise the main loop will never advance to the next vma and it will keep looping for ever without a way to get out of the kernel. It seems this has been broken since introduction. Nobody has noticed because nobody seems to be using MADVISE_WILLNEED on these DAX mappings. [mhocko@suse.com: rewrite changelog] Link: http://lkml.kernel.org/r/20171127115318.911-1-guoxuenan@huawei.com Fixes: fe77ba6f4f97 ("[PATCH] xip: madvice/fadvice: execute in place") Signed-off-by: chenjie Signed-off-by: guoxuenan Acked-by: Michal Hocko Cc: Minchan Kim Cc: zhangyi (F) Cc: Miao Xie Cc: Mike Rapoport Cc: Shaohua Li Cc: Andrea Arcangeli Cc: Mel Gorman Cc: Kirill A. Shutemov Cc: David Rientjes Cc: Anshuman Khandual Cc: Rik van Riel Cc: Carsten Otte Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/madvise.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index c889fcbb530e..2a0f9a4504f1 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -223,15 +223,14 @@ static long madvise_willneed(struct vm_area_struct *vma, { struct file *file = vma->vm_file; + *prev = vma; #ifdef CONFIG_SWAP if (!file) { - *prev = vma; force_swapin_readahead(vma, start, end); return 0; } if (shmem_mapping(file->f_mapping)) { - *prev = vma; force_shm_swapin_readahead(vma, start, end, file->f_mapping); return 0; @@ -246,7 +245,6 @@ static long madvise_willneed(struct vm_area_struct *vma, return 0; } - *prev = vma; start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; if (end > vma->vm_end) end = vma->vm_end; From 591ddc92d756b49a95060b0203fe329a31f18c3e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Nov 2017 14:50:46 -0500 Subject: [PATCH 295/312] btrfs: clear space cache inode generation always commit 8e138e0d92c6c9d3d481674fb14e3439b495be37 upstream. We discovered a box that had double allocations, and suspected the space cache may be to blame. While auditing the write out path I noticed that if we've already setup the space cache we will just carry on. This means that any error we hit after cache_save_setup before we go to actually write the cache out we won't reset the inode generation, so whatever was already written will be considered correct, except it'll be stale. Fix this by _always_ resetting the generation on the block group inode, this way we only ever have valid or invalid cache. With this patch I was no longer able to reproduce cache corruption with dm-log-writes and my bpf error injection tool. 
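Schematically, the reordering amounts to the sketch below; every name in it is an illustrative stand-in rather than a real btrfs structure or function. The point is that the on-disk generation is zeroed before the "already set up this transaction" early exit, so a failure later in the transaction leaves an invalid cache rather than a stale one that still claims to be valid.

#include <linux/types.h>

struct cache_sketch {
        u64  disk_generation;   /* what the free-space cache inode claims  */
        u64  setup_transid;     /* transaction that last set the cache up  */
        bool has_data;
};

static int cache_save_setup_sketch(struct cache_sketch *c, u64 transid)
{
        /* Always distrust the cache first; the commit path restores the
         * generation only if everything succeeds. */
        c->disk_generation = 0;

        /* The early exit is now safe: if anything fails after this point,
         * the cache is already marked invalid. */
        if (c->setup_transid == transid && c->has_data)
                return 0;

        /* ... otherwise fall through and write the cache out ... */
        return 0;
}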
Signed-off-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c36a03fa7678..260f94b019c9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3361,13 +3361,6 @@ again: goto again; } - /* We've already setup this transaction, go ahead and exit */ - if (block_group->cache_generation == trans->transid && - i_size_read(inode)) { - dcs = BTRFS_DC_SETUP; - goto out_put; - } - /* * We want to set the generation to 0, that way if anything goes wrong * from here on out we know not to trust this cache when we load up next @@ -3391,6 +3384,13 @@ again: } WARN_ON(ret); + /* We've already setup this transaction, go ahead and exit */ + if (block_group->cache_generation == trans->transid && + i_size_read(inode)) { + dcs = BTRFS_DC_SETUP; + goto out_put; + } + if (i_size_read(inode) > 0) { ret = btrfs_check_trunc_cache_free_space(root, &root->fs_info->global_block_rsv); From ab29b6b818aa1cc8c36fc510464c732d392f5df2 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:11:30 +0200 Subject: [PATCH 296/312] KVM: x86: pvclock: Handle first-time write to pvclock-page contains random junk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 51c4b8bba674cfd2260d173602c4dac08e4c3a99 upstream. When guest passes KVM it's pvclock-page GPA via WRMSR to MSR_KVM_SYSTEM_TIME / MSR_KVM_SYSTEM_TIME_NEW, KVM don't initialize pvclock-page to some start-values. It just requests a clock-update which will happen before entering to guest. The clock-update logic will call kvm_setup_pvclock_page() to update the pvclock-page with info. However, kvm_setup_pvclock_page() *wrongly* assumes that the version-field is initialized to an even number. This is wrong because at first-time write, field could be any-value. Fix simply makes sure that if first-time version-field is odd, increment it once more to make it even and only then start standard logic. This follows same logic as done in other pvclock shared-pages (See kvm_write_wall_clock() and record_steal_time()). Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3ffd5900da5b..6a624989b13e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1812,6 +1812,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + if (guest_hv_clock.version & 1) + ++guest_hv_clock.version; /* first time write, random junk */ + vcpu->hv_clock.version = guest_hv_clock.version + 1; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, From 1e9e6bdccb80d9dca2a9dfd6cf2d34ce8f226d47 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:56:32 +0200 Subject: [PATCH 297/312] KVM: x86: Exit to user-mode on #UD intercept when emulator requires MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 61cb57c9ed631c95b54f8e9090c89d18b3695b3c upstream. Instruction emulation after trapping a #UD exception can result in an MMIO access, for example when emulating a MOVBE on a processor that doesn't support the instruction. 
In this case, the #UD vmexit handler must exit to user mode, but there wasn't any code to do so. Add it for both VMX and SVM. Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 2 ++ arch/x86/kvm/vmx.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4e1b254c3695..4b1152e57340 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1696,6 +1696,8 @@ static int ud_interception(struct vcpu_svm *svm) int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 67ba0d8f87c7..253a8c8207bb 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5267,6 +5267,8 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; From a6493ad6fc893431705637d7efa3035235d9e135 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 10 Nov 2017 10:49:38 +0100 Subject: [PATCH 298/312] KVM: x86: inject exceptions produced by x86_decode_insn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6ea6e84309ca7e0e850b3083e6b09344ee15c290 upstream. Sometimes, a processor might execute an instruction while another processor is updating the page tables for that instruction's code page, but before the TLB shootdown completes. The interesting case happens if the page is in the TLB. In general, the processor will succeed in executing the instruction and nothing bad happens. However, what if the instruction is an MMIO access? If *that* happens, KVM invokes the emulator, and the emulator gets the updated page tables. If the update side had marked the code page as non present, the page table walk then will fail and so will x86_decode_insn. Unfortunately, even though kvm_fetch_guest_virt is correctly returning X86EMUL_PROPAGATE_FAULT, x86_decode_insn's caller treats the failure as a fatal error if the instruction cannot simply be reexecuted (as is the case for MMIO). And this in fact happened sometimes when rebooting Windows 2012r2 guests. Just checking ctxt->have_exception and injecting the exception if true is enough to fix the case. Thanks to Eduardo Habkost for helping in the debugging of this issue. 
Reported-by: Yanan Fu Cc: Eduardo Habkost Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6a624989b13e..df81717a92f3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5429,6 +5429,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; + if (ctxt->have_exception && inject_emulated_exception(vcpu)) + return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); From 5c65b739389fbc353fb42d379e9b7379cfe6d3f6 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Nov 2017 15:42:29 +0200 Subject: [PATCH 299/312] mmc: core: Do not leave the block driver in a suspended state commit ebe7dd45cf49e3b49cacbaace17f9f878f21fbea upstream. The block driver must be resumed if the mmc bus fails to suspend the card. Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/bus.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 972ff844cf5a..cf7c7bc1e940 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -155,6 +155,9 @@ static int mmc_bus_suspend(struct device *dev) return ret; ret = host->bus_ops->suspend(host); + if (ret) + pm_generic_resume(dev); + return ret; } From 6b4901e0e3495f1ffc846f55f704bd19049579e2 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 24 Nov 2017 07:47:50 +0100 Subject: [PATCH 300/312] eeprom: at24: check at24_read/write arguments commit d9bcd462daf34aebb8de9ad7f76de0198bb5a0f0 upstream. So far we completely rely on the caller to provide valid arguments. To be on the safe side perform an own sanity check. Signed-off-by: Heiner Kallweit Signed-off-by: Bartosz Golaszewski Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at24.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 5d7c0900fa1b..f112c5bc082a 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -257,6 +257,9 @@ static ssize_t at24_read(struct at24_data *at24, if (unlikely(!count)) return count; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -311,6 +314,9 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, unsigned long timeout, write_time; unsigned next_page; + if (offset + count > at24->chip.byte_len) + return -EINVAL; + /* Get corresponding I2C address and adjust offset */ client = at24_translate_offset(at24, &offset); From 5a7391b6d8983a2d36b0df15349caef10624e241 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Fri, 24 Nov 2017 15:14:25 -0800 Subject: [PATCH 301/312] bcache: Fix building error on MIPS commit cf33c1ee5254c6a430bc1538232b49c3ea13e613 upstream. This patch try to fix the building error on MIPS. The reason is MIPS has already defined the PTR macro, which conflicts with the PTR macro in include/uapi/linux/bcache.h. 
[fixed by mlyle: corrected a line-length issue] Signed-off-by: Huacai Chen Reviewed-by: Michael Lyle Signed-off-by: Michael Lyle Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/alloc.c | 2 +- drivers/md/bcache/extents.c | 2 +- drivers/md/bcache/journal.c | 2 +- include/uapi/linux/bcache.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index ea47980949ef..4d46f2ce606f 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -479,7 +479,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, if (b == -1) goto err; - k->ptr[i] = PTR(ca->buckets[b].gen, + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, bucket_to_sector(c, b), ca->sb.nr_this_dev); diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 243de0bf15cd..4bf15182c4da 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey return false; for (i = 0; i < KEY_PTRS(l); i++) - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) return false; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 29eba7219b01..6ed066a0e7c0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c) continue; ja->cur_idx = next; - k->ptr[n++] = PTR(0, + k->ptr[n++] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); } diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 22b6ad31c706..8562b1cb776b 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN, 0, 8) #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1) -#define PTR(gen, offset, dev) \ +#define MAKE_PTR(gen, offset, dev) \ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen) /* Bkey utility code */ From 4d027a8bcc7febabe908d4ca6049a8af21a8fd7c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Nov 2017 17:19:29 -0500 Subject: [PATCH 302/312] Revert "drm/radeon: dont switch vt on suspend" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 18c437caa5b18a235dd65cec224eab54bebcee65 upstream. Fixes distorted colors on some cards on resume from suspend. This reverts commit b9729b17a414f99c61f4db9ac9f9ed987fa0cbfe. 
Bug: https://bugs.freedesktop.org/show_bug.cgi?id=98832 Bug: https://bugs.freedesktop.org/show_bug.cgi?id=99163 Bug: https://bugzilla.kernel.org/show_bug.cgi?id=107001 Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_fb.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 26da2f4d7b4f..a2937a693591 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -226,7 +226,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { From c7716f65721a851cd925ee911f791e1ae92b398d Mon Sep 17 00:00:00 2001 From: Roman Kapl Date: Mon, 30 Oct 2017 11:56:13 +0100 Subject: [PATCH 303/312] drm/radeon: fix atombios on big endian commit 4f626a4ac8f57ddabf06d03870adab91e463217f upstream. The function for byteswapping the data send to/from atombios was buggy for num_bytes not divisible by four. The function must be aware of the fact that after byte-swapping the u32 units, valid bytes might end up after the num_bytes boundary. This patch was tested on kernel 3.12 and allowed us to sucesfully use DisplayPort on and Radeon SI card. Namely it fixed the link training and EDID readout. The function is patched both in radeon and amd drivers, since the functions and the fixes are identical. Signed-off-by: Roman Kapl Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 38 ++++++++++---------- drivers/gpu/drm/radeon/atombios_dp.c | 38 ++++++++++---------- 2 files changed, 36 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f4cae5357e40..3e90ddcbb24a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1575,34 +1575,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); } -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. 
*/ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b5760851195c..0c6216a6ee9e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -45,34 +45,32 @@ static char *pre_emph_names[] = { /***** radeon AUX functions *****/ -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. */ void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); From a155a9568d9abf50fe4913c4bf7b46a8281d3dc3 Mon Sep 17 00:00:00 2001 From: Jonathan Liu Date: Mon, 7 Aug 2017 21:55:45 +1000 Subject: [PATCH 304/312] drm/panel: simple: Add missing panel_simple_unprepare() calls commit f3621a8eb59a913612c8e6e37d81f16b649f8b6c upstream. During panel removal or system shutdown panel_simple_disable() is called which disables the panel backlight but the panel is still powered due to missing calls to panel_simple_unprepare(). 
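For reference, a minimal sketch of the pairing these hooks are meant to follow (illustrative only; the helper name is made up): teardown mirrors bring-up, so every disable needs a matching unprepare before the panel is really powered down.

#include <drm/drm_panel.h>

static void panel_power_cycle_sketch(struct drm_panel *panel)
{
        /* bring-up: power rails first, then video/backlight */
        drm_panel_prepare(panel);
        drm_panel_enable(panel);

        /* teardown mirrors bring-up: backlight off, then power rails off */
        drm_panel_disable(panel);
        drm_panel_unprepare(panel);
}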
Fixes: d02fd93e2cd8 ("drm/panel: simple - Disable panel on shutdown") Signed-off-by: Jonathan Liu Signed-off-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20170807115545.27747-1-net147@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/panel/panel-simple.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index f97b73ec4713..f418c002d323 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -352,6 +352,7 @@ static int panel_simple_remove(struct device *dev) drm_panel_remove(&panel->base); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); if (panel->ddc) put_device(&panel->ddc->dev); @@ -367,6 +368,7 @@ static void panel_simple_shutdown(struct device *dev) struct panel_simple *panel = dev_get_drvdata(dev); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); } static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = { From c8ea49b690eef5c180369a8d132c5e50a74b3acf Mon Sep 17 00:00:00 2001 From: Brent Taylor Date: Mon, 30 Oct 2017 22:32:45 -0500 Subject: [PATCH 305/312] mtd: nand: Fix writing mtdoops to nand flash. commit 30863e38ebeb500a31cecee8096fb5002677dd9b upstream. When mtdoops calls mtd_panic_write(), it eventually calls panic_nand_write() in nand_base.c. In order to properly wait for the nand chip to be ready in panic_nand_wait(), the chip must first be selected. When using the atmel nand flash controller, a panic would occur due to a NULL pointer exception. Fixes: 2af7c6539931 ("mtd: Add panic_write for NAND flashes") Cc: Signed-off-by: Brent Taylor Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/nand_base.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 54ab48827258..7ba109e8cf88 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2663,15 +2663,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd->priv; + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; From a61474fa8749f67ef18aa2104b48b0b56ea654b7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 25 Aug 2017 17:34:41 +1000 Subject: [PATCH 306/312] NFS: revalidate "." etc correctly on "open". commit b688741cb06695312f18b730653d6611e1bad28d upstream. For correct close-to-open semantics, NFS must validate the change attribute of a directory (or file) on open. Since commit ecf3d1f1aa74 ("vfs: kill FS_REVAL_DOT by adding a d_weak_revalidate dentry op"), open() of "." or a path ending ".." is not revalidated reliably (except when that direct is a mount point). Prior to that commit, "." was revalidated using nfs_lookup_revalidate() which checks the LOOKUP_OPEN flag and forces revalidation if the flag is set. Since that commit, nfs_weak_revalidate() is used for NFSv3 (which ignores the flags) and nothing is used for NFSv4. This is fixed by using nfs_lookup_verify_inode() in nfs_weak_revalidate(). 
This does the revalidation exactly when needed. Also, add a definition of .d_weak_revalidate for NFSv4. The incorrect behavior is easily demonstrated by running "echo *" in some non-mountpoint NFS directory while watching network traffic. Without this patch, "echo *" sometimes doesn't produce any traffic. With the patch it always does. Fixes: ecf3d1f1aa74 ("vfs: kill FS_REVAL_DOT by adding a d_weak_revalidate dentry op") cc: stable@vger.kernel.org (3.9+) Signed-off-by: NeilBrown Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/dir.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 348e0a05bd18..44e09483d2cd 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1260,7 +1260,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags) return 0; } - error = nfs_revalidate_inode(NFS_SERVER(inode), inode); + error = nfs_lookup_verify_inode(inode, flags); dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n", __func__, inode->i_ino, error ? "invalid" : "valid"); return !error; @@ -1420,6 +1420,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, + .d_weak_revalidate = nfs_weak_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, From e6bcff7b6aa1c0fb30234864821e8169eeb8054d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 23 Nov 2017 21:41:56 +0200 Subject: [PATCH 307/312] drm/i915: Don't try indexed reads to alternate slave addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ae5c631e605a452a5a0e73205a92810c01ed954b upstream. We can only specify the one slave address to indexed reads/writes. Make sure the messages we check are destined to the same slave address before deciding to do an indexed transfer. Cc: Daniel Kurtz Cc: Chris Wilson Cc: Daniel Vetter Cc: Sean Paul Fixes: 56f9eac05489 ("drm/i915/intel_i2c: use INDEX cycles for i2c read transactions") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171123194157.25367-2-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson (cherry picked from commit c4deb62d7821672265b87952bcd1c808f3bf3e8f) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_i2c.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f3bee54c414f..ce625496ce01 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -440,6 +440,7 @@ static bool gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && + msgs[i].addr == msgs[i + 1].addr && !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && (msgs[i + 1].flags & I2C_M_RD)); } From 68e6cd9a0547d40d921baff918c111db96d96d76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 23 Nov 2017 21:41:57 +0200 Subject: [PATCH 308/312] drm/i915: Prevent zero length "index" write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 56350fb8978bbf4aafe08f21234e161dd128b417 upstream. The hardware always writes one or two bytes in the index portion of an indexed transfer. Make sure the message we send as the index doesn't have a zero length. 
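To make the condition concrete, here is a sketch (not the driver code; the 0x50 address, the index value and the lengths are arbitrary examples) of the two-message shape that qualifies as an indexed read, including the same-slave check this patch adds:

#include <linux/i2c.h>

static bool is_index_read_pair_sketch(const struct i2c_msg *m0,
                                      const struct i2c_msg *m1)
{
        return m0->addr == m1->addr &&          /* the new check: same slave     */
               !(m0->flags & I2C_M_RD) &&       /* first message writes an index */
               m0->len <= 2 &&
               (m1->flags & I2C_M_RD);          /* second message reads back     */
}

/* Example pair: write a one-byte index to slave 0x50, then read four bytes. */
static u8 index_byte = 0x10;
static u8 data[4];
static struct i2c_msg example_msgs[] = {
        { .addr = 0x50, .flags = 0,        .len = 1, .buf = &index_byte },
        { .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = data },
};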
Cc: Daniel Kurtz
Cc: Chris Wilson
Cc: Daniel Vetter
Cc: Sean Paul
Fixes: 56f9eac05489 ("drm/i915/intel_i2c: use INDEX cycles for i2c read transactions")
Signed-off-by: Ville Syrjälä
Link: https://patchwork.freedesktop.org/patch/msgid/20171123194157.25367-3-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson
(cherry picked from commit bb9e0d4bca50f429152e74a459160b41f3d60fb2)
Signed-off-by: Joonas Lahtinen
Signed-off-by: Greg Kroah-Hartman
---
 drivers/gpu/drm/i915/intel_i2c.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index ce625496ce01..cb4313c68f71 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -441,7 +441,8 @@ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
 		msgs[i].addr == msgs[i + 1].addr &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }
 

From d7f5f10f5159fd37dff9d566c0e6a9a61f2c8a66 Mon Sep 17 00:00:00 2001
From: Oleg Drokin
Date: Tue, 14 Jun 2016 23:28:06 -0400
Subject: [PATCH 309/312] nfsd: Make init_open_stateid() a bit more whole

commit 8c7245abda877d4689b3371db8ae2a4400d7d9ce upstream.

Move the state selection logic inside from the caller,
always making it return correct stp to use.

Signed-off-by: J . Bruce Fields
Signed-off-by: Oleg Drokin
Signed-off-by: J. Bruce Fields
Signed-off-by: Greg Kroah-Hartman
---
 fs/nfsd/nfs4state.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 421935f3d909..76caee97d2cd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3420,13 +3420,14 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 }
 
 static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
-		struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 {
 	struct nfs4_openowner *oo = open->op_openowner;
 	struct nfs4_ol_stateid *retstp = NULL;
+	struct nfs4_ol_stateid *stp;
 
+	stp = open->op_stp;
 	/* We are moving these outside of the spinlocks to avoid the warnings */
 	mutex_init(&stp->st_mutex);
 	mutex_lock(&stp->st_mutex);
@@ -3437,6 +3438,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
 	retstp = nfsd4_find_existing_open(fp, open);
 	if (retstp)
 		goto out_unlock;
+
+	open->op_stp = NULL;
 	atomic_inc(&stp->st_stid.sc_count);
 	stp->st_stid.sc_type = NFS4_OPEN_STID;
 	INIT_LIST_HEAD(&stp->st_locks);
@@ -3454,10 +3457,11 @@ out_unlock:
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
 	if (retstp) {
 		mutex_lock(&retstp->st_mutex);
-		/* Not that we need to, just for neatness */
+		/* To keep mutex tracking happy */
 		mutex_unlock(&stp->st_mutex);
+		stp = retstp;
 	}
-	return retstp;
+	return stp;
 }
 
 /*
@@ -4260,7 +4264,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
 	struct nfs4_file *fp = NULL;
 	struct nfs4_ol_stateid *stp = NULL;
-	struct nfs4_ol_stateid *swapstp = NULL;
 	struct nfs4_delegation *dp = NULL;
 	__be32 status;
 
@@ -4297,16 +4300,10 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 			goto out;
 		}
 	} else {
-		stp = open->op_stp;
-		open->op_stp = NULL;
-		/*
-		 * init_open_stateid() either returns a locked stateid
-		 * it found, or initializes and locks the new one we passed in
-		 */
-		swapstp = init_open_stateid(stp, fp, open);
-		if (swapstp) {
-			nfs4_put_stid(&stp->st_stid);
-			stp = swapstp;
+		/* stp is returned locked. */
+		stp = init_open_stateid(fp, open);
+		/* See if we lost the race to some other thread */
+		if (stp->st_access_bmap != 0) {
 			status = nfs4_upgrade_open(rqstp, fp, current_fh,
 					stp, open);
 			if (status) {

From 54298082dbacba2591b2aa461320f31946cdbffb Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Fri, 3 Nov 2017 08:00:10 -0400
Subject: [PATCH 310/312] nfsd: Fix stateid races between OPEN and CLOSE

commit 15ca08d3299682dc49bad73251677b2c5017ef08 upstream.

Open file stateids can linger on the nfs4_file list of stateids even
after they have been closed. In order to avoid reusing such a stateid,
and confusing the client, we need to recheck the nfs4_stid's type after
taking the mutex.
Otherwise, we risk reusing an old stateid that was already closed, which
will confuse clients that expect new stateids to conform to
RFC7530 Sections 9.1.4.2 and 16.2.5 or RFC5661 Sections 8.2.2 and 18.2.4.

Signed-off-by: Trond Myklebust
Signed-off-by: J. Bruce Fields
Signed-off-by: Greg Kroah-Hartman
---
 fs/nfsd/nfs4state.c | 67 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 59 insertions(+), 8 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 76caee97d2cd..3bf086aca5c0 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3379,7 +3379,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 		/* ignore lock owners */
 		if (local->st_stateowner->so_is_open_owner == 0)
 			continue;
-		if (local->st_stateowner == &oo->oo_owner) {
+		if (local->st_stateowner != &oo->oo_owner)
+			continue;
+		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
 			ret = local;
 			atomic_inc(&ret->st_stid.sc_count);
 			break;
@@ -3388,6 +3390,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 	return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+	__be32 ret = nfs_ok;
+
+	switch (s->sc_type) {
+	default:
+		break;
+	case NFS4_CLOSED_STID:
+	case NFS4_CLOSED_DELEG_STID:
+		ret = nfserr_bad_stateid;
+		break;
+	case NFS4_REVOKED_DELEG_STID:
+		ret = nfserr_deleg_revoked;
+	}
+	return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+	__be32 ret;
+
+	mutex_lock(&stp->st_mutex);
+	ret = nfsd4_verify_open_stid(&stp->st_stid);
+	if (ret != nfs_ok)
+		mutex_unlock(&stp->st_mutex);
+	return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+	struct nfs4_ol_stateid *stp;
+
+	for (;;) {
+		spin_lock(&fp->fi_lock);
+		stp = nfsd4_find_existing_open(fp, open);
+		spin_unlock(&fp->fi_lock);
+		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+			break;
+		nfs4_put_stid(&stp->st_stid);
+	}
+	return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 			   struct nfsd4_compound_state *cstate)
@@ -3432,6 +3480,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 	mutex_init(&stp->st_mutex);
 	mutex_lock(&stp->st_mutex);
 
+retry:
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
 	spin_lock(&fp->fi_lock);
 
@@ -3456,7 +3505,11 @@ out_unlock:
 	spin_unlock(&fp->fi_lock);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
 	if (retstp) {
-		mutex_lock(&retstp->st_mutex);
+		/* Handle races with CLOSE */
+		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+			nfs4_put_stid(&retstp->st_stid);
+			goto retry;
+		}
 		/* To keep mutex tracking happy */
 		mutex_unlock(&stp->st_mutex);
 		stp = retstp;
@@ -4277,9 +4330,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
 			goto out;
-		spin_lock(&fp->fi_lock);
-		stp = nfsd4_find_existing_open(fp, open);
-		spin_unlock(&fp->fi_lock);
+		stp = nfsd4_find_and_lock_existing_open(fp, open);
 	} else {
 		open->op_file = NULL;
 		status = nfserr_bad_stateid;
@@ -4293,7 +4344,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	 */
 	if (stp) {
 		/* Stateid was found, this is an OPEN upgrade */
-		mutex_lock(&stp->st_mutex);
 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
 		if (status) {
 			mutex_unlock(&stp->st_mutex);
@@ -5150,7 +5200,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 	bool unhashed;
 	LIST_HEAD(reaplist);
 
-	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
 	unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5189,10 +5238,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	nfsd4_bump_seqid(cstate, status);
 	if (status)
 		goto out;
+
+	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-	mutex_unlock(&stp->st_mutex);
 
 	nfsd4_close_open_stateid(stp);
+	mutex_unlock(&stp->st_mutex);
 
 	/* put reference from nfs4_preprocess_seqid_op */
 	nfs4_put_stid(&stp->st_stid);

From bd249dd078272e8cef53b98a73c1cdd60c532251 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Fri, 3 Nov 2017 08:00:11 -0400
Subject: [PATCH 311/312] nfsd: Fix another OPEN stateid race

commit d8a1a000555ecd1b824ac1ed6df8fe364dfbbbb0 upstream.

If nfsd4_process_open2() is initialising a new stateid, and yet the
call to nfs4_get_vfs_file() fails for some reason, then we must
declare the stateid closed, and unhash it before dropping the mutex.

Right now, we unhash the stateid after dropping the mutex, and without
changing the stateid type, meaning that another OPEN could theoretically
look it up and attempt to use it.

Reported-by: Andrew W Elble
Signed-off-by: Trond Myklebust
Signed-off-by: J. Bruce Fields
Signed-off-by: Greg Kroah-Hartman
---
 fs/nfsd/nfs4state.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3bf086aca5c0..11c67e8b939d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4319,6 +4319,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	struct nfs4_ol_stateid *stp = NULL;
 	struct nfs4_delegation *dp = NULL;
 	__be32 status;
+	bool new_stp = false;
 
 	/*
 	 * Lookup file; if found, lookup stateid and check open request,
@@ -4338,11 +4339,19 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 			goto out;
 	}
 
+	if (!stp) {
+		stp = init_open_stateid(fp, open);
+		if (!open->op_stp)
+			new_stp = true;
+	}
+
 	/*
 	 * OPEN the file, or upgrade an existing OPEN.
 	 * If truncate fails, the OPEN fails.
+	 *
+	 * stp is already locked.
 	 */
-	if (stp) {
+	if (!new_stp) {
 		/* Stateid was found, this is an OPEN upgrade */
 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
 		if (status) {
@@ -4350,22 +4359,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 			goto out;
 		}
 	} else {
-		/* stp is returned locked. */
-		stp = init_open_stateid(fp, open);
-		/* See if we lost the race to some other thread */
-		if (stp->st_access_bmap != 0) {
-			status = nfs4_upgrade_open(rqstp, fp, current_fh,
-					stp, open);
-			if (status) {
-				mutex_unlock(&stp->st_mutex);
-				goto out;
-			}
-			goto upgrade_out;
-		}
 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
 		if (status) {
-			mutex_unlock(&stp->st_mutex);
+			stp->st_stid.sc_type = NFS4_CLOSED_STID;
 			release_open_stateid(stp);
+			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 
@@ -4374,7 +4372,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		if (stp->st_clnt_odstate == open->op_odstate)
 			open->op_odstate = NULL;
 	}
-upgrade_out:
+
 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
 	mutex_unlock(&stp->st_mutex);
 

From bd379939182247ace70e425ab60b3b1352db601b Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 5 Dec 2017 11:22:52 +0100
Subject: [PATCH 312/312] Linux 4.4.104

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index f5a51cd7ca49..55500e023f61 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 103
+SUBLEVEL = 104
 EXTRAVERSION =
 NAME = Blurry Fish Butt