This is the 4.4.155 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAluVYLUACgkQONu9yGCS
aT5MxhAArBSShT0IXHg9oXGtkm6g3mkZ/EAXPrl3Tq2ayLjXeMfNfsdKkBvusjTr
b/Fs9ZLm1x7bI4+kD/6sTLtGlWBr6djocnBtB8PxQxxkmIZRZPjE9laemsyBn7XD
7amJEHuyaQU10da2obX7z+Gge+bgSoN4Q5+19ZESr4fCxa7bMaY+VmLCuROe6Flo
9kUaLFvxrsowFLrdKfWb/Zc7WHQfYtfTd2c9T+lz3wC4+X3zxkwHl0odvwe1yX9a
xDc674yWepl1D8wMB3i7O5KGoOSghhZZmH2Cnb/cNWoeSmFO8rttCWYiSVEIOWWN
5HOmHRqMDPFUqH5g9F3z1A9uM5uQa9uOu7BGcDJjeU3oXZRFzTjJLMZj4Zcv0hLM
WMo2+5iXFBByUVvUk2nKHotNNmnzxITW9CDWEuAv4jGlA8bjpIwkHUncqknTesan
SRf63jC2+7N0PV5pGCLHA92NA/w663YtMyPPuLsYmprK1OFC1+X8o2bDyfX5ey59
bgkIItNRbgaBRTjPhS1EwJjuNRE59636x9EpFeb0M16j4YHFvGq2fS2LDuymPA3P
JMVwsxpLtwHjI6KMcnIcDVphiJjLpTq6ijc727mTsHrTqHRa3/w6Ay/TZjRlDn00
YKpVKQtoUk0FURyVwdJjo0eH5O6MYfaw4uj4h1zEOFMXszkVmL4=
=WUY2
-----END PGP SIGNATURE-----

Merge 4.4.155 into android-4.4

Changes in 4.4.155
    net: 6lowpan: fix reserved space for single frames
    net: mac802154: tx: expand tailroom if necessary
    9p/net: Fix zero-copy path in the 9p virtio transport
    net: lan78xx: Fix misplaced tasklet_schedule() call
    spi: davinci: fix a NULL pointer dereference
    drm/i915/userptr: reject zero user_size
    powerpc/fadump: handle crash memory ranges array index overflow
    powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
    fs/9p/xattr.c: catch the error of p9_client_clunk when setting xattr failed
    9p/virtio: fix off-by-one error in sg list bounds check
    net/9p/client.c: version pointer uninitialized
    net/9p/trans_fd.c: fix race-condition by flushing workqueue before the kfree()
    x86/mm/pat: Fix L1TF stable backport for CPA, 2nd call
    dm cache metadata: save in-core policy_hint_size to on-disk superblock
    iio: ad9523: Fix displayed phase
    iio: ad9523: Fix return value for ad952x_store()
    vmw_balloon: fix inflation of 64-bit GFNs
    vmw_balloon: do not use 2MB without batching
    vmw_balloon: VMCI_DOORBELL_SET does not check status
    vmw_balloon: fix VMCI use when balloon built into kernel
    tracing: Do not call start/stop() functions when tracing_on does not change
    tracing/blktrace: Fix to allow setting same value
    kthread, tracing: Don't expose half-written comm when creating kthreads
    uprobes: Use synchronize_rcu() not synchronize_sched()
    9p: fix multiple NULL-pointer-dereferences
    PM / sleep: wakeup: Fix build error caused by missing SRCU support
    pnfs/blocklayout: off by one in bl_map_stripe()
    ARM: tegra: Fix Tegra30 Cardhu PCA954x reset
    mm/tlb: Remove tlb_remove_table() non-concurrent condition
    iommu/vt-d: Add definitions for PFSID
    iommu/vt-d: Fix dev iotlb pfsid use
    osf_getdomainname(): use copy_to_user()
    sys: don't hold uts_sem while accessing userspace memory
    userns: move user access out of the mutex
    ubifs: Fix memory leak in lprobs self-check
    Revert "UBIFS: Fix potential integer overflow in allocation"
    ubifs: Check data node size before truncate
    ubifs: Fix synced_i_size calculation for xattr inodes
    pwm: tiehrpwm: Fix disabling of output of PWMs
    fb: fix lost console when the user unplugs a USB adapter
    udlfb: set optimal write delay
    getxattr: use correct xattr length
    bcache: release dc->writeback_lock properly in bch_writeback_thread()
    perf auxtrace: Fix queue resize
    fs/quota: Fix spectre gadget in do_quotactl
    x86/io: add interface to reserve io memtype for a resource range. (v1.1)
    drm/drivers: add support for using the arch wc mapping API.
    Linux 4.4.155

Change-Id: Ie455609e00dd70d3fa723cd254f544109db8a788
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b3f777efd9

53 changed files with 520 additions and 236 deletions
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 154
+SUBLEVEL = 155
 EXTRAVERSION =
 NAME = Blurry Fish Butt
arch/alpha/kernel/osf_sys.c
@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
 {
-    int error;
+    char tmp[5 * 32];

     down_read(&uts_sem);
-    error = -EFAULT;
-    if (copy_to_user(name + 0, utsname()->sysname, 32))
-        goto out;
-    if (copy_to_user(name + 32, utsname()->nodename, 32))
-        goto out;
-    if (copy_to_user(name + 64, utsname()->release, 32))
-        goto out;
-    if (copy_to_user(name + 96, utsname()->version, 32))
-        goto out;
-    if (copy_to_user(name + 128, utsname()->machine, 32))
-        goto out;
-
-    error = 0;
- out:
+    memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+    memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+    memcpy(tmp + 2 * 32, utsname()->release, 32);
+    memcpy(tmp + 3 * 32, utsname()->version, 32);
+    memcpy(tmp + 4 * 32, utsname()->machine, 32);
     up_read(&uts_sem);
-    return error;
+
+    if (copy_to_user(name, tmp, sizeof(tmp)))
+        return -EFAULT;
+    return 0;
 }

 SYSCALL_DEFINE0(getpagesize)
@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
  */
 SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
 {
-    unsigned len;
-    int i;
+    int len, err = 0;
+    char *kname;
+    char tmp[32];

-    if (!access_ok(VERIFY_WRITE, name, namelen))
-        return -EFAULT;
-
-    len = namelen;
-    if (len > 32)
-        len = 32;
+    if (namelen < 0 || namelen > 32)
+        namelen = 32;

     down_read(&uts_sem);
-    for (i = 0; i < len; ++i) {
-        __put_user(utsname()->domainname[i], name + i);
-        if (utsname()->domainname[i] == '\0')
-            break;
-    }
+    kname = utsname()->domainname;
+    len = strnlen(kname, namelen);
+    len = min(len + 1, namelen);
+    memcpy(tmp, kname, len);
     up_read(&uts_sem);

+    if (copy_to_user(name, tmp, len))
+        return -EFAULT;
     return 0;
 }

@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
     };
     unsigned long offset;
     const char *res;
-    long len, err = -EINVAL;
+    long len;
+    char tmp[__NEW_UTS_LEN + 1];

     offset = command-1;
     if (offset >= ARRAY_SIZE(sysinfo_table)) {
         /* Digital UNIX has a few unpublished interfaces here */
         printk("sysinfo(%d)", command);
-        goto out;
+        return -EINVAL;
     }

     down_read(&uts_sem);
@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
     len = strlen(res)+1;
     if ((unsigned long)len > (unsigned long)count)
         len = count;
-    if (copy_to_user(buf, res, len))
-        err = -EFAULT;
-    else
-        err = 0;
+    memcpy(tmp, res, len);
     up_read(&uts_sem);
- out:
-    return err;
+    if (copy_to_user(buf, tmp, len))
+        return -EFAULT;
+    return 0;
 }

 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
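Most of the uts_sem changes in this release (the alpha hunks above, and the sparc, kernel/sys.c and kernel/utsname_sysctl.c hunks below) apply one pattern: snapshot the shared data into a bounded local buffer while holding the semaphore, drop the lock, and only then touch userspace, because copy_to_user()/__put_user() can fault and sleep. A minimal userspace analogue of that pattern, using a pthread rwlock in place of uts_sem and a plain memcpy in place of copy_to_user(); the names uts_lock, g_domainname and read_domainname are illustrative, not kernel symbols:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char g_domainname[32] = "example.net";

/* Hold the lock only long enough to snapshot the value; do the
 * potentially faulting/blocking copy to the caller afterwards. */
static int read_domainname(char *dst, int dstlen)
{
    char tmp[32];
    int len;

    if (dstlen < 0 || dstlen > (int)sizeof(tmp))
        dstlen = sizeof(tmp);

    pthread_rwlock_rdlock(&uts_lock);
    len = (int)strnlen(g_domainname, dstlen);
    if (len + 1 <= dstlen)
        len++;                          /* include the NUL when it fits */
    memcpy(tmp, g_domainname, len);
    pthread_rwlock_unlock(&uts_lock);   /* drop before the "user" copy */

    memcpy(dst, tmp, len);              /* stands in for copy_to_user() */
    return len;
}

int main(void)
{
    char buf[32];
    int n = read_domainname(buf, sizeof(buf));

    printf("%.*s (%d bytes)\n", n, buf, n);
    return 0;
}

The length clamping mirrors osf_getdomainname() above: len = min(len + 1, namelen), so the copy never exceeds either the caller's buffer or the snapshot.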
arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -201,6 +201,7 @@
             #address-cells = <1>;
             #size-cells = <0>;
             reg = <0x70>;
+            reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
         };
     };
arch/powerpc/include/asm/fadump.h
@@ -194,9 +194,6 @@ struct fadump_crash_info_header {
     struct cpumask  cpu_online_mask;
 };

-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES   (INIT_MEMBLOCK_REGIONS + 2)
-
 struct fad_crash_memory_ranges {
     unsigned long long  base;
     unsigned long long  size;
arch/powerpc/kernel/fadump.c
@@ -35,6 +35,7 @@
 #include <linux/crash_dump.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
+#include <linux/slab.h>

 #include <asm/page.h>
 #include <asm/prom.h>
@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
 static const struct fadump_mem_struct *fdm_active;

 static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
 int crash_mem_ranges;
+int max_crash_mem_ranges;

 /* Scan the Firmware Assisted dump configuration details. */
 int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -726,38 +729,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
     return 0;
 }

-static inline void fadump_add_crash_memory(unsigned long long base,
+static void free_crash_memory_ranges(void)
+{
+    kfree(crash_memory_ranges);
+    crash_memory_ranges = NULL;
+    crash_memory_ranges_size = 0;
+    max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+    struct fad_crash_memory_ranges *new_array;
+    u64 new_size;
+
+    new_size = crash_memory_ranges_size + PAGE_SIZE;
+    pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+         new_size);
+
+    new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+    if (new_array == NULL) {
+        pr_err("Insufficient memory for setting up crash memory ranges\n");
+        free_crash_memory_ranges();
+        return -ENOMEM;
+    }
+
+    crash_memory_ranges = new_array;
+    crash_memory_ranges_size = new_size;
+    max_crash_mem_ranges = (new_size /
+                sizeof(struct fad_crash_memory_ranges));
+    return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
                       unsigned long long end)
 {
     if (base == end)
-        return;
+        return 0;
+
+    if (crash_mem_ranges == max_crash_mem_ranges) {
+        int ret;
+
+        ret = allocate_crash_memory_ranges();
+        if (ret)
+            return ret;
+    }

     pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
         crash_mem_ranges, base, end - 1, (end - base));
     crash_memory_ranges[crash_mem_ranges].base = base;
     crash_memory_ranges[crash_mem_ranges].size = end - base;
     crash_mem_ranges++;
+    return 0;
 }

-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
                     unsigned long long end)
 {
     unsigned long long ra_start, ra_end;
+    int ret = 0;

     ra_start = fw_dump.reserve_dump_area_start;
     ra_end = ra_start + fw_dump.reserve_dump_area_size;

     if ((ra_start < end) && (ra_end > start)) {
         if ((start < ra_start) && (end > ra_end)) {
-            fadump_add_crash_memory(start, ra_start);
-            fadump_add_crash_memory(ra_end, end);
+            ret = fadump_add_crash_memory(start, ra_start);
+            if (ret)
+                return ret;
+
+            ret = fadump_add_crash_memory(ra_end, end);
         } else if (start < ra_start) {
-            fadump_add_crash_memory(start, ra_start);
+            ret = fadump_add_crash_memory(start, ra_start);
         } else if (ra_end < end) {
-            fadump_add_crash_memory(ra_end, end);
+            ret = fadump_add_crash_memory(ra_end, end);
         }
     } else
-        fadump_add_crash_memory(start, end);
+        ret = fadump_add_crash_memory(start, end);
+
+    return ret;
 }

 static int fadump_init_elfcore_header(char *bufp)
@@ -793,10 +846,11 @@ static int fadump_init_elfcore_header(char *bufp)
  * Traverse through memblock structure and setup crash memory ranges. These
  * ranges will be used create PT_LOAD program headers in elfcore header.
  */
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
 {
     struct memblock_region *reg;
     unsigned long long start, end;
+    int ret;

     pr_debug("Setup crash memory ranges.\n");
     crash_mem_ranges = 0;
@@ -807,7 +861,9 @@ static void fadump_setup_crash_memory_ranges(void)
      * specified during fadump registration. We need to create a separate
      * program header for this chunk with the correct offset.
      */
-    fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+    ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+    if (ret)
+        return ret;

     for_each_memblock(memory, reg) {
         start = (unsigned long long)reg->base;
@@ -816,8 +872,12 @@ static void fadump_setup_crash_memory_ranges(void)
             start = fw_dump.boot_memory_size;

         /* add this range excluding the reserved dump area. */
-        fadump_exclude_reserved_area(start, end);
+        ret = fadump_exclude_reserved_area(start, end);
+        if (ret)
+            return ret;
     }

+    return 0;
 }

 /*
@@ -941,6 +1001,7 @@ static void register_fadump(void)
 {
     unsigned long addr;
     void *vaddr;
+    int ret;

     /*
      * If no memory is reserved then we can not register for firmware-
@@ -949,7 +1010,9 @@ static void register_fadump(void)
     if (!fw_dump.reserve_dump_area_size)
         return;

-    fadump_setup_crash_memory_ranges();
+    ret = fadump_setup_crash_memory_ranges();
+    if (ret)
+        return ret;

     addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
     /* Initialize fadump crash info header. */
@@ -1028,6 +1091,7 @@ void fadump_cleanup(void)
     } else if (fw_dump.dump_registered) {
         /* Un-register Firmware-assisted dump if it was registered. */
         fadump_unregister_dump(&fdm);
+        free_crash_memory_ranges();
     }
 }
arch/powerpc/platforms/pseries/ras.c
@@ -300,7 +300,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
     }

     savep = __va(regs->gpr[3]);
-    regs->gpr[3] = savep[0];    /* restore original r3 */
+    regs->gpr[3] = be64_to_cpu(savep[0]);    /* restore original r3 */

     /* If it isn't an extended log we can use the per cpu 64bit buffer */
     h = (struct rtas_error_log *)&savep[1];
arch/sparc/kernel/sys_sparc_32.c
@@ -202,6 +202,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
 asmlinkage long sys_getdomainname(char __user *name, int len)
 {
     int nlen, err;
+    char tmp[__NEW_UTS_LEN + 1];

     if (len < 0)
         return -EINVAL;
@@ -211,13 +212,16 @@ asmlinkage long sys_getdomainname(char __user *name, int len)
     nlen = strlen(utsname()->domainname) + 1;
     err = -EINVAL;
     if (nlen > len)
-        goto out;
+        goto out_unlock;
+    memcpy(tmp, utsname()->domainname, nlen);

-    err = -EFAULT;
-    if (!copy_to_user(name, utsname()->domainname, nlen))
-        err = 0;
+    up_read(&uts_sem);

-out:
+    if (copy_to_user(name, tmp, nlen))
+        return -EFAULT;
+    return 0;
+
+out_unlock:
     up_read(&uts_sem);
     return err;
 }
arch/sparc/kernel/sys_sparc_64.c
@@ -525,6 +525,7 @@ extern void check_pending(int signum);
 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
 {
     int nlen, err;
+    char tmp[__NEW_UTS_LEN + 1];

     if (len < 0)
         return -EINVAL;
@@ -534,13 +535,16 @@ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
     nlen = strlen(utsname()->domainname) + 1;
     err = -EINVAL;
     if (nlen > len)
-        goto out;
+        goto out_unlock;
+    memcpy(tmp, utsname()->domainname, nlen);

-    err = -EFAULT;
-    if (!copy_to_user(name, utsname()->domainname, nlen))
-        err = 0;
+    up_read(&uts_sem);

-out:
+    if (copy_to_user(name, tmp, nlen))
+        return -EFAULT;
+    return 0;
+
+out_unlock:
     up_read(&uts_sem);
     return err;
 }
arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
 #define arch_phys_wc_add arch_phys_wc_add
 #endif

+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
 #endif /* _ASM_X86_IO_H */
arch/x86/mm/pageattr.c
@@ -1079,7 +1079,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
      * Map everything starting from the Gb boundary, possibly with 1G pages
      */
     while (end - start >= PUD_SIZE) {
-        set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+        set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn >> PAGE_SHIFT,
                    canon_pgprot(pud_pgprot))));

         start    += PUD_SIZE;
arch/x86/mm/pat.c
@@ -726,6 +726,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
     free_memtype(start, end);
 }

+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+    enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+    return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+    io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                 unsigned long size, pgprot_t vma_prot)
 {
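The two x86 hunks above add the arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc() pair that the DRM hunks below consume, with a no-op fallback added in include/linux/io.h further down. A sketch of the intended pairing in a driver's init/fini path, mirroring the amdgpu/radeon hunks; struct vram_aperture is a hypothetical stand-in for each driver's own aperture bookkeeping, not a kernel structure:

#include <linux/io.h>

struct vram_aperture {
    resource_size_t base;
    resource_size_t size;
    int mtrr;
};

static void example_vram_init(struct vram_aperture *ap)
{
    /* Reserve the PAT memtype as WC first, so later userspace mappings
     * of the aperture are tracked, then layer the MTRR on top. The
     * drivers below ignore the return value; a failed reservation just
     * means no WC tracking. */
    arch_io_reserve_memtype_wc(ap->base, ap->size);
    ap->mtrr = arch_phys_wc_add(ap->base, ap->size);
}

static void example_vram_fini(struct vram_aperture *ap)
{
    /* Tear down in the reverse order of init. */
    arch_phys_wc_del(ap->mtrr);
    arch_io_free_memtype_wc(ap->base, ap->size);
}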
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -492,6 +492,10 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)

 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
+    /* reserve PAT memory space to WC for VRAM */
+    arch_io_reserve_memtype_wc(adev->mc.aper_base,
+                   adev->mc.aper_size);
+
     /* Add an MTRR for the VRAM */
     adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                           adev->mc.aper_size);
@@ -507,6 +511,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
     amdgpu_ttm_fini(adev);
     arch_phys_wc_del(adev->mc.vram_mtrr);
+    arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
 }

 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
drivers/gpu/drm/ast/ast_ttm.c
@@ -275,6 +275,8 @@ int ast_mm_init(struct ast_private *ast)
         return ret;
     }

+    arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                   pci_resource_len(dev->pdev, 0));
     ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0));

@@ -283,11 +285,15 @@ int ast_mm_init(struct ast_private *ast)

 void ast_mm_fini(struct ast_private *ast)
 {
+    struct drm_device *dev = ast->dev;
+
     ttm_bo_device_release(&ast->ttm.bdev);

     ast_ttm_global_release(ast);

     arch_phys_wc_del(ast->fb_mtrr);
+    arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                pci_resource_len(dev->pdev, 0));
 }

 void ast_ttm_placement(struct ast_bo *bo, int domain)
drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,6 +275,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
         return ret;
     }

+    arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                   pci_resource_len(dev->pdev, 0));
+
     cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                        pci_resource_len(dev->pdev, 0));

@@ -284,6 +287,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)

 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
+    struct drm_device *dev = cirrus->dev;
+
     if (!cirrus->mm_inited)
         return;

@@ -293,6 +298,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)

     arch_phys_wc_del(cirrus->fb_mtrr);
     cirrus->fb_mtrr = 0;
+    arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                pci_resource_len(dev->pdev, 0));
 }

 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -842,6 +842,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                 I915_USERPTR_UNSYNCHRONIZED))
         return -EINVAL;

+    if (!args->user_size)
+        return -EINVAL;
+
     if (offset_in_page(args->user_ptr | args->user_size))
         return -EINVAL;

drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -274,6 +274,9 @@ int mgag200_mm_init(struct mga_device *mdev)
         return ret;
     }

+    arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                   pci_resource_len(dev->pdev, 0));
+
     mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                      pci_resource_len(dev->pdev, 0));

@@ -282,10 +285,14 @@ int mgag200_mm_init(struct mga_device *mdev)

 void mgag200_mm_fini(struct mga_device *mdev)
 {
+    struct drm_device *dev = mdev->dev;
+
     ttm_bo_device_release(&mdev->ttm.bdev);

     mgag200_ttm_global_release(mdev);

+    arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                pci_resource_len(dev->pdev, 0));
     arch_phys_wc_del(mdev->fb_mtrr);
     mdev->fb_mtrr = 0;
 }
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -397,6 +397,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
     /* VRAM init */
     drm->gem.vram_available = drm->device.info.ram_user;

+    arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+                   device->func->resource_size(device, 1));
+
     ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                   drm->gem.vram_available >> PAGE_SHIFT);
     if (ret) {
@@ -429,6 +432,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
+    struct nvkm_device *device = nvxx_device(&drm->device);
+
     ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
     ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

@@ -438,4 +443,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)

     arch_phys_wc_del(drm->ttm.mtrr);
     drm->ttm.mtrr = 0;
+    arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+                device->func->resource_size(device, 1));
+
 }
drivers/gpu/drm/radeon/radeon_object.c
@@ -447,6 +447,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)

 int radeon_bo_init(struct radeon_device *rdev)
 {
+    /* reserve PAT memory space to WC for VRAM */
+    arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+                   rdev->mc.aper_size);
+
     /* Add an MTRR for the VRAM */
     if (!rdev->fastfb_working) {
         rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -464,6 +468,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
 {
     radeon_ttm_fini(rdev);
     arch_phys_wc_del(rdev->mc.vram_mtrr);
+    arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
 }

 /* Returns how many bytes TTM can move per IB.
drivers/iio/frequency/ad9523.c
@@ -507,7 +507,7 @@ static ssize_t ad9523_store(struct device *dev,
         return ret;

     if (!state)
-        return 0;
+        return len;

     mutex_lock(&indio_dev->mlock);
     switch ((u32)this_attr->address) {
@@ -641,7 +641,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
         code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
             AD9523_CLK_DIST_DIV_REV(ret);
         *val = code / 1000000;
-        *val2 = (code % 1000000) * 10;
+        *val2 = code % 1000000;
         return IIO_VAL_INT_PLUS_MICRO;
     default:
         return -EINVAL;
drivers/iommu/dmar.c
@@ -1315,8 +1315,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
     qi_submit_sync(&desc, iommu);
 }

-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-            u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+            u16 qdep, u64 addr, unsigned mask)
 {
     struct qi_desc desc;

@@ -1331,7 +1331,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
         qdep = 0;

     desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
-           QI_DIOTLB_TYPE;
+           QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);

     qi_submit_sync(&desc, iommu);
 }
drivers/iommu/intel-iommu.c
@@ -419,6 +419,7 @@ struct device_domain_info {
     struct list_head global; /* link to global list */
     u8 bus;         /* PCI bus number */
     u8 devfn;       /* PCI devfn number */
+    u16 pfsid;      /* SRIOV physical function source ID */
     u8 pasid_supported:3;
     u8 pasid_enabled:1;
     u8 pri_supported:1;
@@ -1479,6 +1480,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
         return;

     pdev = to_pci_dev(info->dev);
+    /* For IOMMU that supports device IOTLB throttling (DIT), we assign
+     * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
+     * queue depth at PF level. If DIT is not set, PFSID will be treated as
+     * reserved, which should be set to 0.
+     */
+    if (!ecap_dit(info->iommu->ecap))
+        info->pfsid = 0;
+    else {
+        struct pci_dev *pf_pdev;
+
+        /* pdev will be returned if device is not a vf */
+        pf_pdev = pci_physfn(pdev);
+        info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+    }

 #ifdef CONFIG_INTEL_IOMMU_SVM
     /* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1537,7 +1552,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,

         sid = info->bus << 8 | info->devfn;
         qdep = info->ats_qdep;
-        qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+        qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+                qdep, addr, mask);
     }
     spin_unlock_irqrestore(&device_domain_lock, flags);
 }
drivers/md/bcache/writeback.c
@@ -462,9 +462,11 @@ static int bch_writeback_thread(void *arg)
          * data on cache. BCACHE_DEV_DETACHING flag is set in
          * bch_cached_dev_detach().
          */
-        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+            up_write(&dc->writeback_lock);
             break;
+        }
         }

         up_write(&dc->writeback_lock);
drivers/md/dm-cache-metadata.c
@@ -337,7 +337,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
     disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
     memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
     memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
-    disk_super->policy_hint_size = 0;
+    disk_super->policy_hint_size = cpu_to_le32(0);

     __copy_sm_root(cmd, disk_super);

@@ -652,6 +652,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
     disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
     disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
     disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+    disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);

     disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
     disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
drivers/misc/vmw_balloon.c
@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
         success = false;
     }

-    if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+    /*
+     * 2MB pages are only supported with batching. If batching is for some
+     * reason disabled, do not use 2MB pages, since otherwise the legacy
+     * mechanism is used with 2MB pages, causing a failure.
+     */
+    if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+        (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
         b->supported_page_sizes = 2;
     else
         b->supported_page_sizes = 1;
@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,

     pfn32 = (u32)pfn;
     if (pfn32 != pfn)
-        return -1;
+        return -EINVAL;

     STATS_INC(b->stats.lock[false]);

@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,

     pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
     STATS_INC(b->stats.lock_fail[false]);
-    return 1;
+    return -EIO;
 }

 static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,

     locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                 target);
-    if (locked > 0) {
+    if (locked) {
         STATS_INC(b->stats.refused_alloc[false]);

-        if (hv_status == VMW_BALLOON_ERROR_RESET ||
-                hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+        if (locked == -EIO &&
+            (hv_status == VMW_BALLOON_ERROR_RESET ||
+             hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
             vmballoon_free_page(page, false);
             return -EIO;
         }
@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
         } else {
             vmballoon_free_page(page, false);
         }
-        return -EIO;
+        return locked;
     }

     /* track allocated page */
@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
  */
 static int vmballoon_vmci_init(struct vmballoon *b)
 {
-    int error = 0;
+    unsigned long error, dummy;

-    if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
-        error = vmci_doorbell_create(&b->vmci_doorbell,
-                VMCI_FLAG_DELAYED_CB,
-                VMCI_PRIVILEGE_FLAG_RESTRICTED,
-                vmballoon_doorbell, b);
-
-        if (error == VMCI_SUCCESS) {
-            VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
-                    b->vmci_doorbell.context,
-                    b->vmci_doorbell.resource, error);
-            STATS_INC(b->stats.doorbell_set);
-        }
-    }
-
-    if (error != 0) {
-        vmballoon_vmci_cleanup(b);
-
-        return -EIO;
-    }
+    if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+        return 0;
+
+    error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+                     VMCI_PRIVILEGE_FLAG_RESTRICTED,
+                     vmballoon_doorbell, b);
+
+    if (error != VMCI_SUCCESS)
+        goto fail;
+
+    error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+                   b->vmci_doorbell.resource, dummy);
+
+    STATS_INC(b->stats.doorbell_set);
+
+    if (error != VMW_BALLOON_SUCCESS)
+        goto fail;

     return 0;
+fail:
+    vmballoon_vmci_cleanup(b);
+    return -EIO;
 }

 /*
@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)

     return 0;
 }
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);

 static void __exit vmballoon_exit(void)
 {
@ -902,6 +902,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
|
|||
|
||||
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
|
||||
netif_carrier_on(dev->net);
|
||||
|
||||
tasklet_schedule(&dev->bh);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -1361,8 +1363,6 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
|
|||
netif_dbg(dev, ifup, dev->net,
|
||||
"MAC address set to random addr");
|
||||
}
|
||||
|
||||
tasklet_schedule(&dev->bh);
|
||||
}
|
||||
|
||||
ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
|
||||
|
|
drivers/pwm/pwm-tiehrpwm.c
@@ -384,6 +384,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
         aqcsfrc_mask = AQCSFRC_CSFA_MASK;
     }

+    /* Update shadow register first before modifying active register */
+    ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
     /*
      * Changes to immediate action on Action Qualifier. This puts
      * Action Qualifier control on PWM output from next TBCLK
drivers/spi/spi-davinci.c
@@ -220,7 +220,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
     pdata = &dspi->pdata;

     /* program delay transfers if tx_delay is non zero */
-    if (spicfg->wdelay)
+    if (spicfg && spicfg->wdelay)
         spidat1 |= SPIDAT1_WDEL;

     /*
drivers/video/fbdev/core/fbmem.c
@@ -1687,12 +1687,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
     return 0;
 }

-static int do_unregister_framebuffer(struct fb_info *fb_info)
+static int unbind_console(struct fb_info *fb_info)
 {
     struct fb_event event;
-    int i, ret = 0;
+    int ret;
+    int i = fb_info->node;

-    i = fb_info->node;
     if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
         return -EINVAL;

@@ -1707,17 +1707,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
     unlock_fb_info(fb_info);
     console_unlock();

+    return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+    struct fb_event event;
+    int ret;
+
+    ret = unbind_console(fb_info);
+
     if (ret)
         return -EINVAL;

     pm_vt_switch_unregister(fb_info->dev);

-    unlink_framebuffer(fb_info);
+    __unlink_framebuffer(fb_info);
     if (fb_info->pixmap.addr &&
         (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
         kfree(fb_info->pixmap.addr);
     fb_destroy_modelist(&fb_info->modelist);
-    registered_fb[i] = NULL;
+    registered_fb[fb_info->node] = NULL;
     num_registered_fb--;
     fb_cleanup_device(fb_info);
     event.info = fb_info;
@@ -1730,7 +1742,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
     return 0;
 }

-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
 {
     int i;

@@ -1742,6 +1754,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
         device_destroy(fb_class, MKDEV(FB_MAJOR, i));
         fb_info->dev = NULL;
     }
+
+    return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+    int ret;
+
+    ret = __unlink_framebuffer(fb_info);
+    if (ret)
+        return ret;
+
+    unbind_console(fb_info);
+
     return 0;
 }
 EXPORT_SYMBOL(unlink_framebuffer);
fs/9p/xattr.c
@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
 {
     struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
     struct iov_iter from;
-    int retval;
+    int retval, err;

     iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);

@@ -128,7 +128,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
              retval);
     else
         p9_client_write(fid, 0, &from, &retval);
-    p9_client_clunk(fid);
+    err = p9_client_clunk(fid);
+    if (!retval && err)
+        retval = err;
     return retval;
 }

fs/nfs/blocklayout/dev.c
@@ -162,7 +162,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
     chunk = div_u64(offset, dev->chunk_size);
     div_u64_rem(chunk, dev->nr_children, &chunk_idx);

-    if (chunk_idx > dev->nr_children) {
+    if (chunk_idx >= dev->nr_children) {
         dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
             __func__, chunk_idx, offset, dev->chunk_size);
         /* error, should not happen */
fs/quota/quota.c
@@ -17,6 +17,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>

 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
                      qid_t id)
@@ -644,6 +645,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,

     if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
         return -EINVAL;
+    type = array_index_nospec(type, MAXQUOTAS);
     /*
      * Quota not supported on this fs? Check this before s_quota_types
      * since they needn't be set if quota is not supported at all.
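The quota fix above is the standard Spectre-v1 hardening recipe: after the bounds check succeeds, clamp the attacker-influenced index with array_index_nospec() so that a mispredicted branch cannot read out of bounds speculatively. A minimal sketch of the same recipe; example_lookup() and its table are hypothetical, only array_index_nospec() is the real kernel API:

#include <linux/errno.h>
#include <linux/nospec.h>

#define EXAMPLE_NR_TYPES 8    /* stand-in for MAXQUOTAS */

static const int example_flags[EXAMPLE_NR_TYPES];

static int example_lookup(int type)
{
    if (type < 0 || type >= EXAMPLE_NR_TYPES)
        return -EINVAL;
    /*
     * Even if the bounds check above is mispredicted, the clamped
     * index cannot reach beyond the array during speculation.
     */
    type = array_index_nospec(type, EXAMPLE_NR_TYPES);
    return example_flags[type];
}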
fs/ubifs/journal.c
@@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
     spin_lock(&ui->ui_lock);
     ui->synced_i_size = ui->ui_size;
     spin_unlock(&ui->ui_lock);
+    if (xent) {
+        spin_lock(&host_ui->ui_lock);
+        host_ui->synced_i_size = host_ui->ui_size;
+        spin_unlock(&host_ui->ui_lock);
+    }
     mark_inode_clean(c, ui);
     mark_inode_clean(c, host_ui);
     return 0;
@@ -1107,7 +1112,7 @@ static int recomp_data_node(const struct ubifs_info *c,
     int err, len, compr_type, out_len;

     out_len = le32_to_cpu(dn->size);
-    buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+    buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
     if (!buf)
         return -ENOMEM;

@@ -1186,7 +1191,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
     else if (err)
         goto out_free;
     else {
-        if (le32_to_cpu(dn->size) <= dlen)
+        int dn_len = le32_to_cpu(dn->size);
+
+        if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+            ubifs_err(c, "bad data node (block %u, inode %lu)",
+                  blk, inode->i_ino);
+            ubifs_dump_node(c, dn);
+            goto out_free;
+        }
+
+        if (dn_len <= dlen)
             dlen = 0; /* Nothing to do */
         else {
             int compr_type = le16_to_cpu(dn->compr_type);
fs/ubifs/lprops.c
@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
         }
     }

-    buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
-    if (!buf)
-        return -ENOMEM;
-
     /*
      * After an unclean unmount, empty and freeable LEBs
      * may contain garbage - do not scan them.
@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
         return LPT_SCAN_CONTINUE;
     }

+    buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+    if (!buf)
+        return -ENOMEM;
+
     sleb = ubifs_scan(c, lnum, 0, buf, 0);
     if (IS_ERR(sleb)) {
         ret = PTR_ERR(sleb);
fs/xattr.c
@@ -453,7 +453,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
     if (error > 0) {
         if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
             (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-            posix_acl_fix_xattr_to_user(kvalue, size);
+            posix_acl_fix_xattr_to_user(kvalue, error);
         if (size && copy_to_user(value, kvalue, error))
             error = -EFAULT;
     } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
include/linux/intel-iommu.h
@@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
  * Extended Capability Register
  */

+#define ecap_dit(e)     ((e >> 41) & 0x1)
 #define ecap_pasid(e)       ((e >> 40) & 0x1)
 #define ecap_pss(e)     ((e >> 35) & 0x1f)
 #define ecap_eafs(e)        ((e >> 34) & 0x1)
@@ -294,6 +295,7 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)   ((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE   1
 #define QI_DEV_IOTLB_MAX_INVS   32

@@ -318,6 +320,7 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p)  (((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)  ((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)  ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS  32

 #define QI_PGRP_IDX(idx)    (((u64)(idx)) << 55)
@@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
                  u8 fm, u64 type);
 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
               unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-                   u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+            u16 qdep, u64 addr, unsigned mask);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

 extern int dmar_ir_support(void);
include/linux/io.h
@@ -154,4 +154,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);

+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and wants to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+                         resource_size_t size)
+{
+    return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+                       resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */
include/video/udlfb.h
@@ -87,7 +87,7 @@ struct dlfb_data {
 #define MIN_RAW_PIX_BYTES   2
 #define MIN_RAW_CMD_BYTES   (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)

-#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY    msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
 #define DL_DEFIO_WRITE_DISABLE  (HZ*60) /* "disable" with long delay */

 /* remove these once align.h patch is taken into kernel */
kernel/kthread.c
@@ -313,10 +313,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     task = create->result;
     if (!IS_ERR(task)) {
         static const struct sched_param param = { .sched_priority = 0 };
+        char name[TASK_COMM_LEN];
         va_list args;

         va_start(args, namefmt);
-        vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+        /*
+         * task is already visible to other tasks, so updating
+         * COMM must be protected.
+         */
+        vsnprintf(name, sizeof(name), namefmt, args);
+        set_task_comm(task, name);
         va_end(args);
         /*
          * root may have changed our (kthreadd's) priority or CPU mask.
kernel/power/Kconfig
@@ -115,6 +115,7 @@ config PM_SLEEP
     def_bool y
     depends on SUSPEND || HIBERNATE_CALLBACKS
     select PM
+    select SRCU

 config PM_SLEEP_SMP
     def_bool y
kernel/sys.c
@@ -1144,18 +1144,19 @@ static int override_release(char __user *release, size_t len)

 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
-    int errno = 0;
+    struct new_utsname tmp;

     down_read(&uts_sem);
-    if (copy_to_user(name, utsname(), sizeof *name))
-        errno = -EFAULT;
+    memcpy(&tmp, utsname(), sizeof(tmp));
     up_read(&uts_sem);
+    if (copy_to_user(name, &tmp, sizeof(tmp)))
+        return -EFAULT;

-    if (!errno && override_release(name->release, sizeof(name->release)))
-        errno = -EFAULT;
-    if (!errno && override_architecture(name))
-        errno = -EFAULT;
-    return errno;
+    if (override_release(name->release, sizeof(name->release)))
+        return -EFAULT;
+    if (override_architecture(name))
+        return -EFAULT;
+    return 0;
 }

 #ifdef __ARCH_WANT_SYS_OLD_UNAME
@@ -1164,55 +1165,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
  */
 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 {
-    int error = 0;
+    struct old_utsname tmp;

     if (!name)
         return -EFAULT;

     down_read(&uts_sem);
-    if (copy_to_user(name, utsname(), sizeof(*name)))
-        error = -EFAULT;
+    memcpy(&tmp, utsname(), sizeof(tmp));
     up_read(&uts_sem);
+    if (copy_to_user(name, &tmp, sizeof(tmp)))
+        return -EFAULT;

-    if (!error && override_release(name->release, sizeof(name->release)))
-        error = -EFAULT;
-    if (!error && override_architecture(name))
-        error = -EFAULT;
-    return error;
+    if (override_release(name->release, sizeof(name->release)))
+        return -EFAULT;
+    if (override_architecture(name))
+        return -EFAULT;
+    return 0;
 }

 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 {
-    int error;
+    struct oldold_utsname tmp = {};

     if (!name)
         return -EFAULT;
-    if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
-        return -EFAULT;

     down_read(&uts_sem);
-    error = __copy_to_user(&name->sysname, &utsname()->sysname,
-                   __OLD_UTS_LEN);
-    error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-    error |= __copy_to_user(&name->nodename, &utsname()->nodename,
-                __OLD_UTS_LEN);
-    error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-    error |= __copy_to_user(&name->release, &utsname()->release,
-                __OLD_UTS_LEN);
-    error |= __put_user(0, name->release + __OLD_UTS_LEN);
-    error |= __copy_to_user(&name->version, &utsname()->version,
-                __OLD_UTS_LEN);
-    error |= __put_user(0, name->version + __OLD_UTS_LEN);
-    error |= __copy_to_user(&name->machine, &utsname()->machine,
-                __OLD_UTS_LEN);
-    error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+    memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+    memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+    memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
+    memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
+    memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
     up_read(&uts_sem);
+    if (copy_to_user(name, &tmp, sizeof(tmp)))
+        return -EFAULT;

-    if (!error && override_architecture(name))
-        error = -EFAULT;
-    if (!error && override_release(name->release, sizeof(name->release)))
-        error = -EFAULT;
-    return error ? -EFAULT : 0;
+    if (override_architecture(name))
+        return -EFAULT;
+    if (override_release(name->release, sizeof(name->release)))
+        return -EFAULT;
+    return 0;
 }
 #endif

@@ -1226,17 +1218,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)

     if (len < 0 || len > __NEW_UTS_LEN)
         return -EINVAL;
-    down_write(&uts_sem);
     errno = -EFAULT;
     if (!copy_from_user(tmp, name, len)) {
-        struct new_utsname *u = utsname();
+        struct new_utsname *u;

+        down_write(&uts_sem);
+        u = utsname();
         memcpy(u->nodename, tmp, len);
         memset(u->nodename + len, 0, sizeof(u->nodename) - len);
         errno = 0;
         uts_proc_notify(UTS_PROC_HOSTNAME);
+        up_write(&uts_sem);
     }
-    up_write(&uts_sem);
     return errno;
 }

@@ -1244,8 +1237,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)

 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 {
-    int i, errno;
+    int i;
     struct new_utsname *u;
+    char tmp[__NEW_UTS_LEN + 1];

     if (len < 0)
         return -EINVAL;
@@ -1254,11 +1248,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
     i = 1 + strlen(u->nodename);
     if (i > len)
         i = len;
-    errno = 0;
-    if (copy_to_user(name, u->nodename, i))
-        errno = -EFAULT;
+    memcpy(tmp, u->nodename, i);
     up_read(&uts_sem);
-    return errno;
+    if (copy_to_user(name, tmp, i))
+        return -EFAULT;
+    return 0;
 }

 #endif
@@ -1277,17 +1271,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
     if (len < 0 || len > __NEW_UTS_LEN)
         return -EINVAL;

-    down_write(&uts_sem);
     errno = -EFAULT;
     if (!copy_from_user(tmp, name, len)) {
-        struct new_utsname *u = utsname();
+        struct new_utsname *u;

+        down_write(&uts_sem);
+        u = utsname();
         memcpy(u->domainname, tmp, len);
         memset(u->domainname + len, 0, sizeof(u->domainname) - len);
         errno = 0;
         uts_proc_notify(UTS_PROC_DOMAINNAME);
+        up_write(&uts_sem);
     }
-    up_write(&uts_sem);
     return errno;
 }

kernel/trace/blktrace.c
@@ -1716,6 +1716,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
     mutex_lock(&bdev->bd_mutex);

     if (attr == &dev_attr_enable) {
+        if (!!value == !!q->blk_trace) {
+            ret = 0;
+            goto out_unlock_bdev;
+        }
         if (value)
             ret = blk_trace_setup_queue(q, bdev);
         else
kernel/trace/trace.c
@@ -6648,7 +6648,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,

     if (buffer) {
         mutex_lock(&trace_types_lock);
-        if (val) {
+        if (!!val == tracer_tracing_is_on(tr)) {
+            val = 0; /* do nothing */
+        } else if (val) {
             tracer_tracing_on(tr);
             if (tr->current_trace->start)
                 tr->current_trace->start(tr);
kernel/trace/trace_uprobe.c
@@ -969,7 +969,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)

         list_del_rcu(&link->list);
         /* synchronize with u{,ret}probe_trace_func */
-        synchronize_sched();
+        synchronize_rcu();
         kfree(link);

         if (!list_empty(&tu->tp.files))
kernel/user_namespace.c
@@ -602,9 +602,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
     struct uid_gid_map new_map;
     unsigned idx;
     struct uid_gid_extent *extent = NULL;
-    unsigned long page = 0;
+    unsigned long page;
     char *kbuf, *pos, *next_line;
-    ssize_t ret = -EINVAL;
+    ssize_t ret;
+
+    /* Only allow < page size writes at the beginning of the file */
+    if ((*ppos != 0) || (count >= PAGE_SIZE))
+        return -EINVAL;
+
+    /* Get a buffer */
+    page = __get_free_page(GFP_TEMPORARY);
+    kbuf = (char *) page;
+    if (!page)
+        return -ENOMEM;
+
+    /* Slurp in the user data */
+    if (copy_from_user(kbuf, buf, count)) {
+        free_page(page);
+        return -EFAULT;
+    }
+    kbuf[count] = '\0';

     /*
      * The userns_state_mutex serializes all writes to any given map.
@@ -638,24 +655,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
     if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
         goto out;

-    /* Get a buffer */
-    ret = -ENOMEM;
-    page = __get_free_page(GFP_TEMPORARY);
-    kbuf = (char *) page;
-    if (!page)
-        goto out;
-
-    /* Only allow < page size writes at the beginning of the file */
-    ret = -EINVAL;
-    if ((*ppos != 0) || (count >= PAGE_SIZE))
-        goto out;
-
-    /* Slurp in the user data */
-    ret = -EFAULT;
-    if (copy_from_user(kbuf, buf, count))
-        goto out;
-    kbuf[count] = '\0';
-
     /* Parse the user data */
     ret = -EINVAL;
     pos = kbuf;
@ -17,7 +17,7 @@
|
|||
|
||||
#ifdef CONFIG_PROC_SYSCTL
|
||||
|
||||
static void *get_uts(struct ctl_table *table, int write)
|
||||
static void *get_uts(struct ctl_table *table)
|
||||
{
|
||||
char *which = table->data;
|
||||
struct uts_namespace *uts_ns;
|
||||
|
@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
|
|||
uts_ns = current->nsproxy->uts_ns;
|
||||
which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
|
||||
|
||||
if (!write)
|
||||
down_read(&uts_sem);
|
||||
else
|
||||
down_write(&uts_sem);
|
||||
return which;
|
||||
}
|
||||
|
||||
static void put_uts(struct ctl_table *table, int write, void *which)
|
||||
{
|
||||
if (!write)
|
||||
up_read(&uts_sem);
|
||||
else
|
||||
up_write(&uts_sem);
|
||||
}
|
||||
|
||||
/*
|
||||
* Special case of dostring for the UTS structure. This has locks
|
||||
* to observe. Should this be in kernel/sys.c ????
|
||||
|
@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
|
|||
{
|
||||
struct ctl_table uts_table;
|
||||
int r;
|
||||
memcpy(&uts_table, table, sizeof(uts_table));
|
||||
uts_table.data = get_uts(table, write);
|
||||
r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
|
||||
put_uts(table, write, uts_table.data);
|
||||
char tmp_data[__NEW_UTS_LEN + 1];
|
||||
|
||||
if (write)
|
||||
memcpy(&uts_table, table, sizeof(uts_table));
|
||||
uts_table.data = tmp_data;
|
||||
|
||||
/*
|
||||
* Buffer the value in tmp_data so that proc_dostring() can be called
|
||||
* without holding any locks.
|
||||
* We also need to read the original value in the write==1 case to
|
||||
* support partial writes.
|
||||
*/
|
||||
down_read(&uts_sem);
|
||||
memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
|
||||
up_read(&uts_sem);
|
||||
r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
|
||||
|
||||
if (write) {
|
||||
/*
|
||||
* Write back the new value.
|
||||
* Note that, since we dropped uts_sem, the result can
|
||||
* theoretically be incorrect if there are two parallel writes
|
||||
* at non-zero offsets to the same sysctl.
|
||||
*/
|
||||
down_write(&uts_sem);
|
||||
memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
|
||||
up_write(&uts_sem);
|
||||
proc_sys_poll_notify(table->poll);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
|
mm/memory.c
@@ -361,15 +361,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
     struct mmu_table_batch **batch = &tlb->batch;

-    /*
-     * When there's less then two users of this mm there cannot be a
-     * concurrent page-table walk.
-     */
-    if (atomic_read(&tlb->mm->mm_users) < 2) {
-        __tlb_remove_table(table);
-        return;
-    }
-
     if (*batch == NULL) {
         *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
         if (*batch == NULL) {
net/9p/client.c
@@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c)
 {
     int err = 0;
     struct p9_req_t *req;
-    char *version;
+    char *version = NULL;
     int msize;

     p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
net/9p/trans_fd.c
@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
     spin_lock_irqsave(&p9_poll_lock, flags);
     list_del_init(&m->poll_pending_link);
     spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+    flush_work(&p9_poll_work);
 }

 /**
@@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
     if (err < 0)
         return err;

-    if (valid_ipaddr4(addr) < 0)
+    if (addr == NULL || valid_ipaddr4(addr) < 0)
         return -EINVAL;

     csocket = NULL;
@@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)

     csocket = NULL;

+    if (addr == NULL)
+        return -EINVAL;
+
     if (strlen(addr) >= UNIX_PATH_MAX) {
         pr_err("%s (%d): address too long: %s\n",
                __func__, task_pid_nr(current), addr);
net/9p/trans_rdma.c
@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
     struct ib_qp_init_attr qp_attr;
     struct ib_cq_init_attr cq_attr = {};

+    if (addr == NULL)
+        return -EINVAL;
+
     /* Parse the transport specific mount options */
     err = parse_opts(args, &opts);
     if (err < 0)
net/9p/trans_virtio.c
@@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
         s = rest_of_page(data);
         if (s > count)
             s = count;
-        BUG_ON(index > limit);
+        BUG_ON(index >= limit);
         /* Make sure we don't terminate early. */
         sg_unmark_end(&sg[index]);
         sg_set_buf(&sg[index++], data, s);
@@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
         s = PAGE_SIZE - data_off;
         if (s > count)
             s = count;
+        BUG_ON(index >= limit);
         /* Make sure we don't terminate early. */
         sg_unmark_end(&sg[index]);
         sg_set_page(&sg[index++], pdata[i++], s, data_off);
@@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
     p9_debug(P9_DEBUG_TRANS, "virtio request\n");

     if (uodata) {
+        __le32 sz;
         int n = p9_get_mapped_pages(chan, &out_pages, uodata,
                         outlen, &offs, &need_drop);
         if (n < 0)
@@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
             memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
             outlen = n;
         }
+        /* The size field of the message must include the length of the
+         * header and the length of the data.  We didn't actually know
+         * the length of the data until this point so add it in now.
+         */
+        sz = cpu_to_le32(req->tc->size + outlen);
+        memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
     } else if (uidata) {
         int n = p9_get_mapped_pages(chan, &in_pages, uidata,
                         inlen, &offs, &need_drop);
@@ -646,6 +654,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
     int ret = -ENOENT;
     int found = 0;

+    if (devname == NULL)
+        return -EINVAL;
+
     mutex_lock(&virtio_9p_lock);
     list_for_each_entry(chan, &virtio_chan_list, chan_list) {
         if (!strncmp(devname, chan->tag, chan->tag_len) &&
@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
|
|||
/* We must take a copy of the skb before we modify/replace the ipv6
|
||||
* header as the header could be used elsewhere
|
||||
*/
|
||||
if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
|
||||
skb_tailroom(skb) < ldev->needed_tailroom)) {
|
||||
struct sk_buff *nskb;
|
||||
|
||||
nskb = skb_copy_expand(skb, ldev->needed_headroom,
|
||||
ldev->needed_tailroom, GFP_ATOMIC);
|
||||
if (likely(nskb)) {
|
||||
consume_skb(skb);
|
||||
skb = nskb;
|
||||
} else {
|
||||
kfree_skb(skb);
|
||||
return NET_XMIT_DROP;
|
||||
}
|
||||
} else {
|
||||
skb = skb_unshare(skb, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
return NET_XMIT_DROP;
|
||||
}
|
||||
|
||||
ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
|
||||
if (ret < 0) {
|
||||
|
|
net/mac802154/tx.c
@@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
     int ret;

     if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
-        u16 crc = crc_ccitt(0, skb->data, skb->len);
+        struct sk_buff *nskb;
+        u16 crc;
+
+        if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+            nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+                           GFP_ATOMIC);
+            if (likely(nskb)) {
+                consume_skb(skb);
+                skb = nskb;
+            } else {
+                goto err_tx;
+            }
+        }

+        crc = crc_ccitt(0, skb->data, skb->len);
         put_unaligned_le16(crc, skb_put(skb, 2));
     }

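Both networking fixes above share one reallocation pattern: when an skb lacks the head/tailroom a path needs, replace it with an expanded copy via skb_copy_expand() and release the original, keeping the cheap in-place path for the common case. A condensed sketch of that pattern; ensure_tailroom() is a hypothetical helper, not an existing kernel function:

#include <linux/skbuff.h>

/* Grow tailroom only when needed; on failure the skb is freed and the
 * caller must treat a NULL return as a drop. */
static struct sk_buff *ensure_tailroom(struct sk_buff *skb,
                       unsigned int needed)
{
    struct sk_buff *nskb;

    if (likely(skb_tailroom(skb) >= needed))
        return skb;

    nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
    if (!nskb) {
        kfree_skb(skb);
        return NULL;
    }
    consume_skb(skb);    /* the expanded copy replaces the original */
    return nskb;
}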
tools/perf/util/auxtrace.c
@@ -186,6 +186,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
     for (i = 0; i < queues->nr_queues; i++) {
         list_splice_tail(&queues->queue_array[i].head,
                  &queue_array[i].head);
+        queue_array[i].tid = queues->queue_array[i].tid;
+        queue_array[i].cpu = queues->queue_array[i].cpu;
+        queue_array[i].set = queues->queue_array[i].set;
         queue_array[i].priv = queues->queue_array[i].priv;
     }
