This is the 4.4.61 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAljuA8EACgkQONu9yGCS
aT5smg//fcD0laNCo+dhbbadB2utsxnDRD0diRusmvJfmRYXysW0amxbdvxRI5+t
bVhGRRaSr+XIpmUYC3p7QHbJ3/ct1Ikee3aK1yyTNwyd8/EGhl++1F7nnQ7FU5nb
iGV09kDvddsX9SbZqkPyB1yosXfzQbSu5G5eQX+lqHsXU9gCLdmaq73NQBygSUq8
EVQivUvLlvRz8zQGKA5hUqz71G8V1mLmc2b1s9r6e5mUuPXBM+UdxbvlLA+iOFRT
WuPTU8xNlFj55CckaGGwLTXSfIYmPl8UCgSdvOTo/TPbBEE2TIaQGn/0jvuqVns7
sDs9s9c3rNWVMc0KMZPJ6b7WIuGBgiDjSFGu2hqqNvG+X33s6qCvmnq2ZqLSVxs/
iXqKr8eC1YP9Sr6okhdMbUcS8jqqD99YDvH94ulvfC3nx9WvMS/2JY7SBbdh4nyN
Jb4j3BeS4C4TXRtWuPo7ks3PbRj8mvrpKdAJ74zoKZNcjXd8PvtZem2P9UzYM5K9
9PS4T0Ne5eYHbOehWMC4t95Ijl/mYSKYCygltl2Fer29gEMGCJ4dGt3evfyaFfFZ
2l43A+WSeYdzQRsuPnFN/oMr/Q4o1U1+ZC5HCe/1Qx/FyfSonw5/hagVWzR6IxyJ
LsbwmxQrZrZRy3vT4gBnoEe7xdwUgenuIoeGMJfjgpLaQiC0osU=
=00n+
-----END PGP SIGNATURE-----

Merge 4.4.61 into android-4.4

Changes in 4.4.61:
	drm/vmwgfx: Type-check lookups of fence objects
	drm/vmwgfx: NULL pointer dereference in vmw_surface_define_ioctl()
	drm/vmwgfx: avoid calling vzalloc with a 0 size in vmw_get_cap_3d_ioctl()
	drm/ttm, drm/vmwgfx: Relax permission checking when opening surfaces
	drm/vmwgfx: Remove getparam error message
	drm/vmwgfx: fix integer overflow in vmw_surface_define_ioctl()
	sysfs: be careful of error returns from ops->show()
	staging: android: ashmem: lseek failed due to no FMODE_LSEEK.
	arm/arm64: KVM: Take mmap_sem in stage2_unmap_vm
	arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region
	iio: bmg160: reset chip when probing
	Reset TreeId to zero on SMB2 TREE_CONNECT
	ptrace: fix PTRACE_LISTEN race corrupting task->state
	ring-buffer: Fix return value check in test_ringbuffer()
	metag/usercopy: Drop unused macros
	metag/usercopy: Fix alignment error checking
	metag/usercopy: Add early abort to copy_to_user
	metag/usercopy: Zero rest of buffer from copy_from_user
	metag/usercopy: Set flags before ADDZ
	metag/usercopy: Fix src fixup in from user rapf loops
	metag/usercopy: Add missing fixups
	powerpc/mm: Add missing global TLB invalidate if cxl is active
	powerpc: Don't try to fix up misaligned load-with-reservation instructions
	nios2: reserve boot memory for device tree
	s390/decompressor: fix initrd corruption caused by bss clear
	s390/uaccess: get_user() should zero on failure (again)
	MIPS: Force o32 fp64 support on 32bit MIPS64r6 kernels
	MIPS: ralink: Fix typos in rt3883 pinctrl
	MIPS: End spinlocks with .insn
	MIPS: Lantiq: fix missing xbar kernel panic
	MIPS: Flush wrong invalid FTLB entry for huge page
	mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
	Linux 4.4.61

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e3b87b234b
27 changed files with 348 additions and 323 deletions
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 60
+SUBLEVEL = 61
 EXTRAVERSION =
 NAME = Blurry Fish Butt
arch/arm/kvm/mmu.c
@@ -796,6 +796,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -803,6 +804,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1759,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1802,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1815,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1823,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
 
arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-						      const void __user *from,
-						      unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
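The new copy_from_user() above encodes the zeroing contract that the usercopy.c hunks below rely on: on a partial fault only the tail that was never copied is zeroed, and the return value counts exactly those bytes. A minimal user-space sketch of that contract follows; the names are illustrative, not kernel API.

```c
#include <string.h>

/* fault_copy plays the role of raw_copy_from_user: it returns the number
 * of bytes NOT copied and (by assumption, matching the patched semantics)
 * never touches the destination past the bytes it successfully copied. */
static unsigned long
copy_from_user_like(void *to, const void *from, unsigned long n,
		    unsigned long (*fault_copy)(void *, const void *,
						unsigned long))
{
	unsigned long res = fault_copy(to, from, n);

	/* Zero only the uncopied tail: to + (n - res) is the first byte
	 * the copy did not reach. Zeroing all n bytes, as the old code
	 * did on its failure path, would wipe data already copied. */
	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}
```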
arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
 		COPY						\
 		"1:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
-		"	MOV D1Ar1,#0\n"				\
 		FIXUP						\
 		"	MOVT    D1Ar1,#HI(1b)\n"		\
 		"	JUMP    D1Ar1,#LO(1b)\n"		\
@@ -260,27 +259,31 @@
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #32\n"				\
 		"23:\n"						\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"SUB	%3, %3, #32\n"				\
 		"24:\n"						\
+		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"25:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:\n"						\
 		"SUB	%3, %3, #32\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 								\
 		"MOV	RAPF, %1\n"				\
-		"25:\n"						\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"26:\n"						\
-		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #32\n"				\
 		"27:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"28:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%0, %0, #8\n"				\
 		"29:\n"						\
+		"SUB	%3, %3, #32\n"				\
+		"30:\n"						\
+		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"31:\n"						\
+		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:\n"						\
+		"SUB	%0, %0, #8\n"				\
+		"33:\n"						\
 		"SETL	[%0++], D0.7, D1.7\n"			\
 		"SUB	%3, %3, #32\n"				\
 		"1:"						\
@@ -312,11 +315,15 @@
 		"	.long 26b,3b\n"	\
 		"	.long 27b,3b\n"	\
 		"	.long 28b,3b\n"	\
-		"	.long 29b,4b\n"	\
+		"	.long 29b,3b\n"	\
+		"	.long 30b,3b\n"	\
+		"	.long 31b,3b\n"	\
+		"	.long 32b,3b\n"	\
+		"	.long 33b,4b\n"	\
 		"	.previous\n"	\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
  *
@@ -342,7 +349,7 @@
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -403,47 +410,55 @@
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #16\n"				\
 		"23:\n"						\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"24:\n"						\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"SUB	%3, %3, #16\n"				\
-		"25:\n"						\
+		"24:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"26:\n"						\
+		"25:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"27:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"28:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:\n"						\
+		"SUB	%3, %3, #16\n"				\
+		"30:\n"						\
+		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"31:\n"						\
+		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 								\
 		"MOV	RAPF, %1\n"				\
-		"29:\n"						\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"30:\n"						\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #16\n"				\
-		"31:\n"						\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"32:\n"						\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #16\n"				\
 		"33:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"34:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #16\n"				\
 		"35:\n"						\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"SUB	%3, %3, #16\n"				\
 		"36:\n"						\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%0, %0, #4\n"				\
+		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"37:\n"						\
+		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"38:\n"						\
+		"SUB	%3, %3, #16\n"				\
+		"39:\n"						\
+		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"40:\n"						\
+		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"41:\n"						\
+		"SUB	%3, %3, #16\n"				\
+		"42:\n"						\
+		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"43:\n"						\
+		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"44:\n"						\
+		"SUB	%0, %0, #4\n"				\
+		"45:\n"						\
 		"SETD	[%0++], D0.7\n"				\
 		"SUB	%3, %3, #16\n"				\
 		"1:"						\
@@ -483,11 +498,19 @@
 		"	.long 34b,3b\n"	\
 		"	.long 35b,3b\n"	\
 		"	.long 36b,3b\n"	\
-		"	.long 37b,4b\n"	\
+		"	.long 37b,3b\n"	\
+		"	.long 38b,3b\n"	\
+		"	.long 39b,3b\n"	\
+		"	.long 40b,3b\n"	\
+		"	.long 41b,3b\n"	\
+		"	.long 42b,3b\n"	\
+		"	.long 43b,3b\n"	\
+		"	.long 44b,3b\n"	\
+		"	.long 45b,4b\n"	\
 		"	.previous\n"	\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
  *
@@ -513,7 +536,7 @@
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_to_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
 		while (n > 0) {
 			__asm_copy_to_user_1(dst, src, retn);
 			n--;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_to_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
 		while (n >= 2) {
 			__asm_copy_to_user_2(dst, src, retn);
 			n -= 2;
+			if (retn)
+				return retn + n;
 		}
 	}
 
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 #endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	while (n >= 16) {
 		__asm_copy_to_user_16(dst, src, retn);
 		n -= 16;
+		if (retn)
+			return retn + n;
 	}
 
 	while (n >= 4) {
 		__asm_copy_to_user_4(dst, src, retn);
 		n -= 4;
+		if (retn)
+			return retn + n;
 	}
 
 	switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		break;
 	}
 
+	/*
+	 * If we get here, retn correctly reflects the number of failing
+	 * bytes.
+	 */
 	return retn;
 }
 EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"2:	SETB [%0++],D1Ar1\n",	\
-		"3:	ADD  %2,%2,#1\n"	\
-		"	SETB [%0++],D1Ar1\n",	\
+		"3:	ADD  %2,%2,#1\n",	\
 		"	.long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#2\n" FIXUP,		\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"4:	SETB [%0++],D1Ar1\n",		\
-		"5:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
+		"5:	ADD  %2,%2,#1\n",		\
 		"	.long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"2:	SETD [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#4\n" FIXUP,		\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
 	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
-#define __asm_copy_from_user_5(to, from, ret) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"4:	SETB [%0++],D1Ar1\n",		\
-		"5:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"4:	SETW [%0++],D1Ar1\n" COPY,	\
-		"5:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
-	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
-	__asm_copy_from_user_6x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"6:	SETB [%0++],D1Ar1\n",		\
-		"7:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"4:	SETD [%0++],D1Ar1\n" COPY,	\
-		"5:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
-	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"6:	SETB [%0++],D1Ar1\n",		\
-		"7:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"6:	SETW [%0++],D1Ar1\n" COPY,	\
-		"7:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
-	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
-	__asm_copy_from_user_10x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"8:	SETB [%0++],D1Ar1\n",		\
-		"9:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"6:	SETD [%0++],D1Ar1\n" COPY,	\
-		"7:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
-	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"8:	SETB [%0++],D1Ar1\n",		\
-		"9:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"8:	SETW [%0++],D1Ar1\n" COPY,	\
-		"9:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
-	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
-	__asm_copy_from_user_14x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"10:	SETB [%0++],D1Ar1\n",		\
-		"11:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"8:	SETD [%0++],D1Ar1\n" COPY,	\
-		"9:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
-	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
 #define __asm_copy_from_user_8x64(to, from, ret) \
 	asm volatile (				\
 		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
 		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"1:\n"					\
 		"	.section .fixup,\"ax\"\n"	\
-		"	MOV D1Ar1,#0\n"			\
-		"	MOV D0Ar2,#0\n"			\
 		"3:	ADD  %2,%2,#8\n"		\
-		"	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"	MOVT    D0Ar2,#HI(1b)\n"	\
 		"	JUMP    D0Ar2,#LO(1b)\n"	\
 		"	.previous\n"			\
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
  *
  *	Rationale:
  *		A fault occurs while reading from user buffer, which is the
- *		source. Since the fault is at a single address, we only
- *		need to rewind by 8 bytes.
+ *		source.
  *		Since we don't write to kernel buffer until we read first,
  *		the kernel buffer is at the right state and needn't be
- *		corrected.
+ *		corrected, but the source must be rewound to the beginning of
+ *		the block, which is LSM_STEP*8 bytes.
+ *		LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *		and stored in D0Ar2
+ *
+ *		NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *			LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *			a fault happens at the 4th write, LSM_STEP will be 0
+ *			instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #8\n")
+		"LSR	D0Ar2, D0Ar2, #5\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
+		"SUB	%1, %1, D0Ar2\n")
 
 /* rewind 'from' pointer when a fault occurs
  *
  *	Rationale:
  *		A fault occurs while reading from user buffer, which is the
- *		source. Since the fault is at a single address, we only
- *		need to rewind by 4 bytes.
+ *		source.
 *		Since we don't write to kernel buffer until we read first,
 *		the kernel buffer is at the right state and needn't be
- *		corrected.
+ *		corrected, but the source must be rewound to the beginning of
+ *		the block, which is LSM_STEP*4 bytes.
+ *		LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *		and stored in D0Ar2
+ *
+ *		NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *			LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *			a fault happens at the 4th write, LSM_STEP will be 0
+ *			instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #4\n")
+		"LSR	D0Ar2, D0Ar2, #6\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
+		"SUB	%1, %1, D0Ar2\n")
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
-   inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
-				  unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+				 unsigned long n)
 {
 	register char *dst asm ("A0.2") = pdst;
 	register const char __user *src asm ("A1.2") = psrc;
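As a worked check of the rewind arithmetic described in the comments above (a sketch, not kernel code): LSM_STEP lives in TXSTATUS bits 10:8, so shifting right by 5 yields LSM_STEP*8 for the 64-bit loop, and the ANDS/ADDZ pair maps a step of 0 — reported when the fault hits the last of the four transfers — onto the full 32-byte block.

```c
#include <assert.h>

/* Hypothetical helper mirroring "LSR #5; ANDS #0x38; ADDZ #32":
 * how many bytes to rewind the source in the 64-bit rapf loop. */
static unsigned long rewind_bytes_64bit(unsigned long txstatus)
{
	unsigned long step_x8 = (txstatus >> 5) & 0x38;	/* LSM_STEP * 8 */

	return step_x8 ? step_x8 : 32;	/* ADDZ: step 0 means a full block */
}

int main(void)
{
	assert(rewind_bytes_64bit(2ul << 8) == 16);	/* fault at transfer 2 */
	assert(rewind_bytes_64bit(0) == 32);		/* fault at last transfer */
	return 0;
}
```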
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_from_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_1(dst, src, retn);
 			n--;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_from_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_2(dst, src, retn);
 			n -= 2;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 
-	/* We only need one check after the unalignment-adjustments,
-	   because if both adjustments were done, either both or
-	   neither reference had an exception.  */
-	if (retn != 0)
-		goto copy_exception_bytes;
-
 #ifdef USE_RAPF
 	/* 64 bit copy loop */
 	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 #endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		n -= 4;
 
 		if (retn)
-			goto copy_exception_bytes;
+			return retn + n;
 	}
 
 	/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	/* If we get here, retn correctly reflects the number of failing
 	   bytes. */
 	return retn;
-
- copy_exception_bytes:
-	/* We already have "retn" bytes cleared, and need to clear the
-	   remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-	   memset is preferred here, since this isn't speed-critical code and
-	   we'd rather have this a leaf-function than calling memset.  */
-	{
-		char *endp;
-		for (endp = dst + n; dst < endp; dst++)
-			*dst = 0;
-	}
-
-	return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
 	asm volatile (			\
arch/mips/Kconfig
@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6
	select CPU_SUPPORTS_MSA
	select GENERIC_CSUM
	select HAVE_KVM
-	select MIPS_O32_FP64_SUPPORT
+	select MIPS_O32_FP64_SUPPORT if 32BIT
	help
	  Choose this option to build a kernel for release 6 or later of the
	  MIPS32 architecture. New MIPS processors, starting with the Warrior
arch/mips/include/asm/spinlock.h
@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
-		"2:							\n"
+		"2:	.insn						\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
-		"2:							\n"
+		"2:	.insn						\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
-		"2:							\n"
+		"2:	.insn						\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
-			"2:						\n"
+			"2:	.insn					\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
 
		if (!np_xbar)
			panic("Failed to load xbar nodes from devicetree");
-		if (of_address_to_resource(np_pmu, 0, &res_xbar))
+		if (of_address_to_resource(np_xbar, 0, &res_xbar))
			panic("Failed to get xbar resources");
		if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
			res_xbar.name) < 0)
arch/mips/mm/tlbex.c
@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
				    struct uasm_label **l,
				    unsigned int pte,
-				    unsigned int ptr)
+				    unsigned int ptr,
+				    unsigned int flush)
 {
 #ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 #else
	UASM_i_SW(p, pte, 0, ptr);
 #endif
+	if (cpu_has_ftlb && flush) {
+		BUG_ON(!cpu_has_tlbinv);
+
+		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+		build_tlb_write_entry(p, l, r, tlb_indexed);
+
+		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+		build_huge_update_entries(p, pte, ptr);
+		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+		return;
+	}
+
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void)
		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
	uasm_l_nopage_tlbl(&l, p);
@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void)
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
	uasm_l_nopage_tlbs(&l, p);
@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void)
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
 #endif
 
	uasm_l_nopage_tlbm(&l, p);
arch/mips/ralink/rt3883.c
@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
 static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
 static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
 static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
 static struct rt2880_pmx_func pci_func[] = {
	FUNC("pci-dev", 0, 40, 32),
	FUNC("pci-host2", 1, 40, 32),
@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
	FUNC("pci-fnc", 3, 40, 32)
 };
 static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
 
 static struct rt2880_pmx_group rt3883_pinmux_data[] = {
	GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
	return alloc_bootmem_align(size, align);
 }
 
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+					     bool nomap)
+{
+	reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+	return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
	__be32 *dtb = (u32 *)__dtb_start;
arch/nios2/kernel/setup.c
@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+	early_init_fdt_reserve_self();
+	early_init_fdt_scan_reserved_mem();
+
	unflatten_and_copy_device_tree();
 
	setup_cpuinfo();
arch/powerpc/kernel/align.c
@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;
 
-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-		nb = 8;
-		flags = LD+SW;
-	} else if (IS_XFORM(instruction) &&
-		   ((instruction >> 1) & 0x3ff) == 660) {
-		nb = 8;
-		flags = ST+SW;
+	/*
+	 * Handle some cases which give overlaps in the DSISR values.
+	 */
+	if (IS_XFORM(instruction)) {
+		switch (get_xop(instruction)) {
+		case 532:	/* ldbrx */
+			nb = 8;
+			flags = LD+SW;
+			break;
+		case 660:	/* stdbrx */
+			nb = 8;
+			flags = ST+SW;
+			break;
+		case 20:	/* lwarx */
+		case 84:	/* ldarx */
+		case 116:	/* lharx */
+		case 276:	/* lqarx */
+			return 0;	/* not emulated ever */
+		}
	}
 
	/* Byteswap little endian loads and stores */
arch/powerpc/mm/hash_native_64.c
@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local)
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
+	unsigned int use_local;
+
+	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
	local_irq_save(flags);
 
@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local)
		} pte_iterate_hashed_end();
	}
 
-	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-	    mmu_psize_defs[psize].tlbiel && local) {
+	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
 
 unsigned long decompress_kernel(void)
 {
-	unsigned long output_addr;
-	unsigned char *output;
+	void *output, *kernel_end;
 
-	output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
-	check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
-	memset(&_bss, 0, &_ebss - &_bss);
-	free_mem_ptr = (unsigned long)&_end;
-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-	output = (unsigned char *) output_addr;
+	output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+	kernel_end = output + SZ__bss_start;
+	check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
 
 #ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd right behind the end of the decompressed
-	 * kernel image.
+	 * kernel image. This also prevents initrd corruption caused by
+	 * bss clearing since kernel_end will always be located behind the
+	 * current bss section..
	 */
-	if (INITRD_START && INITRD_SIZE &&
-	    INITRD_START < (unsigned long) output + SZ__bss_start) {
-		check_ipl_parmblock(output + SZ__bss_start,
-				    INITRD_START + INITRD_SIZE);
-		memmove(output + SZ__bss_start,
-			(void *) INITRD_START, INITRD_SIZE);
-		INITRD_START = (unsigned long) output + SZ__bss_start;
+	if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+		check_ipl_parmblock(kernel_end, INITRD_SIZE);
+		memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+		INITRD_START = (unsigned long) kernel_end;
	}
 #endif
 
+	/*
+	 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+	 * initialized afterwards since they reside in bss.
+	 */
+	memset(&_bss, 0, &_ebss - &_bss);
+	free_mem_ptr = (unsigned long) &_end;
+	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
	puts("Uncompressing Linux... ");
	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
	puts("Ok, booting the kernel.\n");
arch/s390/include/asm/uaccess.h
@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
-		: "=d" (__rc), "=Q" (*(to))			\
+		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
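The constraint change above is subtle: the earlier zero-on-failure fix cleared the kernel-side operand before the inline asm, but a write-only "=Q" output lets the compiler discard that clearing as dead, so a faulting get_user() could hand back stale data. Marking the operand read-write ("+Q") keeps the zeroing store alive. A sketch of the caller-visible contract — an illustrative stand-in, not the s390 implementation:

```c
#include <stdint.h>
#include <stdio.h>

/* get_user-style helpers must leave *dst zeroed when the user access
 * faults, so callers never observe stale stack contents. */
static int get_user_like(uint32_t *dst, const uint32_t *user_src, int faults)
{
	*dst = 0;		/* the store that "+Q" protects from elimination */
	if (faults)
		return -14;	/* -EFAULT */
	*dst = *user_src;
	return 0;
}

int main(void)
{
	uint32_t val = 0xdeadbeef, src = 42;

	if (get_user_like(&val, &src, 1))
		printf("faulted, val=%u (guaranteed zero)\n", val);
	return 0;
}
```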
drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
	if (unlikely(ret != 0))
		goto out_err0;
 
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;
 
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
-		       enum ttm_ref_type ref_type, bool *existed)
+		       enum ttm_ref_type ref_type, bool *existed,
+		       bool require_existed)
 {
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
	}
 
	rcu_read_unlock();
+	if (require_existed)
+		return -EPERM;
+
	ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
				   false, false);
	if (unlikely(ret != 0))
@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->hash.key;
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
	dma_buf_put(dma_buf);
 
@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
|
||||||
struct vmw_fence_obj **p_fence)
|
struct vmw_fence_obj **p_fence)
|
||||||
{
|
{
|
||||||
struct vmw_fence_obj *fence;
|
struct vmw_fence_obj *fence;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
|
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
|
||||||
if (unlikely(fence == NULL))
|
if (unlikely(fence == NULL))
|
||||||
|
@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* vmw_fence_obj_lookup - Look up a user-space fence object
|
||||||
|
*
|
||||||
|
* @tfile: A struct ttm_object_file identifying the caller.
|
||||||
|
* @handle: A handle identifying the fence object.
|
||||||
|
* @return: A struct vmw_user_fence base ttm object on success or
|
||||||
|
* an error pointer on failure.
|
||||||
|
*
|
||||||
|
* The fence object is looked up and type-checked. The caller needs
|
||||||
|
* to have opened the fence object first, but since that happens on
|
||||||
|
* creation and fence objects aren't shareable, that's not an
|
||||||
|
* issue currently.
|
||||||
|
*/
|
||||||
|
static struct ttm_base_object *
|
||||||
|
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
|
||||||
|
{
|
||||||
|
struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
|
||||||
|
|
||||||
|
if (!base) {
|
||||||
|
pr_err("Invalid fence object handle 0x%08lx.\n",
|
||||||
|
(unsigned long)handle);
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (base->refcount_release != vmw_user_fence_base_release) {
|
||||||
|
pr_err("Invalid fence object handle 0x%08lx.\n",
|
||||||
|
(unsigned long)handle);
|
||||||
|
ttm_base_object_unref(&base);
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
return base;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c:

@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 		arg->kernel_cookie = jiffies + wait_timeout;
 	}
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Wait invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	base = ttm_base_object_lookup(tfile, arg->handle);
-	if (unlikely(base == NULL)) {
-		printk(KERN_ERR "Fence signaled invalid fence object handle "
-		       "0x%08lx.\n",
-		       (unsigned long)arg->handle);
-		return -EINVAL;
-	}
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 	fman = fman_from_fence(fence);

@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_fence_event_arg *) data;
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct ttm_object_file *tfile = vmw_fp->tfile;
 	struct drm_vmw_fence_rep __user *user_fence_rep =
 		(struct drm_vmw_fence_rep __user *)(unsigned long)
 		arg->fence_rep;

@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-			ttm_base_object_lookup_for_ref(dev_priv->tdev,
-						       arg->handle);
+			vmw_fence_obj_lookup(tfile, arg->handle);
 
-		if (unlikely(base == NULL)) {
-			DRM_ERROR("Fence event invalid fence object handle "
-				  "0x%08lx.\n",
-				  (unsigned long)arg->handle);
-			return -EINVAL;
-		}
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
 		fence = &(container_of(base, struct vmw_user_fence,
 				       base)->fence);
 		(void) vmw_fence_obj_reference(fence);
 
 		if (user_fence_rep != NULL) {
-			bool existed;
-
 			ret = ttm_ref_object_add(vmw_fp->tfile, base,
-						 TTM_REF_USAGE, &existed);
+						 TTM_REF_USAGE, NULL, false);
 			if (unlikely(ret != 0)) {
 				DRM_ERROR("Failed to reference a fence "
 					  "object.\n");

@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	return 0;
 out_no_create:
 	if (user_fence_rep != NULL)
-		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					  handle, TTM_REF_USAGE);
+		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
 	vmw_fence_obj_unreference(&fence);
 	return ret;
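Note: all three ioctls above now funnel through vmw_fence_obj_lookup(), a helper this patch adds whose body falls outside the quoted hunks. From the call sites (IS_ERR()/PTR_ERR() on the result) it is a type-checked variant of ttm_base_object_lookup() that returns an ERR_PTR instead of NULL. A minimal sketch of such a helper, assuming the type check compares the object's release callback:

static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (!base) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        /* Assumed detail: reject handles that actually name some other
         * object type by checking the release callback installed when
         * the fence was created. */
        if (base->refcount_release != vmw_user_fence_base_release) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                ttm_base_object_unref(&base);
                return ERR_PTR(-EINVAL);
        }

        return base;
}

The type check is the point of the fix: previously a handle naming any TTM base object would be container_of()'d into a struct vmw_user_fence, letting userspace confuse object types.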
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c:

@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value = dev_priv->has_dx;
 		break;
 	default:
-		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-			  param->param);
 		return -EINVAL;
 	}
 
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-	if (unlikely(arg->pad64 != 0)) {
+	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
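Note: the new arg->max_size == 0 test is what keeps vmw_get_cap_3d_ioctl() from later calling vzalloc() with a zero size, which fails and returns NULL. The general shape of the guard, as a sketch with illustrative names (req_size, CAP_BUFFER_MAX are not from this driver):

        /* Validate a user-controlled allocation size up front. */
        if (req_size == 0 || req_size > CAP_BUFFER_MAX)
                return -EINVAL;

        buf = vzalloc(req_size);
        if (!buf)
                return -ENOMEM;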
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c:

@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 		return ret;
 
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-				 TTM_REF_SYNCCPU_WRITE, &existed);
+				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
 		ttm_bo_synccpu_write_release(&user_bo->dma.base);
 
@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 
 	*handle = user_bo->prime.base.hash.key;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
-				  TTM_REF_USAGE, NULL);
+				  TTM_REF_USAGE, NULL, false);
 }
 
 /*
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c:

@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		128;
 
 	num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+			return -EINVAL;
 		num_sizes += req->mip_levels[i];
+	}
 
-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS)
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+	    num_sizes == 0)
 		return -EINVAL;
 
 	size = vmw_user_surface_size + 128 +

@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	uint32_t handle;
 	struct ttm_base_object *base;
 	int ret;
+	bool require_exist = false;
 
 	if (handle_type == DRM_VMW_HANDLE_PRIME) {
 		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
 		if (unlikely(ret != 0))
 			return ret;
 	} else {
-		if (unlikely(drm_is_render_client(file_priv))) {
-			DRM_ERROR("Render client refused legacy "
-				  "surface reference.\n");
-			return -EACCES;
-		}
+		if (unlikely(drm_is_render_client(file_priv)))
+			require_exist = true;
+
 		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
 			DRM_ERROR("Locked master refused legacy "
 				  "surface reference.\n");

@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 		/*
 		 * Make sure the surface creator has the same
-		 * authenticating master.
+		 * authenticating master, or is already registered with us.
 		 */
 		if (drm_is_primary_client(file_priv) &&
-		    user_srf->master != file_priv->master) {
-			DRM_ERROR("Trying to reference surface outside of"
-				  " master domain.\n");
-			ret = -EACCES;
-			goto out_bad_resource;
-		}
+		    user_srf->master != file_priv->master)
+			require_exist = true;
 
-		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+					 require_exist);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not add a reference to a surface.\n");
 			goto out_bad_resource;
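Note: the surface fix bounds each req->mip_levels[i] before it is added, so the sum num_sizes can never wrap (its maximum becomes DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS), and the num_sizes == 0 test rejects a zero-sized allocation later on. The same overflow-proof summing pattern in isolation, with illustrative names:

        u32 total = 0;

        for (i = 0; i < NUM_FACES; ++i) {
                /* Bounding each addend first keeps the sum far below
                 * U32_MAX, so the range check below cannot be fooled
                 * by wraparound. */
                if (levels[i] > MAX_LEVELS)
                        return -EINVAL;
                total += levels[i];
        }
        if (total == 0 || total > NUM_FACES * MAX_LEVELS)
                return -EINVAL;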
drivers/iio/gyro/bmg160_core.c:

@@ -28,6 +28,7 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/regmap.h>
+#include <linux/delay.h>
 #include "bmg160.h"
 
 #define BMG160_IRQ_NAME		"bmg160_event"

@@ -53,6 +54,9 @@
 #define BMG160_NO_FILTER		0
 #define BMG160_DEF_BW			100
 
+#define BMG160_GYRO_REG_RESET		0x14
+#define BMG160_GYRO_RESET_VAL		0xb6
+
 #define BMG160_REG_INT_MAP_0		0x17
 #define BMG160_INT_MAP_0_BIT_ANY	BIT(1)
 
@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
 	int ret;
 	unsigned int val;
 
+	/*
+	 * Reset chip to get it in a known good state. A delay of 30ms after
+	 * reset is required according to the datasheet.
+	 */
+	regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+		     BMG160_GYRO_RESET_VAL);
+	usleep_range(30000, 30700);
+
 	ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
 	if (ret < 0) {
 		dev_err(data->dev, "Error reading reg_chip_id\n");
fs/cifs/smb2pdu.c:

@@ -952,6 +952,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 		return -EINVAL;
 	}
 
+	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+	if (tcon)
+		tcon->tid = 0;
+
 	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
 	if (rc) {
 		kfree(unc_path);
fs/sysfs/file.c:

@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 {
 	const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
 	struct kobject *kobj = of->kn->parent->priv;
-	size_t len;
+	ssize_t len;
 
 	/*
 	 * If buf != of->prealloc_buf, we don't know how

@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 	if (WARN_ON_ONCE(buf != of->prealloc_buf))
 		return 0;
 	len = ops->show(kobj, of->kn->priv, buf);
+	if (len < 0)
+		return len;
 	if (pos) {
 		if (len <= pos)
 			return 0;
 		len -= pos;
 		memmove(buf, buf + pos, len);
 	}
-	return min(count, len);
+	return min_t(ssize_t, count, len);
 }
 
 /* kernfs write callback for regular sysfs files */
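Note: the root problem in sysfs_kf_read() was the type of len. ops->show() returns ssize_t, and storing a negative error code in a size_t turns it into a huge positive value, so neither the `len <= pos` test nor the final min() ever notices the failure. A standalone userspace demonstration of the trap (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
        size_t len = -5;        /* pretend show() returned an error code */
        size_t pos = 100;

        if (len <= pos)         /* never taken: len wrapped to a huge value */
                printf("error noticed\n");
        else
                printf("error missed, len = %zu\n", len);
        return 0;
}

With ssize_t plus the explicit `if (len < 0) return len;` added above, the error reaches the caller intact instead of being reported as a successful read.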
include/drm/ttm/ttm_object.h:

@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
 			      struct ttm_base_object *base,
-			      enum ttm_ref_type ref_type, bool *existed);
+			      enum ttm_ref_type ref_type, bool *existed,
+			      bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 				  struct ttm_base_object *base);
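Note: with the extra require_existed flag, every ttm_ref_object_add() caller has to pick a policy. Passing false preserves the old behaviour (create the reference if it does not exist), while passing true, as vmw_surface_handle_reference() now does for clients that are not trusted to create new references, turns the call into a lookup that fails unless a reference already exists. A sketch of the two call-site shapes:

        bool existed;
        int ret;

        /* Trusted path: create the ref if missing, report if it existed. */
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed, false);

        /* Untrusted path: never create a new ref; per the kerneldoc above,
         * fails with -EPERM unless the caller already holds one. */
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);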
kernel/ptrace.c:

@@ -151,11 +151,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
 
 	WARN_ON(!task->ptrace || task->parent != current);
 
+	/*
+	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+	 * Recheck state under the lock to close this race.
+	 */
 	spin_lock_irq(&task->sighand->siglock);
-	if (__fatal_signal_pending(task))
-		wake_up_state(task, __TASK_TRACED);
-	else
-		task->state = TASK_TRACED;
+	if (task->state == __TASK_TRACED) {
+		if (__fatal_signal_pending(task))
+			wake_up_state(task, __TASK_TRACED);
+		else
+			task->state = TASK_TRACED;
+	}
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
kernel/trace/ring_buffer.c:

@@ -4875,9 +4875,9 @@ static __init int test_ringbuffer(void)
 		rb_data[cpu].cnt = cpu;
 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
 						 "rbtester/%d", cpu);
-		if (WARN_ON(!rb_threads[cpu])) {
+		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
 			pr_cont("FAILED\n");
-			ret = -1;
+			ret = PTR_ERR(rb_threads[cpu]);
 			goto out_free;
 		}

@@ -4887,9 +4887,9 @@ static __init int test_ringbuffer(void)
 
 	/* Now create the rb hammer! */
 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
-	if (WARN_ON(!rb_hammer)) {
+	if (WARN_ON(IS_ERR(rb_hammer))) {
 		pr_cont("FAILED\n");
-		ret = -1;
+		ret = PTR_ERR(rb_hammer);
 		goto out_free;
 	}
 
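Note: kthread_create() and kthread_run() never return NULL; on failure they return an encoded error pointer (e.g. ERR_PTR(-ENOMEM)), so the old `!rb_threads[cpu]` and `!rb_hammer` tests could never fire and the test would later operate on a bogus pointer. The correct idiom for any kthread user, as a short sketch (thread_fn is illustrative):

        struct task_struct *t;

        t = kthread_run(thread_fn, NULL, "mythread");
        if (IS_ERR(t))                  /* not: if (!t) */
                return PTR_ERR(t);      /* propagate the real errno */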
mm/mempolicy.c:

@@ -1493,7 +1493,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);

@@ -1502,14 +1501,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(bm, nmask, nr_bits);
+		if (compat_get_bitmap(bm, nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, bm, alloc_size);
+		if (copy_to_user(nm, bm, alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_set_mempolicy(mode, nm, nr_bits+1);
 }

@@ -1517,7 +1515,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	nodemask_t bm;

@@ -1526,14 +1523,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
 }