android_kernel_oneplus_msm8998/mm/mprotect.c
Greg Kroah-Hartman dfff30bca9 This is the 4.4.81 stable release

Merge 4.4.81 into android-4.4

Changes in 4.4.81
	libata: array underflow in ata_find_dev()
	workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
	ALSA: hda - Fix speaker output from VAIO VPCL14M1R
	ASoC: do not close shared backend dailink
	KVM: async_pf: make rcu irq exit if not triggered from idle task
	mm/page_alloc: Remove kernel address exposure in free_reserved_area()
	ext4: fix SEEK_HOLE/SEEK_DATA for blocksize < pagesize
	ext4: fix overflow caused by missing cast in ext4_resize_fs()
	ARM: dts: armada-38x: Fix irq type for pca955
	media: platform: davinci: return -EINVAL for VPFE_CMD_S_CCDC_RAW_PARAMS ioctl
	target: Avoid mappedlun symlink creation during lun shutdown
	iscsi-target: Always wait for kthread_should_stop() before kthread exit
	iscsi-target: Fix early sk_data_ready LOGIN_FLAGS_READY race
	iscsi-target: Fix initial login PDU asynchronous socket close OOPs
	iscsi-target: Fix delayed logout processing greater than SECONDS_FOR_LOGOUT_COMP
	iser-target: Avoid isert_conn->cm_id dereference in isert_login_recv_done
	mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
	media: lirc: LIRC_GET_REC_RESOLUTION should return microseconds
	f2fs: sanity check checkpoint segno and blkoff
	drm: rcar-du: fix backport bug
	saa7164: fix double fetch PCIe access condition
	ipv4: ipv6: initialize treq->txhash in cookie_v[46]_check()
	net: Zero terminate ifr_name in dev_ifname().
	ipv6: avoid overflow of offset in ip6_find_1stfragopt
	ipv4: initialize fib_trie prior to register_netdev_notifier call.
	rtnetlink: allocate more memory for dev_set_mac_address()
	mcs7780: Fix initialization when CONFIG_VMAP_STACK is enabled
	openvswitch: fix potential out of bound access in parse_ct
	packet: fix use-after-free in prb_retire_rx_blk_timer_expired()
	ipv6: Don't increase IPSTATS_MIB_FRAGFAILS twice in ip6_fragment()
	net: ethernet: nb8800: Handle all 4 RGMII modes identically
	dccp: fix a memleak that dccp_ipv6 doesn't put reqsk properly
	dccp: fix a memleak that dccp_ipv4 doesn't put reqsk properly
	dccp: fix a memleak for dccp_feat_init err process
	sctp: don't dereference ptr before leaving _sctp_walk_{params, errors}()
	sctp: fix the check for _sctp_walk_params and _sctp_walk_errors
	net/mlx5: Fix command bad flow on command entry allocation failure
	net: phy: Correctly process PHY_HALTED in phy_stop_machine()
	net: phy: Fix PHY unbind crash
	xen-netback: correctly schedule rate-limited queues
	sparc64: Measure receiver forward progress to avoid send mondo timeout
	wext: handle NULL extra data in iwe_stream_add_point better
	sh_eth: R8A7740 supports packet checksumming
	net: phy: dp83867: fix irq generation
	tg3: Fix race condition in tg3_get_stats64().
	x86/boot: Add missing declaration of string functions
	phy state machine: failsafe leave invalid RUNNING state
	scsi: qla2xxx: Get mutex lock before checking optrom_state
	drm/virtio: fix framebuffer sparse warning
	virtio_blk: fix panic in initialization error path
	ARM: 8632/1: ftrace: fix syscall name matching
	mm, slab: make sure that KMALLOC_MAX_SIZE will fit into MAX_ORDER
	lib/Kconfig.debug: fix frv build failure
	signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
	mm: don't dereference struct page fields of invalid pages
	ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
	net: account for current skb length when deciding about UFO
	workqueue: implicit ordered attribute should be overridable
	Linux 4.4.81

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2017-08-11 13:09:21 -07:00
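
One of the fixes above ("mm, mprotect: flush TLB if potentially racing with a
parallel reclaim leaving stale TLB entries") lands in the file below. For
orientation, the syscall this file implements is driven from userspace roughly
as in the following illustrative snippet; this is generic POSIX mmap/mprotect
usage, not code taken from this tree:

	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
	        long page = sysconf(_SC_PAGESIZE);
	        /* Map one anonymous, private, read-write page. */
	        char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
	                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	        if (buf == MAP_FAILED)
	                return 1;
	        strcpy(buf, "hello");
	        /* Drop write permission; a later store to buf would SIGSEGV. */
	        if (mprotect(buf, page, PROT_READ))
	                return 1;
	        return 0;
	}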

/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
        pte_t *pte;
        spinlock_t *pmdl;

        /* !prot_numa is protected by mmap_sem held for write */
        if (!prot_numa)
                return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

        pmdl = pmd_lock(vma->vm_mm, pmd);
        if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
                spin_unlock(pmdl);
                return NULL;
        }

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
        spin_unlock(pmdl);
        return pte;
}
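
/*
 * Walk the PTEs covering [addr, end) under one pmd, applying newprot to
 * each present entry. Returns the number of entries updated.
 */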
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;

        pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
        if (!pte)
                return 0;

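        /*
         * Flush any TLB entries that reclaim's batched unmapping left
         * pending for this mm, so we do not modify PTEs while another
         * CPU may still hold a stale writable translation (the 4.4.81
         * mprotect/reclaim race fix in the changelog above).
         */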
        flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool preserve_write = prot_numa && pte_write(oldpte);

                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa) {
                                struct page *page;

                                page = vm_normal_page(vma, addr, oldpte);
                                if (!page || PageKsm(page))
                                        continue;

                                /* Avoid TLB flush if possible */
                                if (pte_protnone(oldpte))
                                        continue;
                        }

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
                        if (preserve_write)
                                ptent = pte_mkwrite(ptent);

                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
                                        (pte_soft_dirty(ptent) ||
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                        pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}
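
/*
 * Walk the pmds under one pud. A huge pmd is either split (partial range)
 * or updated in one shot via change_huge_pmd(); ordinary pmds are handed
 * to change_pte_range(). MMU notifiers are invoked only once a populated
 * pmd is actually found.
 */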
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;
        unsigned long mni_start = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
                        continue;

                /* invoke the mmu notifier if the pmd is populated */
                if (!mni_start) {
                        mni_start = addr;
                        mmu_notifier_invalidate_range_start(mm, mni_start, end);
                }

                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }

                                        /* huge pmd was handled */
                                        continue;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (mni_start)
                mmu_notifier_invalidate_range_end(mm, mni_start, end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}
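
/*
 * Walk the puds under one pgd, delegating each populated range to
 * change_pmd_range() and accumulating the page count.
 */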
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}
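
/*
 * Top-level page-table walk for a non-hugetlb VMA: mark a TLB flush as
 * pending, walk the pgds, and flush the TLB range only if any entry was
 * actually modified.
 */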
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}
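
/*
 * Dispatch: hugetlb VMAs have their own protection-change path; everything
 * else goes through the generic walk above.
 */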
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
        unsigned long pages;

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

        return pages;
}
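
/*
 * Apply newflags to [start, end) of one VMA: charge memory for a newly
 * writable private mapping, merge with neighbours or split the VMA as
 * needed, then rewrite the page protections.
 */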
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again. hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
                           vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        dirty_accountable = vma_wants_writenotify(vma);
        vma_set_page_prot(vma);

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
         * fault on access.
         */
        if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
                        (newflags & VM_WRITE)) {
                populate_vma_page_range(vma, start, end, NULL);
        }

        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}
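
/*
 * The mprotect(2) entry point: validates and page-aligns the request,
 * handles the PROT_GROWSDOWN/PROT_GROWSUP stack hints, then walks the
 * affected VMAs, applying mprotect_fixup() to each piece in turn under
 * mmap_sem held for write.
 */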
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags;
                newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shift VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}