android_kernel_oneplus_msm8998/mm/util.c
Greg Kroah-Hartman 3f51ea2db9 This is the 4.4.133 stable release

Merge 4.4.133 into android-4.4

Changes in 4.4.133
	8139too: Use disable_irq_nosync() in rtl8139_poll_controller()
	bridge: check iface upper dev when setting master via ioctl
	dccp: fix tasklet usage
	ipv4: fix memory leaks in udp_sendmsg, ping_v4_sendmsg
	llc: better deal with too small mtu
	net: ethernet: sun: niu set correct packet size in skb
	net/mlx4_en: Verify coalescing parameters are in range
	net_sched: fq: take care of throttled flows before reuse
	net: support compat 64-bit time in {s,g}etsockopt
	openvswitch: Don't swap table in nlattr_set() after OVS_ATTR_NESTED is found
	qmi_wwan: do not steal interfaces from class drivers
	r8169: fix powering up RTL8168h
	sctp: handle two v4 addrs comparison in sctp_inet6_cmp_addr
	sctp: use the old asoc when making the cookie-ack chunk in dupcook_d
	tg3: Fix vunmap() BUG_ON() triggered from tg3_free_consistent().
	bonding: do not allow rlb updates to invalid mac
	tcp: ignore Fast Open on repair mode
	sctp: fix the issue that the cookie-ack with auth can't get processed
	sctp: delay the authentication for the duplicated cookie-echo chunk
	ALSA: timer: Call notifier in the same spinlock
	audit: move calcs after alloc and check when logging set loginuid
	arm64: introduce mov_q macro to move a constant into a 64-bit register
	arm64: Add work around for Arm Cortex-A55 Erratum 1024718
	futex: Remove unnecessary warning from get_futex_key
	futex: Remove duplicated code and fix undefined behaviour
	xfrm: fix xfrm_do_migrate() with AEAD e.g(AES-GCM)
	lockd: lost rollback of set_grace_period() in lockd_down_net()
	Revert "ARM: dts: imx6qdl-wandboard: Fix audio channel swap"
	l2tp: revert "l2tp: fix missing print session offset info"
	pipe: cap initial pipe capacity according to pipe-max-size limit
	futex: futex_wake_op, fix sign_extend32 sign bits
	kernel/exit.c: avoid undefined behaviour when calling wait4()
	usbip: usbip_host: refine probe and disconnect debug msgs to be useful
	usbip: usbip_host: delete device from busid_table after rebind
	usbip: usbip_host: run rebind from exit when module is removed
	usbip: usbip_host: fix NULL-ptr deref and use-after-free errors
	usbip: usbip_host: fix bad unlock balance during stub_probe()
	ALSA: usb: mixer: volume quirk for CM102-A+/102S+
	ALSA: hda: Add Lenovo C50 All in one to the power_save blacklist
	ALSA: control: fix a redundant-copy issue
	spi: pxa2xx: Allow 64-bit DMA
	powerpc/powernv: panic() on OPAL < V3
	powerpc/powernv: Remove OPALv2 firmware define and references
	powerpc/powernv: remove FW_FEATURE_OPALv3 and just use FW_FEATURE_OPAL
	cpuidle: coupled: remove unused define cpuidle_coupled_lock
	powerpc: Don't preempt_disable() in show_cpuinfo()
	vmscan: do not force-scan file lru if its absolute size is small
	proc: meminfo: estimate available memory more conservatively
	mm: filemap: remove redundant code in do_read_cache_page
	mm: filemap: avoid unnecessary calls to lock_page when waiting for IO to complete during a read
	signals: avoid unnecessary taking of sighand->siglock
	cpufreq: intel_pstate: Enable HWP by default
	tracing/x86/xen: Remove zero data size trace events trace_xen_mmu_flush_tlb{_all}
	proc read mm's {arg,env}_{start,end} with mmap semaphore taken.
	procfs: fix pthread cross-thread naming if !PR_DUMPABLE
	powerpc/powernv: Fix NVRAM sleep in invalid context when crashing
	mm: don't allow deferred pages with NEED_PER_CPU_KM
	s390/qdio: fix access to uninitialized qdio_q fields
	s390/cpum_sf: ensure sample frequency of perf event attributes is non-zero
	s390/qdio: don't release memory in qdio_setup_irq()
	s390: remove indirect branch from do_softirq_own_stack
	efi: Avoid potential crashes, fix the 'struct efi_pci_io_protocol_32' definition for mixed mode
	ARM: 8771/1: kprobes: Prohibit kprobes on do_undefinstr
	tick/broadcast: Use for_each_cpu() specially on UP kernels
	ARM: 8769/1: kprobes: Fix to use get_kprobe_ctlblk after irq-disabed
	ARM: 8770/1: kprobes: Prohibit probing on optimized_callback
	ARM: 8772/1: kprobes: Prohibit kprobes on get_user functions
	Btrfs: fix xattr loss after power failure
	btrfs: fix crash when trying to resume balance without the resume flag
	btrfs: fix reading stale metadata blocks after degraded raid1 mounts
	net: test tailroom before appending to linear skb
	packet: in packet_snd start writing at link layer allocation
	sock_diag: fix use-after-free read in __sk_free
	tcp: purge write queue in tcp_connect_init()
	ext2: fix a block leak
	s390: add assembler macros for CPU alternatives
	s390: move expoline assembler macros to a header
	s390/lib: use expoline for indirect branches
	s390/kernel: use expoline for indirect branches
	s390: move spectre sysfs attribute code
	s390: extend expoline to BC instructions
	s390: use expoline thunks in the BPF JIT
	scsi: libsas: defer ata device eh commands to libata
	scsi: sg: allocate with __GFP_ZERO in sg_build_indirect()
	scsi: zfcp: fix infinite iteration on ERP ready list
	dmaengine: ensure dmaengine helpers check valid callback
	time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
	gpio: rcar: Add Runtime PM handling for interrupts
	cfg80211: limit wiphy names to 128 bytes
	hfsplus: stop workqueue when fill_super() failed
	x86/kexec: Avoid double free_page() upon do_kexec_load() failure
	Linux 4.4.133

Change-Id: I0554b12889bc91add2a444da95f18d59c6fb9cdb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2018-05-26 10:12:26 +02:00


#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the kernel's .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
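
/*
 * Example usage (editor's sketch, not part of the original file): duplicate
 * a caller-owned name so it stays valid after the caller's buffer is gone.
 * example_set_label()/example_label are hypothetical names.
 */
static char *example_label;

static int __maybe_unused example_set_label(const char *name)
{
        char *dup = kstrdup(name, GFP_KERNEL);

        if (!dup)
                return -ENOMEM;
        kfree(example_label);           /* kfree(NULL) is a safe no-op */
        example_label = dup;
        return 0;
}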

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the kernel's .rodata section;
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
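
/*
 * Example usage (editor's sketch, not part of the original file):
 * kstrdup_const() pairs with kfree_const().  For built-in code, a string
 * literal sits between __start_rodata and __end_rodata and is returned
 * as-is with no allocation; anything else (including module literals,
 * which live in the module's own rodata) is duplicated as by kstrdup().
 */
static void __maybe_unused example_const_pair(const char *runtime_name)
{
        const char *a = kstrdup_const("fixed-name", GFP_KERNEL);
        const char *b = kstrdup_const(runtime_name, GFP_KERNEL);

        kfree_const(b);         /* kfree()s the duplicate */
        kfree_const(a);         /* no-op if "fixed-name" was in core .rodata */
}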

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
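
/*
 * Example usage (editor's sketch, not part of the original file): turn a
 * length-delimited, possibly unterminated byte range into a proper C
 * string, e.g. the first comma-separated token of a larger buffer.
 */
static char * __maybe_unused example_first_token(const char *buf, size_t buflen)
{
        const char *sep = memchr(buf, ',', buflen);
        size_t len = sep ? (size_t)(sep - buf) : buflen;

        return kmemdup_nul(buf, len, GFP_KERNEL);       /* NULL on failure */
}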

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
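
/*
 * Example usage (editor's sketch, not part of the original file): unlike
 * the k*dup() helpers above, memdup_user() reports failure via ERR_PTR(),
 * not NULL, so callers must test with IS_ERR()/PTR_ERR().
 */
static int __maybe_unused example_copy_blob(const void __user *ubuf, size_t len)
{
        void *kbuf = memdup_user(ubuf, len);

        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);   /* -ENOMEM or -EFAULT */
        /* ... use the kernel copy ... */
        kfree(kbuf);
        return 0;
}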

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
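
/*
 * Example usage (editor's sketch, not part of the original file): the
 * typical syscall/ioctl pattern of pulling in a bounded, user-supplied
 * string.  PATH_MAX is just a plausible bound here (from
 * <uapi/linux/limits.h>).
 */
static int __maybe_unused example_get_path(const char __user *upath)
{
        char *path = strndup_user(upath, PATH_MAX);

        if (IS_ERR(path))
                return PTR_ERR(path);   /* -EFAULT or -EINVAL */
        /* ... use path ... */
        kfree(path);
        return 0;
}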

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}
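
/*
 * Illustration (editor's note, not part of the original file): linking
 * @vma after @prev splices it into the doubly linked vm_next/vm_prev
 * list:
 *
 *      before:  prev <-> next
 *      after:   prev <-> vma <-> next
 *
 * With @prev == NULL, @vma becomes the new head (mm->mmap) and its
 * successor, if any, is recovered from the rbtree parent node.
 */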

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe, in that it will not
 * fall back to the regular GUP path.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        return get_user_pages_unlocked(current, mm, start, nr_pages,
                                       write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
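
/*
 * Example usage (editor's sketch, not part of the original file): pin a
 * user buffer, operate on the pages, then drop the references.  Note
 * that fewer than @nr_pages pages may end up pinned.
 */
static int __maybe_unused example_pin_pages(unsigned long uaddr, int nr_pages,
                                            struct page **pages)
{
        int i, pinned;

        pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
        if (pinned < 0)
                return pinned;          /* -errno, nothing pinned */
        /* ... access the first 'pinned' pages ... */
        for (i = 0; i < pinned; i++)
                put_page(pages[i]);
        return pinned == nr_pages ? 0 : -EFAULT;
}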

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
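
/*
 * Example usage (editor's sketch, not part of the original file): map the
 * first @size bytes of @file read-only into the current task, roughly as
 * a binfmt loader would.  Errors come back encoded in the address.
 */
static unsigned long __maybe_unused example_map_file(struct file *file,
                                                     unsigned long size)
{
        unsigned long addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);

        return addr;    /* IS_ERR_VALUE(addr) means a negative errno */
}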

void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
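
/*
 * Example usage (editor's sketch, not part of the original file): since
 * kvfree() frees memory from either allocator, it enables the common
 * "try kmalloc, fall back to vmalloc" pattern for large buffers (later
 * kernels package this very pattern as kvmalloc()).
 */
static void * __maybe_unused example_alloc_big(size_t size)
{
        void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vmalloc(size);
        return p;       /* release with kvfree() in either case */
}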

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);

        return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;

        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        unsigned long mapping;

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = (unsigned long)page->mapping;
        if (mapping & PAGE_MAPPING_FLAGS)
                return NULL;
        return page->mapping;
}
EXPORT_SYMBOL(page_mapping);

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, 0);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len, 0);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
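
/*
 * Example usage (editor's sketch, not part of the original file): since
 * get_cmdline() does not guarantee NUL termination, terminate the buffer
 * explicitly before treating it as a C string.
 */
static void __maybe_unused example_print_cmdline(struct task_struct *task)
{
        char buf[256];
        int len = get_cmdline(task, buf, sizeof(buf) - 1);

        buf[len] = '\0';        /* get_cmdline() never returns < 0 here */
        printk(KERN_DEBUG "cmdline: %s\n", buf);
}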