Merge remote-tracking branch 'common/android-4.4' into android-4.4.y
commit 2a3670c622
13 changed files with 178 additions and 67 deletions
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -706,7 +706,7 @@ __copy_from_user_overflow(int size, unsigned long count)
 #endif
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
 
@@ -742,7 +742,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(from);
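Why __always_inline matters here: __compiletime_object_size() wraps GCC's __builtin_object_size(), which only resolves to a real size when the compiler can still see the destination object at the call site, i.e. after copy_from_user()/copy_to_user() have been inlined into their caller. A minimal userspace sketch of that behavior (not kernel code; obj_size() is a hypothetical stand-in, build with gcc -O2):

#include <stdio.h>

/* Mirrors __compiletime_object_size(): (unsigned long)-1 means "unknown". */
static inline unsigned long obj_size(const void *p)
{
	return __builtin_object_size(p, 0);
}

int main(void)
{
	char buf[16];

	/*
	 * With -O2, obj_size() is inlined here, so the builtin sees buf
	 * and resolves to 16; without inlining it returns (unsigned long)-1.
	 */
	printf("%lu\n", obj_size(buf));
	return 0;
}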
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3560,6 +3560,7 @@ blk_zero_latency_hist(struct io_latency_state *s)
 	s->latency_reads_elems = 0;
 	s->latency_writes_elems = 0;
 }
 EXPORT_SYMBOL(blk_zero_latency_hist);
 
+ssize_t
 blk_latency_hist_show(struct io_latency_state *s, char *buf)
@@ -3621,3 +3622,4 @@ blk_latency_hist_show(struct io_latency_state *s, char *buf)
 	}
+	return bytes_written;
 }
 EXPORT_SYMBOL(blk_latency_hist_show);
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1251,6 +1251,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
 		/* Ignore report if ErrorRollOver */
 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
 		    value[n] >= min && value[n] <= max &&
+		    value[n] - min < field->maxusage &&
 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
 			goto exit;
 	}
@@ -1263,11 +1264,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
 	}
 
 	if (field->value[n] >= min && field->value[n] <= max
+		&& field->value[n] - min < field->maxusage
 		&& field->usage[field->value[n] - min].hid
 		&& search(value, field->value[n], count))
 			hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
 
 	if (value[n] >= min && value[n] <= max
+		&& value[n] - min < field->maxusage
 		&& field->usage[value[n] - min].hid
 		&& search(field->value, value[n], count))
 			hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
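The three added `... - min < field->maxusage` guards all close the same out-of-bounds read: field->usage[] holds maxusage entries, so a report value that passes the min/max range test can still index past the array when a device declares a logical range wider than its usage list. A standalone sketch of the failure mode (hypothetical sizes, not driver code):

#include <stdio.h>

#define MAXUSAGE 8

static int usage[MAXUSAGE];

/* Return the usage slot for a report value, or -1 if it cannot be used. */
static int lookup(int value, int min, int max)
{
	if (value < min || value > max)
		return -1;
	/*
	 * Without this check, a device with max > min + MAXUSAGE lets
	 * usage[value - min] read past the end of the array.
	 */
	if (value - min >= MAXUSAGE)
		return -1;
	return usage[value - min];
}

int main(void)
{
	printf("%d\n", lookup(20, 0, 100));	/* -1: would be out of bounds */
	printf("%d\n", lookup(3, 0, 100));	/* usage[3]: in bounds */
	return 0;
}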
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -431,6 +431,40 @@ static int pstore_write_compat(enum pstore_type_id type,
 			  size, psi);
 }
 
+static int pstore_write_buf_user_compat(enum pstore_type_id type,
+			    enum kmsg_dump_reason reason,
+			    u64 *id, unsigned int part,
+			    const char __user *buf,
+			    bool compressed, size_t size,
+			    struct pstore_info *psi)
+{
+	unsigned long flags = 0;
+	size_t i, bufsize = size;
+	long ret = 0;
+
+	if (unlikely(!access_ok(VERIFY_READ, buf, size)))
+		return -EFAULT;
+	if (bufsize > psinfo->bufsize)
+		bufsize = psinfo->bufsize;
+	spin_lock_irqsave(&psinfo->buf_lock, flags);
+	for (i = 0; i < size; ) {
+		size_t c = min(size - i, bufsize);
+
+		ret = __copy_from_user(psinfo->buf, buf + i, c);
+		if (unlikely(ret != 0)) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = psi->write_buf(type, reason, id, part, psinfo->buf,
+				     compressed, c, psi);
+		if (unlikely(ret < 0))
+			break;
+		i += c;
+	}
+	spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+	return unlikely(ret < 0) ? ret : size;
+}
+
 /*
  * platform specific persistent storage driver registers with
  * us here. If pstore is already mounted, call the platform
@@ -453,6 +487,8 @@ int pstore_register(struct pstore_info *psi)
 
 	if (!psi->write)
 		psi->write = pstore_write_compat;
+	if (!psi->write_buf_user)
+		psi->write_buf_user = pstore_write_buf_user_compat;
 	psinfo = psi;
 	mutex_init(&psinfo->read_mutex);
 	spin_unlock(&pstore_lock);
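pstore_write_buf_user_compat() gives every backend a working ->write_buf_user: it bounces the user buffer through the shared psinfo->buf in bufsize-limited chunks under buf_lock, handing each chunk to the backend's existing ->write_buf. A minimal userspace sketch of the same chunking pattern (hypothetical names; memcpy stands in for __copy_from_user(), no locking):

#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 8

/* Stand-in for the backend's ->write_buf(): consumes one bounce buffer. */
static int backend_write(const char *buf, size_t n)
{
	return fwrite(buf, 1, n, stdout) == n ? 0 : -1;
}

/* Copy `size` bytes in chunks no larger than the bounce buffer. */
static int write_chunked(const char *src, size_t size)
{
	char bounce[BOUNCE_SIZE];
	size_t i;

	for (i = 0; i < size; ) {
		size_t c = size - i < BOUNCE_SIZE ? size - i : BOUNCE_SIZE;

		memcpy(bounce, src + i, c);	/* __copy_from_user() in the kernel */
		if (backend_write(bounce, c))
			return -1;
		i += c;
	}
	return 0;
}

int main(void)
{
	return write_chunked("hello, pstore\n", 14);
}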
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -19,48 +19,25 @@
 #include "internal.h"
 
 static DEFINE_MUTEX(pmsg_lock);
-#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
 
 static ssize_t write_pmsg(struct file *file, const char __user *buf,
 			  size_t count, loff_t *ppos)
 {
-	size_t i, buffer_size;
-	char *buffer;
+	u64 id;
+	int ret;
 
 	if (!count)
 		return 0;
 
+	/* check outside lock, page in any data. write_buf_user also checks */
 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
 
-	buffer_size = count;
-	if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
-		buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
-	buffer = vmalloc(buffer_size);
-	if (!buffer)
-		return -ENOMEM;
-
 	mutex_lock(&pmsg_lock);
-	for (i = 0; i < count; ) {
-		size_t c = min(count - i, buffer_size);
-		u64 id;
-		long ret;
-
-		ret = __copy_from_user(buffer, buf + i, c);
-		if (unlikely(ret != 0)) {
-			mutex_unlock(&pmsg_lock);
-			vfree(buffer);
-			return -EFAULT;
-		}
-		psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
-				  psinfo);
-
-		i += c;
-	}
-
+	ret = psinfo->write_buf_user(PSTORE_TYPE_PMSG, 0, &id, 0, buf, 0, count,
+				     psinfo);
 	mutex_unlock(&pmsg_lock);
-	vfree(buffer);
-	return count;
+	return ret ? ret : count;
 }
 
 static const struct file_operations pmsg_fops = {
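write_pmsg() backs the /dev/pmsg0 character device. After this change, data flows from the user buffer to the backend in a single ->write_buf_user call instead of being staged through a vmalloc'd bounce buffer capped at two pages. A sketch of typical use from userspace (assuming a kernel with ramoops/pmsg configured; the record reappears under /sys/fs/pstore after a reboot):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/pmsg0", O_WRONLY);

	if (fd < 0)
		return 1;
	/* One write() becomes one persistent record in the pmsg zone. */
	write(fd, "boot marker 42\n", 15);
	close(fd);
	return 0;
}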
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -331,6 +331,24 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
 	return 0;
 }
 
+static int notrace ramoops_pstore_write_buf_user(enum pstore_type_id type,
+						 enum kmsg_dump_reason reason,
+						 u64 *id, unsigned int part,
+						 const char __user *buf,
+						 bool compressed, size_t size,
+						 struct pstore_info *psi)
+{
+	if (type == PSTORE_TYPE_PMSG) {
+		struct ramoops_context *cxt = psi->data;
+
+		if (!cxt->mprz)
+			return -ENOMEM;
+		return persistent_ram_write_user(cxt->mprz, buf, size);
+	}
+
+	return -EINVAL;
+}
+
 static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
 				struct timespec time, struct pstore_info *psi)
 {
@@ -369,6 +387,7 @@ static struct ramoops_context oops_cxt = {
 		.open	= ramoops_pstore_open,
 		.read	= ramoops_pstore_read,
 		.write_buf	= ramoops_pstore_write_buf,
+		.write_buf_user	= ramoops_pstore_write_buf_user,
 		.erase	= ramoops_pstore_erase,
 	},
 };
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -17,15 +17,16 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
+#include <linux/pstore_ram.h>
 #include <linux/rslib.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
-#include <linux/pstore_ram.h>
 #include <asm/page.h>
 
 struct persistent_ram_buffer {
@@ -303,6 +304,16 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
 	persistent_ram_update_ecc(prz, start, count);
 }
 
+static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
+	const void __user *s, unsigned int start, unsigned int count)
+{
+	struct persistent_ram_buffer *buffer = prz->buffer;
+	int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
+		-EFAULT : 0;
+	persistent_ram_update_ecc(prz, start, count);
+	return ret;
+}
+
 void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
 	struct persistent_ram_buffer *buffer = prz->buffer;
@@ -356,6 +367,38 @@ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
 	return count;
 }
 
+int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
+	const void __user *s, unsigned int count)
+{
+	int rem, ret = 0, c = count;
+	size_t start;
+
+	if (unlikely(!access_ok(VERIFY_READ, s, count)))
+		return -EFAULT;
+	if (unlikely(c > prz->buffer_size)) {
+		s += c - prz->buffer_size;
+		c = prz->buffer_size;
+	}
+
+	buffer_size_add(prz, c);
+
+	start = buffer_start_add(prz, c);
+
+	rem = prz->buffer_size - start;
+	if (unlikely(rem < c)) {
+		ret = persistent_ram_update_user(prz, s, start, rem);
+		s += rem;
+		c -= rem;
+		start = 0;
+	}
+	if (likely(!ret))
+		ret = persistent_ram_update_user(prz, s, start, c);
+
+	persistent_ram_update_header_ecc(prz);
+
+	return unlikely(ret) ? ret : count;
+}
+
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
 {
 	return prz->old_log_size;
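persistent_ram_write_user() mirrors persistent_ram_write(): the persistent RAM zone is a ring buffer, so an oversized write keeps only its tail, and a write that crosses the end of the buffer is split into two update calls (start..end, then wrap to offset 0). A small standalone sketch of that wrap arithmetic (plain memcpy standing in for the user copy, hypothetical sizes):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

static char ring[BUF_SIZE];

/* Write s[0..count) at `start`, wrapping once at BUF_SIZE like the prz. */
static size_t ring_write(size_t start, const char *s, size_t count)
{
	size_t c = count, rem;

	if (c > BUF_SIZE) {		/* keep only the tail, as the prz does */
		s += c - BUF_SIZE;
		c = BUF_SIZE;
	}
	rem = BUF_SIZE - start;
	if (rem < c) {			/* crosses the end: split in two */
		memcpy(ring + start, s, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	memcpy(ring + start, s, c);
	return (start + c) % BUF_SIZE;	/* new write position */
}

int main(void)
{
	size_t pos;

	memset(ring, '.', sizeof(ring));
	pos = ring_write(6, "abcd", 4);	/* wraps: "ab" at 6..7, "cd" at 0..1 */
	printf("pos=%zu ring=%.8s\n", pos, ring);
	return 0;
}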
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -380,8 +380,10 @@ struct mmc_host {
 	} embedded_sdio_data;
 #endif
 
+#ifdef CONFIG_BLOCK
 	int			latency_hist_enabled;
 	struct io_latency_state io_lat_s;
+#endif
 
 	unsigned long		private[0] ____cacheline_aligned;
 };
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,12 +22,13 @@
 #ifndef _LINUX_PSTORE_H
 #define _LINUX_PSTORE_H
 
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
 #include <linux/kmsg_dump.h>
 #include <linux/mutex.h>
-#include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
 
 /* types */
 enum pstore_type_id {
@@ -67,6 +68,10 @@ struct pstore_info {
 			enum kmsg_dump_reason reason, u64 *id,
 			unsigned int part, const char *buf, bool compressed,
 			size_t size, struct pstore_info *psi);
+	int		(*write_buf_user)(enum pstore_type_id type,
+			enum kmsg_dump_reason reason, u64 *id,
+			unsigned int part, const char __user *buf,
+			bool compressed, size_t size, struct pstore_info *psi);
 	int		(*erase)(enum pstore_type_id type, u64 id,
 			int count, struct timespec time,
 			struct pstore_info *psi);
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -17,11 +17,12 @@
 #ifndef __LINUX_PSTORE_RAM_H__
 #define __LINUX_PSTORE_RAM_H__
 
+#include <linux/compiler.h>
 #include <linux/device.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 struct persistent_ram_buffer;
 struct rs_control;
@@ -59,7 +60,9 @@ void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
-	unsigned int count);
+			 unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+			      const void __user *s, unsigned int count);
 
 void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -158,10 +158,11 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
 					bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-				     bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+					      bool to_user)
 {
-	__check_object_size(ptr, n, to_user);
+	if (!__builtin_constant_p(n))
+		__check_object_size(ptr, n, to_user);
 }
 #else
 static inline void check_object_size(const void *ptr, unsigned long n,
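check_object_size() gains two things: __always_inline (same motivation as the copy_*_user() change above) and a __builtin_constant_p(n) gate, so copies whose length is a compile-time constant skip the runtime check entirely; those cases are already covered by compile-time object-size checking. A userland sketch of the gate (hypothetical stand-in, build with -O2):

#include <stdio.h>

static inline void check_object_size(unsigned long n)
{
	/*
	 * Constant n: __builtin_constant_p(n) is 1 after inlining at -O2,
	 * so the branch (and the work below it) is compiled out.
	 */
	if (!__builtin_constant_p(n))
		printf("runtime check, n=%lu\n", n);
}

int main(int argc, char **argv)
{
	check_object_size(16);			/* elided at -O2 */
	check_object_size((unsigned long)argc);	/* kept: not a constant */
	return 0;
}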
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -135,30 +135,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
-	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
@@ -187,7 +172,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 		    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -200,20 +185,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
 
-reject:
-	return "<spans multiple pages>";
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -156,6 +156,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on !COMPILE_TEST
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
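HARDENED_USERCOPY_PAGESPAN is deliberately a debugging aid rather than a production hardening knob: the page-span rejection moved behind this off-by-default option because the kernel still contains multi-page allocations made without __GFP_COMP. A plausible .config fragment for a build that hunts for such users (assumed fragment, not part of this commit):

CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y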