Merge branch 'v4.4/topic/mm-kaslr-pax_usercopy' into linux-linaro-lsk-v4.4
commit daa56e80f3
8 changed files with 136 additions and 74 deletions
arch/x86/include/asm/uaccess.h
@@ -773,21 +773,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 #define user_access_begin()     __uaccess_begin()
 #define user_access_end()       __uaccess_end()
 
-#define unsafe_put_user(x, ptr)                                         \
-({                                                                      \
+#define unsafe_put_user(x, ptr, err_label)                              \
+do {                                                                    \
         int __pu_err;                                                   \
         __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
-        __builtin_expect(__pu_err, 0);                                  \
-})
+        if (unlikely(__pu_err)) goto err_label;                         \
+} while (0)
 
-#define unsafe_get_user(x, ptr)                                         \
-({                                                                      \
+#define unsafe_get_user(x, ptr, err_label)                              \
+do {                                                                    \
         int __gu_err;                                                   \
         unsigned long __gu_val;                                         \
         __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
         (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
-        __builtin_expect(__gu_err, 0);                                  \
-})
+        if (unlikely(__gu_err)) goto err_label;                         \
+} while (0)
 
 #endif /* _ASM_X86_UACCESS_H */
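For reference, a caller of the reworked accessors brackets its batched accesses with user_access_begin()/user_access_end() and supplies a local label that receives control on a fault, instead of checking a return value per access. The following is only an illustrative sketch (the function name and label are made up, not part of this merge; callers are expected to have validated the range with access_ok() beforehand); it follows the pattern this series introduces in lib/strncpy_from_user.c and lib/strnlen_user.c:

/* Illustrative only: copy two consecutive words to user space,
 * bailing out through the local "efault" label if either access faults.
 */
static long put_two_words(unsigned long __user *dst, unsigned long a, unsigned long b)
{
        user_access_begin();            /* open the user-access window (STAC on x86) */
        unsafe_put_user(a, dst, efault);
        unsafe_put_user(b, dst + 1, efault);
        user_access_end();              /* close the window (CLAC on x86) */
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}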
include/linux/slub_def.h
@@ -81,6 +81,7 @@ struct kmem_cache {
         int reserved;           /* Reserved bytes at the end of slabs */
         const char *name;       /* Name (only for display!) */
         struct list_head list;  /* List of slab caches */
+        int red_left_pad;       /* Left redzone padding size */
 #ifdef CONFIG_SYSFS
         struct kobject kobj;    /* For sysfs */
 #endif
include/linux/uaccess.h
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin() do { } while (0)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
 #endif
 
 #endif /* __LINUX_UACCESS_H__ */
lib/strncpy_from_user.c
@@ -39,8 +39,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                 unsigned long c, data;
 
                 /* Fall back to byte-at-a-time if we get a page fault */
-                if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                        break;
+                unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
                 *(unsigned long *)(dst+res) = c;
                 if (has_zero(c, &data, &constants)) {
                         data = prep_zero_mask(c, data, &constants);
@@ -55,8 +55,7 @@ byte_at_a_time:
         while (max) {
                 char c;
 
-                if (unlikely(unsafe_get_user(c,src+res)))
-                        return -EFAULT;
+                unsafe_get_user(c,src+res, efault);
                 dst[res] = c;
                 if (!c)
                         return res;
@@ -75,6 +74,7 @@ byte_at_a_time:
          * Nope: we hit the address space limit, and we still had more
          * characters the caller would have wanted. That's an EFAULT.
          */
+efault:
         return -EFAULT;
 }
 
lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
         src -= align;
         max += align;
 
-        if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
-                return 0;
+        unsafe_get_user(c, (unsigned long __user *)src, efault);
         c |= aligned_byte_mask(align);
 
         for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                 if (unlikely(max <= sizeof(unsigned long)))
                         break;
                 max -= sizeof(unsigned long);
-                if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                        return 0;
+                unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
         }
         res -= align;
 
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
          * Nope: we hit the address space limit, and we still had more
          * characters the caller would have wanted. That's 0.
          */
+efault:
         return 0;
 }
 
mm/slub.c
@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+        if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+                p += s->red_left_pad;
+
+        return p;
+}
+
 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -224,24 +232,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  * Core slab cache functions
  *******************************************************************/
 
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-                                struct page *page, const void *object)
-{
-        void *base;
-
-        if (!object)
-                return 1;
-
-        base = page_address(page);
-        if (object < base || object >= base + page->objects * s->size ||
-                (object - base) % s->size) {
-                return 0;
-        }
-
-        return 1;
-}
-
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
         return *(void **)(object + s->offset);
@@ -271,12 +261,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
-        for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
-                        __p += (__s)->size)
+        for (__p = fixup_red_left(__s, __addr); \
+                __p < (__addr) + (__objects) * (__s)->size; \
+                __p += (__s)->size)
 
 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-        for (__p = (__addr), __idx = 1; __idx <= __objects;\
-                        __p += (__s)->size, __idx++)
+        for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+                __idx <= __objects; \
+                __p += (__s)->size, __idx++)
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -456,6 +448,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
                 set_bit(slab_index(p, s, addr), map);
 }
 
+static inline int size_from_object(struct kmem_cache *s)
+{
+        if (s->flags & SLAB_RED_ZONE)
+                return s->size - s->red_left_pad;
+
+        return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+        if (s->flags & SLAB_RED_ZONE)
+                p -= s->red_left_pad;
+
+        return p;
+}
+
 /*
  * Debug settings:
  */
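To make the new layout concrete: with SLAB_RED_ZONE each slab slot now carries a red zone *before* the object as well as after it, so the pointer handed out to users is advanced by red_left_pad (fixup_red_left) and the debug checks shift it back before validating offsets (restore_red_left). The following is a small, self-contained sketch of that address arithmetic; it is not kernel code, and the struct below only mimics the two kmem_cache fields involved:

#include <stdio.h>

/* Illustrative stand-in for the two kmem_cache fields used here. */
struct fake_cache {
        unsigned int size;              /* total slot size, both red zones included */
        unsigned int red_left_pad;      /* bytes of red zone placed before the object */
};

/* Mirrors fixup_red_left(): the address handed out skips the left pad. */
static void *fixup_red_left(const struct fake_cache *s, void *p)
{
        return (char *)p + s->red_left_pad;
}

/* Mirrors restore_red_left(): debug code maps the pointer back to the slot start. */
static void *restore_red_left(const struct fake_cache *s, void *p)
{
        return (char *)p - s->red_left_pad;
}

int main(void)
{
        struct fake_cache s = { .size = 64, .red_left_pad = 8 };
        char slab_page[256];                    /* pretend slab page */
        void *slot = slab_page;                 /* start of the first slot */
        void *object = fixup_red_left(&s, slot);

        printf("slot at %p, object handed out at %p\n", slot, object);
        printf("restore_red_left() gives back %p\n", restore_red_left(&s, object));
        /* (object - page start) % s.size is only checked after restoring. */
        return 0;
}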
@@ -489,6 +497,26 @@ static inline void metadata_access_disable(void)
 /*
  * Object debugging
  */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+                                struct page *page, void *object)
+{
+        void *base;
+
+        if (!object)
+                return 1;
+
+        base = page_address(page);
+        object = restore_red_left(s, object);
+        if (object < base || object >= base + page->objects * s->size ||
+                (object - base) % s->size) {
+                return 0;
+        }
+
+        return 1;
+}
+
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
         metadata_access_enable();
@@ -628,7 +656,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
         pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
                p, p - addr, get_freepointer(s, p));
 
-        if (p > addr + 16)
+        if (s->flags & SLAB_RED_ZONE)
+                print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+        else if (p > addr + 16)
                 print_section("Bytes b4 ", p - 16, 16);
 
         print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -645,9 +675,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
         if (s->flags & SLAB_STORE_USER)
                 off += 2 * sizeof(struct track);
 
-        if (off != s->size)
+        if (off != size_from_object(s))
                 /* Beginning of the filler is the free pointer */
-                print_section("Padding ", p + off, s->size - off);
+                print_section("Padding ", p + off, size_from_object(s) - off);
 
         dump_stack();
 }
@@ -677,6 +707,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
         u8 *p = object;
 
+        if (s->flags & SLAB_RED_ZONE)
+                memset(p - s->red_left_pad, val, s->red_left_pad);
+
         if (s->flags & __OBJECT_POISON) {
                 memset(p, POISON_FREE, s->object_size - 1);
                 p[s->object_size - 1] = POISON_END;
@@ -769,11 +802,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
                 /* We also have user information there */
                 off += 2 * sizeof(struct track);
 
-        if (s->size == off)
+        if (size_from_object(s) == off)
                 return 1;
 
         return check_bytes_and_report(s, page, p, "Object padding",
-                p + off, POISON_INUSE, s->size - off);
+                        p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -817,6 +850,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
         u8 *endobject = object + s->object_size;
 
         if (s->flags & SLAB_RED_ZONE) {
+                if (!check_bytes_and_report(s, page, object, "Redzone",
+                        object - s->red_left_pad, val, s->red_left_pad))
+                        return 0;
+
                 if (!check_bytes_and_report(s, page, object, "Redzone",
                         endobject, val, s->inuse - s->object_size))
                         return 0;
@@ -1468,7 +1505,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                 set_freepointer(s, p, NULL);
         }
 
-        page->freelist = start;
+        page->freelist = fixup_red_left(s, start);
         page->inuse = page->objects;
         page->frozen = 1;
 
@@ -3283,7 +3320,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
          */
         size += 2 * sizeof(struct track);
 
-        if (flags & SLAB_RED_ZONE)
+        if (flags & SLAB_RED_ZONE) {
                 /*
                  * Add some empty padding so that we can catch
                  * overwrites from earlier objects rather than let
@@ -3292,6 +3329,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                  * of the object.
                  */
                 size += sizeof(void *);
+
+                s->red_left_pad = sizeof(void *);
+                s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+                size += s->red_left_pad;
+        }
 #endif
 
         /*
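For a sense of the per-object cost (an illustrative calculation, not taken from the commit): red_left_pad starts at sizeof(void *) and is rounded up to the cache's alignment, so on a 64-bit build a cache with 8-byte alignment pays 8 extra bytes per slot, while a cacheline-aligned cache would pay a full 64. The standalone sketch below, with hypothetical s->align values, just performs that rounding:

#include <stdio.h>

/* Same rounding the kernel's ALIGN() macro performs (a must be a power of two). */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long aligns[] = { 8, 16, 64 };         /* hypothetical s->align values */

        for (int i = 0; i < 3; i++) {
                unsigned long red_left_pad = sizeof(void *);    /* starts at one word */

                red_left_pad = ALIGN_UP(red_left_pad, aligns[i]);
                printf("align %lu -> left red zone of %lu bytes per object\n",
                       aligns[i], red_left_pad);
        }
        return 0;
}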
mm/usercopy.c
@@ -83,7 +83,7 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
         unsigned long check_high = check_low + n;
 
         /* Does not overlap if entirely above or entirely below. */
-        if (check_low >= high || check_high < low)
+        if (check_low >= high || check_high <= low)
                 return false;
 
         return true;
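The one-character change above matters because check_high is an exclusive end: a copy that finishes exactly where the protected region [low, high) begins does not overlap it, yet the old strict comparison reported it as overlapping and rejected harmless copies adjacent to the kernel text. A standalone sketch of the fixed check, with hypothetical addresses, shows the boundary case:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-alone copy of the fixed check:
 * does [check_low, check_low + n) intersect [low, high)?
 */
static bool overlaps(unsigned long check_low, unsigned long n,
                     unsigned long low, unsigned long high)
{
        unsigned long check_high = check_low + n;       /* exclusive end */

        /* Does not overlap if entirely above or entirely below. */
        if (check_low >= high || check_high <= low)
                return false;

        return true;
}

int main(void)
{
        unsigned long low = 0x2000, high = 0x3000;      /* hypothetical protected range */

        /* Touching but not overlapping: must be allowed (prints 0). The old
         * "check_high < low" test wrongly flagged this case as an overlap. */
        printf("ends at low:   %d\n", overlaps(0x1f00, 0x100, low, high));
        /* One byte into the region: a real overlap (prints 1). */
        printf("one byte over: %d\n", overlaps(0x1f00, 0x101, low, high));
        return 0;
}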
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
         return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-                                            bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+                                          struct page *page, bool to_user)
 {
-        struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
         const void *end = ptr + n - 1;
+        struct page *endpage;
         bool is_reserved, is_cma;
 
-        /*
-         * Some architectures (arm64) return true for virt_addr_valid() on
-         * vmalloced addresses. Work around this by checking for vmalloc
-         * first.
-         */
-        if (is_vmalloc_addr(ptr))
-                return NULL;
-
-        if (!virt_addr_valid(ptr))
-                return NULL;
-
-        page = virt_to_head_page(ptr);
-
-        /* Check slab allocator for flags and size. */
-        if (PageSlab(page))
-                return __check_heap_object(ptr, n, page);
-
         /*
          * Sometimes the kernel data regions are not marked Reserved (see
          * check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
                     ((unsigned long)end & (unsigned long)PAGE_MASK)))
                 return NULL;
 
-        /* Allow if start and end are inside the same compound page. */
+        /* Allow if fully inside the same compound (__GFP_COMP) page. */
         endpage = virt_to_head_page(end);
         if (likely(endpage == page))
                 return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
         is_reserved = PageReserved(page);
         is_cma = is_migrate_cma_page(page);
         if (!is_reserved && !is_cma)
-                goto reject;
+                return "<spans multiple pages>";
 
         for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
                 page = virt_to_head_page(ptr);
                 if (is_reserved && !PageReserved(page))
-                        goto reject;
+                        return "<spans Reserved and non-Reserved pages>";
                 if (is_cma && !is_migrate_cma_page(page))
-                        goto reject;
+                        return "<spans CMA and non-CMA pages>";
         }
+#endif
 
         return NULL;
+}
 
-reject:
-        return "<spans multiple pages>";
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                            bool to_user)
+{
+        struct page *page;
+
+        /*
+         * Some architectures (arm64) return true for virt_addr_valid() on
+         * vmalloced addresses. Work around this by checking for vmalloc
+         * first.
+         */
+        if (is_vmalloc_addr(ptr))
+                return NULL;
+
+        if (!virt_addr_valid(ptr))
+                return NULL;
+
+        page = virt_to_head_page(ptr);
+
+        /* Check slab allocator for flags and size. */
+        if (PageSlab(page))
+                return __check_heap_object(ptr, n, page);
+
+        /* Verify object does not incorrectly span multiple pages. */
+        return check_page_span(ptr, n, page, to_user);
 }
 
 /*
security/Kconfig
@@ -146,6 +146,17 @@ config HARDENED_USERCOPY
           or are part of the kernel text. This kills entire classes
           of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+        bool "Refuse to copy allocations that span multiple pages"
+        depends on HARDENED_USERCOPY
+        depends on !COMPILE_TEST
+        help
+          When a multi-page allocation is done without __GFP_COMP,
+          hardened usercopy will reject attempts to copy it. There are,
+          however, several cases of this in the kernel that have not all
+          been removed. This config is intended to be used only while
+          trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
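As a sketch of what this option observes (illustrative kernel-style code, not from this commit and not a recommended pattern; it only contrasts the two allocation flavours check_page_span() distinguishes): a multi-page allocation made without __GFP_COMP has no compound head page, so a copy_to_user() that crosses the page boundary is flagged as "<spans multiple pages>", while the same copy from a __GFP_COMP allocation passes the span check because both halves share one head page.

/* Illustrative only: the difference CONFIG_HARDENED_USERCOPY_PAGESPAN sees. */
static long pagespan_demo(void __user *ubuf)
{
        /* Order-1 allocation WITHOUT __GFP_COMP: two unlinked pages, so a
         * copy crossing the boundary trips check_page_span().
         */
        unsigned long plain = __get_free_pages(GFP_KERNEL, 1);

        /* Same size WITH __GFP_COMP: virt_to_head_page() returns one head
         * page for both halves, so the spanning copy is allowed.
         */
        struct page *comp = alloc_pages(GFP_KERNEL | __GFP_COMP, 1);
        unsigned long uncopied = 0;

        if (!plain || !comp)
                goto out;

        /* Both copies start 64 bytes before the page boundary and cross it. */
        uncopied |= copy_to_user(ubuf, (void *)(plain + PAGE_SIZE - 64), 128);
        uncopied |= copy_to_user(ubuf, page_address(comp) + PAGE_SIZE - 64, 128);

out:
        if (plain)
                free_pages(plain, 1);
        if (comp)
                __free_pages(comp, 1);
        return uncopied ? -EFAULT : 0;
}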