Currently we may put elements reserved by a mempool into quarantine via kasan_kfree(). This is totally wrong, since the quarantine may really free these objects: when the mempool later tries to use such an element, a use-after-free will happen. Or the mempool may decide that it no longer needs the element and double-free it. So don't put the object into quarantine in kasan_kfree(); just poison it. Rename kasan_kfree() to kasan_poison_kfree() to reflect that.

Also, we shouldn't use kasan_slab_alloc()/kasan_krealloc() in kasan_unpoison_element(), because those functions may update the allocation stack trace. This would be wrong for most of the remove_element() call sites. (The only call site where we may want to update the alloc stack trace is in mempool_alloc(). Kmemleak solves this by calling kmemleak_update_trace(), so we could do something similar. But that is out of scope for this patch.)

Fixes: 55834c59098d ("mm: kasan: initial memory quarantine implementation")
Link: http://lkml.kernel.org/r/575977C3.1010905@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reported-by: Kuthonuzo Luruo <kuthonuzo.luruo@hpe.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Kostya Serebryany <kcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Bug: 64145065
(cherry-picked from 9b75a867cc9ddbafcaf35029358ac500f2635ff3)
Change-Id: Idb6c152dae8f8f2975dbe6acb7165315be8b465b
Signed-off-by: Paul Lawrence <paullawrence@google.com>
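For illustration, a minimal sketch of how mm/mempool.c pairs these hooks after this change, based on the upstream commit (surrounding mempool code elided; bodies may differ slightly by tree):

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		/* Poison only; never hand a reserved element to quarantine. */
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		/* Unpoison without recording a new allocation stack trace. */
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}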
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/sched.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;

#ifdef CONFIG_KASAN

#define KASAN_SHADOW_SCALE_SHIFT 3

#include <asm/kasan.h>
#include <asm/pgtable.h>

extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];

void kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end);
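/*
 * Each shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT == 8 bytes of
 * memory, so the shadow address is (addr >> 3) + KASAN_SHADOW_OFFSET.
 */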
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

/* Enable reporting bugs after kasan_disable_current() */
static inline void kasan_enable_current(void)
{
	current->kasan_depth++;
}

/* Disable reporting bugs for current task */
static inline void kasan_disable_current(void)
{
	current->kasan_depth--;
}

void kasan_unpoison_shadow(const void *address, size_t size);

void kasan_unpoison_task_stack(struct task_struct *task);

void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_destroy(struct kmem_cache *cache);

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
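/*
 * Poison the object as if freed, but don't put it into the quarantine:
 * mempool keeps the element for reuse, so the quarantine must never
 * actually free it (see the commit message above).
 */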
void kasan_poison_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
		   gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);

void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object);

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

size_t ksize(const void *);
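/*
 * ksize() unpoisons the whole allocated area as a side effect, so this
 * re-marks a reused mempool element as accessible without recording a
 * new allocation stack trace (unlike kasan_slab_alloc()/kasan_krealloc()).
 */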
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
				      size_t *size,
				      unsigned long *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_destroy(struct kmem_cache *cache) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}

static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_poison_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
				 size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,
				  gfp_t flags) {}

static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
				    gfp_t flags) {}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

static inline void kasan_unpoison_slab(const void *ptr) { }

#endif /* CONFIG_KASAN */

#endif /* LINUX_KASAN_H */