From 6adac935e61895ec353ba28b9911ff954a4dcd15 Mon Sep 17 00:00:00 2001
From: Patrick Daly
Date: Wed, 23 Nov 2016 17:05:22 -0800
Subject: [PATCH] mm: Inform KASAN when allocating pages during isolation

kasan_alloc_pages() should be called alongside kernel_map_pages() to
prevent splats when CONFIG_KASAN is enabled.

This is a greatly simplified version of commit
46f24fd857b37bb86ddd5d0ac3d194e984dfdf1c ("mm/page_alloc: introduce
post allocation processing on page allocator"), which solves the same
problem on later kernel versions.

Change-Id: Ib60d079a4d8685e781624bb1403b2a3060e31d27
Signed-off-by: Patrick Daly
---
 mm/page_isolation.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 00c96462cc36..3ecd3807c2c2 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -7,6 +7,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/kasan.h>
 #include "internal.h"
 
 static int set_migratetype_isolate(struct page *page,
@@ -105,6 +106,8 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		if (pfn_valid_within(page_to_pfn(buddy)) &&
 		    !is_migrate_isolate_page(buddy)) {
 			__isolate_free_page(page, order);
+			kasan_alloc_pages(page, order);
+			arch_alloc_page(page, order);
 			kernel_map_pages(page, (1 << order), 1);
 			set_page_refcounted(page);
 			isolated_page = page;
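
For reference, the upstream commit cited above (46f24fd857b3) groups
these calls into a single post-allocation helper on later kernels. The
sketch below is a simplified approximation of that helper, not the
verbatim upstream code (the full version also poisons pages and records
page_owner information, which is why it takes gfp_flags). This patch
only open-codes the two calls that were missing at the isolation site,
since kernel_map_pages() and set_page_refcounted() are already made
there:

	/*
	 * Simplified approximation of the upstream post-allocation hook:
	 * the work every freshly allocated page needs before it is handed
	 * out, gathered in one place instead of open-coded per call site.
	 */
	static void post_alloc_hook(struct page *page, unsigned int order,
				    gfp_t gfp_flags)
	{
		set_page_private(page, 0);
		set_page_refcounted(page);

		arch_alloc_page(page, order);
		kernel_map_pages(page, 1 << order, 1);
		kasan_alloc_pages(page, order);
		/* upstream also poisons the pages and sets page_owner here */
	}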