From 49118fe6a32a49d198128a6dd1d6bfd0e3b189f8 Mon Sep 17 00:00:00 2001
From: Vinayak Menon <vinmenon@codeaurora.org>
Date: Fri, 31 Mar 2017 11:13:06 +1100
Subject: [PATCH] mm: enable page poisoning early at boot

On SPARSEMEM systems page poisoning is enabled after buddy is up,
because of the dependency on page extension init.  This causes the
pages released by free_all_bootmem not to be poisoned.  This either
delays or misses the identification of some issues because the pages
have to undergo another cycle of alloc-free-alloc for any corruption
to be detected.

Enable page poisoning early by getting rid of the PAGE_EXT_DEBUG_POISON
flag.  Since all the free pages will now be poisoned, the flag need not
be verified before checking the poison during an alloc.

Link: http://lkml.kernel.org/r/1490358246-11001-1-git-send-email-vinmenon@codeaurora.org
Acked-by: Laura Abbott
Tested-by: Laura Abbott
Cc: Joonsoo Kim
Cc: Michal Hocko
Cc: Akinobu Mita
Signed-off-by: Andrew Morton
[vinmenon@codeaurora.org: resolve trivial merge conflicts. Remove the
 redundant free pages RO feature from the page_poison.c file which is
 the reason for conflicts + squash the addendum commit
 40961ef8d65f51093bc94de110b97b590b6b9275
 ('mm-enable-page-poisoning-early-at-boot-v2')]
Git-commit: c5b7cd344fd6341e6db79e55c0f1f4d1d9c67a7e
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Change-Id: I1bb1f99d3a2e1135131911905e0916c837ba9d8a
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
---
 include/linux/mm.h |  1 -
 mm/page_alloc.c    |  9 ++----
 mm/page_ext.c      |  3 --
 mm/page_poison.c   | 73 ++++++++--------------------------------------
 4 files changed, 15 insertions(+), 71 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d9fe27c8a57e..3c10d4638646 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2306,7 +2306,6 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a4cfa64634b4..170c1486e5c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1397,24 +1397,21 @@ static inline int check_new_page(struct page *page)
 	return 0;
 }
 
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
 {
 	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-		page_poisoning_enabled() && poisoned;
+		page_poisoning_enabled();
 }
 
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 								int alloc_flags)
 {
 	int i;
-	bool poisoned = true;
 
 	for (i = 0; i < (1 << order); i++) {
 		struct page *p = page + i;
 		if (unlikely(check_new_page(p)))
 			return 1;
-		if (poisoned)
-			poisoned &= page_is_poisoned(p);
 	}
 
 	set_page_private(page, 0);
@@ -1425,7 +1422,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
-	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
 			clear_highpage(page + i);
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e64f7fe..916accfec86a 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -54,9 +54,6 @@
 
 static struct page_ext_operations *page_ext_ops[] = {
 	&debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
-	&page_poisoning_ops,
-#endif
 #ifdef CONFIG_PAGE_OWNER
 	&page_owner_ops,
 #endif
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 09192467efc9..eb3c4f1aade3 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,7 +6,6 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool __page_poisoning_enabled __read_mostly;
 static bool want_page_poisoning __read_mostly;
 
 static int early_page_poison_param(char *buf)
@@ -24,71 +23,23 @@ static int early_page_poison_param(char *buf)
 early_param("page_poison", early_page_poison_param);
 
 bool page_poisoning_enabled(void)
-{
-	return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
-	return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
 {
 	/*
-	 * page poisoning is debug page alloc for some arches. If either
-	 * of those options are enabled, enable poisoning
+	 * Assumes that debug_pagealloc_enabled is set before
+	 * free_all_bootmem.
+	 * Page poisoning is debug page alloc for some arches. If
+	 * either of those options are enabled, enable poisoning.
 	 */
-	if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
-		if (!want_page_poisoning && !debug_pagealloc_enabled())
-			return;
-	} else {
-		if (!want_page_poisoning)
-			return;
-	}
-
-	__page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
-	.need = need_page_poisoning,
-	.init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	if (!page_ext)
-		return false;
-
-	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+	return (want_page_poisoning ||
+		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+		debug_pagealloc_enabled()));
 }
 
 static void poison_page(struct page *page)
 {
 	void *addr = kmap_atomic(page);
 
-	set_page_poison(page);
 	memset(addr, PAGE_POISON, PAGE_SIZE);
-	mark_addr_rdonly(addr);
 	kunmap_atomic(addr);
 }
 
@@ -145,13 +96,13 @@ static void unpoison_page(struct page *page)
 {
 	void *addr;
 
-	if (!page_is_poisoned(page))
-		return;
-
 	addr = kmap_atomic(page);
+	/*
+	 * Page poisoning when enabled poisons each and every page
+	 * that is freed to buddy. Thus no extra check is done to
+	 * see if a page was poisoned.
+	 */
 	check_poison_mem(page, addr, PAGE_SIZE);
-	mark_addr_rdwrite(addr);
-	clear_page_poison(page);
 	kunmap_atomic(addr);
 }
 
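
Note (illustrative, not part of the patch): for readers unfamiliar with the
mechanism this patch moves earlier in boot, below is a minimal user-space
sketch of the poison-on-free / check-on-alloc pattern that kernel page
poisoning implements.  BLOCK_SIZE, POISON_BYTE, pool_free() and pool_alloc()
are hypothetical names, not kernel APIs; the 0xaa fill value simply mirrors
the kernel's PAGE_POISON pattern.

/*
 * Minimal user-space sketch of the poison-on-free / check-on-alloc
 * pattern used by kernel page poisoning.  All names here are
 * hypothetical; only the idea (fill freed memory with a known pattern,
 * verify the pattern on the next allocation) matches the kernel code
 * touched by this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE  4096	/* stand-in for PAGE_SIZE */
#define POISON_BYTE 0xaa	/* mirrors the kernel's PAGE_POISON value */

/* "Free": fill the block with the poison pattern before parking it. */
static void pool_free(unsigned char *block)
{
	memset(block, POISON_BYTE, BLOCK_SIZE);
}

/*
 * "Alloc": verify that the poison pattern is still intact.  Any byte
 * that differs means something wrote to the block while it was free.
 */
static unsigned char *pool_alloc(unsigned char *block)
{
	size_t i;

	for (i = 0; i < BLOCK_SIZE; i++) {
		if (block[i] != POISON_BYTE) {
			fprintf(stderr,
				"corruption at offset %zu: 0x%02x != 0x%02x\n",
				i, (unsigned int)block[i],
				(unsigned int)POISON_BYTE);
			break;
		}
	}
	return block;
}

int main(void)
{
	unsigned char *block = malloc(BLOCK_SIZE);

	if (!block)
		return 1;

	pool_free(block);	/* poison on free */
	block[128] = 0x42;	/* simulated write to memory that is free */
	pool_alloc(block);	/* corruption is reported here, on the next alloc */

	free(block);
	return 0;
}

With this patch, poisoning is active before free_all_bootmem() hands boot
memory to the buddy allocator, so the equivalent of the check in pool_alloc()
runs on the very first allocation of those pages instead of requiring an
extra alloc-free-alloc cycle, and prep_new_page() no longer needs to consult
a per-page PAGE_EXT_DEBUG_POISON flag before trusting that check.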