Merge "mm/page_alloc: introduce post allocation processing on page allocator"

This commit is contained in:
Linux Build Service Account 2017-07-11 00:40:15 -07:00 committed by Gerrit - the friendly Code Review server
commit d6b4382bc7
4 changed files with 19 additions and 22 deletions

View file

@@ -69,14 +69,8 @@ static void map_pages(struct list_head *list)
order = page_private(page);
nr_pages = 1 << order;
set_page_private(page, 0);
set_page_refcounted(page);
kasan_alloc_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, nr_pages, 1);
set_page_owner(page, order, __GFP_MOVABLE);
post_alloc_hook(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);

View file

@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA

View file

@@ -1443,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
page_poisoning_enabled();
}
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
set_page_private(page, 0);
set_page_refcounted(page);
kasan_alloc_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
kernel_poison_pages(page, 1 << order, 1);
set_page_owner(page, order, gfp_flags);
}
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
int alloc_flags)
int alloc_flags)
{
int i;
@@ -1454,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
return 1;
}
set_page_private(page, 0);
set_page_refcounted(page);
kasan_alloc_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
kernel_poison_pages(page, 1 << order, 1);
post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
@@ -1469,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
set_page_owner(page, order, gfp_flags);
/*
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking

View file

@@ -126,11 +126,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
out:
spin_unlock_irqrestore(&zone->lock, flags);
if (isolated_page) {
kasan_alloc_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, (1 << order), 1);
set_page_refcounted(page);
set_page_owner(page, order, __GFP_MOVABLE);
post_alloc_hook(page, order, __GFP_MOVABLE);
__free_pages(isolated_page, order);
}
}