mm/page_owner: initialize page owner without holding the zone lock
It's not necessary to initialize page_owner while holding the zone lock. Doing so causes extra contention on the zone lock, although it is not a big problem since page_owner is only a debug feature. Still, it is better than before, so do it. This is also a preparation step for using stackdepot in the page owner feature: stackdepot allocates new pages when it has no reserved space left, and holding the zone lock in that case would cause a deadlock.

Change-Id: Id96ab8444f194bead3fa4a8ddda30cdcca4ddc9f
Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Git-commit: 83358ece26b70f20c0ba2e0e00dc84b0ee24fe6d
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
[guptap@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
parent 592b8480c5
commit 5437536c75
3 changed files with 11 additions and 7 deletions
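The locking problem the commit message describes can be sketched roughly as follows. This is an illustrative fragment only, not code from the patch: the surrounding function and the helper take_page_off_freelist() are hypothetical, and only set_page_owner(), zone->lock, and the __GFP_MOVABLE flag come from the actual change. The key point is that once page_owner records stacks via stackdepot, set_page_owner() may itself allocate pages, so it must run only after zone->lock is released; otherwise the allocation path could try to take the same lock and deadlock.

/*
 * Illustrative sketch only -- not part of this patch.
 * take_page_off_freelist() is a made-up helper standing in for the
 * real freelist manipulation that must happen under zone->lock.
 */
static struct page *grab_free_page(struct zone *zone, unsigned int order)
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	page = take_page_off_freelist(zone, order);
	spin_unlock_irqrestore(&zone->lock, flags);

	if (page) {
		/*
		 * Safe here: zone->lock is no longer held, so an
		 * allocation inside set_page_owner() (e.g. stackdepot
		 * saving a stack trace) cannot deadlock on that lock.
		 */
		set_page_owner(page, order, __GFP_MOVABLE);
	}

	return page;
}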
mm/compaction.c
@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -74,6 +75,8 @@ static void map_pages(struct list_head *list)
 		kasan_alloc_pages(page, order);
 		arch_alloc_page(page, order);
 		kernel_map_pages(page, nr_pages, 1);
+
+		set_page_owner(page, order, __GFP_MOVABLE);
 		if (order)
 			split_page(page, order);
 
mm/page_alloc.c
@@ -2281,8 +2281,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/kasan.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 static int set_migratetype_isolate(struct page *page,
@@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 			if (pfn_valid_within(page_to_pfn(buddy)) &&
 			    !is_migrate_isolate_page(buddy)) {
 				__isolate_free_page(page, order);
-				kasan_alloc_pages(page, order);
-				arch_alloc_page(page, order);
-				kernel_map_pages(page, (1 << order), 1);
-				set_page_refcounted(page);
 				isolated_page = page;
 			}
 		}
@@ -128,8 +125,14 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		kasan_alloc_pages(page, order);
+		arch_alloc_page(page, order);
+		kernel_map_pages(page, (1 << order), 1);
+		set_page_refcounted(page);
+		set_page_owner(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
+	}
 }
 
 static inline struct page *