Merge "mm/page_owner: copy last_migrate_reason in copy_page_owner()"
commit 381c897543
5 changed files with 45 additions and 49 deletions
include/linux/mm.h
@@ -529,7 +529,6 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function. Provide a
mm/compaction.c
@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -59,13 +60,33 @@ static unsigned long release_freepages(struct list_head *freelist)
 
 static void map_pages(struct list_head *list)
 {
-	struct page *page;
+	unsigned int i, order, nr_pages;
+	struct page *page, *next;
+	LIST_HEAD(tmp_list);
 
-	list_for_each_entry(page, list, lru) {
-		kasan_alloc_pages(page, 0);
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
+
+		order = page_private(page);
+		nr_pages = 1 << order;
+		set_page_private(page, 0);
+		set_page_refcounted(page);
+
+		kasan_alloc_pages(page, order);
+		arch_alloc_page(page, order);
+		kernel_map_pages(page, nr_pages, 1);
+
+		set_page_owner(page, order, __GFP_MOVABLE);
+		if (order)
+			split_page(page, order);
+
+		for (i = 0; i < nr_pages; i++) {
+			list_add(&page->lru, &tmp_list);
+			page++;
+		}
 	}
+
+	list_splice(&tmp_list, list);
 }
 
 static inline bool migrate_async_suitable(int migratetype)
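The rewritten map_pages() walks the list with list_for_each_entry_safe() and a temporary list because it now consumes one entry per high-order block and emits 1 << order order-0 entries in its place: the caller stashes the block's order in page->private, and map_pages() reads it back, performs the kasan/arch/kernel_map/page_owner work once per block with the zone lock already dropped, and only then calls split_page(). Below is a minimal userspace model of that producer/consumer contract; the struct page, the next pointer, and the helper names are simplified stand-ins for the kernel structures, not the real API.

#include <stdio.h>

struct page {
	unsigned long private;  /* stashed order, as with set_page_private() */
	struct page *next;      /* stand-in for the page->lru list linkage */
};

/* Producer (isolate side): queue a single entry per block, order in ->private. */
static struct page *isolate_block(struct page *block, unsigned int order,
				  struct page *freelist)
{
	block->private = order;         /* set_page_private(page, order) */
	block->next = freelist;         /* list_add_tail(&page->lru, freelist) */
	return block;
}

/* Consumer (map_pages side): expand each block into order-0 entries. */
static struct page *map_pages(struct page *freelist)
{
	struct page *out = NULL;

	while (freelist) {
		struct page *page = freelist;
		unsigned int nr_pages = 1U << page->private;

		freelist = page->next;
		page->private = 0;      /* set_page_private(page, 0) */

		/* The kasan/arch/kernel_map/page_owner work happens here,
		 * once per block, with no lock held. */
		for (unsigned int i = 0; i < nr_pages; i++) {
			page[i].next = out;     /* split_page() + list_add() */
			out = &page[i];
		}
	}
	return out;
}

int main(void)
{
	struct page block[4] = {{ 0 }};   /* one contiguous order-2 block */
	struct page *list = map_pages(isolate_block(block, 2, NULL));
	unsigned int n = 0;

	for (; list; list = list->next)
		n++;
	printf("order-0 pages: %u\n", n);       /* prints 4 */
	return 0;
}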
@@ -442,12 +463,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
+	unsigned int order;
 
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-		int isolated, i;
+		int isolated;
 		struct page *page = cursor;
 
 		/*
@@ -513,17 +535,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			goto isolate_fail;
 		}
 
-		/* Found a free page, break it into order-0 pages */
-		isolated = split_free_page(page);
+		/* Found a free page, will break it into order-0 pages */
+		order = page_order(page);
+		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
+		set_page_private(page, order);
 
 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
-		for (i = 0; i < isolated; i++) {
-			list_add(&page->lru, freelist);
-			page++;
-		}
+		list_add_tail(&page->lru, freelist);
+
 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 			blockpfn += isolated;
 			break;
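The point of this hunk is lock hold time: the old loop called split_free_page() and then linked 1 << order pages onto the freelist while zone->lock was held, whereas the new code records the order with set_page_private(), queues one entry with list_add_tail(), and leaves the O(2^order) expansion to map_pages() after the lock is released. A generic sketch of that shape, with a pthread mutex standing in for the zone spinlock and a memset standing in for the per-page mapping work (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define MAX_ORDER 10

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static char pool[1 << MAX_ORDER][PAGE_SIZE];    /* a model pageblock */

/* Stand-in for the per-page kasan/arch/kernel_map work. */
static void map_one(unsigned int i)
{
	memset(pool[i], 0, PAGE_SIZE);
}

/* Old shape: per-page work done while the lock is held. */
static void isolate_and_map_locked(unsigned int order)
{
	pthread_mutex_lock(&zone_lock);
	for (unsigned int i = 0; i < (1U << order); i++)
		map_one(i);
	pthread_mutex_unlock(&zone_lock);
}

/* New shape: only the order is recorded under the lock;
 * the expansion runs afterwards. */
static void isolate_locked_map_later(unsigned int order)
{
	unsigned int stashed;

	pthread_mutex_lock(&zone_lock);
	stashed = order;                /* set_page_private(page, order) */
	pthread_mutex_unlock(&zone_lock);

	for (unsigned int i = 0; i < (1U << stashed); i++)
		map_one(i);             /* done later by map_pages() */
}

int main(void)
{
	isolate_and_map_locked(MAX_ORDER);
	isolate_locked_map_later(MAX_ORDER);
	puts("same work, far shorter critical section in the second shape");
	return 0;
}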
@@ -636,7 +658,7 @@ isolate_freepages_range(struct compact_control *cc,
 	 */
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(&freelist);
 
 	if (pfn < end_pfn) {
@@ -1085,7 +1107,7 @@ static void isolate_freepages(struct compact_control *cc)
 		}
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(freelist);
 
 	/*
mm/page_alloc.c
@@ -2281,8 +2281,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
@@ -2299,33 +2297,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	return 1UL << order;
 }
 
-/*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
-	unsigned int order;
-	int nr_pages;
-
-	order = page_order(page);
-
-	nr_pages = __isolate_free_page(page, order);
-	if (!nr_pages)
-		return 0;
-
-	/* Split into individual pages */
-	set_page_refcounted(page);
-	split_page(page, order);
-	return nr_pages;
-}
-
 /*
  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/kasan.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 static int set_migratetype_isolate(struct page *page,
@@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		if (pfn_valid_within(page_to_pfn(buddy)) &&
 		    !is_migrate_isolate_page(buddy)) {
 			__isolate_free_page(page, order);
-			kasan_alloc_pages(page, order);
-			arch_alloc_page(page, order);
-			kernel_map_pages(page, (1 << order), 1);
-			set_page_refcounted(page);
 			isolated_page = page;
 		}
 	}
@@ -128,8 +125,14 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		kasan_alloc_pages(page, order);
+		arch_alloc_page(page, order);
+		kernel_map_pages(page, (1 << order), 1);
+		set_page_refcounted(page);
+		set_page_owner(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
+	}
 }
 
 static inline struct page *
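unset_migratetype_isolate() gets the same treatment: zone->lock is taken with spin_lock_irqsave(), so interrupts are off inside the critical section, and the patch moves the kasan/arch/kernel_map/refcount work (plus the set_page_owner() call that __isolate_free_page() no longer makes) behind spin_unlock_irqrestore(), keyed off the already-existing isolated_page variable. A small model of this decide-under-lock, act-after-unlock pattern, again with a pthread mutex and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct page { bool free_in_buddy; unsigned int order; };

/* Models the deferred kasan/arch/kernel_map/page_owner calls:
 * too heavy to run with the lock held and interrupts disabled. */
static void post_isolate_work(struct page *page)
{
	printf("mapping %u pages after unlock\n", 1U << page->order);
}

static void unset_isolate(struct page *page)
{
	struct page *isolated_page = NULL;

	pthread_mutex_lock(&lock);
	if (page->free_in_buddy) {      /* the decision stays under the lock */
		page->free_in_buddy = false;
		isolated_page = page;   /* mirrors "isolated_page = page" */
	}
	pthread_mutex_unlock(&lock);

	if (isolated_page)              /* the heavy work runs after unlock */
		post_isolate_work(isolated_page);
}

int main(void)
{
	struct page page = { .free_in_buddy = true, .order = 3 };

	unset_isolate(&page);
	return 0;
}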
mm/page_owner.c
@@ -105,6 +105,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
+	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
 	new_ext->nr_entries = old_ext->nr_entries;
 
 	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
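The one-line mm/page_owner.c change is the fix the merge is named after: __copy_page_owner() copied order, gfp_mask, and the stack trace to the migration target but skipped last_migrate_reason, so the new page reported a stale or unset reason. A minimal model of the bug and the fix; the struct below is an illustrative stand-in, not the kernel's page_ext layout:

#include <assert.h>

struct page_owner_ext {
	unsigned int order;
	unsigned int gfp_mask;
	int last_migrate_reason;
};

/* Field-by-field copy in the style of __copy_page_owner(); before the fix
 * the last_migrate_reason assignment was missing, so the destination kept
 * whatever value it started with. */
static void copy_owner(const struct page_owner_ext *old_ext,
		       struct page_owner_ext *new_ext)
{
	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->last_migrate_reason = old_ext->last_migrate_reason; /* the fix */
}

int main(void)
{
	struct page_owner_ext old_ext = { 2, 0x10u, 5 };
	struct page_owner_ext new_ext = { 0, 0, -1 };

	copy_owner(&old_ext, &new_ext);
	assert(new_ext.last_migrate_reason == 5);
	return 0;
}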