[PATCH] hugepage allocator cleanup
Insert "fresh" huge pages into the hugepage allocator by the same means
as they are freed back into it. This reduces code size and allows
enqueue_huge_page to be inlined into the hugepage free fastpath.

Eliminate occurrences of hugepages on the free list with non-zero
refcount. This can allow stricter refcount checks in future, and is
also required for the lockless pagecache.

Signed-off-by: Nick Piggin <npiggin@suse.de>

"This patch also eliminates a leak 'cleaned up' by re-clobbering the
refcount on every allocation from the hugepage freelists. With respect
to the lockless pagecache, the crucial aspect is to eliminate
unconditional set_page_count() to 0 on pages with potentially nonzero
refcounts, though closer inspection suggests the assignments removed
are entirely spurious."

Acked-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
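The mechanism the patch relies on is the compound-page destructor: the final put_page() on a huge page looks up the function stored at page[1].lru.next and calls it, so handing a fresh page to put_page() routes it onto the hugepage freelists through exactly the path used at free time. Below is a minimal userspace model of that flow. It is an illustrative sketch only: struct page, put_page(), and the list handling are simplified stand-ins for the 2.6-era mm internals, not the kernel's definitions.

/*
 * Minimal userspace model of the compound-page destructor flow.
 * NOT kernel code: everything here is a simplified stand-in.
 */
#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct page {
	int count;              /* page refcount (atomic_t in the kernel) */
	struct list_head lru;   /* in page[1] of a compound page, lru.next
	                           doubles as the destructor slot */
};

static struct list_head freelist = { &freelist, &freelist };
static unsigned long free_huge_pages;

static void enqueue_huge_page(struct page *page)
{
	/* minimal list_add(&page->lru, &freelist) */
	page->lru.next = freelist.next;
	page->lru.prev = &freelist;
	freelist.next->prev = &page->lru;
	freelist.next = &page->lru;
	free_huge_pages++;
}

static void free_huge_page(struct page *page)
{
	assert(page->count == 0);   /* models BUG_ON(page_count(page)) */
	enqueue_huge_page(page);
}

static void put_page(struct page *page)
{
	if (--page->count == 0) {
		/* compound-page destructor stored at page[1].lru.next */
		void (*dtor)(struct page *) =
			(void (*)(struct page *))page[1].lru.next;
		dtor(page);
	}
}

int main(void)
{
	static struct page hpage[2];    /* head page plus one tail page */

	hpage[0].count = 1;                           /* fresh from the buddy allocator */
	hpage[1].lru.next = (void *)free_huge_page;   /* set dtor */
	put_page(hpage);        /* frees it into the hugepage allocator */
	printf("free_huge_pages = %lu\n", free_huge_pages);   /* prints 1 */
	return 0;
}

The invariant the patch establishes is visible here: the only route onto the freelist is through free_huge_page(), which fires only once the refcount has already dropped to zero.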
parent 545b1ea9bf
commit a482289d46

 mm/hugetlb.c | 24 ++++++++----------------
 1 file changed, 8 insertions(+), 16 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -64,7 +64,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
-static struct page *alloc_fresh_huge_page(void)
+static int alloc_fresh_huge_page(void)
 {
 	static int nid = 0;
 	struct page *page;
@@ -72,12 +72,15 @@ static struct page *alloc_fresh_huge_page(void)
 					HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % num_online_nodes();
 	if (page) {
+		page[1].lru.next = (void *)free_huge_page;	/* dtor */
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
 		spin_unlock(&hugetlb_lock);
+		put_page(page); /* free it into the hugepage allocator */
+		return 1;
 	}
-	return page;
+	return 0;
 }
 
 void free_huge_page(struct page *page)
@@ -85,7 +88,6 @@ void free_huge_page(struct page *page)
 	BUG_ON(page_count(page));
 
 	INIT_LIST_HEAD(&page->lru);
-	page[1].lru.next = NULL;			/* reset dtor */
 
 	spin_lock(&hugetlb_lock);
 	enqueue_huge_page(page);
@@ -105,7 +107,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
-	page[1].lru.next = (void *)free_huge_page;	/* set dtor */
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_user_highpage(&page[i], addr);
 	return page;
@@ -114,7 +115,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 static int __init hugetlb_init(void)
 {
 	unsigned long i;
-	struct page *page;
 
 	if (HPAGE_SHIFT == 0)
 		return 0;
@@ -123,12 +123,8 @@ static int __init hugetlb_init(void)
 		INIT_LIST_HEAD(&hugepage_freelists[i]);
 
 	for (i = 0; i < max_huge_pages; ++i) {
-		page = alloc_fresh_huge_page();
-		if (!page)
+		if (!alloc_fresh_huge_page())
 			break;
-		spin_lock(&hugetlb_lock);
-		enqueue_huge_page(page);
-		spin_unlock(&hugetlb_lock);
 	}
 	max_huge_pages = free_huge_pages = nr_huge_pages = i;
 	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
@@ -154,8 +150,8 @@ static void update_and_free_page(struct page *page)
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1<< PG_writeback);
-		set_page_count(&page[i], 0);
 	}
+	page[1].lru.next = NULL;
 	set_page_count(page, 1);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
@@ -188,12 +184,8 @@ static inline void try_to_free_low(unsigned long count)
 static unsigned long set_max_huge_pages(unsigned long count)
 {
 	while (count > nr_huge_pages) {
-		struct page *page = alloc_fresh_huge_page();
-		if (!page)
+		if (!alloc_fresh_huge_page())
 			return nr_huge_pages;
-		spin_lock(&hugetlb_lock);
-		enqueue_huge_page(page);
-		spin_unlock(&hugetlb_lock);
 	}
 	if (count >= nr_huge_pages)
 		return nr_huge_pages;
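After the change, both call sites collapse to the same shape. Assembled from the + lines of the diff above, the allocation loop in hugetlb_init() is simply:

	/* the caller no longer takes hugetlb_lock or calls
	 * enqueue_huge_page(); the fresh page reaches the freelist
	 * via put_page() inside alloc_fresh_huge_page() */
	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}

set_max_huge_pages() follows the identical pattern, with return nr_huge_pages; in place of break;.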