memory hotplug: Hot-add with sparsemem-vmemmap
This patch is to avoid a panic when memory hot-add is executed with sparsemem-vmemmap. The current sparsemem-vmemmap code doesn't support memory hot-add; the vmemmap must be populated during hot-add. This is for 2.6.23-rc2-mm2. Todo: # Even if this patch is applied, the message "[xxxx-xxxx] potential offnode page_structs" is still displayed. To allocate the memmap on its own node, the memmap (and pgdat) must themselves be initialized first — a chicken-and-egg relationship. # vmemmap_unpopulate will be necessary for the following: - Cancelling a hot-add due to an error. - Unplug. Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com> Cc: Andy Whitcroft <apw@shadowen.org> Cc: Christoph Lameter <clameter@sgi.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
48e94196a5
commit
98f3cfc1dc
3 changed files with 24 additions and 5 deletions
|
@@ -1147,7 +1147,7 @@ extern int randomize_va_space;
|
||||||
|
|
||||||
const char * arch_vma_name(struct vm_area_struct *vma);
|
const char * arch_vma_name(struct vm_area_struct *vma);
|
||||||
|
|
||||||
struct page *sparse_early_mem_map_populate(unsigned long pnum, int nid);
|
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
|
||||||
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
|
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
|
||||||
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
|
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
|
||||||
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
|
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
|
||||||
|
|
|
@@ -137,7 +137,7 @@ int __meminit vmemmap_populate_basepages(struct page *start_page,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
|
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
|
||||||
{
|
{
|
||||||
struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
|
struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
|
||||||
int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
|
int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
|
||||||
|
|
25
mm/sparse.c
25
mm/sparse.c
|
@@ -259,7 +259,7 @@ static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
||||||
struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
|
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
|
||||||
{
|
{
|
||||||
struct page *map;
|
struct page *map;
|
||||||
|
|
||||||
|
@@ -284,7 +284,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
|
||||||
struct mem_section *ms = __nr_to_section(pnum);
|
struct mem_section *ms = __nr_to_section(pnum);
|
||||||
int nid = sparse_early_nid(ms);
|
int nid = sparse_early_nid(ms);
|
||||||
|
|
||||||
map = sparse_early_mem_map_populate(pnum, nid);
|
map = sparse_mem_map_populate(pnum, nid);
|
||||||
if (map)
|
if (map)
|
||||||
return map;
|
return map;
|
||||||
|
|
||||||
|
@@ -322,6 +322,18 @@ void __init sparse_init(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||||
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||||
|
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
|
||||||
|
unsigned long nr_pages)
|
||||||
|
{
|
||||||
|
/* This will make the necessary allocations eventually. */
|
||||||
|
return sparse_mem_map_populate(pnum, nid);
|
||||||
|
}
|
||||||
|
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
|
||||||
|
{
|
||||||
|
return; /* XXX: Not implemented yet */
|
||||||
|
}
|
||||||
|
#else
|
||||||
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
|
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
|
||||||
{
|
{
|
||||||
struct page *page, *ret;
|
struct page *page, *ret;
|
||||||
|
@@ -344,6 +356,12 @@ got_map_ptr:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
|
||||||
|
unsigned long nr_pages)
|
||||||
|
{
|
||||||
|
return __kmalloc_section_memmap(nr_pages);
|
||||||
|
}
|
||||||
|
|
||||||
static int vaddr_in_vmalloc_area(void *addr)
|
static int vaddr_in_vmalloc_area(void *addr)
|
||||||
{
|
{
|
||||||
if (addr >= (void *)VMALLOC_START &&
|
if (addr >= (void *)VMALLOC_START &&
|
||||||
|
@@ -360,6 +378,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
|
||||||
free_pages((unsigned long)memmap,
|
free_pages((unsigned long)memmap,
|
||||||
get_order(sizeof(struct page) * nr_pages));
|
get_order(sizeof(struct page) * nr_pages));
|
||||||
}
|
}
|
||||||
|
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* returns the number of sections whose mem_maps were properly
|
* returns the number of sections whose mem_maps were properly
|
||||||
|
@@ -382,7 +401,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
|
||||||
* plus, it does a kmalloc
|
* plus, it does a kmalloc
|
||||||
*/
|
*/
|
||||||
sparse_index_init(section_nr, pgdat->node_id);
|
sparse_index_init(section_nr, pgdat->node_id);
|
||||||
memmap = __kmalloc_section_memmap(nr_pages);
|
memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
|
||||||
usemap = __kmalloc_section_usemap();
|
usemap = __kmalloc_section_usemap();
|
||||||
|
|
||||||
pgdat_resize_lock(pgdat, &flags);
|
pgdat_resize_lock(pgdat, &flags);
|
||||||
|
|
Loading…
Add table
Reference in a new issue