msm: Allow lowmem to be non contiguous and mixed

Currently on 32 bit systems, the virtual address
space above PAGE_OFFSET is reserved for direct
mapped lowmem, and a separate part of the virtual
address space is reserved for vmalloc. We want to
optimize for as much direct mapped memory as
possible, since there is a penalty for mapping and
unmapping highmem. Now, we may have an image that
is expected to live for the lifetime of the system
and is reserved in a physical region that would
otherwise be part of direct mapped lowmem. The
physical memory reserved this way is never used by
Linux. This means that even though the system
never accesses the virtual addresses corresponding
to the reserved physical memory, we still lose
that portion of direct mapped lowmem space.

So by allowing lowmem to be non-contiguous, we can
give the unused virtual address space covering
such reserved regions back to vmalloc.
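For a rough sense of the numbers, a minimal user-space sketch of the
direct-map arithmetic follows. The 3G/1G split (PAGE_OFFSET =
0xC0000000), the DRAM base (PHYS_OFFSET = 0x80000000) and the 16 MB
carveout are illustrative assumptions, not values taken from this
patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xC0000000UL /* assumed 3G/1G split */
    #define PHYS_OFFSET 0x80000000UL /* assumed DRAM base */

    /* ARM direct-map rule: virt = phys - PHYS_OFFSET + PAGE_OFFSET */
    static uint32_t lowmem_virt(uint32_t phys)
    {
        return phys - PHYS_OFFSET + PAGE_OFFSET;
    }

    int main(void)
    {
        uint32_t base = 0x88000000UL; /* hypothetical reserved image */
        uint32_t size = 16 << 20;     /* 16 MB carveout */

        /* Virtual window pinned by the carveout before this change. */
        printf("lost direct-map range: 0x%08x-0x%08x (%u MB)\n",
               lowmem_virt(base), lowmem_virt(base + size), size >> 20);
        return 0;
    }

With this change, that window can be handed back to vmalloc instead
of sitting idle in the direct map.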

Change-Id: I980b3dfafac71884dcdcb8cd2e4a6363cde5746a
Signed-off-by: Susheel Khiani <skhiani@codeaurora.org>
Authored by Susheel Khiani on 2015-09-08 15:05:43 +05:30
Committed by David Keitel
parent 4af3c048cf
commit c064333eac
4 changed files with 70 additions and 3 deletions

arch/arm/mm/ioremap.c

@@ -91,7 +91,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {

arch/arm/mm/mmu.c

@@ -1385,12 +1385,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1439,6 +1448,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
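The vm->addr/vm->size computation above rounds an arbitrary region to
whole pages: the start is rounded down to a page boundary and the
size grows by the offset that rounding absorbed, so the area still
covers the whole bank. A small user-space check of that arithmetic
(PAGE_SIZE/PAGE_MASK/PAGE_ALIGN reimplemented here for illustration,
with made-up inputs):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uint32_t vaddr  = 0xC0001234; /* made-up unaligned start */
        uint32_t length = 0x00010000; /* 64 KB bank */

        uint32_t addr = vaddr & PAGE_MASK;
        uint32_t size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));

        /* The rounded area must cover [vaddr, vaddr + length). */
        printf("addr=0x%08x size=0x%x covers end 0x%08x: %s\n",
               addr, size, vaddr + length,
               addr + size >= vaddr + length ? "yes" : "no");
        return 0;
    }

In map_lowmem itself the virtual start of a memblock bank is already
page aligned, so the rounding is defensive rather than strictly
required.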

include/linux/vmalloc.h

@@ -18,6 +18,8 @@ struct vm_area_struct;	/* vma defining user mapping in mm_types.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -158,6 +160,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
mm/vmalloc.c

@@ -1144,6 +1144,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
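The overlap test in vm_area_check_early is a standard half-open
interval check: an existing area starting at or after the new one
overlaps if it starts before the new area's end; otherwise it
overlaps if it ends past the new area's start. A self-contained model
of that predicate with a few sample cases (function name and inputs
are invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Same logic as vm_area_check_early, on plain integer ranges:
     * a is the new area, b an already-registered one. */
    static int ranges_overlap(uintptr_t a_start, uintptr_t a_size,
                              uintptr_t b_start, uintptr_t b_size)
    {
        if (b_start >= a_start)
            return b_start < a_start + a_size;
        return b_start + b_size > a_start;
    }

    int main(void)
    {
        /* new area: [0x1000, 0x3000) */
        printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x1000)); /* 1: inside */
        printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x3000, 0x1000)); /* 0: touching only */
        printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x0800, 0x1000)); /* 1: ends past start */
        return 0;
    }

This is what lets the ioremap.c hunk skip re-registering a static vm
area once map_lowmem has already claimed the range.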