arm: Allow remapping lowmem as 4K pages

Lowmem is currently mapped with sections (1MB/2MB) whenever
possible because of the TLB performance boost. Sections cannot
easily be changed at runtime, however, which makes implementing
certain features annoying. Add an option to map lowmem with 4K
pages instead. This is intended as a debugging feature and should
NOT be used for performance testing.

Change-Id: I9612a99b8e05a022f5ba7e568f21307cf66b5667
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Susheel Khiani <skhiani@codeaurora.org>
Author:    Laura Abbott <lauraa@codeaurora.org>  2014-04-14 19:42:04 -07:00
Committer: Kyle Yan
Parent:    b1e748622f
Commit:    8a2e028a6d
3 changed files with 113 additions and 4 deletions
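The granularity trade-off described in the commit message can be made concrete with a small standalone sketch. This is plain userspace C, not part of the patch, and it assumes the classic non-LPAE ARM values of 4K pages and 1MB sections:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4K pages */
#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)	/* 1MB sections (non-LPAE) */

int main(void)
{
	/* A section-mapped first-level entry covers this many 4K pages, so
	 * attributes can only be changed for all of them at once. */
	unsigned long pages_per_section = SECTION_SIZE >> PAGE_SHIFT;

	/* After splitting, covering the same 1MB can take up to this many
	 * TLB entries instead of a single section entry; that is the
	 * "performance cost" the new Kconfig help text warns about. */
	printf("4K pages per 1MB section: %lu\n", pages_per_section);	/* 256 */
	return 0;
}

The same value shows up as the pfn increment (SECTION_SIZE >> PAGE_SHIFT) in remap_pages() further down.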

arch/arm/Kconfig.debug

@@ -76,6 +76,17 @@ config DEBUG_USER
	  8 - SIGSEGV faults
	  16 - SIGBUS faults

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
	help
	  There are some advanced debug features that can only be done when
	  memory is mapped with pages instead of sections. Enable this option
	  to always map lowmem with pages. This may have a performance
	  cost due to increased TLB pressure.

	  If unsure, say N.

# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
	bool "Kernel low-level debugging functions (read help!)"

arch/arm/mm/mmu.c

@@ -1574,6 +1574,100 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif

#ifdef CONFIG_FORCE_PAGES
/*
 * remap a PMD into pages
 * We split a single pmd here, none of this two pmd nonsense
 */
static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn,
				const struct mem_type *type)
{
	pte_t *pte, *start_pte;

	start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);

	pte = start_pte;

	do {
		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
	mb(); /* let pmd be programmed */
	flush_pmd_entry(pmd);
	flush_tlb_all();
}

/*
 * It's significantly easier to remap as pages later after all memory is
 * mapped. Everything is sections, so all we have to do is split.
 */
static void __init remap_pages(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t phys_start = reg->base;
		phys_addr_t phys_end = reg->base + reg->size;
		unsigned long addr = (unsigned long)__va(phys_start);
		unsigned long end = (unsigned long)__va(phys_end);
		pmd_t *pmd = NULL;
		unsigned long next;
		unsigned long pfn = __phys_to_pfn(phys_start);
		bool fixup = false;
		unsigned long saved_start = addr;

		if (phys_end > arm_lowmem_limit)
			end = (unsigned long)__va(arm_lowmem_limit);
		if (phys_start >= phys_end)
			break;

		pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE) {
			fixup = true;
			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
			pmd++;
		}

		if (end & SECTION_SIZE)
			pmd_empty_section_gap(end);
#endif

		do {
			next = addr + SECTION_SIZE;

			if (pmd_none(*pmd) || pmd_bad(*pmd))
				split_pmd(pmd, addr, next, pfn,
						&mem_types[MT_MEMORY_RWX]);
			pmd++;
			pfn += SECTION_SIZE >> PAGE_SHIFT;

		} while (addr = next, addr < end);

		if (fixup) {
			/*
			 * Put a faulting page table here to avoid detecting no
			 * pmd when accessing an odd section boundary. This
			 * needs to be faulting to help catch errors and avoid
			 * speculation
			 */
			pmd = pmd_off_k(saved_start);
			pmd[0] = pmd[1] & ~1;
		}
	}
}
#else
static void __init remap_pages(void)
{
}
#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
@@ -1617,6 +1711,7 @@ void __init paging_init(const struct machine_desc *mdesc)
	memblock_set_current_limit(arm_lowmem_limit);
	dma_contiguous_remap();
	early_fixmap_shutdown();
	remap_pages();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

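Once remap_pages() has run, every lowmem first-level entry should point at a PTE table rather than being a section mapping. A minimal sketch of how that could be checked, using the same page-table walk as the code above; the helper name is hypothetical and not part of the patch:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical check, not part of this patch: returns true if addr (a
 * lowmem virtual address) is backed by a PTE table rather than a section. */
static bool lowmem_addr_is_page_mapped(unsigned long addr)
{
	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(&init_mm, addr), addr),
				addr);

	/*
	 * On classic two-level ARM a section mapping reads as a "bad" pmd
	 * from Linux's point of view, which is also why remap_pages() above
	 * uses pmd_bad() to find the entries it still needs to split.
	 */
	return !pmd_none(*pmd) && !pmd_bad(*pmd);
}

If CONFIG_ARM_PTDUMP is available, the same information can also be read from the kernel page table dump after boot instead of probing it from code.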
arch/arm/mm/pageattr.c

@@ -49,11 +49,14 @@ static int change_memory_common(unsigned long addr, int numpages,
		WARN_ON_ONCE(1);
	}

	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
		if (start < MODULES_VADDR || start >= MODULES_END)
			return -EINVAL;

		if (end < MODULES_VADDR || start >= MODULES_END)
			return -EINVAL;
	}

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;
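With the module-area restriction skipped under CONFIG_FORCE_PAGES, the set_memory_*() helpers built on change_memory_common() can be pointed at linear-mapped (lowmem) addresses. A minimal sketch of that debug-style use, assuming a kernel of this vintage where the prototypes come from <asm/cacheflush.h>; the function below is hypothetical and not part of the patch:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>	/* set_memory_ro()/set_memory_rw() on this kernel */

static int write_protect_one_lowmem_page(void)
{
	unsigned long page = __get_free_page(GFP_KERNEL);	/* lowmem, page aligned */
	int ret;

	if (!page)
		return -ENOMEM;

	/*
	 * Without CONFIG_FORCE_PAGES this address lies outside
	 * MODULES_VADDR..MODULES_END and the call returns -EINVAL; with it,
	 * lowmem is mapped with 4K pages and the permission change can be
	 * applied to just this one page.
	 */
	ret = set_memory_ro(page, 1);
	if (!ret)
		set_memory_rw(page, 1);	/* restore before freeing */

	free_page(page);
	return ret;
}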