diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 71afa854e1c4..b87d5d924238 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -76,6 +76,17 @@ config DEBUG_USER
 	      8 - SIGSEGV faults
 	      16 - SIGBUS faults
 
+config FORCE_PAGES
+	bool "Force lowmem to be mapped with 4K pages"
+	help
+	  There are some advanced debug features that can only be done when
+	  memory is mapped with pages instead of sections. Enable this option
+	  to always map lowmem with pages. This may have a performance cost
+	  due to increased TLB pressure.
+
+	  If unsure, say N.
+
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9ab506ebff59..be812c734296 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1574,6 +1574,100 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+#ifdef CONFIG_FORCE_PAGES
+/*
+ * Remap a pmd into pages. We split a single pmd here;
+ * none of this two-pmd nonsense.
+ */
+static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
+				unsigned long end, unsigned long pfn,
+				const struct mem_type *type)
+{
+	pte_t *pte, *start_pte;
+
+	start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+
+	pte = start_pte;
+
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
+	mb(); /* make sure the ptes are visible before the pmd is programmed */
+	flush_pmd_entry(pmd);
+	flush_tlb_all();
+}
+
+/*
+ * It's significantly easier to remap as pages after all memory has been
+ * mapped. Everything is sections, so all we have to do is split them.
+ */
+static void __init remap_pages(void)
+{
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg) {
+		phys_addr_t phys_start = reg->base;
+		phys_addr_t phys_end = reg->base + reg->size;
+		unsigned long addr = (unsigned long)__va(phys_start);
+		unsigned long end = (unsigned long)__va(phys_end);
+		pmd_t *pmd = NULL;
+		unsigned long next;
+		unsigned long pfn = __phys_to_pfn(phys_start);
+		bool fixup = false;
+		unsigned long saved_start = addr;
+
+		if (phys_end > arm_lowmem_limit)
+			end = (unsigned long)__va(arm_lowmem_limit);
+		if (phys_start >= phys_end)
+			break;
+
+		pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+#ifndef CONFIG_ARM_LPAE
+		if (addr & SECTION_SIZE) {
+			fixup = true;
+			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
+			pmd++;
+		}
+
+		if (end & SECTION_SIZE)
+			pmd_empty_section_gap(end);
+#endif
+
+		do {
+			next = addr + SECTION_SIZE;
+
+			if (pmd_none(*pmd) || pmd_bad(*pmd))
+				split_pmd(pmd, addr, next, pfn,
+					&mem_types[MT_MEMORY_RWX]);
+			pmd++;
+			pfn += SECTION_SIZE >> PAGE_SHIFT;
+
+		} while (addr = next, addr < end);
+
+		if (fixup) {
+			/*
+			 * Put a faulting page table here to avoid detecting no
+			 * pmd when accessing an odd section boundary. This
+			 * needs to be faulting to help catch errors and avoid
+			 * speculation.
+			 */
+			pmd = pmd_off_k(saved_start);
+			pmd[0] = pmd[1] & ~1;
+		}
+	}
+}
+#else
+static void __init remap_pages(void)
+{
+
+}
+#endif
+
 static void __init early_fixmap_shutdown(void)
 {
 	int i;
@@ -1617,6 +1711,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
 	early_fixmap_shutdown();
+	remap_pages();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index cf30daff8932..ad5d4bfa5de6 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,11 +49,14 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
+	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
 
-	if (end < MODULES_VADDR || end >= MODULES_END)
-		return -EINVAL;
+		if (start < MODULES_VADDR || start >= MODULES_END)
+			return -EINVAL;
+
+		if (end < MODULES_VADDR || end >= MODULES_END)
+			return -EINVAL;
+	}
 
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;