arm: keep address range pmd aligned while remap
During early init, all DMA areas are remapped to PAGE_SIZE granularity. Since full PMD regions are cleared before being remapped into PAGE_SIZE pages, ensure that the address range is PMD-size aligned without crossing memory boundaries. This ensures that even if the address region is not PMD aligned, its mapping is not cleared but is instead factored into PAGE_SIZE regions. Change-Id: Iad4ad7fd6169cdc693d532821aba453465addb7c Signed-off-by: Shiraz Hashim <shashim@codeaurora.org>
This commit is contained in:
parent
7215a1cfee
commit
4af3c048cf
2 changed files with 32 additions and 4 deletions
|
@ -437,6 +437,15 @@ void __init dma_contiguous_remap(void)
|
|||
struct map_desc map;
|
||||
unsigned long addr;
|
||||
|
||||
/*
|
||||
* Make start and end PMD_SIZE aligned, observing memory
|
||||
* boundaries
|
||||
*/
|
||||
if (memblock_is_memory(start & PMD_MASK))
|
||||
start = start & PMD_MASK;
|
||||
if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
|
||||
end = ALIGN(end, PMD_SIZE);
|
||||
|
||||
if (end > arm_lowmem_limit)
|
||||
end = arm_lowmem_limit;
|
||||
if (start >= end)
|
||||
|
@ -457,8 +466,12 @@ void __init dma_contiguous_remap(void)
|
|||
* and ensures that this code is architecturally compliant.
|
||||
*/
|
||||
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
|
||||
addr += PMD_SIZE)
|
||||
pmd_clear(pmd_off_k(addr));
|
||||
addr += PMD_SIZE) {
|
||||
pmd_t *pmd;
|
||||
pmd = pmd_off_k(addr);
|
||||
if (pmd_bad(*pmd))
|
||||
pmd_clear(pmd);
|
||||
}
|
||||
|
||||
flush_tlb_kernel_range(__phys_to_virt(start),
|
||||
__phys_to_virt(end));
|
||||
|
|
|
@ -305,12 +305,27 @@ void __init remap_as_pages(unsigned long start, unsigned long size)
|
|||
unsigned long addr;
|
||||
unsigned long end = start + size;
|
||||
|
||||
/*
|
||||
* Make start and end PMD_SIZE aligned, observing memory
|
||||
* boundaries
|
||||
*/
|
||||
if (memblock_is_memory(start & PMD_MASK))
|
||||
start = start & PMD_MASK;
|
||||
if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
|
||||
end = ALIGN(end, PMD_SIZE);
|
||||
|
||||
size = end - start;
|
||||
|
||||
/*
|
||||
* Clear previous low-memory mapping
|
||||
*/
|
||||
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
|
||||
addr += PMD_SIZE)
|
||||
pmd_clear(pmd_off_k(addr));
|
||||
addr += PMD_SIZE) {
|
||||
pmd_t *pmd;
|
||||
pmd = pmd_off_k(addr);
|
||||
if (pmd_bad(*pmd) || pmd_sect(*pmd))
|
||||
pmd_clear(pmd);
|
||||
}
|
||||
|
||||
create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue