ARM: 6672/1: LPAE: use phys_addr_t instead of unsigned long in mapping functions
The unsigned long datatype is not sufficient for mapping physical addresses >= 4GB. This patch ensures that the phys_addr_t datatype is used to represent physical addresses when converting from a PFN.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ad6b9c9d78
commit cae6292b65

3 changed files with 8 additions and 7 deletions
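As a quick illustration of the failure mode, the following user-space sketch (not from the patch; it assumes PAGE_SHIFT is 12 and models phys_addr_t as uint64_t, as CONFIG_ARM_LPAE defines it) shows how converting a PFN to a physical address wraps when the arithmetic is done in 32 bits:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;   /* 64 bits when CONFIG_ARM_LPAE=y */

int main(void)
{
        uint32_t pfn = 0x100000;        /* first page at the 4GB boundary */

        /* old behaviour: the shift is performed in 32 bits and wraps to 0 */
        uint32_t wrapped = pfn << PAGE_SHIFT;

        /* fixed behaviour: widen to phys_addr_t before shifting */
        phys_addr_t phys = (phys_addr_t)pfn << PAGE_SHIFT;

        printf("32-bit: 0x%08x\n", wrapped);                    /* 0x00000000 */
        printf("64-bit: 0x%llx\n", (unsigned long long)phys);   /* 0x100000000 */
        return 0;
}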
arch/arm/include/asm/pgtable.h

@@ -351,7 +351,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_unmap(pte)          __pte_unmap(pte)
 
 #define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn,prot)       __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn,prot)       __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
 
 #define pte_page(pte)           pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot)       pfn_pte(page_to_pfn(page), prot)
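The new pfn_pte() is safe because __pfn_to_phys() casts before it shifts. A simplified sketch of the expansion (the scaffolding below is illustrative, not the kernel's headers; __pfn_to_phys() is assumed to expand the way the PFN_PHYS()-style definition does):

#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;
typedef struct { uint64_t pte; } pte_t;  /* PTEs are 64-bit under LPAE */

#define __pte(x)                ((pte_t) { (x) })
#define pgprot_val(p)           (p)
#define __pfn_to_phys(pfn)      ((phys_addr_t)(pfn) << PAGE_SHIFT)

/* old: the shift happens in the PFN's own (32-bit) type and can wrap */
#define pfn_pte_old(pfn,prot)   __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

/* new: __pfn_to_phys() widens to phys_addr_t first, so no bits are lost */
#define pfn_pte_new(pfn,prot)   __pte(__pfn_to_phys(pfn) | pgprot_val(prot))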
arch/arm/mm/init.c

@@ -344,7 +344,7 @@ void __init bootmem_init(void)
 	 */
 	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
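The placement of the cast in the high_memory line matters: casting after the shift would only widen an already-wrapped value. A sketch (uint32_t stands in for ARM's 32-bit unsigned long; the PFN value is hypothetical):

#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

void sketch(void)
{
        uint32_t max_low = 0x100000;    /* lowmem ending at the 4GB mark */

        /* wrong: the shift wraps in 32 bits, then the cast widens 0 */
        phys_addr_t broken = (phys_addr_t)(max_low << PAGE_SHIFT);  /* 0x0 */

        /* right (as in the patch): widen first, then shift in 64 bits */
        phys_addr_t fixed = (phys_addr_t)max_low << PAGE_SHIFT;     /* 0x100000000 */

        (void)broken; (void)fixed;
}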
@@ -392,8 +392,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * Convert to physical addresses, and
 	 * round start upwards and end downwards.
 	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
+	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
 
 	/*
 	 * If there are free pages between these,
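In free_memmap() the casts run the other way: __pa() now returns phys_addr_t, but pg and pgend stay unsigned long, presumably because the mem_map itself sits in lowmem and its physical address fits in 32 bits. A simplified sketch of the intentional narrowing (the __pa() stand-in and its behaviour are assumptions for illustration):

#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SIZE       ((phys_addr_t)4096)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* hypothetical stand-in for the kernel's virt-to-phys conversion */
static phys_addr_t __pa(const void *v) { return (phys_addr_t)(uintptr_t)v; }

static void sketch(const void *start_pg, const void *end_pg)
{
        /*
         * The explicit casts document deliberate truncation: the high
         * 32 bits are known to be zero for lowmem addresses, so the
         * casts only silence the compiler's truncation warning.
         */
        unsigned long pg    = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        unsigned long pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        (void)pg; (void)pgend;
}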
arch/arm/mm/mmu.c

@@ -591,7 +591,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	pgd_t *pgd;
 
 	addr = md->virtual;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
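The cast dropped here was the real hazard: create_36bit_mapping() exists precisely to map physical addresses above 4GB (16MB supersections on ARMv6+/XSC3), yet forcing __pfn_to_phys() through unsigned long discarded the upper bits it was meant to carry. A sketch of the truncation (PFN value hypothetical):

#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

#define __pfn_to_phys(pfn)      ((phys_addr_t)(pfn) << PAGE_SHIFT)

void sketch(void)
{
        uint32_t pfn = 0x880000;        /* page at physical 0x8_8000_0000 */

        phys_addr_t full = __pfn_to_phys(pfn);  /* 0x880000000 */

        /* the old cast chopped this back to 32 bits: 0x80000000 */
        uint32_t truncated = (uint32_t)full;

        (void)full; (void)truncated;
}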
@@ -651,7 +651,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 static void __init create_mapping(struct map_desc *md)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
@@ -680,7 +681,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	addr = md->virtual & PAGE_MASK;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
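With phys widened to phys_addr_t, the section-alignment check still works on the mixed types: the usual arithmetic conversions promote addr and length to phys_addr_t, so a misaligned bit anywhere in the 64-bit physical address is caught. A self-contained sketch (SECTION_SHIFT and all values are illustrative; LPAE actually uses 2MB sections):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define SECTION_SHIFT   20      /* classic 1MB sections; illustrative */
#define SECTION_MASK    (~(((phys_addr_t)1 << SECTION_SHIFT) - 1))

int main(void)
{
        unsigned long addr = 0xc0000000UL, length = 0x200000UL;
        phys_addr_t phys = 0x880080000ULL;  /* above 4GB, not section-aligned */

        /*
         * addr and length are promoted to phys_addr_t, so the test sees
         * every bit of the 64-bit physical address.
         */
        if ((addr | phys | length) & ~SECTION_MASK)
                printf("not section-aligned: fall back to page mappings\n");
        return 0;
}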