msm: kgsl: Move global memory region to 0x100000000

On a 64bit kernel, a 32bit user application is not
restricted to 3GB limit of virtual memory. It is
allowed to access complete 4GB range.

Move global memory region to 0x100000000 outside of
32bit range on 64bit kernel to increase the virtual
memory range for a 32bit application running on a
64bit kernel. This will also move secure memory
region to 0xF0000000.

Change-Id: I017ac0c052b4d9466f9f1a66af4a83f0636450cb
Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>
This commit is contained in:
Deepak Kumar 2017-11-28 16:58:29 +05:30
parent a5cabe9334
commit 959a0fa9ae
4 changed files with 38 additions and 31 deletions

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1296,7 +1296,7 @@ static void _set_secvid(struct kgsl_device *device)
adreno_writereg64(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
KGSL_IOMMU_SECURE_BASE);
KGSL_IOMMU_SECURE_BASE(&device->mmu));
adreno_writereg(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_SIZE);
@@ -1699,7 +1699,7 @@ static int adreno_getproperty(struct kgsl_device *device,
* anything to mmap().
*/
shadowprop.gpuaddr =
(unsigned int) device->memstore.gpuaddr;
(unsigned long)device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
appears to be meaningless */

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2489,8 +2489,8 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A5XX_CP_RB_CNTL_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a5xx_microcode_load(adreno_dev);
if (ret)

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,9 +38,10 @@
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
#define ADDR_IN_GLOBAL(_a) \
(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
#define ADDR_IN_GLOBAL(_mmu, _a) \
(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
KGSL_IOMMU_GLOBAL_MEM_SIZE)))
static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;
@@ -202,7 +203,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
BUG_ON(global_pt_count >= GLOBAL_PT_ENTRIES);
BUG_ON((global_pt_alloc + memdesc->size) >= KGSL_IOMMU_GLOBAL_MEM_SIZE);
memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
global_pt_alloc += memdesc->size;
@@ -215,7 +216,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
void kgsl_add_global_secure_entry(struct kgsl_device *device,
struct kgsl_memdesc *memdesc)
{
memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu);
kgsl_global_secure_pt_entry = memdesc;
}
@@ -688,7 +689,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
/* Set the maximum possible size as an initial value */
nextentry->gpuaddr = (uint64_t) -1;
if (ADDR_IN_GLOBAL(faultaddr)) {
if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
} else if (context) {
private = context->proc_priv;
@@ -1058,14 +1059,14 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
pt->va_end = KGSL_IOMMU_SECURE_END;
pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size;
pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
pt->compat_va_end = KGSL_IOMMU_SVM_END32;
pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_start = KGSL_IOMMU_VA_BASE64;
pt->va_end = KGSL_IOMMU_VA_END64;
}
@@ -1074,7 +1075,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
pagetable->name != KGSL_MMU_SECURE_PT) {
if ((BITS_PER_LONG == 32) || is_compat_task()) {
pt->svm_start = KGSL_IOMMU_SVM_BASE32;
pt->svm_end = KGSL_IOMMU_SVM_END32;
pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
} else {
pt->svm_start = KGSL_IOMMU_SVM_BASE64;
pt->svm_end = KGSL_IOMMU_SVM_END64;
@@ -1090,22 +1091,22 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
pt->va_start = KGSL_IOMMU_SECURE_BASE +
pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
pt->va_end = KGSL_IOMMU_SECURE_END;
pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
pt->va_end = KGSL_IOMMU_SECURE_BASE +
pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -2356,7 +2357,8 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
struct rb_node *node;
/* Make sure the requested address doesn't fall in the global range */
if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
return -ENOMEM;
spin_lock(&pagetable->lock);

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,12 +24,17 @@
* are mapped into all pagetables.
*/
#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_8M
#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
#define KGSL_IOMMU_GLOBAL_MEM_BASE32 0xf8000000
#define KGSL_IOMMU_GLOBAL_MEM_BASE64 0x100000000ULL
#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu) \
(MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? \
KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32)
#define KGSL_IOMMU_SECURE_SIZE SZ_256M
#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
#define KGSL_IOMMU_SECURE_BASE \
(KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)
#define KGSL_IOMMU_SECURE_BASE(_mmu) \
(KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SVM_BASE32 0x300000
#define KGSL_IOMMU_SVM_END32 (0xC0000000 - SZ_16M)