drm/msm: Support secure rendering for A5XX targets
A5XX targets support GPU rendering to secure surfaces by switching into a special secure mode to execute the commands. In secure mode the GPU can only write to secure buffers that have been mapped in an appropriately secured pagetable. It can read both secure and non-secure buffers, and the CP engine can only access non-secure buffers (so the command streams themselves do not need to be secure).

Secure buffer virtual addresses must fall into a specific range; this is the cue to the GPU that it should use the secure pagetable instead of the regular one. For A5XX targets that range starts at 0xC0000000 and is 256MB in size. All secure buffers in all processes share the same pagetable.

Add a secure address space for A5XX targets and automatically switch into secure mode if any buffer in the submission is marked as secure.

Change-Id: Ic0dedbad8f7168711d10928cd1894b98f908425f
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
commit b2fd7c67bc (parent 2db428de0b)
8 changed files with 150 additions and 49 deletions
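For illustration only (not part of this patch): a userspace client could request a secure buffer with the new MSM_BO_SECURE flag roughly as sketched below. The helper name and error handling are made up; only struct drm_msm_gem_new, DRM_IOCTL_MSM_GEM_NEW and the BO flags come from the UAPI.

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"

/*
 * Sketch: allocate a write-combined buffer that the kernel maps into the
 * shared secure pagetable.  Any later submission that references the
 * returned handle is marked secure, so the CP wraps the commands in
 * CP_SET_SECURE_MODE on/off packets.
 */
static int alloc_secure_bo(int fd, uint64_t size, uint32_t *handle)
{
        struct drm_msm_gem_new req = {
                .size = size,
                .flags = MSM_BO_WC | MSM_BO_SECURE,
        };
        int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req);

        if (ret)
                return ret;

        *handle = req.handle;
        return 0;
}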
@@ -15,6 +15,9 @@
 #include "msm_iommu.h"
 #include "a5xx_gpu.h"
 
+#define SECURE_VA_START 0xc0000000
+#define SECURE_VA_SIZE SZ_256M
+
 static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -133,6 +136,12 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
         OUT_RING(ring, 0x02);
 
+        /* Turn on secure mode if the submission is secure */
+        if (submit->secure) {
+                OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+                OUT_RING(ring, 1);
+        }
+
         /* Record the always on counter before command execution */
         if (submit->profile_buf_iova) {
                 uint64_t gpuaddr = submit->profile_buf_iova +
@@ -212,6 +221,11 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
         OUT_RING(ring, submit->fence);
 
+        if (submit->secure) {
+                OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+                OUT_RING(ring, 0);
+        }
+
         /* Yield the floor on command completion */
         OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
         /*
@@ -762,14 +776,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                         ADRENO_PROTECT_RW(0x10000, 0x8000));
 
         gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
-        /*
-         * Disable the trusted memory range - we don't actually supported secure
-         * memory rendering at this point in time and we don't want to block off
-         * part of the virtual memory space.
-         */
+
         gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
-                REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
-        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+                REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, SECURE_VA_START);
+        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, SECURE_VA_SIZE);
 
         /* Put the GPU into 64 bit by default */
         gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
@@ -1405,6 +1415,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
         a5xx_config.va_start = 0x800000000;
         a5xx_config.va_end = 0x8ffffffff;
 
+        a5xx_config.secure_va_start = SECURE_VA_START;
+        a5xx_config.secure_va_end = SECURE_VA_START + SECURE_VA_SIZE - 1;
+
         ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
         if (ret) {
                 a5xx_destroy(&(a5xx_gpu->base.base));
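As a rough sketch of the address-range cue described in the commit message (illustrative only, not code from this patch; the helper name is hypothetical): a GPU virtual address is treated as secure when it falls inside the window that a5xx_hw_init() programs into the SECVID_TSB trusted base/size registers.

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the SECURE_VA_START/SECURE_VA_SIZE window defined above; addresses
 * inside it are backed by the shared secure pagetable, everything else by the
 * per-process pagetable. */
#define SECURE_VA_START 0xc0000000ULL
#define SECURE_VA_SIZE  (256ULL << 20) /* SZ_256M */

static inline bool a5xx_iova_is_secure(uint64_t iova)
{
        return iova >= SECURE_VA_START &&
               iova < SECURE_VA_START + SECURE_VA_SIZE;
}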
@@ -563,6 +563,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                         return ret;
         }
 
+        if (gpu->secure_aspace) {
+                mmu = gpu->secure_aspace->mmu;
+                if (mmu) {
+                        ret = mmu->funcs->attach(mmu, NULL, 0);
+                        if (ret)
+                                return ret;
+                }
+        }
+
         mutex_lock(&drm->struct_mutex);
         adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
                         MSM_BO_UNCACHED);
@@ -608,6 +617,12 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
                 aspace->mmu->funcs->detach(aspace->mmu);
                 msm_gem_address_space_put(aspace);
         }
+
+        if (gpu->base.secure_aspace) {
+                aspace = gpu->base.secure_aspace;
+                aspace->mmu->funcs->detach(aspace->mmu);
+                msm_gem_address_space_put(aspace);
+        }
 }
 
 static void adreno_snapshot_os(struct msm_gpu *gpu,
@@ -1208,23 +1208,34 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
         if (args->flags & ~MSM_INFO_FLAGS)
                 return -EINVAL;
 
-        if (!ctx || !ctx->aspace)
-                return -EINVAL;
-
         obj = drm_gem_object_lookup(dev, file, args->handle);
         if (!obj)
                 return -ENOENT;
 
         if (args->flags & MSM_INFO_IOVA) {
+                struct msm_gem_address_space *aspace = NULL;
+                struct msm_drm_private *priv = dev->dev_private;
+                struct msm_gem_object *msm_obj = to_msm_bo(obj);
                 uint64_t iova;
 
-                ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+                if (msm_obj->flags & MSM_BO_SECURE && priv->gpu)
+                        aspace = priv->gpu->secure_aspace;
+                else if (ctx)
+                        aspace = ctx->aspace;
+
+                if (!aspace) {
+                        ret = -EINVAL;
+                        goto out;
+                }
+
+                ret = msm_gem_get_iova(obj, aspace, &iova);
                 if (!ret)
                         args->offset = iova;
         } else {
                 args->offset = msm_gem_mmap_offset(obj);
         }
 
+out:
         drm_gem_object_unreference_unlocked(obj);
 
         return ret;
@@ -118,6 +118,7 @@ struct msm_gem_submit {
         bool valid;
         uint64_t profile_buf_iova;
         void *profile_buf_vaddr;
+        bool secure;
         unsigned int nr_cmds;
         unsigned int nr_bos;
         struct {
@@ -50,6 +50,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
         submit->profile_buf_vaddr = NULL;
         submit->profile_buf_iova = 0;
+        submit->secure = false;
 
         INIT_LIST_HEAD(&submit->bo_list);
         ww_acquire_init(&submit->ticket, &reservation_ww_class);
@@ -66,7 +67,8 @@ copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
         return -EFAULT;
 }
 
-static int submit_lookup_objects(struct msm_gem_submit *submit,
+static int submit_lookup_objects(struct msm_gpu *gpu,
+                struct msm_gem_submit *submit,
                 struct drm_msm_gem_submit *args, struct drm_file *file)
 {
         unsigned i;
@@ -119,6 +121,20 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 
                 msm_obj = to_msm_bo(obj);
 
+                /*
+                 * If the buffer is marked as secure make sure that we can
+                 * handle secure buffers and then mark the submission as secure
+                 */
+                if (msm_obj->flags & MSM_BO_SECURE) {
+                        if (!gpu->secure_aspace) {
+                                DRM_ERROR("Cannot handle secure buffers\n");
+                                ret = -EINVAL;
+                                goto out_unlock;
+                        }
+
+                        submit->secure = true;
+                }
+
                 if (!list_empty(&msm_obj->submit_entry)) {
                         DRM_ERROR("handle %u at index %u already on submit list\n",
                                         submit_bo.handle, i);
@@ -143,12 +159,17 @@ out:
         return ret;
 }
 
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gpu *gpu,
+                struct msm_gem_submit *submit, int i)
 {
         struct msm_gem_object *msm_obj = submit->bos[i].obj;
+        struct msm_gem_address_space *aspace;
+
+        aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+                gpu->secure_aspace : submit->aspace;
 
         if (submit->bos[i].flags & BO_PINNED)
-                msm_gem_put_iova(&msm_obj->base, submit->aspace);
+                msm_gem_put_iova(&msm_obj->base, aspace);
 
         if (submit->bos[i].flags & BO_LOCKED)
                 ww_mutex_unlock(&msm_obj->resv->lock);
@@ -160,7 +181,8 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 }
 
 /* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_validate_objects(struct msm_gpu *gpu,
+                struct msm_gem_submit *submit)
 {
         int contended, slow_locked = -1, i, ret = 0;
 
@@ -169,8 +191,12 @@ retry:
 
         for (i = 0; i < submit->nr_bos; i++) {
                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
+                struct msm_gem_address_space *aspace;
                 uint64_t iova;
 
+                aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+                        gpu->secure_aspace : submit->aspace;
+
                 if (slow_locked == i)
                         slow_locked = -1;
 
@@ -186,8 +212,7 @@ retry:
 
 
                 /* if locking succeeded, pin bo: */
-                ret = msm_gem_get_iova_locked(&msm_obj->base,
-                                submit->aspace, &iova);
+                ret = msm_gem_get_iova_locked(&msm_obj->base, aspace, &iova);
 
                 /* this would break the logic in the fail path.. there is no
                  * reason for this to happen, but just to be on the safe side
@@ -215,10 +240,10 @@ retry:
 
 fail:
         for (; i >= 0; i--)
-                submit_unlock_unpin_bo(submit, i);
+                submit_unlock_unpin_bo(gpu, submit, i);
 
         if (slow_locked > 0)
-                submit_unlock_unpin_bo(submit, slow_locked);
+                submit_unlock_unpin_bo(gpu, submit, slow_locked);
 
         if (ret == -EDEADLK) {
                 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -267,6 +292,11 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                 return -EINVAL;
         }
 
+        if (obj->flags & MSM_BO_SECURE) {
+                DRM_ERROR("cannot do relocs on a secure buffer\n");
+                return -EINVAL;
+        }
+
         /* For now, just map the entire thing.  Eventually we probably
          * to do it page-by-page, w/ kmap() if not vmap()d..
          */
@@ -327,13 +357,14 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
         return 0;
 }
 
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+                bool fail)
 {
         unsigned i;
 
         for (i = 0; i < submit->nr_bos; i++) {
                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
-                submit_unlock_unpin_bo(submit, i);
+                submit_unlock_unpin_bo(gpu, submit, i);
                 list_del_init(&msm_obj->submit_entry);
                 drm_gem_object_unreference(&msm_obj->base);
         }
@@ -373,11 +404,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                 goto out;
         }
 
-        ret = submit_lookup_objects(submit, args, file);
+        ret = submit_lookup_objects(gpu, submit, args, file);
         if (ret)
                 goto out;
 
-        ret = submit_validate_objects(submit);
+        ret = submit_validate_objects(gpu, submit);
         if (ret)
                 goto out;
 
@@ -460,7 +491,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 out:
         if (submit)
-                submit_cleanup(submit, !!ret);
+                submit_cleanup(gpu, submit, !!ret);
         mutex_unlock(&dev->struct_mutex);
         return ret;
 }
@@ -572,12 +572,16 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 
                 if (!is_active(msm_obj)) {
+                        struct msm_gem_address_space *aspace;
                         uint64_t iova;
 
+                        aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+                                gpu->secure_aspace : submit->aspace;
+
                         /* ring takes a reference to the bo and iova: */
                         drm_gem_object_reference(&msm_obj->base);
                         msm_gem_get_iova_locked(&msm_obj->base,
-                                submit->aspace, &iova);
+                                aspace, &iova);
                 }
 
                 if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -757,11 +761,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
         return 0;
 }
 
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
+                int type, u64 start, u64 end, const char *name)
+{
+        struct msm_gem_address_space *aspace;
+        struct iommu_domain *iommu;
+
+        /*
+         * If start == end then assume we don't want an address space; this is
+         * mainly for targets to opt out of secure
+         */
+        if (start == end)
+                return NULL;
+
+        iommu = iommu_domain_alloc(&platform_bus_type);
+        if (!iommu) {
+                dev_info(gpu->dev->dev,
+                        "%s: no IOMMU, fallback to VRAM carveout!\n",
+                        gpu->name);
+                return NULL;
+        }
+
+        iommu->geometry.aperture_start = start;
+        iommu->geometry.aperture_end = end;
+
+        dev_info(gpu->dev->dev, "%s: using IOMMU '%s'\n", gpu->name, name);
+
+        aspace = msm_gem_address_space_create(dev, iommu, type, name);
+        if (IS_ERR(aspace)) {
+                dev_err(gpu->dev->dev, "%s: failed to init IOMMU '%s': %ld\n",
+                        gpu->name, name, PTR_ERR(aspace));
+
+                iommu_domain_free(iommu);
+                aspace = NULL;
+        }
+
+        return aspace;
+}
+
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                 const char *name, struct msm_gpu_config *config)
 {
-        struct iommu_domain *iommu;
         int i, ret, nr_rings;
 
         if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
@@ -831,30 +873,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         if (IS_ERR(gpu->gpu_cx))
                 gpu->gpu_cx = NULL;
 
-        /* Setup IOMMU.. eventually we will (I think) do this once per context
-         * and have separate page tables per context.  For now, to keep things
-         * simple and to get something working, just use a single address space:
-         */
-        iommu = iommu_domain_alloc(&platform_bus_type);
-        if (iommu) {
-                /* TODO 32b vs 64b address space.. */
-                iommu->geometry.aperture_start = config->va_start;
-                iommu->geometry.aperture_end = config->va_end;
-
-                dev_info(drm->dev, "%s: using IOMMU\n", name);
-                gpu->aspace = msm_gem_address_space_create(&pdev->dev,
-                        iommu, MSM_IOMMU_DOMAIN_USER, "gpu");
-                if (IS_ERR(gpu->aspace)) {
-                        ret = PTR_ERR(gpu->aspace);
-                        dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-                        gpu->aspace = NULL;
-                        iommu_domain_free(iommu);
-                        goto fail;
-                }
-
-        } else {
-                dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
-        }
+        gpu->aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+                MSM_IOMMU_DOMAIN_USER, config->va_start, config->va_end,
+                "gpu");
+
+        gpu->secure_aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+                MSM_IOMMU_DOMAIN_SECURE, config->secure_va_start,
+                config->secure_va_end, "gpu_secure");
 
         nr_rings = config->nr_rings;
 
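A hypothetical example of how a target without secure-rendering support could opt out (the values below are placeholders, not from this patch): leaving secure_va_start equal to secure_va_end makes msm_gpu_create_address_space() return NULL, so gpu->secure_aspace stays unset and submit_lookup_objects() rejects any MSM_BO_SECURE buffer with -EINVAL.

/* Hypothetical target config: no secure range, so no secure address space */
struct msm_gpu_config config = {
        .va_start = 0x1000000,          /* placeholder non-secure range */
        .va_end   = 0xffffffff,
        .nr_rings = 1,
        /* .secure_va_start and .secure_va_end left at 0 */
};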
@@ -38,6 +38,8 @@ struct msm_gpu_config {
         int nr_rings;
         uint64_t va_start;
         uint64_t va_end;
+        uint64_t secure_va_start;
+        uint64_t secure_va_end;
 };
 
 /* So far, with hardware that I've seen to date, we can have:
@@ -114,6 +116,7 @@ struct msm_gpu {
         int irq;
 
         struct msm_gem_address_space *aspace;
+        struct msm_gem_address_space *secure_aspace;
 
         /* Power Control: */
         struct regulator *gpu_reg, *gpu_cx;
@@ -78,6 +78,7 @@ struct drm_msm_param {
 #define MSM_BO_SCANOUT       0x00000001 /* scanout capable */
 #define MSM_BO_GPU_READONLY  0x00000002
 #define MSM_BO_PRIVILEGED    0x00000004
+#define MSM_BO_SECURE        0x00000008 /* Allocate and map as secure */
 #define MSM_BO_CACHE_MASK    0x000f0000
 /* cache modes */
 #define MSM_BO_CACHED        0x00010000
@@ -86,6 +87,7 @@ struct drm_msm_param {
 
 #define MSM_BO_FLAGS         (MSM_BO_SCANOUT | \
                               MSM_BO_GPU_READONLY | \
+                              MSM_BO_SECURE | \
                               MSM_BO_CACHED | \
                               MSM_BO_WC | \
                               MSM_BO_UNCACHED)