drm/msm: Attach the MMUs as soon as they are allocated

Currently the normal and secure MMUs are allocated when the
address space is created in msm_gpu_init(), but they are not
attached until the end of adreno_gpu_init(). Since we can't map
buffer objects in the IOMMU before the MMU is attached, this
restricts where in the init sequence buffer objects can be
allocated.

For arm-smmu based targets there isn't any reason why we can't
attach the MMU immediately after creating the address space -
this makes the address space available for mapping memory right
away and will make it easier to move global allocations around.

Change-Id: Ic0dedbad161396e9d095f3f3d1e4fca2d240a084
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
commit ae2cb03114 (parent 1db3fbd43a)
Author: Jordan Crouse <jcrouse@codeaurora.org>
Date:   2017-06-12 09:16:49 -06:00

2 changed files with 28 additions and 33 deletions
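
For context, a minimal sketch of what the new ordering allows, assuming the
msm_gem_kernel_new() call pattern shown in the hunks below, an ERR_PTR return
on failure, and a uint64_t iova (the diff does not show the type); the
function and "scratch" names are illustrative only and not part of the driver:

/*
 * Illustrative sketch (not driver code): because the MMU is attached as
 * soon as the address space is created, a buffer object can be allocated
 * and mapped at any point after msm_gpu_init(), without waiting for
 * adreno_gpu_init() to perform the attach.
 */
static int example_early_alloc(struct drm_device *drm, struct msm_gpu *gpu)
{
	struct drm_gem_object *scratch_bo;
	uint64_t scratch_iova;
	void *scratch;

	/* gpu->aspace->mmu was already attached in msm_gpu_create_address_space() */
	scratch = msm_gem_kernel_new(drm, SZ_4K, MSM_BO_UNCACHED,
		gpu->aspace, &scratch_bo, &scratch_iova);

	return IS_ERR(scratch) ? PTR_ERR(scratch) : 0;
}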

drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -516,7 +516,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
 	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	struct msm_mmu *mmu;
 	int ret;
 
 	adreno_gpu->funcs = funcs;
@@ -547,22 +546,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	mmu = gpu->aspace->mmu;
-	if (mmu) {
-		ret = mmu->funcs->attach(mmu, NULL, 0);
-		if (ret)
-			return ret;
-	}
-
-	if (gpu->secure_aspace) {
-		mmu = gpu->secure_aspace->mmu;
-		if (mmu) {
-			ret = mmu->funcs->attach(mmu, NULL, 0);
-			if (ret)
-				return ret;
-		}
-	}
-
 	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
 		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
 		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
@@ -577,28 +560,14 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
-	struct msm_gem_address_space *aspace = gpu->base.aspace;
-
 	if (gpu->memptrs_bo) {
 		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, aspace);
+			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
 		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 
 	release_firmware(gpu->pm4);
 	release_firmware(gpu->pfp);
 
 	msm_gpu_cleanup(&gpu->base);
-
-	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu);
-		msm_gem_address_space_put(aspace);
-	}
-
-	if (gpu->base.secure_aspace) {
-		aspace = gpu->base.secure_aspace;
-		aspace->mmu->funcs->detach(aspace->mmu);
-		msm_gem_address_space_put(aspace);
-	}
 }
 
 static void adreno_snapshot_os(struct msm_gpu *gpu,

drivers/gpu/drm/msm/msm_gpu.c

@@ -810,12 +810,32 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
 			gpu->name, name, PTR_ERR(aspace));
 		iommu_domain_free(iommu);
-		aspace = NULL;
+		return NULL;
 	}
 
+	if (aspace->mmu) {
+		int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+
+		if (ret) {
+			dev_err(gpu->dev->dev,
+				"%s: failed to atach IOMMU '%s': %d\n",
+				gpu->name, name, ret);
+			msm_gem_address_space_put(aspace);
+			aspace = ERR_PTR(ret);
+		}
+	}
+
 	return aspace;
 }
 
+static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace)
+{
+	if (!IS_ERR_OR_NULL(aspace) && aspace->mmu)
+		aspace->mmu->funcs->detach(aspace->mmu);
+
+	msm_gem_address_space_put(aspace);
+}
+
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, struct msm_gpu_config *config)
@@ -938,6 +958,9 @@ fail:
 	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
 		msm_ringbuffer_destroy(gpu->rb[i]);
 
+	msm_gpu_destroy_address_space(gpu->aspace);
+	msm_gpu_destroy_address_space(gpu->secure_aspace);
+
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
@@ -960,4 +983,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 	msm_snapshot_destroy(gpu, gpu->snapshot);
 
 	pm_runtime_disable(&pdev->dev);
+
+	msm_gpu_destroy_address_space(gpu->aspace);
+	msm_gpu_destroy_address_space(gpu->secure_aspace);
 }