drm/msm: Finish consolidating the address space code

Now that the SMMU/IOMMU differences have been resolved, the only delta
between the SMMU and the IOMMU address space implementations is the
actual address space allocation, which we can work around by assuming
the caller doesn't want address generation if they specify the same
start and end address (i.e. 0).

With that optimization we can get rid of the address space
sub-functions and a bunch of otherwise duplicated code.

Change-Id: Ic0dedbaddef0fcd3a8f39e30f95c71245d84f111
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Jordan Crouse committed 2017-04-07 15:01:40 -06:00
parent 438cdcdae0
commit 1576b22ae9

2 changed files with 66 additions and 139 deletions
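
To make the start == end == 0 convention concrete, here is a minimal,
self-contained C model of the idea (plain userspace code, not driver code;
struct aspace, aspace_new() and map_buffer() are illustrative stand-ins for
the names in the patch, and the "mdss"/"gpu" ranges are made up):

	/*
	 * Standalone sketch of the convention this patch introduces:
	 * one constructor, where start == end (i.e. 0) means "no
	 * address-space allocator wanted" -- the SMMU case, where the
	 * device address comes from the mapping itself instead.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct aspace {
		const char *name;
		uint64_t va_len;	/* 0 => skip iova allocation entirely */
	};

	static struct aspace *aspace_new(const char *name, uint64_t start,
					 uint64_t end)
	{
		struct aspace *a = calloc(1, sizeof(*a));

		if (!a)
			return NULL;

		a->name = name;
		a->va_len = end - start;	/* (0, 0) yields va_len == 0 */
		return a;
	}

	static void map_buffer(const struct aspace *a)
	{
		if (!a->va_len) {
			/* SMMU-style path: no allocator; the address comes
			 * from the mapping (sg_dma_address() in the patch). */
			printf("%s: no allocator, using the mapped address\n",
			       a->name);
			return;
		}

		/* IOMMU-style path: hand out iovas from the managed range
		 * (drm_mm in the patch). */
		printf("%s: allocating iovas from a %llu-byte range\n",
		       a->name, (unsigned long long)a->va_len);
	}

	int main(void)
	{
		struct aspace *smmu = aspace_new("mdss", 0, 0);
		struct aspace *gpu = aspace_new("gpu", 0x100000000ULL,
						0x200000000ULL);

		if (!smmu || !gpu)
			return 1;

		map_buffer(smmu);
		map_buffer(gpu);

		free(smmu);
		free(gpu);
		return 0;
	}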

--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h

@@ -25,21 +25,12 @@
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
 
-struct msm_gem_aspace_ops {
-	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
-		struct sg_table *sgt, void *priv, unsigned int flags);
-	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
-		struct sg_table *sgt, void *priv);
-	void (*destroy)(struct msm_gem_address_space *);
-};
-
 struct msm_gem_address_space {
 	const char *name;
 	struct msm_mmu *mmu;
-	const struct msm_gem_aspace_ops *ops;
 	struct kref kref;
+	struct drm_mm mm;
+	u64 va_len;
 };
 
 struct msm_gem_vma {
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c

@@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
 	struct msm_gem_address_space *aspace = container_of(kref,
 		struct msm_gem_address_space, kref);
 
-	if (aspace->ops->destroy)
-		aspace->ops->destroy(aspace);
+	if (aspace->va_len)
+		drm_mm_takedown(&aspace->mm);
+
+	aspace->mmu->funcs->destroy(aspace->mmu);
 
 	kfree(aspace);
 }
@@ -37,45 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 	kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-/* SDE address space operations */
-static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		void *priv)
-{
-	aspace->mmu->funcs->unmap(aspace->mmu, 0, sgt, priv);
-
-	vma->iova = 0;
-
-	msm_gem_address_space_put(aspace);
-}
-
-static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		void *priv, unsigned int flags)
-{
-	int ret;
-
-	ret = aspace->mmu->funcs->map(aspace->mmu, 0, sgt, flags, priv);
-	if (!ret) {
-		vma->iova = sg_dma_address(sgt->sgl);
-
-		/* Get a reference to the aspace to keep it around */
-		kref_get(&aspace->kref);
-	}
-
-	return ret;
-}
-
-static const struct msm_gem_aspace_ops smmu_aspace_ops = {
-	.map = smmu_aspace_map_vma,
-	.unmap = smmu_aspace_unmap_vma,
-};
-
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
-		const char *name)
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+		uint64_t start, uint64_t end)
 {
 	struct msm_gem_address_space *aspace;
@@ -88,125 +54,95 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
 	aspace->name = name;
 	aspace->mmu = mmu;
-	aspace->ops = &smmu_aspace_ops;
+	aspace->va_len = end - start;
+
+	if (aspace->va_len)
+		drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+			(end >> PAGE_SHIFT) - 1);
 
 	kref_init(&aspace->kref);
 
 	return aspace;
 }
 
-/* GPU address space operations */
-struct msm_iommu_aspace {
-	struct msm_gem_address_space base;
-	struct drm_mm mm;
-};
-
-#define to_iommu_aspace(aspace) \
-	((struct msm_iommu_aspace *) \
-	 container_of(aspace, struct msm_iommu_aspace, base))
-
-static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
-{
-	if (!vma->iova)
-		return;
-
-	if (aspace->mmu)
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, NULL);
-
-	drm_mm_remove_node(&vma->node);
-
-	vma->iova = 0;
-
-	msm_gem_address_space_put(aspace);
-}
-
-static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
-		unsigned int flags)
+static int allocate_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		u64 *iova)
 {
-	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
-	size_t size = 0;
 	struct scatterlist *sg;
+	size_t size = 0;
 	int ret, i;
 
+	if (!aspace->va_len)
+		return 0;
+
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 		size += sg->length + sg->offset;
 
-	ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
 			0, DRM_MM_SEARCH_DEFAULT);
-	if (ret)
-		return ret;
-
-	vma->iova = vma->node.start << PAGE_SHIFT;
 
-	if (aspace->mmu)
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-			flags, NULL);
-
-	/* Get a reference to the aspace to keep it around */
-	kref_get(&aspace->kref);
+	if (!ret && iova)
+		*iova = vma->node.start << PAGE_SHIFT;
 
 	return ret;
 }
 
-static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
-{
-	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
-
-	drm_mm_takedown(&local->mm);
-
-	aspace->mmu->funcs->destroy(aspace->mmu);
-}
-
-static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
-	.map = iommu_aspace_map_vma,
-	.unmap = iommu_aspace_unmap_vma,
-	.destroy = iommu_aspace_destroy,
-};
-
-static struct msm_gem_address_space *
-msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
-		uint64_t start, uint64_t end)
-{
-	struct msm_iommu_aspace *local;
-
-	if (!mmu)
-		return ERR_PTR(-EINVAL);
-
-	local = kzalloc(sizeof(*local), GFP_KERNEL);
-	if (!local)
-		return ERR_PTR(-ENOMEM);
-
-	drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
-		(end >> PAGE_SHIFT) - 1);
-
-	local->base.name = name;
-	local->base.mmu = mmu;
-	local->base.ops = &msm_iommu_aspace_ops;
-
-	kref_init(&local->base.kref);
-
-	return &local->base;
-}
-
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		void *priv, unsigned int flags)
 {
-	if (aspace && aspace->ops->map)
-		return aspace->ops->map(aspace, vma, sgt, priv, flags);
+	u64 iova = 0;
+	int ret;
 
-	return -EINVAL;
+	if (!aspace)
+		return -EINVAL;
+
+	ret = allocate_iova(aspace, vma, sgt, &iova);
+	if (ret)
+		return ret;
+
+	ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+		flags, priv);
+
+	if (ret) {
+		if (drm_mm_node_allocated(&vma->node))
+			drm_mm_remove_node(&vma->node);
+
+		return ret;
+	}
+
+	vma->iova = sg_dma_address(sgt->sgl);
+
+	kref_get(&aspace->kref);
+
+	return 0;
 }
 
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
 {
-	if (aspace && aspace->ops->unmap)
-		aspace->ops->unmap(aspace, vma, sgt, priv);
+	if (!aspace || !vma->iova)
+		return;
+
+	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+	if (drm_mm_node_allocated(&vma->node))
+		drm_mm_remove_node(&vma->node);
+
+	vma->iova = 0;
+
+	msm_gem_address_space_put(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	return msm_gem_address_space_new(mmu, name, 0, 0);
 }
 
 struct msm_gem_address_space *