From ffd2f3eb423812150b36e4c8cbd711f6d220c321 Mon Sep 17 00:00:00 2001
From: Jordan Crouse
Date: Mon, 13 Feb 2017 10:14:11 -0700
Subject: [PATCH] drm/msm: Support different SMMU backends for address spaces

SDE and the GPU have different requirements for the SMMU backends: the
SDE generates its own iova addresses and needs special support for DMA
buffers, while the GPU does its own IOMMU operations. Add a shim layer
to the aspace code to break out the address generation and call the
appropriate SMMU functions. There is probably consolidation that can be
done, but for now this is the best way to deal with the two use cases.

Change-Id: Ic0dedbadc6dc03504ef7dffded18ba09fb3ef291
Signed-off-by: Jordan Crouse
---
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c |  21 ++-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c |  39 +++--
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h |   2 +-
 drivers/gpu/drm/msm/msm_drv.h           |  23 +--
 drivers/gpu/drm/msm/msm_gem.c           |  25 ++--
 drivers/gpu/drm/msm/msm_gem.h           |  16 ++-
 drivers/gpu/drm/msm/msm_gem_vma.c       | 181 ++++++++++++++++++++----
 drivers/gpu/drm/msm/msm_mmu.h           |   2 -
 drivers/gpu/drm/msm/sde/sde_kms.c       |  36 +++--
 drivers/gpu/drm/msm/sde/sde_kms.h       |   3 +-
 10 files changed, 264 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 12cabb31dc20..6d8e174236ea 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -192,11 +192,21 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	struct device *dev = mdp4_kms->dev->dev;
+	struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
 	if (mdp4_kms->blank_cursor_iova)
 		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
 	if (mdp4_kms->blank_cursor_bo)
 		drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
+	}
+
 	kfree(mdp4_kms);
 }
 
@@ -416,7 +426,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
-	struct msm_mmu *mmu;
 	struct msm_gem_address_space *aspace;
 	int ret;
 
@@ -506,8 +515,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
+		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
+		if (IS_ERR(mmu)) {
+			ret = PTR_ERR(mmu);
+			goto fail;
+		}
+
 		aspace = msm_gem_address_space_create(&pdev->dev,
-			config->iommu, "mdp4");
+			mmu, "mdp4", 0x1000, 0xffffffff);
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
 			goto fail;
@@ -575,6 +591,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
 	/* TODO */
 	config.max_clk = 266667000;
 	config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
+
 #else
 	if (cpu_is_apq8064())
 		config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7aebf5516ce..cc5a73505537 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark
  *
@@ -18,6 +18,7 @@
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
@@ -130,13 +131,14 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct msm_mmu *mmu = mdp5_kms->mmu;
+	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 
 	mdp5_irq_domain_fini(mdp5_kms);
 
-	if (mmu) {
-		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
 	}
 
 	if (mdp5_kms->ctlm)
@@ -474,7 +476,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	struct mdp5_cfg *config;
 	struct mdp5_kms *mdp5_kms;
 	struct msm_kms *kms = NULL;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 	uint32_t major, minor;
 	int i, ret;
 
@@ -595,30 +597,37 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->platform.iommu) {
-		mmu = msm_smmu_new(&pdev->dev,
+		struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
 			MSM_SMMU_DOMAIN_UNSECURE);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
 			iommu_domain_free(config->platform.iommu);
 			goto fail;
 		}
 
-		ret = mmu->funcs->attach(mmu, iommu_ports,
+		aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+			mmu, "mdp5");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			goto fail;
+		}
+
+		mdp5_kms->aspace = aspace;
+
+		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 			ARRAY_SIZE(iommu_ports));
 		if (ret) {
-			dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
-			mmu->funcs->destroy(mmu);
+			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+				ret);
 			goto fail;
 		}
 	} else {
-		dev_info(dev->dev, "no iommu, fallback to phys "
-			"contig buffers for scanout\n");
-		mmu = NULL;
+		dev_info(&pdev->dev,
+			"no iommu, fallback to phys contig buffers for scanout\n");
+		aspace = NULL;
 	}
 
-	mdp5_kms->mmu = mmu;
-	mdp5_kms->id = msm_register_mmu(dev, mmu);
+	mdp5_kms->id = msm_register_address_space(dev, aspace);
 	if (mdp5_kms->id < 0) {
 		ret = mdp5_kms->id;
 		dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..6cb43ad0c1d7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -37,7 +37,7 @@ struct mdp5_kms {
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 
 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3af8b9e3403d..4ed5953d07f9 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -368,31 +368,32 @@ void __msm_fence_worker(struct work_struct *work);
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool async);
 
-static inline int msm_register_mmu(struct drm_device *dev,
-	struct msm_mmu *mmu) {
-	return -ENODEV;
-}
-static inline void msm_unregister_mmu(struct drm_device *dev,
-	struct msm_mmu *mmu) {
-}
 int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 		ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
+
 int msm_register_address_space(struct drm_device *dev,
 		struct msm_gem_address_space *aspace);
-
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt);
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv);
 void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
 
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43cef983dabc..5aa08cf4d6d8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -24,6 +24,11 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+	return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -279,8 +284,8 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		msm_gem_unmap_vma(priv->aspace[id],
-			&msm_obj->domain[id], msm_obj->sgt);
+		msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
+			msm_obj->sgt, get_dmabuf_ptr(obj));
 	}
 }
 
@@ -305,12 +310,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 			return PTR_ERR(pages);
 
 		if (iommu_present(&platform_bus_type)) {
-			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
-				msm_obj->sgt, obj->size >> PAGE_SHIFT);
-		} else {
-			WARN_ONCE(1, "physical address being used\n");
+			ret = msm_gem_map_vma(priv->aspace[id],
+				&msm_obj->domain[id], msm_obj->sgt,
+				get_dmabuf_ptr(obj));
+		} else
 			msm_obj->domain[id].iova = physaddr(obj);
-		}
 	}
 
 	if (!ret)
@@ -485,7 +489,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
-	unsigned id;
+	int id;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
@@ -496,6 +500,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
 	for (id = 0; id < priv->num_aspaces; id++)
 		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+	seq_puts(m, "\n");
 }
 
@@ -532,7 +538,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
-			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+			dma_buf_vunmap(obj->import_attach->dmabuf,
+				msm_obj->vaddr);
 
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 1b025d67dfe3..86c297c6de32 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,16 +24,24 @@
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 
+struct msm_gem_aspace_ops {
+	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+		struct sg_table *sgt, void *priv);
+
+	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+		struct sg_table *sgt, void *priv);
+
+	void (*destroy)(struct msm_gem_address_space *);
+};
+
 struct msm_gem_address_space {
 	const char *name;
-	/* NOTE: mm managed at the page level, size is in # of pages
-	 * and position mm_node->start is in # of pages:
-	 */
-	struct drm_mm mm;
 	struct msm_mmu *mmu;
+	const struct msm_gem_aspace_ops *ops;
 };
 
 struct msm_gem_vma {
+	/* Node used by the GPU address space, but not the SDE address space */
 	struct drm_mm_node node;
 	uint64_t iova;
 };
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 11febc9224d5..53e70263e03c 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -19,9 +19,81 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt)
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv)
+{
+	struct dma_buf *buf = priv;
+
+	if (buf)
+		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+			sgt, buf, DMA_BIDIRECTIONAL);
+	else
+		aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+			DMA_BIDIRECTIONAL);
+
+	vma->iova = 0;
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv)
+{
+	struct dma_buf *buf = priv;
+	int ret;
+
+	if (buf)
+		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+			DMA_BIDIRECTIONAL);
+	else
+		ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+			DMA_BIDIRECTIONAL);
+
+	if (!ret)
+		vma->iova = sg_dma_address(sgt->sgl);
+
+	return ret;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+	.map = smmu_aspace_map_vma,
+	.unmap = smmu_aspace_unmap_vma,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	struct msm_gem_address_space *aspace;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	aspace->name = name;
+	aspace->mmu = mmu;
+	aspace->ops = &smmu_aspace_ops;
+
+	return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+	struct msm_gem_address_space base;
+	struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+	((struct msm_iommu_aspace *) \
+	 container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
 {
 	if (!vma->iova)
 		return;
@@ -34,16 +106,22 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 	vma->iova = 0;
 }
 
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv)
 {
-	int ret;
+	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+	size_t size = 0;
+	struct scatterlist *sg;
+	int ret = 0, i;
 
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
 
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		size += sg->length + sg->offset;
+
+	ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
 		0, DRM_MM_SEARCH_DEFAULT);
 	if (ret)
 		return ret;
@@ -51,36 +129,85 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	vma->iova = vma->node.start << PAGE_SHIFT;
 
 	if (aspace->mmu)
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-			IOMMU_READ | IOMMU_WRITE);
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova,
+			sgt, IOMMU_READ | IOMMU_WRITE);
 
 	return ret;
 }
 
-void
-msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
 {
-	drm_mm_takedown(&aspace->mm);
-	if (aspace->mmu)
-		aspace->mmu->funcs->destroy(aspace->mmu);
-	kfree(aspace);
+	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+	drm_mm_takedown(&local->mm);
+	aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+	.map = iommu_aspace_map_vma,
+	.unmap = iommu_aspace_unmap_vma,
+	.destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+		uint64_t start, uint64_t end)
+{
+	struct msm_iommu_aspace *local;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	local = kzalloc(sizeof(*local), GFP_KERNEL);
+	if (!local)
+		return ERR_PTR(-ENOMEM);
+
+	drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+		(end >> PAGE_SHIFT) - 1);
+
+	local->base.name = name;
+	local->base.mmu = mmu;
+	local->base.ops = &msm_iommu_aspace_ops;
+
+	return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv)
+{
+	if (aspace && aspace->ops->map)
+		return aspace->ops->map(aspace, vma, sgt, priv);
+
+	return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+	if (aspace && aspace->ops->unmap)
+		aspace->ops->unmap(aspace, vma, sgt, priv);
 }
 
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
 {
-	struct msm_gem_address_space *aspace;
+	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
 
-	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
-	if (!aspace)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(mmu))
+		return (struct msm_gem_address_space *) mmu;
 
-	aspace->name = name;
-	aspace->mmu = msm_iommu_new(dev, domain);
-
-	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
-		(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
-
-	return aspace;
+	return msm_gem_address_space_new(mmu, name,
+		domain->geometry.aperture_start,
+		domain->geometry.aperture_end);
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+	if (aspace && aspace->ops->destroy)
+		aspace->ops->destroy(aspace);
+
+	kfree(aspace);
 }
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index c8c7755c8370..3d750eca5cae 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,7 +21,6 @@
 #include <linux/iommu.h>
 
 struct msm_mmu;
-struct msm_gpu;
 
 enum msm_mmu_domain_type {
 	MSM_SMMU_DOMAIN_UNSECURE,
@@ -61,7 +60,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 struct msm_mmu *msm_smmu_new(struct device *dev,
 	enum msm_mmu_domain_type domain);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index afe90d16e31d..e40e33f11fd9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -941,15 +941,15 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
 	int i;
 
 	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
-		if (!sde_kms->mmu[i])
+		if (!sde_kms->aspace[i])
 			continue;
 
-		mmu = sde_kms->mmu[i];
-		msm_unregister_mmu(sde_kms->dev, mmu);
+		mmu = sde_kms->aspace[i]->mmu;
+
 		mmu->funcs->detach(mmu, (const char **)iommu_ports,
 			ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
-		sde_kms->mmu[i] = 0;
+		msm_gem_address_space_destroy(sde_kms->aspace[i]);
+
 		sde_kms->mmu_id[i] = 0;
 	}
 
@@ -962,6 +962,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 	int i, ret;
 
 	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_gem_address_space *aspace;
+
 		mmu = msm_smmu_new(sde_kms->dev->dev, i);
 		if (IS_ERR(mmu)) {
 			/* MMU's can be optional depending on platform */
@@ -971,25 +973,35 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 			continue;
 		}
 
-		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-			ARRAY_SIZE(iommu_ports));
-		if (ret) {
-			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+			mmu, "sde");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
 			mmu->funcs->destroy(mmu);
 			goto fail;
 		}
 
-		sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
+		sde_kms->aspace[i] = aspace;
+
+		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+		if (ret) {
+			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+			msm_gem_address_space_destroy(aspace);
+			goto fail;
+		}
+
+		sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
+			aspace);
 		if (sde_kms->mmu_id[i] < 0) {
 			ret = sde_kms->mmu_id[i];
 			SDE_ERROR("failed to register sde iommu %d: %d\n",
 				i, ret);
 			mmu->funcs->detach(mmu, (const char **)iommu_ports,
 				ARRAY_SIZE(iommu_ports));
+			msm_gem_address_space_destroy(aspace);
 			goto fail;
 		}
-
-		sde_kms->mmu[i] = mmu;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index bf127ffe9eb6..8663c632699b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -22,6 +22,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 #include "sde_dbg.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
@@ -121,7 +122,7 @@ struct sde_kms {
 	int core_rev;
 	struct sde_mdss_cfg *catalog;
 
-	struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
+	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
 	int mmu_id[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
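The heart of the change is the per-backend ops table that msm_gem_map_vma()/msm_gem_unmap_vma()
dispatch through, so the SDE path can take the iova the SMMU/DMA layer hands back while the GPU
path keeps allocating iovas from its own drm_mm. The fragment below is a minimal, userspace-only
sketch of that dispatch pattern under stated assumptions: all toy_* names are hypothetical, the
backends are faked with constants and a trivial bump allocator, and none of the kernel APIs from
the patch are used. It illustrates the shape of the shim, not the kernel implementation.

/*
 * Toy model of the aspace->ops dispatch introduced by the patch.
 * Hypothetical names; compile with: cc -Wall -o toy toy.c
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_vma { uint64_t iova; };
struct toy_aspace;

struct toy_aspace_ops {
	int (*map)(struct toy_aspace *as, struct toy_vma *vma, size_t npages);
};

struct toy_aspace {
	const char *name;
	const struct toy_aspace_ops *ops;
	uint64_t next;	/* allocator cursor, only used by the "GPU" backend */
};

/* "SMMU" style: the backend produces the iova (stand-in for sg_dma_address()). */
static int toy_smmu_map(struct toy_aspace *as, struct toy_vma *vma, size_t npages)
{
	(void)as;
	(void)npages;
	vma->iova = 0x80000000ULL;
	return 0;
}

/* "GPU/IOMMU" style: the address space allocates the iova itself. */
static int toy_iommu_map(struct toy_aspace *as, struct toy_vma *vma, size_t npages)
{
	vma->iova = as->next;
	as->next += (uint64_t)npages << 12;	/* 4K pages */
	return 0;
}

static const struct toy_aspace_ops toy_smmu_ops  = { .map = toy_smmu_map };
static const struct toy_aspace_ops toy_iommu_ops = { .map = toy_iommu_map };

/* The shim: callers never need to know which backend they are talking to. */
static int toy_map(struct toy_aspace *as, struct toy_vma *vma, size_t npages)
{
	if (as && as->ops && as->ops->map)
		return as->ops->map(as, vma, npages);
	return -1;
}

int main(void)
{
	struct toy_aspace sde = { "sde", &toy_smmu_ops, 0 };
	struct toy_aspace gpu = { "gpu", &toy_iommu_ops, 0x1000 };
	struct toy_vma a = { 0 }, b = { 0 };

	toy_map(&sde, &a, 4);
	toy_map(&gpu, &b, 4);
	printf("%s iova=0x%llx, %s iova=0x%llx\n",
	       sde.name, (unsigned long long)a.iova,
	       gpu.name, (unsigned long long)b.iova);
	return 0;
}

The design choice mirrors the patch: callers hold only a struct msm_gem_address_space pointer,
and the .destroy hook being optional for the SMMU case corresponds to the kfree()-only teardown
in msm_gem_address_space_destroy().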