Merge "drm/msm: Detach the MMU during msm_gpu_cleanup()"

Linux Build Service Account, 2017-02-20 19:44:28 -08:00, committed by Gerrit - the friendly Code Review server
commit 09191dbe4d
18 changed files with 405 additions and 133 deletions

drivers/gpu/drm/msm/Makefile

@@ -121,6 +121,7 @@ msm_drm-$(CONFIG_DRM_MSM) += \
     msm_gem.o \
     msm_gem_prime.o \
     msm_gem_submit.o \
+    msm_gem_vma.o \
     msm_gpu.o \
     msm_iommu.o \
     msm_smmu.o \

drivers/gpu/drm/msm/adreno/a3xx_gpu.c

@@ -583,7 +583,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 #endif
     }
 
-    if (!gpu->mmu) {
+    if (!gpu->aspace) {
         /* TODO we think it is possible to configure the GPU to
          * restrict access to VRAM carveout. But the required
          * registers are unknown. For now just bail out and

drivers/gpu/drm/msm/adreno/a4xx_gpu.c

@@ -665,7 +665,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 #endif
     }
 
-    if (!gpu->mmu) {
+    if (!gpu->aspace) {
         /* TODO we think it is possible to configure the GPU to
          * restrict access to VRAM carveout. But the required
          * registers are unknown. For now just bail out and

drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -390,7 +390,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         return ret;
     }
 
-    mmu = gpu->mmu;
+    mmu = gpu->aspace->mmu;
     if (mmu) {
         ret = mmu->funcs->attach(mmu, iommu_ports,
                 ARRAY_SIZE(iommu_ports));
@@ -427,6 +427,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
+    struct msm_gem_address_space *aspace = gpu->base.aspace;
+
     if (gpu->memptrs_bo) {
         if (gpu->memptrs_iova)
             msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
@@ -434,5 +436,12 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
     }
 
     release_firmware(gpu->pm4);
     release_firmware(gpu->pfp);
 
     msm_gpu_cleanup(&gpu->base);
+
+    if (aspace) {
+        aspace->mmu->funcs->detach(aspace->mmu,
+                iommu_ports, ARRAY_SIZE(iommu_ports));
+        msm_gem_address_space_destroy(aspace);
+    }
 }
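
For reference, the resulting teardown order reads roughly as below (a condensed sketch assembled from the two hunks above, with the existing memptrs/firmware cleanup elided). The detach has to come after msm_gpu_cleanup(), since that path still puts the ringbuffer iova through the address space:

    void adreno_gpu_cleanup(struct adreno_gpu *gpu)
    {
        struct msm_gem_address_space *aspace = gpu->base.aspace;

        /* ... put memptrs_bo iova, release pm4/pfp firmware ... */

        /* puts gpu->rb->bo's iova, so the aspace must still be attached */
        msm_gpu_cleanup(&gpu->base);

        /* only now is it safe to detach the MMU and free the aspace */
        if (aspace) {
            aspace->mmu->funcs->detach(aspace->mmu,
                    iommu_ports, ARRAY_SIZE(iommu_ports));
            msm_gem_address_space_destroy(aspace);
        }
    }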

drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c

@@ -17,6 +17,7 @@
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp4_kms.h"
@@ -177,18 +178,35 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
     struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
     struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
     unsigned i;
+    struct msm_gem_address_space *aspace = mdp4_kms->aspace;
 
     for (i = 0; i < priv->num_crtcs; i++)
         mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+    if (aspace) {
+        aspace->mmu->funcs->detach(aspace->mmu,
+                iommu_ports, ARRAY_SIZE(iommu_ports));
+        msm_gem_address_space_destroy(aspace);
+    }
 }
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
     struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+    struct device *dev = mdp4_kms->dev->dev;
+    struct msm_gem_address_space *aspace = mdp4_kms->aspace;
 
     if (mdp4_kms->blank_cursor_iova)
         msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
     if (mdp4_kms->blank_cursor_bo)
         drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+    if (aspace) {
+        aspace->mmu->funcs->detach(aspace->mmu,
+                iommu_ports, ARRAY_SIZE(iommu_ports));
+        msm_gem_address_space_destroy(aspace);
+    }
+
     kfree(mdp4_kms);
 }
@@ -408,7 +426,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
     struct mdp4_platform_config *config = mdp4_get_config(pdev);
     struct mdp4_kms *mdp4_kms;
     struct msm_kms *kms = NULL;
-    struct msm_mmu *mmu;
+    struct msm_gem_address_space *aspace;
     int ret;
 
     mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -497,22 +515,33 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
     mdelay(16);
 
     if (config->iommu) {
-        mmu = msm_iommu_new(&pdev->dev, config->iommu);
+        struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
         if (IS_ERR(mmu)) {
             ret = PTR_ERR(mmu);
             goto fail;
         }
 
-        ret = mmu->funcs->attach(mmu, iommu_ports,
+        aspace = msm_gem_address_space_create(&pdev->dev,
+                mmu, "mdp4", 0x1000, 0xffffffff);
+        if (IS_ERR(aspace)) {
+            ret = PTR_ERR(aspace);
+            goto fail;
+        }
+
+        mdp4_kms->aspace = aspace;
+
+        ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                 ARRAY_SIZE(iommu_ports));
         if (ret)
             goto fail;
     } else {
         dev_info(dev->dev, "no iommu, fallback to phys "
                 "contig buffers for scanout\n");
-        mmu = NULL;
+        aspace = NULL;
     }
 
-    mdp4_kms->id = msm_register_mmu(dev, mmu);
+    mdp4_kms->id = msm_register_address_space(dev, aspace);
     if (mdp4_kms->id < 0) {
         ret = mdp4_kms->id;
         dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
@@ -562,6 +591,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
     /* TODO */
     config.max_clk = 266667000;
+    config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
 #else
     if (cpu_is_apq8064())
         config.max_clk = 266667000;

drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h

@@ -45,6 +45,7 @@ struct mdp4_kms {
     struct clk *pclk;
     struct clk *lut_clk;
     struct clk *axi_clk;
+    struct msm_gem_address_space *aspace;
 
     struct mdp_irq error_handler;

drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
@@ -130,13 +131,14 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
     struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-    struct msm_mmu *mmu = mdp5_kms->mmu;
+    struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 
     mdp5_irq_domain_fini(mdp5_kms);
 
-    if (mmu) {
-        mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-        mmu->funcs->destroy(mmu);
+    if (aspace) {
+        aspace->mmu->funcs->detach(aspace->mmu,
+                iommu_ports, ARRAY_SIZE(iommu_ports));
+        msm_gem_address_space_destroy(aspace);
     }
 
     if (mdp5_kms->ctlm)
@@ -474,7 +476,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
     struct mdp5_cfg *config;
     struct mdp5_kms *mdp5_kms;
     struct msm_kms *kms = NULL;
-    struct msm_mmu *mmu;
+    struct msm_gem_address_space *aspace;
     uint32_t major, minor;
     int i, ret;
@@ -595,30 +597,37 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
     mdelay(16);
 
     if (config->platform.iommu) {
-        mmu = msm_smmu_new(&pdev->dev,
-                MSM_SMMU_DOMAIN_UNSECURE);
+        struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
+                MSM_SMMU_DOMAIN_UNSECURE);
         if (IS_ERR(mmu)) {
             ret = PTR_ERR(mmu);
             dev_err(dev->dev, "failed to init iommu: %d\n", ret);
             iommu_domain_free(config->platform.iommu);
+            goto fail;
+        }
+
+        aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+                mmu, "mdp5");
+        if (IS_ERR(aspace)) {
+            ret = PTR_ERR(aspace);
             goto fail;
         }
 
-        ret = mmu->funcs->attach(mmu, iommu_ports,
+        mdp5_kms->aspace = aspace;
+
+        ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                 ARRAY_SIZE(iommu_ports));
         if (ret) {
-            dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
-            mmu->funcs->destroy(mmu);
+            dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+                ret);
             goto fail;
         }
     } else {
-        dev_info(dev->dev, "no iommu, fallback to phys "
-                "contig buffers for scanout\n");
-        mmu = NULL;
+        dev_info(&pdev->dev,
+            "no iommu, fallback to phys contig buffers for scanout\n");
+        aspace = NULL;
     }
 
-    mdp5_kms->mmu = mmu;
-    mdp5_kms->id = msm_register_mmu(dev, mmu);
+    mdp5_kms->id = msm_register_address_space(dev, aspace);
     if (mdp5_kms->id < 0) {
         ret = mdp5_kms->id;
         dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);

drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h

@@ -37,7 +37,7 @@ struct mdp5_kms {
     /* mapper-id used to request GEM buffer mapped for scanout: */
     int id;
-    struct msm_mmu *mmu;
+    struct msm_gem_address_space *aspace;
 
     struct mdp5_smp *smp;
     struct mdp5_ctl_manager *ctlm;

drivers/gpu/drm/msm/msm_drv.c

@@ -38,42 +38,20 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
     .atomic_commit = msm_atomic_commit,
 };
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+        struct msm_gem_address_space *aspace)
 {
     struct msm_drm_private *priv = dev->dev_private;
-    int idx = priv->num_mmus++;
+    int idx = priv->num_aspaces++;
 
-    if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+    if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
         return -EINVAL;
 
-    priv->mmus[idx] = mmu;
+    priv->aspace[idx] = aspace;
 
     return idx;
 }
 
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-    struct msm_drm_private *priv = dev->dev_private;
-    int idx;
-
-    if (priv->num_mmus <= 0) {
-        dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
-        return;
-    }
-
-    idx = priv->num_mmus - 1;
-
-    /* only support reverse-order deallocation */
-    if (priv->mmus[idx] != mmu) {
-        dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
-        return;
-    }
-
-    --priv->num_mmus;
-    priv->mmus[idx] = 0;
-}
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");

drivers/gpu/drm/msm/msm_drv.h

@@ -63,6 +63,8 @@ struct msm_mmu;
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
 
 #define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS 8
@@ -296,9 +298,13 @@ struct msm_drm_private {
     uint32_t pending_crtcs;
     wait_queue_head_t pending_crtcs_event;
 
-    /* registered MMUs: */
-    unsigned int num_mmus;
-    struct msm_mmu *mmus[NUM_DOMAINS];
+    /* Registered address spaces.. currently this is fixed per # of
+     * iommu's. Ie. one for display block and one for gpu block.
+     * Eventually, to do per-process gpu pagetables, we'll want one
+     * of these per-process.
+     */
+    unsigned int num_aspaces;
+    struct msm_gem_address_space *aspace[NUM_DOMAINS];
 
     unsigned int num_planes;
     struct drm_plane *planes[MAX_PLANES];
@@ -362,15 +368,32 @@ void __msm_fence_worker(struct work_struct *work);
 int msm_atomic_commit(struct drm_device *dev,
         struct drm_atomic_state *state, bool async);
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
 int msm_wait_fence(struct drm_device *dev, uint32_t fence,
         ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
         struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
+int msm_register_address_space(struct drm_device *dev,
+        struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv);
+
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+        const char *name);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+        const char *name);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
         struct drm_file *file);

drivers/gpu/drm/msm/msm_gem.c

@@ -24,6 +24,11 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+    return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
     struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -279,21 +284,8 @@ put_iova(struct drm_gem_object *obj)
     WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
     for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-        struct msm_mmu *mmu = priv->mmus[id];
-
-        if (!mmu || !msm_obj->domain[id].iova)
-            continue;
-
-        if (obj->import_attach) {
-            if (mmu->funcs->unmap_dma_buf)
-                mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
-                        obj->import_attach->dmabuf,
-                        DMA_BIDIRECTIONAL);
-        } else
-            mmu->funcs->unmap_sg(mmu, msm_obj->sgt,
-                    DMA_BIDIRECTIONAL);
-
-        msm_obj->domain[id].iova = 0;
+        msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
+            msm_obj->sgt, get_dmabuf_ptr(obj));
     }
 }
@@ -318,31 +310,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
             return PTR_ERR(pages);
 
         if (iommu_present(&platform_bus_type)) {
-            struct msm_mmu *mmu = priv->mmus[id];
-
-            if (WARN_ON(!mmu))
-                return -EINVAL;
-
-            if (obj->import_attach && mmu->funcs->map_dma_buf) {
-                ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
-                        obj->import_attach->dmabuf,
-                        DMA_BIDIRECTIONAL);
-                if (ret) {
-                    DRM_ERROR("Unable to map dma buf\n");
-                    return ret;
-                }
-            } else {
-                ret = mmu->funcs->map_sg(mmu, msm_obj->sgt,
-                        DMA_BIDIRECTIONAL);
-            }
-
-            if (!ret)
-                msm_obj->domain[id].iova =
-                    sg_dma_address(msm_obj->sgt->sgl);
-
-        } else {
-            WARN_ONCE(1, "physical address being used\n");
+            ret = msm_gem_map_vma(priv->aspace[id],
+                &msm_obj->domain[id], msm_obj->sgt,
+                get_dmabuf_ptr(obj));
+        } else
             msm_obj->domain[id].iova = physaddr(obj);
-        }
     }
 
     if (!ret)
@@ -515,14 +487,21 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
     struct drm_device *dev = obj->dev;
     struct msm_gem_object *msm_obj = to_msm_bo(obj);
+    struct msm_drm_private *priv = obj->dev->dev_private;
     uint64_t off = drm_vma_node_start(&obj->vma_node);
+    int id;
 
     WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-    seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+
+    seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
             msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
             msm_obj->read_fence, msm_obj->write_fence,
             obj->name, obj->refcount.refcount.counter,
-            off, msm_obj->vaddr, obj->size);
+            off, msm_obj->vaddr);
+
+    for (id = 0; id < priv->num_aspaces; id++)
+        seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+    seq_puts(m, "\n");
 }
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -559,7 +538,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
     if (obj->import_attach) {
         if (msm_obj->vaddr)
-            dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+            dma_buf_vunmap(obj->import_attach->dmabuf,
+                msm_obj->vaddr);
 
         /* Don't drop the pages for imported dmabuf, as they are not
          * ours, just free the array we allocated:
@@ -613,7 +593,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
     struct msm_drm_private *priv = dev->dev_private;
     struct msm_gem_object *msm_obj;
-    unsigned sz;
     bool use_vram = false;
 
     switch (flags & MSM_BO_CACHE_MASK) {
@@ -635,16 +614,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
     if (WARN_ON(use_vram && !priv->vram.size))
         return -EINVAL;
 
-    sz = sizeof(*msm_obj);
-    if (use_vram)
-        sz += sizeof(struct drm_mm_node);
-
-    msm_obj = kzalloc(sz, GFP_KERNEL);
+    msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
     if (!msm_obj)
         return -ENOMEM;
 
     if (use_vram)
-        msm_obj->vram_node = (void *)&msm_obj[1];
+        msm_obj->vram_node = &msm_obj->domain[0].node;
 
     msm_obj->flags = flags;

drivers/gpu/drm/msm/msm_gem.h

@@ -24,6 +24,28 @@
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
 
+struct msm_gem_aspace_ops {
+    int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+        struct sg_table *sgt, void *priv);
+
+    void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+        struct sg_table *sgt, void *priv);
+
+    void (*destroy)(struct msm_gem_address_space *);
+};
+
+struct msm_gem_address_space {
+    const char *name;
+    struct msm_mmu *mmu;
+    const struct msm_gem_aspace_ops *ops;
+};
+
+struct msm_gem_vma {
+    /* Node used by the GPU address space, but not the SDE address space */
+    struct drm_mm_node node;
+    uint64_t iova;
+};
+
 struct msm_gem_object {
     struct drm_gem_object base;
@@ -52,9 +74,7 @@ struct msm_gem_object {
     struct sg_table *sgt;
     void *vaddr;
 
-    struct {
-        dma_addr_t iova;
-    } domain[NUM_DOMAINS];
+    struct msm_gem_vma domain[NUM_DOMAINS];
 
     /* normally (resv == &_resv) except for imported bo's */
     struct reservation_object *resv;
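
With this, each BO carries one msm_gem_vma per registered address space, indexed by the id handed back by msm_register_address_space(). A minimal sketch of the resulting lookup (a hypothetical helper, not part of this patch):

    /* 'id' is the index msm_register_address_space() returned for the
     * aspace this BO was mapped into; iova == 0 means "not mapped".
     */
    static uint64_t msm_obj_iova(struct msm_gem_object *msm_obj, int id)
    {
        return msm_obj->domain[id].iova;
    }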

drivers/gpu/drm/msm/msm_gem_vma.c (new file)

@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv)
+{
+    struct dma_buf *buf = priv;
+
+    if (buf)
+        aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+                sgt, buf, DMA_BIDIRECTIONAL);
+    else
+        aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+                DMA_BIDIRECTIONAL);
+
+    vma->iova = 0;
+}
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv)
+{
+    struct dma_buf *buf = priv;
+    int ret;
+
+    if (buf)
+        ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+                DMA_BIDIRECTIONAL);
+    else
+        ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+                DMA_BIDIRECTIONAL);
+
+    if (!ret)
+        vma->iova = sg_dma_address(sgt->sgl);
+
+    return ret;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+    .map = smmu_aspace_map_vma,
+    .unmap = smmu_aspace_unmap_vma,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+        const char *name)
+{
+    struct msm_gem_address_space *aspace;
+
+    if (!mmu)
+        return ERR_PTR(-EINVAL);
+
+    aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+    if (!aspace)
+        return ERR_PTR(-ENOMEM);
+
+    aspace->name = name;
+    aspace->mmu = mmu;
+    aspace->ops = &smmu_aspace_ops;
+
+    return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+    struct msm_gem_address_space base;
+    struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+    ((struct msm_iommu_aspace *) \
+     container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+    if (!vma->iova)
+        return;
+
+    if (aspace->mmu)
+        aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+    drm_mm_remove_node(&vma->node);
+
+    vma->iova = 0;
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv)
+{
+    struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+    size_t size = 0;
+    struct scatterlist *sg;
+    int ret = 0, i;
+
+    if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+        return 0;
+
+    for_each_sg(sgt->sgl, sg, sgt->nents, i)
+        size += sg->length + sg->offset;
+
+    ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+            0, DRM_MM_SEARCH_DEFAULT);
+    if (ret)
+        return ret;
+
+    vma->iova = vma->node.start << PAGE_SHIFT;
+
+    if (aspace->mmu)
+        ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova,
+                sgt, IOMMU_READ | IOMMU_WRITE);
+
+    return ret;
+}
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+    struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+    drm_mm_takedown(&local->mm);
+
+    aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+    .map = iommu_aspace_map_vma,
+    .unmap = iommu_aspace_unmap_vma,
+    .destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+        uint64_t start, uint64_t end)
+{
+    struct msm_iommu_aspace *local;
+
+    if (!mmu)
+        return ERR_PTR(-EINVAL);
+
+    local = kzalloc(sizeof(*local), GFP_KERNEL);
+    if (!local)
+        return ERR_PTR(-ENOMEM);
+
+    drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+            (end >> PAGE_SHIFT) - 1);
+
+    local->base.name = name;
+    local->base.mmu = mmu;
+    local->base.ops = &msm_iommu_aspace_ops;
+
+    return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt,
+        void *priv)
+{
+    if (aspace && aspace->ops->map)
+        return aspace->ops->map(aspace, vma, sgt, priv);
+
+    return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+        struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+    if (aspace && aspace->ops->unmap)
+        aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+        const char *name)
+{
+    struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+    if (IS_ERR(mmu))
+        return (struct msm_gem_address_space *) mmu;
+
+    return msm_gem_address_space_new(mmu, name,
+        domain->geometry.aperture_start,
+        domain->geometry.aperture_end);
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+    if (aspace && aspace->ops->destroy)
+        aspace->ops->destroy(aspace);
+
+    kfree(aspace);
+}
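
Taken together, msm_gem_vma.c hides both address-space flavors behind one map/unmap pair. A usage sketch of the GPU/legacy path (hypothetical caller with error handling trimmed; my_domain and my_sgt are placeholders):

    struct msm_gem_address_space *aspace;
    struct msm_gem_vma vma = {0};
    int ret;

    /* wraps msm_iommu_new() internally and carves iovas out of the
     * domain's aperture with a drm_mm allocator
     */
    aspace = msm_gem_address_space_create(dev, my_domain, "example");
    if (IS_ERR(aspace))
        return PTR_ERR(aspace);

    /* allocates vma.iova and maps the sgt through aspace->mmu */
    ret = msm_gem_map_vma(aspace, &vma, my_sgt, NULL);

    /* ... hand vma.iova to the hardware ... */

    msm_gem_unmap_vma(aspace, &vma, my_sgt, NULL);
    msm_gem_address_space_destroy(aspace);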

drivers/gpu/drm/msm/msm_gpu.c

@@ -649,12 +649,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
      */
     iommu = iommu_domain_alloc(&platform_bus_type);
     if (iommu) {
+        /* TODO 32b vs 64b address space.. */
+        iommu->geometry.aperture_start = 0x1000;
+        iommu->geometry.aperture_end = 0xffffffff;
+
         dev_info(drm->dev, "%s: using IOMMU\n", name);
-        gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-        if (IS_ERR(gpu->mmu)) {
-            ret = PTR_ERR(gpu->mmu);
+        gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+                iommu, "gpu");
+        if (IS_ERR(gpu->aspace)) {
+            ret = PTR_ERR(gpu->aspace);
             dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-            gpu->mmu = NULL;
+            gpu->aspace = NULL;
             iommu_domain_free(iommu);
             goto fail;
         }
@@ -662,7 +667,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
     } else {
         dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
     }
 
-    gpu->id = msm_register_mmu(drm, gpu->mmu);
+    gpu->id = msm_register_address_space(drm, gpu->aspace);
 
     /* Create ringbuffer: */
@@ -697,7 +702,4 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
         msm_gem_put_iova(gpu->rb->bo, gpu->id);
         msm_ringbuffer_destroy(gpu->rb);
     }
-
-    if (gpu->mmu)
-        gpu->mmu->funcs->destroy(gpu->mmu);
 }

drivers/gpu/drm/msm/msm_gpu.h

@@ -95,7 +95,7 @@ struct msm_gpu {
     void __iomem *mmio;
     int irq;
 
-    struct msm_mmu *mmu;
+    struct msm_gem_address_space *aspace;
     int id;
 
     /* Power Control: */

drivers/gpu/drm/msm/msm_mmu.h

@@ -21,7 +21,6 @@
 #include <linux/iommu.h>
 
 struct msm_mmu;
-struct msm_gpu;
 
 enum msm_mmu_domain_type {
     MSM_SMMU_DOMAIN_UNSECURE,
@@ -61,7 +60,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 struct msm_mmu *msm_smmu_new(struct device *dev,
         enum msm_mmu_domain_type domain);

drivers/gpu/drm/msm/sde/sde_kms.c

@@ -941,15 +941,15 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
     int i;
 
     for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
-        if (!sde_kms->mmu[i])
+        mmu = sde_kms->aspace[i]->mmu;
+        if (!mmu)
             continue;
 
-        mmu = sde_kms->mmu[i];
-        msm_unregister_mmu(sde_kms->dev, mmu);
         mmu->funcs->detach(mmu, (const char **)iommu_ports,
                 ARRAY_SIZE(iommu_ports));
-        mmu->funcs->destroy(mmu);
-        sde_kms->mmu[i] = 0;
+        msm_gem_address_space_destroy(sde_kms->aspace[i]);
         sde_kms->mmu_id[i] = 0;
     }
@@ -962,6 +962,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
     int i, ret;
 
     for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+        struct msm_gem_address_space *aspace;
+
         mmu = msm_smmu_new(sde_kms->dev->dev, i);
         if (IS_ERR(mmu)) {
             /* MMU's can be optional depending on platform */
@@ -971,25 +973,35 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
             continue;
         }
 
-        ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-                ARRAY_SIZE(iommu_ports));
-        if (ret) {
-            SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+        aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+                mmu, "sde");
+        if (IS_ERR(aspace)) {
+            ret = PTR_ERR(aspace);
             mmu->funcs->destroy(mmu);
             goto fail;
         }
 
-        sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
+        sde_kms->aspace[i] = aspace;
+
+        ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+                ARRAY_SIZE(iommu_ports));
+        if (ret) {
+            SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+            msm_gem_address_space_destroy(aspace);
+            goto fail;
+        }
+
+        sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
+                aspace);
         if (sde_kms->mmu_id[i] < 0) {
             ret = sde_kms->mmu_id[i];
             SDE_ERROR("failed to register sde iommu %d: %d\n",
                     i, ret);
-            mmu->funcs->detach(mmu, (const char **)iommu_ports,
-                    ARRAY_SIZE(iommu_ports));
+            msm_gem_address_space_destroy(aspace);
            goto fail;
         }
 
-        sde_kms->mmu[i] = mmu;
     }
 
     return 0;

drivers/gpu/drm/msm/sde/sde_kms.h

@@ -22,6 +22,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 #include "sde_dbg.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
@@ -121,7 +122,7 @@ struct sde_kms {
     int core_rev;
     struct sde_mdss_cfg *catalog;
 
-    struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
+    struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
     int mmu_id[MSM_SMMU_DOMAIN_MAX];
 
     struct sde_power_client *core_client;