Merge "msm/drm: Move msm_drm_config configuration into the GPUs"

Authored by Linux Build Service Account, 2017-05-02 09:07:18 -07:00; committed by Gerrit - the friendly Code Review server
commit 02e860bddb
17 changed files with 339 additions and 493 deletions
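
For orientation, the pattern this commit pushes into each GPU backend can be sketched as follows. The field names are taken from the hunks below; the full layout of struct msm_gpu_config in msm_gpu.h is not shown here, so treat this as an illustrative sketch rather than the exact code:

	struct msm_gpu_config cfg = { 0 };

	cfg.ioname   = MSM_GPU_DEFAULT_IONAME;   /* "kgsl_3d0_reg_memory" */
	cfg.irqname  = MSM_GPU_DEFAULT_IRQNAME;  /* "kgsl_3d0_irq" */
	cfg.nr_rings = 1;                        /* a5xx asks for 4 to enable preemption */
	cfg.va_start = 0x300000;                 /* a5xx instead uses the 64-bit TTBR1 range */
	cfg.va_end   = 0xffffffff;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &cfg);

Previously adreno_gpu_init() took only a ring count and derived the rest itself; after this change each of a3xx/a4xx/a5xx supplies its own configuration.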

View file

@ -466,6 +466,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu_config a3xx_config = { 0 };
int ret;
if (!pdev) {
@ -491,7 +492,13 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
a3xx_config.nr_rings = 1;
a3xx_config.va_start = 0x300000;
a3xx_config.va_end = 0xffffffff;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
if (ret)
goto fail;

View file

@ -543,6 +543,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu_config a4xx_config = { 0 };
int ret;
if (!pdev) {
@ -568,7 +569,13 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
a4xx_config.nr_rings = 1;
a4xx_config.va_start = 0x300000;
a4xx_config.va_end = 0xffffffff;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
if (ret)
goto fail;

View file

@ -1368,6 +1368,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_gpu_config a5xx_config = { 0 };
int ret;
if (!pdev) {
@ -1391,7 +1392,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Check the efuses for some configuration */
a5xx_efuses_read(pdev, adreno_gpu);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
/* Set the number of rings to 4 - yay preemption */
a5xx_config.nr_rings = 4;
/*
* Set the user domain range to fall into the TTBR1 region for global
* objects
*/
a5xx_config.va_start = 0x800000000;
a5xx_config.va_end = 0x8ffffffff;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);

View file

@ -405,10 +405,6 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
ring->gpu->name, ring->id);
}
static const char *iommu_ports[] = {
"gfx3d_user",
};
/* Read the set of powerlevels */
static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
@ -524,10 +520,10 @@ static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
const struct adreno_gpu_funcs *funcs,
struct msm_gpu_config *gpu_config)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@ -541,26 +537,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Get the rest of the target configuration from the device tree */
adreno_of_parse(pdev, gpu);
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
adreno_gpu_config.irqname = "kgsl_3d0_irq";
adreno_gpu_config.nr_rings = nr_rings;
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
if (adreno_gpu->revn >= 500) {
/* 5XX targets use a 64 bit region */
adreno_gpu_config.va_start = 0x800000000;
adreno_gpu_config.va_end = 0x8ffffffff;
} else {
adreno_gpu_config.va_start = 0x300000;
adreno_gpu_config.va_end = 0xffffffff;
}
adreno_gpu_config.nr_rings = nr_rings;
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
adreno_gpu->info->name, gpu_config);
if (ret)
return ret;
@ -580,8 +558,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret)
return ret;
}
@ -722,7 +699,7 @@ static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
return ERR_PTR(-ENODEV);
if (groupid >= adreno_gpu->nr_counter_groups)
return ERR_PTR(-EINVAL);
return ERR_PTR(-ENODEV);
return (struct adreno_counter_group *)
adreno_gpu->counter_groups[groupid];
@ -745,7 +722,7 @@ u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
if (!IS_ERR(group) && group->funcs.read)
if (!IS_ERR_OR_NULL(group) && group->funcs.read)
return group->funcs.read(gpu, group, counterid);
return 0;
@ -756,6 +733,6 @@ void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
if (!IS_ERR(group) && group->funcs.put)
if (!IS_ERR_OR_NULL(group) && group->funcs.put)
group->funcs.put(gpu, group, counterid);
}

View file

@ -257,7 +257,7 @@ struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
struct msm_gpu_config *config);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);

View file

@ -184,8 +184,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_destroy(aspace);
}
}
@ -202,8 +201,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
@ -416,10 +414,6 @@ fail:
return ret;
}
static const char *iommu_ports[] = {
"mdp_port0_cb0", "mdp_port1_cb0",
};
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@ -515,15 +509,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
}
config->iommu->geometry.aperture_start = 0x1000;
config->iommu->geometry.aperture_end = 0xffffffff;
aspace = msm_gem_address_space_create(&pdev->dev,
mmu, "mdp4", 0x1000, 0xffffffff);
config->iommu, MSM_IOMMU_DOMAIN_DEFAULT, "mdp4");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
goto fail;
@ -531,8 +521,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret)
goto fail;
} else {

View file

@ -22,10 +22,6 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
static const char *iommu_ports[] = {
"mdp_0",
};
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@ -613,8 +609,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);

View file

@ -413,7 +413,7 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
/* For GPU and legacy display */
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
int type, const char *name);
struct msm_gem_address_space *
msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
uint64_t start, uint64_t end);

View file

@ -25,21 +25,12 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
struct msm_gem_aspace_ops {
int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
struct sg_table *sgt, void *priv, unsigned int flags);
void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
struct sg_table *sgt, void *priv);
void (*destroy)(struct msm_gem_address_space *);
};
struct msm_gem_address_space {
const char *name;
struct msm_mmu *mmu;
const struct msm_gem_aspace_ops *ops;
struct kref kref;
struct drm_mm mm;
u64 va_len;
};
struct msm_gem_vma {

View file

@ -82,13 +82,16 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
if (unlikely(ret)) {
if (copy_from_user_inatomic(&submit_bo, userptr,
sizeof(submit_bo))) {
pagefault_enable();
spin_unlock(&file->table_lock);
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret)
if (copy_from_user(&submit_bo, userptr,
sizeof(submit_bo))) {
ret = -EFAULT;
goto out;
}
spin_lock(&file->table_lock);
pagefault_disable();
}
@ -283,8 +286,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t off;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
if (copy_from_user(&submit_reloc, userptr,
sizeof(submit_reloc)))
return -EFAULT;
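/*
 * Note on the conversion above: copy_from_user() and its inatomic variant
 * return the number of bytes that could not be copied, not an errno, so a
 * nonzero result is now turned into -EFAULT instead of being stored in ret
 * and returned directly as in the earlier code.
 */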
if (submit_reloc.submit_offset % 4) {

View file

@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
if (aspace->ops->destroy)
aspace->ops->destroy(aspace);
if (aspace->va_len)
drm_mm_takedown(&aspace->mm);
aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
}
@ -37,57 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
/* SDE address space operations */
static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv)
{
struct dma_buf *buf = priv;
if (buf)
aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
sgt, buf, DMA_BIDIRECTIONAL);
else
aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
DMA_BIDIRECTIONAL);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv, unsigned int flags)
{
struct dma_buf *buf = priv;
int ret;
if (buf)
ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
DMA_BIDIRECTIONAL);
else
ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
DMA_BIDIRECTIONAL);
if (!ret)
vma->iova = sg_dma_address(sgt->sgl);
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
return ret;
}
static const struct msm_gem_aspace_ops smmu_aspace_ops = {
.map = smmu_aspace_map_vma,
.unmap = smmu_aspace_unmap_vma,
};
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
const char *name)
static struct msm_gem_address_space *
msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
uint64_t start, uint64_t end)
{
struct msm_gem_address_space *aspace;
@ -100,57 +54,28 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
aspace->name = name;
aspace->mmu = mmu;
aspace->ops = &smmu_aspace_ops;
aspace->va_len = end - start;
if (aspace->va_len)
drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
(end >> PAGE_SHIFT) - 1);
kref_init(&aspace->kref);
return aspace;
}
/* GPU address space operations */
struct msm_iommu_aspace {
struct msm_gem_address_space base;
struct drm_mm mm;
};
#define to_iommu_aspace(aspace) \
((struct msm_iommu_aspace *) \
container_of(aspace, struct msm_iommu_aspace, base))
static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
static int allocate_iova(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
u64 *iova)
{
if (!vma->iova)
return;
if (aspace->mmu)
aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
drm_mm_remove_node(&vma->node);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
unsigned int flags)
{
struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
size_t size = 0;
struct scatterlist *sg;
size_t size = 0;
int ret, i;
int iommu_flags = IOMMU_READ;
if (!(flags & MSM_BO_GPU_READONLY))
iommu_flags |= IOMMU_WRITE;
if (flags & MSM_BO_PRIVILEGED)
iommu_flags |= IOMMU_PRIV;
if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(aspace->mmu))
iommu_flags |= IOMMU_CACHE;
if (!aspace->va_len)
return 0;
if (WARN_ON(drm_mm_node_allocated(&vma->node)))
return 0;
@ -158,84 +83,73 @@ static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
for_each_sg(sgt->sgl, sg, sgt->nents, i)
size += sg->length + sg->offset;
ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
0, DRM_MM_SEARCH_DEFAULT);
if (ret)
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
if (aspace->mmu)
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
iommu_flags);
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
if (!ret && iova)
*iova = vma->node.start << PAGE_SHIFT;
return ret;
}
static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
{
struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
drm_mm_takedown(&local->mm);
aspace->mmu->funcs->destroy(aspace->mmu);
}
static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
.map = iommu_aspace_map_vma,
.unmap = iommu_aspace_unmap_vma,
.destroy = iommu_aspace_destroy,
};
static struct msm_gem_address_space *
msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
uint64_t start, uint64_t end)
{
struct msm_iommu_aspace *local;
if (!mmu)
return ERR_PTR(-EINVAL);
local = kzalloc(sizeof(*local), GFP_KERNEL);
if (!local)
return ERR_PTR(-ENOMEM);
drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
(end >> PAGE_SHIFT) - 1);
local->base.name = name;
local->base.mmu = mmu;
local->base.ops = &msm_iommu_aspace_ops;
kref_init(&local->base.kref);
return &local->base;
}
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv, unsigned int flags)
{
if (aspace && aspace->ops->map)
return aspace->ops->map(aspace, vma, sgt, priv, flags);
u64 iova = 0;
int ret;
return -EINVAL;
if (!aspace)
return -EINVAL;
ret = allocate_iova(aspace, vma, sgt, &iova);
if (ret)
return ret;
ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
flags, priv);
if (ret) {
if (drm_mm_node_allocated(&vma->node))
drm_mm_remove_node(&vma->node);
return ret;
}
vma->iova = sg_dma_address(sgt->sgl);
kref_get(&aspace->kref);
return 0;
}
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
{
if (aspace && aspace->ops->unmap)
aspace->ops->unmap(aspace, vma, sgt, priv);
if (!aspace || !vma->iova)
return;
aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
if (drm_mm_node_allocated(&vma->node))
drm_mm_remove_node(&vma->node);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
const char *name)
{
return msm_gem_address_space_new(mmu, name, 0, 0);
}
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
int type, const char *name)
{
struct msm_mmu *mmu = msm_iommu_new(dev, domain);
struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
if (IS_ERR(mmu))
return (struct msm_gem_address_space *) mmu;
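/*
 * Illustrative summary (the values below are examples, not copied from this
 * commit): both address space flavours now share one allocate/map path. An
 * SMMU-backed display aspace is created with a zero-length range, so
 * allocate_iova() is a no-op and the DMA API picks the iova, while a GPU
 * aspace gets a drm_mm range to allocate from:
 *
 *   msm_gem_address_space_new(mmu, "mdp4", 0, 0);                // va_len == 0
 *   msm_gem_address_space_new(mmu, "gpu", 0x300000, 0xffffffff); // drm_mm range
 */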

View file

@ -183,6 +183,9 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
if (gpu->aspace && gpu->aspace->mmu)
msm_mmu_enable(gpu->aspace->mmu);
return 0;
}
@ -203,6 +206,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt < 0))
return -EINVAL;
if (gpu->aspace && gpu->aspace->mmu)
msm_mmu_disable(gpu->aspace->mmu);
ret = disable_axi(gpu);
if (ret)
return ret;
@ -837,7 +843,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
dev_info(drm->dev, "%s: using IOMMU\n", name);
gpu->aspace = msm_gem_address_space_create(&pdev->dev,
iommu, "gpu");
iommu, MSM_IOMMU_DOMAIN_USER, "gpu");
if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);

View file

@ -29,6 +29,9 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
#define MSM_GPU_DEFAULT_IONAME "kgsl_3d0_reg_memory"
#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
struct msm_gpu_config {
const char *ioname;
const char *irqname;
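/*
 * The struct is truncated in this view; a sketch of the members the adreno
 * code above assigns (the types are assumed here, not copied from msm_gpu.h):
 *
 *   struct msm_gpu_config {
 *       const char *ioname;
 *       const char *irqname;
 *       uint64_t va_start;
 *       uint64_t va_end;
 *       unsigned int nr_rings;
 *   };
 */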

View file

@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_iommu.h"
@ -27,31 +28,17 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
return 0;
}
/*
* Get and enable the IOMMU clocks so that we can make
* sure they stay on the entire duration so that we can
* safely change the pagetable from the GPU
*/
static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct device *dev;
struct property *prop;
const char *name;
int i = 0;
if (WARN_ON(!pdev))
return;
dev = &pdev->dev;
iommu->nr_clocks =
of_property_count_strings(dev->of_node, "clock-names");
if (iommu->nr_clocks < 0) {
iommu->nr_clocks = 0;
if (iommu->nr_clocks < 0)
return;
}
if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
@ -60,78 +47,58 @@ static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
if (i == iommu->nr_clocks)
break;
iommu->clocks[i] = clk_get(dev, name);
if (iommu->clocks[i])
clk_prepare_enable(iommu->clocks[i]);
i++;
iommu->clocks[i++] = clk_get(dev, name);
}
}
static int _attach_iommu_device(struct msm_mmu *mmu,
struct iommu_domain *domain, const char **names, int cnt)
{
int i;
/* See if there is a iommus member in the current device. If not, look
* for the names and see if there is one in there.
*/
if (of_find_property(mmu->dev->of_node, "iommus", NULL))
return iommu_attach_device(domain, mmu->dev);
/* Look through the list of names for a target */
for (i = 0; i < cnt; i++) {
struct device_node *node =
of_find_node_by_name(mmu->dev->of_node, names[i]);
if (!node)
continue;
if (of_find_property(node, "iommus", NULL)) {
struct platform_device *pdev;
/* Get the platform device for the node */
of_platform_populate(node->parent, NULL, NULL,
mmu->dev);
pdev = of_find_device_by_node(node);
if (!pdev)
continue;
_get_iommu_clocks(mmu,
of_find_device_by_node(node->parent));
mmu->dev = &pdev->dev;
return iommu_attach_device(domain, mmu->dev);
}
}
dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
return -ENODEV;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int val = 1, ret;
int i;
if (!iommu->nr_clocks)
iommu_get_clocks(iommu, mmu->dev->parent);
for (i = 0; i < iommu->nr_clocks; i++) {
if (iommu->clocks[i])
clk_prepare_enable(iommu->clocks[i]);
}
}
static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int i;
for (i = 0; i < iommu->nr_clocks; i++) {
if (iommu->clocks[i])
clk_disable_unprepare(iommu->clocks[i]);
}
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
}
static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int ret, val = 1;
/* Hope springs eternal */
iommu->allow_dynamic = true;
/* per-instance pagetables need TTBR1 support in the IOMMU driver */
ret = iommu_domain_set_attr(iommu->domain,
DOMAIN_ATTR_ENABLE_TTBR1, &val);
if (ret)
iommu->allow_dynamic = false;
iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
DOMAIN_ATTR_ENABLE_TTBR1, &val) ? true : false;
/* Mark the GPU as I/O coherent if it is supported */
iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
/* Attach the device to the domain */
ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
ret = iommu_attach_device(iommu->domain, mmu->dev);
if (ret)
return ret;
@ -176,17 +143,25 @@ static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
return 0;
}
static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int ret, vmid = VMID_CP_PIXEL;
ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
&vmid);
if (ret)
return ret;
return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int i;
iommu_detach_device(iommu->domain, mmu->dev);
for (i = 0; i < iommu->nr_clocks; i++) {
if (iommu->clocks[i])
clk_disable(iommu->clocks[i]);
}
}
static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
@ -196,69 +171,50 @@ static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, int prot)
struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
uint64_t da = iova;
unsigned int i, j;
int ret;
u32 prot = IOMMU_READ;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
phys_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
if (!(flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
if (flags & MSM_BO_PRIVILEGED)
prot |= IOMMU_PRIV;
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
prot |= IOMMU_CACHE;
da += bytes;
}
/* iommu_map_sg returns the number of bytes mapped */
ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
if (ret)
sgt->sgl->dma_address = iova;
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
return ret ? 0 : -ENOMEM;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt)
static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
uint64_t da = iova;
int i;
size_t len = 0;
int ret, i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
len += sg->length;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;
ret = iommu_unmap(domain, iova, len);
if (ret != len)
dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
return 0;
sgt->sgl->dma_address = 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
@ -268,7 +224,30 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
kfree(iommu);
}
static const struct msm_mmu_funcs funcs = {
static struct device *find_context_bank(const char *name)
{
struct device_node *node = of_find_node_by_name(NULL, name);
struct platform_device *pdev, *parent;
if (!node)
return ERR_PTR(-ENODEV);
if (!of_find_property(node, "iommus", NULL))
return ERR_PTR(-ENODEV);
/* Get the parent device */
parent = of_find_device_by_node(node->parent);
/* Populate the sub nodes */
of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
/* Get the context bank device */
pdev = of_find_device_by_node(node);
return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
}
static const struct msm_mmu_funcs default_funcs = {
.attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
@ -276,6 +255,24 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
static const struct msm_mmu_funcs user_funcs = {
.attach = msm_iommu_attach_user,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
.enable = msm_iommu_clocks_enable,
.disable = msm_iommu_clocks_disable,
};
static const struct msm_mmu_funcs secure_funcs = {
.attach = msm_iommu_attach_secure,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
};
static const struct msm_mmu_funcs dynamic_funcs = {
.attach = msm_iommu_attach_dynamic,
.detach = msm_iommu_detach_dynamic,
@ -284,8 +281,26 @@ static const struct msm_mmu_funcs dynamic_funcs = {
.destroy = msm_iommu_destroy,
};
struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
const struct msm_mmu_funcs *funcs)
static const struct {
const char *cbname;
const struct msm_mmu_funcs *funcs;
} msm_iommu_domains[] = {
[MSM_IOMMU_DOMAIN_DEFAULT] = {
.cbname = NULL,
.funcs = &default_funcs,
},
[MSM_IOMMU_DOMAIN_USER] = {
.cbname = "gfx3d_user",
.funcs = &user_funcs,
},
[MSM_IOMMU_DOMAIN_SECURE] = {
.cbname = "gfx3d_secure",
.funcs = &secure_funcs
},
};
static struct msm_mmu *iommu_create(struct device *dev,
struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@ -299,9 +314,23 @@ struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
return &iommu->base;
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
struct msm_mmu *msm_iommu_new(struct device *parent,
enum msm_iommu_domain_type type, struct iommu_domain *domain)
{
return _msm_iommu_new(dev, domain, &funcs);
struct device *dev = parent;
if (type >= ARRAY_SIZE(msm_iommu_domains) ||
!msm_iommu_domains[type].funcs)
return ERR_PTR(-ENODEV);
if (msm_iommu_domains[type].cbname) {
dev = find_context_bank(msm_iommu_domains[type].cbname);
if (IS_ERR(dev))
return ERR_CAST(dev);
}
return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
}
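/*
 * Usage sketch: the GPU and MDP4 paths in this commit reach this through
 * msm_gem_address_space_create() (see the msm_gpu.c and mdp4_kms.c hunks);
 * the variable names below are illustrative only.
 *
 *   // GPU: resolves the "gfx3d_user" context bank and gets clock-aware funcs
 *   mmu = msm_iommu_new(&pdev->dev, MSM_IOMMU_DOMAIN_USER, domain);
 *
 *   // Legacy display: no context bank lookup, attaches the given device
 *   mmu = msm_iommu_new(&pdev->dev, MSM_IOMMU_DOMAIN_DEFAULT, domain);
 */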
/*
@ -326,7 +355,7 @@ struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
if (!domain)
return ERR_PTR(-ENODEV);
mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
mmu = iommu_create(base->dev, domain, &dynamic_funcs);
if (IS_ERR(mmu)) {
if (domain)

View file

@ -30,21 +30,22 @@ enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_MAX,
};
enum msm_iommu_domain_type {
MSM_IOMMU_DOMAIN_DEFAULT,
MSM_IOMMU_DOMAIN_USER,
MSM_IOMMU_DOMAIN_SECURE,
};
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
struct dma_buf *dma_buf, int dir);
void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
struct dma_buf *dma_buf, int dir);
u32 flags, void *priv);
void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
void *priv);
void (*destroy)(struct msm_mmu *mmu);
void (*enable)(struct msm_mmu *mmu);
void (*disable)(struct msm_mmu *mmu);
};
struct msm_mmu {
@ -59,9 +60,27 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu->funcs = funcs;
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
/* Create a new SDE mmu device */
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
/* Create a new legacy MDP4 or GPU mmu device */
struct msm_mmu *msm_iommu_new(struct device *parent,
enum msm_iommu_domain_type type, struct iommu_domain *domain);
/* Create a new dynamic domain for GPU */
struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
static inline void msm_mmu_enable(struct msm_mmu *mmu)
{
if (mmu->funcs->enable)
mmu->funcs->enable(mmu);
}
static inline void msm_mmu_disable(struct msm_mmu *mmu)
{
if (mmu->funcs->disable)
mmu->funcs->disable(mmu);
}
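/*
 * These wrappers let msm_gpu_pm_suspend()/msm_gpu_pm_resume() (see the
 * msm_gpu.c hunk above) toggle the GPU SMMU clocks without knowing which MMU
 * backend is in use; backends that do not need this simply leave the
 * enable/disable hooks unset.
 */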
#endif /* __MSM_MMU_H__ */

View file

@ -105,106 +105,34 @@ static void msm_smmu_detach(struct msm_mmu *mmu)
}
static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
uint64_t da = iova;
unsigned int i, j;
int ret;
if (!client)
return -ENODEV;
domain = client->mmu_mapping->domain;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
da += bytes;
}
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir)
struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int ret;
ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
if (ret != sgt->nents)
return -ENOMEM;
if (priv)
ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL, priv);
else
ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL);
return 0;
return (ret != sgt->nents) ? -ENOMEM : 0;
}
static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir)
static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
uint64_t da = iova;
int i;
if (!client)
return -ENODEV;
domain = client->mmu_mapping->domain;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;
VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
WARN_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
return 0;
if (priv)
msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL, priv);
else
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL);
}
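/*
 * In the consolidated map/unmap callbacks the priv argument carries the
 * dma_buf of an imported buffer (or NULL for locally allocated pages), which
 * is what the removed map_dma_buf()/unmap_dma_buf() hooks used to receive.
 * Illustrative GEM-layer call (names assumed):
 *
 *   msm_gem_map_vma(aspace, vma, sgt, dma_buf, flags);  // dma_buf may be NULL
 */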
static void msm_smmu_destroy(struct msm_mmu *mmu)
@ -217,42 +145,11 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
struct dma_buf *dma_buf, int dir)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int ret;
ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
dma_buf);
if (ret != sgt->nents) {
DRM_ERROR("dma map sg failed\n");
return -ENOMEM;
}
return 0;
}
static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
struct dma_buf *dma_buf, int dir)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
}
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
.map_sg = msm_smmu_map_sg,
.unmap_sg = msm_smmu_unmap_sg,
.unmap = msm_smmu_unmap,
.map_dma_buf = msm_smmu_map_dma_buf,
.unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
};

View file

@ -41,10 +41,6 @@
#define CREATE_TRACE_POINTS
#include "sde_trace.h"
static const char * const iommu_ports[] = {
"mdp_0",
};
/**
* Controls size of event log buffer. Specified as a power of 2.
*/
@ -1077,8 +1073,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
sde_kms->aspace[i] = aspace;
ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret) {
SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
msm_gem_address_space_put(aspace);