Merge "msm/drm: Move msm_drm_config configuration into the GPUs"
This commit is contained in:
commit
02e860bddb
17 changed files with 339 additions and 493 deletions
|
@@ -466,6 +466,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
+	struct msm_gpu_config a3xx_config = { 0 };
 	int ret;
 
 	if (!pdev) {
@@ -491,7 +492,13 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	adreno_gpu->registers = a3xx_registers;
 	adreno_gpu->reg_offsets = a3xx_register_offsets;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+	a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+	a3xx_config.nr_rings = 1;
+	a3xx_config.va_start = 0x300000;
+	a3xx_config.va_end = 0xffffffff;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
 	if (ret)
 		goto fail;
 
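Note: after this series each target describes itself instead of adreno_gpu_init() guessing from the chip revision. A minimal sketch of what a hypothetical new target's init would set; only the field names come from this patch, the values below are illustrative:

    struct msm_gpu_config anew_config = { 0 };

    anew_config.ioname = MSM_GPU_DEFAULT_IONAME;   /* "kgsl_3d0_reg_memory" */
    anew_config.irqname = MSM_GPU_DEFAULT_IRQNAME; /* "kgsl_3d0_irq" */
    anew_config.nr_rings = 1;                      /* single ring, no preemption */
    anew_config.va_start = 0x300000;               /* 32-bit GPU VA window */
    anew_config.va_end = 0xffffffff;

    ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &anew_config);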
@@ -543,6 +543,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
+	struct msm_gpu_config a4xx_config = { 0 };
 	int ret;
 
 	if (!pdev) {
@@ -568,7 +569,13 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	adreno_gpu->registers = a4xx_registers;
 	adreno_gpu->reg_offsets = a4xx_register_offsets;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+	a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+	a4xx_config.nr_rings = 1;
+	a4xx_config.va_start = 0x300000;
+	a4xx_config.va_end = 0xffffffff;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
 	if (ret)
 		goto fail;
 
@@ -1368,6 +1368,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	struct a5xx_gpu *a5xx_gpu = NULL;
 	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
+	struct msm_gpu_config a5xx_config = { 0 };
 	int ret;
 
 	if (!pdev) {
@@ -1391,7 +1392,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	/* Check the efuses for some configuration */
 	a5xx_efuses_read(pdev, adreno_gpu);
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+	a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+
+	/* Set the number of rings to 4 - yay preemption */
+	a5xx_config.nr_rings = 4;
+
+	/*
+	 * Set the user domain range to fall into the TTBR1 region for global
+	 * objects
+	 */
+	a5xx_config.va_start = 0x800000000;
+	a5xx_config.va_end = 0x8ffffffff;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
 	if (ret) {
 		a5xx_destroy(&(a5xx_gpu->base.base));
 		return ERR_PTR(ret);
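Note: the a5xx window above is a 4GB region that starts at the 32GB mark, entirely above the 32-bit boundary, so user objects land in the TTBR1-covered part of the address space. A standalone sanity check of that arithmetic (mine, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t start = 0x800000000ULL; /* a5xx_config.va_start */
        uint64_t end   = 0x8ffffffffULL; /* a5xx_config.va_end, inclusive */

        assert(start == 1ULL << 35);            /* 32GB */
        assert(end - start + 1 == 1ULL << 32);  /* 4GB window */
        return 0;
    }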
@@ -405,10 +405,6 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
 			ring->gpu->name, ring->id);
 }
 
-static const char *iommu_ports[] = {
-	"gfx3d_user",
-};
-
 /* Read the set of powerlevels */
 static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
 {
@@ -524,10 +520,10 @@ static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *adreno_gpu,
-		const struct adreno_gpu_funcs *funcs, int nr_rings)
+		const struct adreno_gpu_funcs *funcs,
+		struct msm_gpu_config *gpu_config)
 {
 	struct adreno_platform_config *config = pdev->dev.platform_data;
-	struct msm_gpu_config adreno_gpu_config = { 0 };
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct msm_mmu *mmu;
 	int ret;
@@ -541,26 +537,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	/* Get the rest of the target configuration from the device tree */
 	adreno_of_parse(pdev, gpu);
 
-	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
-	adreno_gpu_config.irqname = "kgsl_3d0_irq";
-	adreno_gpu_config.nr_rings = nr_rings;
-
-	adreno_gpu_config.va_start = SZ_16M;
-	adreno_gpu_config.va_end = 0xffffffff;
-
-	if (adreno_gpu->revn >= 500) {
-		/* 5XX targets use a 64 bit region */
-		adreno_gpu_config.va_start = 0x800000000;
-		adreno_gpu_config.va_end = 0x8ffffffff;
-	} else {
-		adreno_gpu_config.va_start = 0x300000;
-		adreno_gpu_config.va_end = 0xffffffff;
-	}
-
-	adreno_gpu_config.nr_rings = nr_rings;
-
 	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-		adreno_gpu->info->name, &adreno_gpu_config);
+		adreno_gpu->info->name, gpu_config);
 	if (ret)
 		return ret;
 
@@ -580,8 +558,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	mmu = gpu->aspace->mmu;
 	if (mmu) {
-		ret = mmu->funcs->attach(mmu, iommu_ports,
-			ARRAY_SIZE(iommu_ports));
+		ret = mmu->funcs->attach(mmu, NULL, 0);
 		if (ret)
 			return ret;
 	}
@@ -722,7 +699,7 @@ static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
 		return ERR_PTR(-ENODEV);
 
 	if (groupid >= adreno_gpu->nr_counter_groups)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-ENODEV);
 
 	return (struct adreno_counter_group *)
 		adreno_gpu->counter_groups[groupid];
@@ -745,7 +722,7 @@ u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
 	struct adreno_counter_group *group =
 		get_counter_group(gpu, groupid);
 
-	if (!IS_ERR(group) && group->funcs.read)
+	if (!IS_ERR_OR_NULL(group) && group->funcs.read)
 		return group->funcs.read(gpu, group, counterid);
 
 	return 0;
@@ -756,6 +733,6 @@ void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
 	struct adreno_counter_group *group =
 		get_counter_group(gpu, groupid);
 
-	if (!IS_ERR(group) && group->funcs.put)
+	if (!IS_ERR_OR_NULL(group) && group->funcs.put)
 		group->funcs.put(gpu, group, counterid);
 }
@@ -257,7 +257,7 @@ struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
-		int nr_rings);
+		struct msm_gpu_config *config);
 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
 
 void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
@@ -184,8 +184,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
 
 	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu,
-			iommu_ports, ARRAY_SIZE(iommu_ports));
+		aspace->mmu->funcs->detach(aspace->mmu);
 		msm_gem_address_space_destroy(aspace);
 	}
 }
@@ -202,8 +201,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
 
 	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu,
-			iommu_ports, ARRAY_SIZE(iommu_ports));
+		aspace->mmu->funcs->detach(aspace->mmu);
 		msm_gem_address_space_put(aspace);
 	}
 
@@ -416,10 +414,6 @@ fail:
 	return ret;
 }
 
-static const char *iommu_ports[] = {
-	"mdp_port0_cb0", "mdp_port1_cb0",
-};
-
 struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = dev->platformdev;
@@ -515,15 +509,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
-		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
-
-		if (IS_ERR(mmu)) {
-			ret = PTR_ERR(mmu);
-			goto fail;
-		}
+		config->iommu->geometry.aperture_start = 0x1000;
+		config->iommu->geometry.aperture_end = 0xffffffff;
 
 		aspace = msm_gem_address_space_create(&pdev->dev,
-			mmu, "mdp4", 0x1000, 0xffffffff);
+			config->iommu, MSM_IOMMU_DOMAIN_DEFAULT, "mdp4");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
 			goto fail;
@@ -531,8 +521,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 
 		mdp4_kms->aspace = aspace;
 
-		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
-			ARRAY_SIZE(iommu_ports));
+		ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
 		if (ret)
 			goto fail;
 	} else {
@@ -22,10 +22,6 @@
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
-static const char *iommu_ports[] = {
-	"mdp_0",
-};
-
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -613,8 +609,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 
 	mdp5_kms->aspace = aspace;
 
-	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
-		ARRAY_SIZE(iommu_ports));
+	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to attach iommu: %d\n",
 			ret);
@@ -413,7 +413,7 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 /* For GPU and legacy display */
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name);
+		int type, const char *name);
 struct msm_gem_address_space *
 msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
 		uint64_t start, uint64_t end);
@@ -25,21 +25,12 @@
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
 
-struct msm_gem_aspace_ops {
-	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
-		struct sg_table *sgt, void *priv, unsigned int flags);
-
-	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
-		struct sg_table *sgt, void *priv);
-
-	void (*destroy)(struct msm_gem_address_space *);
-};
-
 struct msm_gem_address_space {
 	const char *name;
 	struct msm_mmu *mmu;
-	const struct msm_gem_aspace_ops *ops;
 	struct kref kref;
+	struct drm_mm mm;
+	u64 va_len;
 };
 
 struct msm_gem_vma {
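Note: with the per-backend ops table gone, every address space embeds a drm_mm range allocator directly, and va_len doubles as the "is there a managed VA range?" flag (the SMMU path creates its space with start == end == 0, so va_len is 0 and the DMA API picks the addresses). For reference, the drm_mm calls the rest of this series relies on, collected in one sketch (4.x-era drm_mm API, exactly as used in the hunks below):

    drm_mm_init(&aspace->mm, start >> PAGE_SHIFT,
            (end >> PAGE_SHIFT) - 1);                      /* set up range   */

    ret = drm_mm_insert_node(&aspace->mm, &vma->node,
            size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_DEFAULT); /* allocate       */

    if (drm_mm_node_allocated(&vma->node))
            drm_mm_remove_node(&vma->node);                /* free one node  */

    drm_mm_takedown(&aspace->mm);     /* tear down; assumes nodes removed */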
@@ -82,13 +82,16 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		void __user *userptr =
 			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
-		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
-		if (unlikely(ret)) {
+		if (copy_from_user_inatomic(&submit_bo, userptr,
+			sizeof(submit_bo))) {
 			pagefault_enable();
 			spin_unlock(&file->table_lock);
-			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-			if (ret)
+			if (copy_from_user(&submit_bo, userptr,
+				sizeof(submit_bo))) {
+				ret = -EFAULT;
 				goto out;
+			}
 
 			spin_lock(&file->table_lock);
 			pagefault_disable();
 		}
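Note: this is the usual optimistic-copy idiom: try copy_from_user_inatomic() while holding the spinlock with page faults disabled; if the user page is not resident, drop the lock, repeat with the sleeping copy_from_user(), then retake the lock. The rewrite also fixes the error value: both copy routines return the number of bytes *not* copied rather than an errno, so the old code could hand a positive byte count back to the caller; the new code maps any failure to -EFAULT. Condensed shape of the idiom (identifiers from the hunk, surrounding loop omitted):

    if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
            /* fault required: drop the lock so the copy may sleep */
            pagefault_enable();
            spin_unlock(&file->table_lock);
            if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
                    ret = -EFAULT;
                    goto out;
            }
            spin_lock(&file->table_lock);
            pagefault_disable();
    }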
@@ -283,8 +286,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj
 		uint32_t off;
 		bool valid;
 
-		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
-		if (ret)
+		if (copy_from_user(&submit_reloc, userptr,
+			sizeof(submit_reloc)))
 			return -EFAULT;
 
 		if (submit_reloc.submit_offset % 4) {
@@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
 	struct msm_gem_address_space *aspace = container_of(kref,
 		struct msm_gem_address_space, kref);
 
-	if (aspace->ops->destroy)
-		aspace->ops->destroy(aspace);
+	if (aspace->va_len)
+		drm_mm_takedown(&aspace->mm);
+
+	aspace->mmu->funcs->destroy(aspace->mmu);
 
 	kfree(aspace);
 }
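Note: destruction is reference-driven: the release callback above only runs when the last user drops its reference, and each mapped vma holds one (msm_gem_map_vma() takes it, msm_gem_unmap_vma() drops it, both later in this diff). The kref lifecycle in one sketch:

    kref_init(&aspace->kref);                /* create: count = 1            */
    kref_get(&aspace->kref);                 /* map_vma: one ref per mapping */
    kref_put(&aspace->kref,
             msm_gem_address_space_destroy); /* unmap/put: release at zero   */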
@@ -37,57 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 	kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-/* SDE address space operations */
-static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		void *priv)
-{
-	struct dma_buf *buf = priv;
-
-	if (buf)
-		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
-			sgt, buf, DMA_BIDIRECTIONAL);
-	else
-		aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
-			DMA_BIDIRECTIONAL);
-
-	vma->iova = 0;
-
-	msm_gem_address_space_put(aspace);
-}
-
-
-static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		void *priv, unsigned int flags)
-{
-	struct dma_buf *buf = priv;
-	int ret;
-
-	if (buf)
-		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
-			DMA_BIDIRECTIONAL);
-	else
-		ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
-			DMA_BIDIRECTIONAL);
-
-	if (!ret)
-		vma->iova = sg_dma_address(sgt->sgl);
-
-	/* Get a reference to the aspace to keep it around */
-	kref_get(&aspace->kref);
-
-	return ret;
-}
-
-static const struct msm_gem_aspace_ops smmu_aspace_ops = {
-	.map = smmu_aspace_map_vma,
-	.unmap = smmu_aspace_unmap_vma,
-};
-
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
-		const char *name)
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+		uint64_t start, uint64_t end)
 {
 	struct msm_gem_address_space *aspace;
 
@@ -100,57 +54,28 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
 
 	aspace->name = name;
 	aspace->mmu = mmu;
-	aspace->ops = &smmu_aspace_ops;
+	aspace->va_len = end - start;
+
+	if (aspace->va_len)
+		drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+			(end >> PAGE_SHIFT) - 1);
 
 	kref_init(&aspace->kref);
 
 	return aspace;
 }
 
-/* GPU address space operations */
-struct msm_iommu_aspace {
-	struct msm_gem_address_space base;
-	struct drm_mm mm;
-};
-
-#define to_iommu_aspace(aspace) \
-	((struct msm_iommu_aspace *) \
-	container_of(aspace, struct msm_iommu_aspace, base))
-
-static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+static int allocate_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		u64 *iova)
 {
-	if (!vma->iova)
-		return;
-
-	if (aspace->mmu)
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
-
-	drm_mm_remove_node(&vma->node);
-
-	vma->iova = 0;
-
-	msm_gem_address_space_put(aspace);
-}
-
-static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
-		unsigned int flags)
-{
-	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
-	size_t size = 0;
 	struct scatterlist *sg;
+	size_t size = 0;
 	int ret, i;
-	int iommu_flags = IOMMU_READ;
-
-	if (!(flags & MSM_BO_GPU_READONLY))
-		iommu_flags |= IOMMU_WRITE;
-
-	if (flags & MSM_BO_PRIVILEGED)
-		iommu_flags |= IOMMU_PRIV;
-
-	if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(aspace->mmu))
-		iommu_flags |= IOMMU_CACHE;
+
+	if (!aspace->va_len)
+		return 0;
 
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
@@ -158,84 +83,73 @@ static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
 	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 		size += sg->length + sg->offset;
 
-	ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
 		0, DRM_MM_SEARCH_DEFAULT);
-	if (ret)
-		return ret;
-
-	vma->iova = vma->node.start << PAGE_SHIFT;
-
-	if (aspace->mmu)
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-			iommu_flags);
-
-	/* Get a reference to the aspace to keep it around */
-	kref_get(&aspace->kref);
+	if (!ret && iova)
+		*iova = vma->node.start << PAGE_SHIFT;
 
 	return ret;
 }
 
-static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
-{
-	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
-
-	drm_mm_takedown(&local->mm);
-	aspace->mmu->funcs->destroy(aspace->mmu);
-}
-
-static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
-	.map = iommu_aspace_map_vma,
-	.unmap = iommu_aspace_unmap_vma,
-	.destroy = iommu_aspace_destroy,
-};
-
-static struct msm_gem_address_space *
-msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
-		uint64_t start, uint64_t end)
-{
-	struct msm_iommu_aspace *local;
-
-	if (!mmu)
-		return ERR_PTR(-EINVAL);
-
-	local = kzalloc(sizeof(*local), GFP_KERNEL);
-	if (!local)
-		return ERR_PTR(-ENOMEM);
-
-	drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
-		(end >> PAGE_SHIFT) - 1);
-
-	local->base.name = name;
-	local->base.mmu = mmu;
-	local->base.ops = &msm_iommu_aspace_ops;
-
-	kref_init(&local->base.kref);
-
-	return &local->base;
-}
-
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		void *priv, unsigned int flags)
 {
-	if (aspace && aspace->ops->map)
-		return aspace->ops->map(aspace, vma, sgt, priv, flags);
+	u64 iova = 0;
+	int ret;
 
-	return -EINVAL;
+	if (!aspace)
+		return -EINVAL;
+
+	ret = allocate_iova(aspace, vma, sgt, &iova);
+	if (ret)
+		return ret;
+
+	ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+		flags, priv);
+
+	if (ret) {
+		if (drm_mm_node_allocated(&vma->node))
+			drm_mm_remove_node(&vma->node);
+
+		return ret;
+	}
+
+	vma->iova = sg_dma_address(sgt->sgl);
+	kref_get(&aspace->kref);
+
+	return 0;
 }
 
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
 {
-	if (aspace && aspace->ops->unmap)
-		aspace->ops->unmap(aspace, vma, sgt, priv);
+	if (!aspace || !vma->iova)
+		return;
+
+	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+	if (drm_mm_node_allocated(&vma->node))
+		drm_mm_remove_node(&vma->node);
+
+	vma->iova = 0;
+
+	msm_gem_address_space_put(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	return msm_gem_address_space_new(mmu, name, 0, 0);
 }
 
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name)
+		int type, const char *name)
 {
-	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+	struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
 
 	if (IS_ERR(mmu))
 		return (struct msm_gem_address_space *) mmu;
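Note: the common msm_gem_map_vma() now sequences both backends the same way: reserve an iova (a no-op for va_len == 0 spaces), let the backend map, and unwind the drm_mm node if the map fails. Both backends also report the final address through the same channel, sgt->sgl->dma_address: the IOMMU path stores the reserved iova there itself (see the msm_iommu.c hunk later in this diff) and the SMMU path gets it from the DMA API, which is why the core can end with a single:

    vma->iova = sg_dma_address(sgt->sgl);   /* works for either backend */
    kref_get(&aspace->kref);                /* the mapping pins the aspace */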
@@ -183,6 +183,9 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	if (gpu->aspace && gpu->aspace->mmu)
+		msm_mmu_enable(gpu->aspace->mmu);
+
 	return 0;
 }
 
@@ -203,6 +206,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 	if (WARN_ON(gpu->active_cnt < 0))
 		return -EINVAL;
 
+	if (gpu->aspace && gpu->aspace->mmu)
+		msm_mmu_disable(gpu->aspace->mmu);
+
 	ret = disable_axi(gpu);
 	if (ret)
 		return ret;
@@ -837,7 +843,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
 		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
-			iommu, "gpu");
+			iommu, MSM_IOMMU_DOMAIN_USER, "gpu");
 		if (IS_ERR(gpu->aspace)) {
 			ret = PTR_ERR(gpu->aspace);
 			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
@@ -29,6 +29,9 @@
 struct msm_gem_submit;
 struct msm_gpu_perfcntr;
 
+#define MSM_GPU_DEFAULT_IONAME "kgsl_3d0_reg_memory"
+#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
+
 struct msm_gpu_config {
 	const char *ioname;
 	const char *irqname;
@@ -17,6 +17,7 @@
 
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
+#include <soc/qcom/secure_buffer.h>
 #include "msm_drv.h"
 #include "msm_iommu.h"
 
@@ -27,31 +28,17 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
 	return 0;
 }
 
-/*
- * Get and enable the IOMMU clocks so that we can make
- * sure they stay on the entire duration so that we can
- * safely change the pagetable from the GPU
- */
-static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
 {
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct device *dev;
 	struct property *prop;
 	const char *name;
 	int i = 0;
 
-	if (WARN_ON(!pdev))
-		return;
-
-	dev = &pdev->dev;
-
 	iommu->nr_clocks =
 		of_property_count_strings(dev->of_node, "clock-names");
 
-	if (iommu->nr_clocks < 0) {
-		iommu->nr_clocks = 0;
+	if (iommu->nr_clocks < 0)
 		return;
-	}
 
 	if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
 		iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
@@ -60,78 +47,58 @@ static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
 		if (i == iommu->nr_clocks)
 			break;
 
-		iommu->clocks[i] = clk_get(dev, name);
-		if (iommu->clocks[i])
-			clk_prepare_enable(iommu->clocks[i]);
-
-		i++;
+		iommu->clocks[i++] = clk_get(dev, name);
 	}
 }
 
-static int _attach_iommu_device(struct msm_mmu *mmu,
-	struct iommu_domain *domain, const char **names, int cnt)
-{
-	int i;
-
-	/* See if there is a iommus member in the current device. If not, look
-	 * for the names and see if there is one in there.
-	 */
-
-	if (of_find_property(mmu->dev->of_node, "iommus", NULL))
-		return iommu_attach_device(domain, mmu->dev);
-
-	/* Look through the list of names for a target */
-	for (i = 0; i < cnt; i++) {
-		struct device_node *node =
-			of_find_node_by_name(mmu->dev->of_node, names[i]);
-
-		if (!node)
-			continue;
-
-		if (of_find_property(node, "iommus", NULL)) {
-			struct platform_device *pdev;
-
-			/* Get the platform device for the node */
-			of_platform_populate(node->parent, NULL, NULL,
-				mmu->dev);
-
-			pdev = of_find_device_by_node(node);
-
-			if (!pdev)
-				continue;
-
-			_get_iommu_clocks(mmu,
-				of_find_device_by_node(node->parent));
-
-			mmu->dev = &pdev->dev;
-
-			return iommu_attach_device(domain, mmu->dev);
-		}
-	}
-
-	dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
-	return -ENODEV;
-}
-
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int val = 1, ret;
+	int i;
+
+	if (!iommu->nr_clocks)
+		iommu_get_clocks(iommu, mmu->dev->parent);
+
+	for (i = 0; i < iommu->nr_clocks; i++) {
+		if (iommu->clocks[i])
+			clk_prepare_enable(iommu->clocks[i]);
+	}
+}
+
+static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int i;
+
+	for (i = 0; i < iommu->nr_clocks; i++) {
+		if (iommu->clocks[i])
+			clk_disable_unprepare(iommu->clocks[i]);
+	}
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
+static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int ret, val = 1;
 
 	/* Hope springs eternal */
-	iommu->allow_dynamic = true;
-
-	/* per-instance pagetables need TTBR1 support in the IOMMU driver */
-	ret = iommu_domain_set_attr(iommu->domain,
-		DOMAIN_ATTR_ENABLE_TTBR1, &val);
-	if (ret)
-		iommu->allow_dynamic = false;
+	iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
+		DOMAIN_ATTR_ENABLE_TTBR1, &val) ? true : false;
 
 	/* Mark the GPU as I/O coherent if it is supported */
 	iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
 
-	/* Attach the device to the domain */
-	ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+	ret = iommu_attach_device(iommu->domain, mmu->dev);
 	if (ret)
 		return ret;
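Note: clock handling moves from attach time to the new enable/disable ops, with balanced clk_prepare_enable()/clk_disable_unprepare() pairs, and the clock list is now discovered lazily from the parent device's "clock-names" property. The enclosing loop header sits outside the hunk; it is presumably the stock OF helper macro, something like:

    /* assumed context for the loop body above; of_property_for_each_string
     * is the standard kernel macro, prop and name are its iteration cursors
     */
    of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
            if (i == iommu->nr_clocks)
                    break;

            iommu->clocks[i++] = clk_get(dev, name);
    }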
@@ -176,17 +143,25 @@ static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
 	return 0;
 }
 
+static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
+		int cnt)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int ret, vmid = VMID_CP_PIXEL;
+
+	ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
+		&vmid);
+	if (ret)
+		return ret;
+
+	return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
 static void msm_iommu_detach(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i;
 
 	iommu_detach_device(iommu->domain, mmu->dev);
-
-	for (i = 0; i < iommu->nr_clocks; i++) {
-		if (iommu->clocks[i])
-			clk_disable(iommu->clocks[i]);
-	}
 }
 
 static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
@@ -196,69 +171,50 @@ static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, int prot)
+		struct sg_table *sgt, u32 flags, void *priv)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	uint64_t da = iova;
-	unsigned int i, j;
 	int ret;
+	u32 prot = IOMMU_READ;
 
 	if (!domain || !sgt)
 		return -EINVAL;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		phys_addr_t pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
-
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
-
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
-}
-
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt)
+	if (!(flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;
+
+	if (flags & MSM_BO_PRIVILEGED)
+		prot |= IOMMU_PRIV;
+
+	if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
+		prot |= IOMMU_CACHE;
+
+	/* iommu_map_sg returns the number of bytes mapped */
+	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
+	if (ret)
+		sgt->sgl->dma_address = iova;
+
+	return ret ? 0 : -ENOMEM;
+}
+
+static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, void *priv)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
 	struct scatterlist *sg;
-	uint64_t da = iova;
-	int i;
+	size_t len = 0;
+	int ret, i;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			return unmapped;
-
-		VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
-
-	return 0;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		len += sg->length;
+
+	ret = iommu_unmap(domain, iova, len);
+	if (ret != len)
+		dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
+
+	sgt->sgl->dma_address = 0;
 }
 
 static void msm_iommu_destroy(struct msm_mmu *mmu)
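Note: the two IOMMU entry points have different return conventions, which explains the odd-looking tail of the new msm_iommu_map(): iommu_map() returns 0 or a negative errno, but iommu_map_sg() returns the number of bytes it managed to map, with 0 meaning failure. Hence:

    /* iommu_map_sg() returns bytes mapped, not an errno (identifiers from
     * the hunk above)
     */
    ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);

    return ret ? 0 : -ENOMEM;   /* translate "0 bytes" into an error code */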
@@ -268,7 +224,30 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 	kfree(iommu);
 }
 
-static const struct msm_mmu_funcs funcs = {
+static struct device *find_context_bank(const char *name)
+{
+	struct device_node *node = of_find_node_by_name(NULL, name);
+	struct platform_device *pdev, *parent;
+
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	if (!of_find_property(node, "iommus", NULL))
+		return ERR_PTR(-ENODEV);
+
+	/* Get the parent device */
+	parent = of_find_device_by_node(node->parent);
+
+	/* Populate the sub nodes */
+	of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
+
+	/* Get the context bank device */
+	pdev = of_find_device_by_node(node);
+
+	return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
+}
+
+static const struct msm_mmu_funcs default_funcs = {
 	.attach = msm_iommu_attach,
 	.detach = msm_iommu_detach,
 	.map = msm_iommu_map,
@@ -276,6 +255,24 @@ static const struct msm_mmu_funcs funcs = {
 	.destroy = msm_iommu_destroy,
 };
 
+static const struct msm_mmu_funcs user_funcs = {
+	.attach = msm_iommu_attach_user,
+	.detach = msm_iommu_detach,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+	.destroy = msm_iommu_destroy,
+	.enable = msm_iommu_clocks_enable,
+	.disable = msm_iommu_clocks_disable,
+};
+
+static const struct msm_mmu_funcs secure_funcs = {
+	.attach = msm_iommu_attach_secure,
+	.detach = msm_iommu_detach,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+	.destroy = msm_iommu_destroy,
+};
+
 static const struct msm_mmu_funcs dynamic_funcs = {
 	.attach = msm_iommu_attach_dynamic,
 	.detach = msm_iommu_detach_dynamic,
@@ -284,8 +281,26 @@ static const struct msm_mmu_funcs dynamic_funcs = {
 	.destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
-	const struct msm_mmu_funcs *funcs)
+static const struct {
+	const char *cbname;
+	const struct msm_mmu_funcs *funcs;
+} msm_iommu_domains[] = {
+	[MSM_IOMMU_DOMAIN_DEFAULT] = {
+		.cbname = NULL,
+		.funcs = &default_funcs,
+	},
+	[MSM_IOMMU_DOMAIN_USER] = {
+		.cbname = "gfx3d_user",
+		.funcs = &user_funcs,
+	},
+	[MSM_IOMMU_DOMAIN_SECURE] = {
+		.cbname = "gfx3d_secure",
+		.funcs = &secure_funcs
+	},
+};
+
+static struct msm_mmu *iommu_create(struct device *dev,
+	struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
 {
 	struct msm_iommu *iommu;
 
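Note: domain selection is now table-driven: the caller names a type and the table supplies both the ops vector and, where one is needed, the device-tree context bank to bind to. Usage from elsewhere in this series (the msm_gpu.c hunk earlier passes MSM_IOMMU_DOMAIN_USER, which makes msm_iommu_new() resolve the "gfx3d_user" context bank via find_context_bank()); a sketch, assuming an int-returning caller and an existing iommu_domain:

    struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
            MSM_IOMMU_DOMAIN_USER, domain);
    if (IS_ERR(mmu))
            return PTR_ERR(mmu);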
@@ -299,9 +314,23 @@ struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
 
 	return &iommu->base;
 }
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+
+struct msm_mmu *msm_iommu_new(struct device *parent,
+	enum msm_iommu_domain_type type, struct iommu_domain *domain)
 {
-	return _msm_iommu_new(dev, domain, &funcs);
+	struct device *dev = parent;
+
+	if (type >= ARRAY_SIZE(msm_iommu_domains) ||
+		!msm_iommu_domains[type].funcs)
+		return ERR_PTR(-ENODEV);
+
+	if (msm_iommu_domains[type].cbname) {
+		dev = find_context_bank(msm_iommu_domains[type].cbname);
+		if (IS_ERR(dev))
+			return ERR_CAST(dev);
+	}
+
+	return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
 }
 
 /*
@@ -326,7 +355,7 @@ struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
 	if (!domain)
 		return ERR_PTR(-ENODEV);
 
-	mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+	mmu = iommu_create(base->dev, domain, &dynamic_funcs);
 
 	if (IS_ERR(mmu)) {
 		if (domain)
@@ -30,21 +30,22 @@ enum msm_mmu_domain_type {
 	MSM_SMMU_DOMAIN_MAX,
 };
 
+enum msm_iommu_domain_type {
+	MSM_IOMMU_DOMAIN_DEFAULT,
+	MSM_IOMMU_DOMAIN_USER,
+	MSM_IOMMU_DOMAIN_SECURE,
+};
+
 struct msm_mmu_funcs {
 	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
 	void (*detach)(struct msm_mmu *mmu);
 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-			int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
-	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
-			enum dma_data_direction dir);
-	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
-			enum dma_data_direction dir);
-	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir);
-	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir);
+			u32 flags, void *priv);
+	void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+			void *priv);
 	void (*destroy)(struct msm_mmu *mmu);
+	void (*enable)(struct msm_mmu *mmu);
+	void (*disable)(struct msm_mmu *mmu);
 };
 
 struct msm_mmu {
@@ -59,9 +60,27 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+/* Create a new SDE mmu device */
 struct msm_mmu *msm_smmu_new(struct device *dev,
 		enum msm_mmu_domain_type domain);
+
+/* Create a new legacy MDP4 or GPU mmu device */
+struct msm_mmu *msm_iommu_new(struct device *parent,
+		enum msm_iommu_domain_type type, struct iommu_domain *domain);
+
+/* Create a new dynamic domain for GPU */
 struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
 
+static inline void msm_mmu_enable(struct msm_mmu *mmu)
+{
+	if (mmu->funcs->enable)
+		mmu->funcs->enable(mmu);
+}
+
+static inline void msm_mmu_disable(struct msm_mmu *mmu)
+{
+	if (mmu->funcs->disable)
+		mmu->funcs->disable(mmu);
+}
+
 #endif /* __MSM_MMU_H__ */
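Note: enable and disable are deliberately optional ops, and the static inline wrappers fold the NULL check into the call site, so only backends that actually own resources (the user-domain IOMMU with its clock list) implement them. Callers stay unconditional, as in the msm_gpu.c hunks earlier in this diff:

    /* resume path: safe even for backends without an enable op */
    if (gpu->aspace && gpu->aspace->mmu)
            msm_mmu_enable(gpu->aspace->mmu);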
@@ -105,106 +105,34 @@ static void msm_smmu_detach(struct msm_mmu *mmu)
 }
 
 static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, int prot)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	struct iommu_domain *domain;
-	struct scatterlist *sg;
-	uint64_t da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!client)
-		return -ENODEV;
-
-	domain = client->mmu_mapping->domain;
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		u32 pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
-
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
-
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
-}
-
-static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
-		enum dma_data_direction dir)
+		struct sg_table *sgt, u32 flags, void *priv)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 	int ret;
 
-	ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
-	if (ret != sgt->nents)
-		return -ENOMEM;
+	if (priv)
+		ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL, priv);
+	else
+		ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL);
 
-	return 0;
+	return (ret != sgt->nents) ? -ENOMEM : 0;
 }
 
-static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
-		enum dma_data_direction dir)
+static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, void *priv)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 
-	dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
-}
-
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	struct iommu_domain *domain;
-	struct scatterlist *sg;
-	uint64_t da = iova;
-	int i;
-
-	if (!client)
-		return -ENODEV;
-
-	domain = client->mmu_mapping->domain;
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			return unmapped;
-
-		VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
-
-		WARN_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
-
-	return 0;
+	if (priv)
+		msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL, priv);
+	else
+		dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+			DMA_BIDIRECTIONAL);
 }
 
 static void msm_smmu_destroy(struct msm_mmu *mmu)
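Note: the SMMU backend no longer touches the IOMMU domain directly; map and unmap collapse onto the DMA-mapping API (using the msm_dma_*_sg variants when a dma_buf arrives through priv). dma_map_sg() returns the number of entries it mapped, 0 on failure, so a short count is turned into -ENOMEM:

    /* identifiers from the hunk above; short map counts as failure */
    ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

    return (ret != sgt->nents) ? -ENOMEM : 0;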
@@ -217,42 +145,11 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
 	kfree(smmu);
 }
 
-static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-		struct dma_buf *dma_buf, int dir)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	int ret;
-
-	ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
-			dma_buf);
-	if (ret != sgt->nents) {
-		DRM_ERROR("dma map sg failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-
-static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-		struct dma_buf *dma_buf, int dir)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
-	msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
-}
-
 static const struct msm_mmu_funcs funcs = {
 	.attach = msm_smmu_attach,
 	.detach = msm_smmu_detach,
 	.map = msm_smmu_map,
-	.map_sg = msm_smmu_map_sg,
-	.unmap_sg = msm_smmu_unmap_sg,
 	.unmap = msm_smmu_unmap,
-	.map_dma_buf = msm_smmu_map_dma_buf,
-	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
 	.destroy = msm_smmu_destroy,
 };
 
@@ -41,10 +41,6 @@
 #define CREATE_TRACE_POINTS
 #include "sde_trace.h"
 
-static const char * const iommu_ports[] = {
-	"mdp_0",
-};
-
 /**
  * Controls size of event log buffer. Specified as a power of 2.
  */
@@ -1077,8 +1073,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 
 		sde_kms->aspace[i] = aspace;
 
-		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-			ARRAY_SIZE(iommu_ports));
+		ret = mmu->funcs->attach(mmu, NULL, 0);
 		if (ret) {
 			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
 			msm_gem_address_space_put(aspace);