drm/msm: add smmu handler
Add the msm_smmu driver to support mapping buffers through the ARM SMMU. msm_smmu implements the msm_mmu hooks used by the DRM driver. The current change only supports the unsecure domain; the msm_gem object is also updated to attach buffers through the new msm_smmu driver.

Change-Id: I4899bd74d8b41b864ed5e0dec2da11e929c7fa95
Signed-off-by: Adrian Salido-Moreno <adrianm@codeaurora.org>
Signed-off-by: Krishna Srinivas Kundurthi <kskund@codeaurora.org>
parent 54a4a3fb41
commit 29e061e200

6 changed files with 472 additions and 11 deletions
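For context, a minimal usage sketch of the new entry point, modeled on the mdp5_kms.c hunk below; example_init_display_smmu() is an illustrative helper name, not part of this patch:

/* Hypothetical helper, shown only to illustrate the intended call flow. */
static struct msm_mmu *example_init_display_smmu(struct drm_device *dev,
                struct platform_device *pdev)
{
        struct msm_mmu *mmu;
        int ret;

        /* Create the MMU object backed by the unsecure MDP SMMU domain. */
        mmu = msm_smmu_new(&pdev->dev, MSM_SMMU_DOMAIN_UNSECURE);
        if (IS_ERR(mmu)) {
                dev_err(dev->dev, "failed to init iommu: %ld\n", PTR_ERR(mmu));
                return mmu;
        }

        /* Attach the arm_iommu mapping; msm_smmu_attach() attaches only once. */
        ret = mmu->funcs->attach(mmu, NULL, 0);
        if (ret)
                return ERR_PTR(ret);

        return mmu;
}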
@@ -51,6 +51,7 @@ msm-y := \
         msm_gem_submit.o \
         msm_gpu.o \
         msm_iommu.o \
+        msm_smmu.o \
         msm_perf.o \
         msm_rd.o \
         msm_ringbuffer.o
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -595,7 +595,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
         mdelay(16);

         if (config->platform.iommu) {
-                mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+                mmu = msm_smmu_new(&pdev->dev,
+                                MSM_SMMU_DOMAIN_UNSECURE);
                 if (IS_ERR(mmu)) {
                         ret = PTR_ERR(mmu);
                         dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -295,16 +295,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,

         if (iommu_present(&platform_bus_type)) {
                 struct msm_mmu *mmu = priv->mmus[id];
-                uint32_t offset;

                 if (WARN_ON(!mmu))
                         return -EINVAL;

-                offset = (uint32_t)mmap_offset(obj);
-                ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
-                                obj->size, IOMMU_READ | IOMMU_WRITE);
-                msm_obj->domain[id].iova = offset;
+                if (obj->import_attach && mmu->funcs->map_dma_buf) {
+                        ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
+                                        obj->import_attach->dmabuf,
+                                        DMA_BIDIRECTIONAL);
+                        if (ret) {
+                                DRM_ERROR("Unable to map dma buf\n");
+                                return ret;
+                        }
+                }
+                msm_obj->domain[id].iova =
+                        sg_dma_address(msm_obj->sgt->sgl);
         } else {
                 WARN_ONCE(1, "physical address being used\n");
                 msm_obj->domain[id].iova = physaddr(obj);
         }
 }
@@ -524,8 +531,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
         for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                 struct msm_mmu *mmu = priv->mmus[id];
                 if (mmu && msm_obj->domain[id].iova) {
-                        uint32_t offset = msm_obj->domain[id].iova;
-                        mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+                        if (obj->import_attach && mmu->funcs->unmap_dma_buf) {
+                                mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
+                                                obj->import_attach->dmabuf,
+                                                DMA_BIDIRECTIONAL);
+                        }
                 }
         }

@@ -53,8 +53,7 @@ struct msm_gem_object {
         void *vaddr;

         struct {
-                // XXX
-                uint32_t iova;
+                dma_addr_t iova;
         } domain[NUM_DOMAINS];

         /* normally (resv == &_resv) except for imported bo's */
@@ -20,6 +20,14 @@
 #include <linux/iommu.h>

 struct msm_mmu;
 struct msm_gpu;

+enum msm_mmu_domain_type {
+        MSM_SMMU_DOMAIN_UNSECURE,
+        MSM_SMMU_DOMAIN_MAX,
+};
+
 struct msm_mmu_funcs {
         int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
         void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
@@ -27,6 +35,14 @@ struct msm_mmu_funcs {
                         unsigned len, int prot);
         int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
                         unsigned len);
+        int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+                        enum dma_data_direction dir);
+        void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+                        enum dma_data_direction dir);
+        int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+                        struct dma_buf *dma_buf, int dir);
+        void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+                        struct dma_buf *dma_buf, int dir);
         void (*destroy)(struct msm_mmu *mmu);
 };
@@ -44,5 +60,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,

 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
 struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_smmu_new(struct device *dev,
+                enum msm_mmu_domain_type domain);

 #endif /* __MSM_MMU_H__ */
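As a minimal sketch of how a GEM object is expected to be routed through the new hooks (the dma-buf branch mirrors the msm_gem.c hunks above; the example_map_bo() helper and its map_sg fallback for non-imported buffers are assumptions, not part of this patch):

/* Illustrative helper only; mirrors msm_gem_get_iova_locked() above.
 * The map_sg branch is an assumed path for non-imported buffers.
 */
static int example_map_bo(struct msm_mmu *mmu, struct msm_gem_object *msm_obj,
                struct drm_gem_object *obj, int id)
{
        int ret;

        if (obj->import_attach && mmu->funcs->map_dma_buf)
                /* Imported buffers go through the msm dma-iommu mapping. */
                ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
                                obj->import_attach->dmabuf,
                                DMA_BIDIRECTIONAL);
        else
                /* Assumed path: dma-map the object's own scatterlist. */
                ret = mmu->funcs->map_sg(mmu, msm_obj->sgt, DMA_BIDIRECTIONAL);

        if (ret)
                return ret;

        /* With either path, the device address comes from the mapped sgl. */
        msm_obj->domain[id].iova = sg_dma_address(msm_obj->sgt->sgl);
        return 0;
}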
drivers/gpu/drm/msm/msm_smmu.c (new file, 432 lines)
@@ -0,0 +1,432 @@
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/dma-iommu.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_smmu_client {
        struct device *dev;
        struct dma_iommu_mapping *mmu_mapping;
        bool domain_attached;
};

struct msm_smmu {
        struct msm_mmu base;
        struct device *client_dev;
        struct msm_smmu_client client;
};

struct msm_smmu_domain {
        const char *label;
        size_t va_start;
        size_t va_size;
        bool secure;
};

#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (&smmu->client)

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
                const struct msm_smmu_domain *domain);
static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        int rc = 0;

        /* domain attach only once */
        if (client->domain_attached)
                return 0;

        rc = arm_iommu_attach_device(client->dev,
                        client->mmu_mapping);
        if (rc) {
                dev_err(client->dev, "iommu attach dev failed (%d)\n",
                                rc);
                return rc;
        }

        client->domain_attached = true;

        dev_dbg(client->dev, "iommu domain attached\n");

        return 0;
}

static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
        DBG("detaching");
}
static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
                struct sg_table *sgt, unsigned len, int prot)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        struct iommu_domain *domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!client)
                return -ENODEV;

        domain = client->mmu_mapping->domain;
        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg->length + sg->offset;

                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}
static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
                enum dma_data_direction dir)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        int ret;

        ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
        if (ret != sgt->nents)
                return -ENOMEM;

        return 0;
}

static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
                enum dma_data_direction dir)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);

        dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
                struct sg_table *sgt, unsigned len)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        struct iommu_domain *domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        if (!client)
                return -ENODEV;

        domain = client->mmu_mapping->domain;
        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        return unmapped;

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                WARN_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }

        return 0;
}
static void msm_smmu_destroy(struct msm_mmu *mmu)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct platform_device *pdev = to_platform_device(smmu->client_dev);

        platform_device_unregister(pdev);
        kfree(smmu);
}

static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
                struct dma_buf *dma_buf, int dir)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        int ret;

        ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
                        dma_buf);
        if (ret != sgt->nents) {
                DRM_ERROR("dma map sg failed\n");
                return -ENOMEM;
        }

        return 0;
}

static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
                struct dma_buf *dma_buf, int dir)
{
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);

        msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
}
static const struct msm_mmu_funcs funcs = {
        .attach = msm_smmu_attach,
        .detach = msm_smmu_detach,
        .map = msm_smmu_map,
        .map_sg = msm_smmu_map_sg,
        .unmap_sg = msm_smmu_unmap_sg,
        .unmap = msm_smmu_unmap,
        .map_dma_buf = msm_smmu_map_dma_buf,
        .unmap_dma_buf = msm_smmu_unmap_dma_buf,
        .destroy = msm_smmu_destroy,
};

static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
        [MSM_SMMU_DOMAIN_UNSECURE] = {
                .label = "mdp_ns",
                .va_start = SZ_1M,
                .va_size = SZ_2G,
        },
};

static const struct of_device_id msm_smmu_dt_match[] = {
        { .compatible = "qcom,smmu_mdp_unsec",
                .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
        {}
};
MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
static struct device *msm_smmu_device_create(struct device *dev,
                enum msm_mmu_domain_type domain,
                struct msm_smmu *smmu)
{
        struct device_node *child;
        struct platform_device *pdev;
        int i;
        const char *compat = NULL;

        for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
                if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
                        compat = msm_smmu_dt_match[i].compatible;
                        break;
                }
        }

        if (!compat) {
                DRM_ERROR("unable to find matching domain for %d\n", domain);
                return ERR_PTR(-ENOENT);
        }
        DRM_INFO("found domain %d compat: %s\n", domain, compat);

        if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
                int rc;

                smmu->client.dev = dev;
                rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
                                msm_smmu_dt_match[i].data);
                if (rc)
                        return ERR_PTR(rc);

                return NULL;
        }

        child = of_find_compatible_node(dev->of_node, NULL, compat);
        if (!child) {
                DRM_ERROR("unable to find compatible node for %s\n", compat);
                return ERR_PTR(-ENODEV);
        }

        pdev = of_platform_device_create(child, NULL, dev);
        if (!pdev) {
                DRM_ERROR("unable to create smmu platform dev for domain %d\n",
                                domain);
                return ERR_PTR(-ENODEV);
        }

        return &pdev->dev;
}
struct msm_mmu *msm_smmu_new(struct device *dev,
                enum msm_mmu_domain_type domain)
{
        struct msm_smmu *smmu;
        struct device *client_dev;

        smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        client_dev = msm_smmu_device_create(dev, domain, smmu);
        if (IS_ERR(client_dev))
                return (void *)client_dev ? : ERR_PTR(-ENODEV);

        smmu->client_dev = client_dev;
        msm_mmu_init(&smmu->base, dev, &funcs);

        return &smmu->base;
}
static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
                const struct msm_smmu_domain *domain)
{
        int disable_htw = 1;
        int rc;

        client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
                        domain->va_start, domain->va_size);
        if (IS_ERR(client->mmu_mapping)) {
                dev_err(client->dev,
                        "iommu create mapping failed for domain=%s\n",
                        domain->label);
                return PTR_ERR(client->mmu_mapping);
        }

        if (domain->secure) {
                int secure_vmid = VMID_CP_PIXEL;

                rc = iommu_domain_set_attr(client->mmu_mapping->domain,
                                DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
                if (rc) {
                        dev_err(client->dev, "couldn't set secure pix vmid\n");
                        goto error;
                }
        }

        return 0;

error:
        arm_iommu_release_mapping(client->mmu_mapping);
        return rc;
}
/**
 * msm_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks as each context bank has its own clks, for which voting
 * has to be done every time before using that context bank.
 */
static int msm_smmu_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        struct msm_smmu_client *client;
        const struct msm_smmu_domain *domain;
        int rc;

        match = of_match_device(msm_smmu_dt_match, &pdev->dev);
        if (!match || !match->data) {
                dev_err(&pdev->dev, "probe failed as match data is invalid\n");
                return -EINVAL;
        }

        domain = match->data;
        if (!domain) {
                dev_err(&pdev->dev, "no matching device found\n");
                return -EINVAL;
        }

        DRM_INFO("probing device %s\n", match->compatible);

        client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        client->dev = &pdev->dev;

        rc = _msm_smmu_create_mapping(client, domain);
        platform_set_drvdata(pdev, client);

        return rc;
}
static int msm_smmu_remove(struct platform_device *pdev)
{
        struct msm_smmu_client *client;

        client = platform_get_drvdata(pdev);
        if (client->domain_attached) {
                arm_iommu_detach_device(client->dev);
                client->domain_attached = false;
        }
        arm_iommu_release_mapping(client->mmu_mapping);

        return 0;
}

static struct platform_driver msm_smmu_driver = {
        .probe = msm_smmu_probe,
        .remove = msm_smmu_remove,
        .driver = {
                .name = "msmdrm_smmu",
                .of_match_table = msm_smmu_dt_match,
        },
};

static int __init msm_smmu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_smmu_driver);
        if (ret)
                pr_err("mdss_smmu_register_driver() failed!\n");

        return ret;
}
module_init(msm_smmu_driver_init);

static void __exit msm_smmu_driver_cleanup(void)
{
        platform_driver_unregister(&msm_smmu_driver);
}
module_exit(msm_smmu_driver_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");