@@ -20,8 +20,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/msm-clk.h>
-#include <linux/qcom_iommu.h>
-#include <linux/msm_iommu_domains.h>
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-buf.h>
@@ -33,31 +31,6 @@
 #include "mdss_mdp.h"
 #include "mdss_smmu.h"
 
-struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
-	[MDSS_IOMMU_DOMAIN_UNSECURE] = {
-		.client_name = "mdp_ns",
-		.ctx_name = "mdp_0",
-		.partitions = {
-			{
-				.start = SZ_128K,
-				.size = SZ_1G - SZ_128K,
-			},
-		},
-		.npartitions = 1,
-	},
-	[MDSS_IOMMU_DOMAIN_SECURE] = {
-		.client_name = "mdp_secure",
-		.ctx_name = "mdp_1",
-		.partitions = {
-			{
-				.start = SZ_1G,
-				.size = SZ_1G,
-			},
-		},
-		.npartitions = 1,
-	},
-};
-
 static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
 		struct dss_module_power *mp)
 {
@@ -126,48 +99,6 @@ static int mdss_smmu_clk_register(struct platform_device *pdev,
 	return 0;
 }
 
-/*
- * mdss_smmu_attach_v1()
- *
- * Attaches to the SMMU domain. Attaching should be done every time before
- * using the SMMU resources.
- */
-static int mdss_smmu_attach_v1(struct mdss_data_type *mdata)
-{
-	struct iommu_domain *domain;
-	struct mdss_iommu_map_type *iomap;
-	int i, rc = 0;
-
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-
-		if (!mdss_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		iomap = mdata->iommu_map + i;
-
-		domain = msm_get_iommu_domain(iomap->domain_idx);
-		if (!domain) {
-			WARN(1, "could not attach iommu client %s to ctx %s\n",
-				iomap->client_name, iomap->ctx_name);
-			continue;
-		}
-
-		rc = iommu_attach_device(domain, iomap->ctx);
-		if (rc) {
-			WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
-			for (i--; i >= 0; i--) {
-				if (!mdss_smmu_is_valid_domain_type(mdata, i))
-					continue;
-				iomap = mdata->iommu_map + i;
-				iommu_detach_device(domain, iomap->ctx);
-			}
-			goto end;
-		}
-	}
-end:
-	return rc;
-}
-
 static int mdss_smmu_enable_power(struct dss_module_power *mp, bool enable)
 {
 	int rc = 0;
@@ -270,35 +201,6 @@ err:
 	return rc;
 }
 
-/*
- * mdss_smmu_detach_v1()
- *
- * Detaches from the SMMU domain. This should be done immediately after the
- * SMMU resources are used, in order to save power.
- */
-static int mdss_smmu_detach_v1(struct mdss_data_type *mdata)
-{
-	struct iommu_domain *domain;
-	struct mdss_iommu_map_type *iomap;
-	int i;
-
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-		if (!mdss_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		iomap = mdata->iommu_map + i;
-
-		domain = msm_get_iommu_domain(iomap->domain_idx);
-		if (!domain) {
-			pr_err("unable to get iommu domain(%d)\n",
-				iomap->domain_idx);
-			continue;
-		}
-		iommu_detach_device(domain, iomap->ctx);
-	}
-	return 0;
-}
-
 /*
  * mdss_smmu_detach_v2()
 *
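
The two comments above spell out the intended lifecycle: attach before the hardware touches any SMMU-mapped buffer, and detach as soon as the work is done so the context banks can power down. A caller-side sketch of that bracketing through the ops table follows; the commit function shown here is hypothetical, and only the smmu_attach/smmu_detach fields come from this file.

/* Hypothetical caller; illustrates the bracketing pattern only. */
static int mdss_commit_sketch(struct mdss_data_type *mdata)
{
	int rc;

	rc = mdata->smmu_ops.smmu_attach(mdata);	/* before any SMMU use */
	if (rc)
		return rc;

	/* ... program the MDP pipes and kick off the DMA ... */

	return mdata->smmu_ops.smmu_detach(mdata);	/* right after, saves power */
}
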
@@ -324,29 +226,11 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
 	return 0;
 }
 
-static int mdss_smmu_get_domain_id_v1(u32 type)
-{
-	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-	return mdata->iommu_map[type].domain_idx;
-}
-
 static int mdss_smmu_get_domain_id_v2(u32 type)
 {
 	return type;
 }
 
-/*
- * mdss_smmu_dma_buf_attach_v1()
- *
- * Attaches the dma buffer to the device. This API returns a reference to
- * an attachment structure, which is then used for scatterlist operations.
- */
-static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v1(
-		struct dma_buf *dma_buf, struct device *dev, int domain)
-{
-	return dma_buf_attach(dma_buf, dev);
-}
-
 /*
  * mdss_smmu_dma_buf_attach_v2()
 *
@@ -365,20 +249,6 @@ static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
 	return dma_buf_attach(dma_buf, mdss_smmu->dev);
 }
 
-/*
- * mdss_smmu_map_dma_buf_v1()
- *
- * Maps an existing buffer into the SMMU domain and sets the virtual
- * address in @iova.
- */
-static int mdss_smmu_map_dma_buf_v1(struct dma_buf *dma_buf,
-		struct sg_table *table, int domain, dma_addr_t *iova,
-		unsigned long *size, int dir)
-{
-	return msm_map_dma_buf(dma_buf, table, mdss_smmu_get_domain_id(domain),
-			0, SZ_4K, 0, iova, size, 0, 0);
-}
-
 /*
  * mdss_smmu_map_dma_buf_v2()
 *
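
The dma-buf comments above describe the usual import sequence: attach the buffer to the device, map the attachment to get a scatterlist, then map that into the SMMU domain to obtain the @iova the hardware will use. A condensed sketch of that flow through the ops table; the wrapper function and error-path simplifications are assumptions, while the smmu_dma_buf_attach/smmu_map_dma_buf signatures match this file.

/* Sketch: import a dma-buf and obtain its SMMU address (names assumed). */
static int mdss_import_buf_sketch(struct mdss_data_type *mdata,
		struct dma_buf *buf, struct device *dev, int domain)
{
	struct dma_buf_attachment *att;
	struct sg_table *table;
	dma_addr_t iova;
	unsigned long size;

	att = mdata->smmu_ops.smmu_dma_buf_attach(buf, dev, domain);
	if (IS_ERR(att))
		return PTR_ERR(att);

	table = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(table))
		return PTR_ERR(table);

	/* on success, @iova is what gets programmed into the MDP */
	return mdata->smmu_ops.smmu_map_dma_buf(buf, table, domain,
			&iova, &size, DMA_BIDIRECTIONAL);
}
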
@@ -407,12 +277,6 @@ static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
 	return 0;
 }
 
-static void mdss_smmu_unmap_dma_buf_v1(struct sg_table *table, int domain,
-		int dir)
-{
-	msm_unmap_dma_buf(table, mdss_smmu_get_domain_id(domain), 0);
-}
-
 static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
 		int dir)
 {
@@ -425,38 +289,6 @@ static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
 	dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir);
 }
 
-/*
- * mdss_smmu_dma_alloc_coherent_v1()
- *
- * This routine allocates a region of @size bytes of consistent memory. It
- * also returns a dma_handle which can be used as the physical address.
- * dma_alloc_coherent returns a pointer to the allocated region (@cpu_addr)
- * in the processor's virtual address space. This routine also takes care
- * of mapping the buffer into the SMMU domain, which sets the virtual
- * address in @iova.
- */
-static int mdss_smmu_dma_alloc_coherent_v1(struct device *dev, size_t size,
-		dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
-		gfp_t gfp, int domain)
-{
-	int ret = 0;
-
-	cpu_addr = dma_alloc_coherent(dev, size, phys, gfp);
-	if (!cpu_addr) {
-		pr_err("dma alloc coherent failed!\n");
-		return -ENOMEM;
-	}
-
-	ret = msm_iommu_map_contig_buffer(*phys,
-			mdss_smmu_get_domain_id(domain), 0,
-			size, SZ_4K, 0, iova);
-	if (IS_ERR_VALUE(ret)) {
-		pr_err("map contig buffer failed rc:%d\n", ret);
-		dma_free_coherent(dev, size, cpu_addr, *phys);
-	}
-	return ret;
-}
-
 /*
  * mdss_smmu_dma_alloc_coherent_v2()
 *
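
The alloc_coherent comment above packs two steps into one call: a coherent DMA allocation (producing @phys and a CPU mapping) plus an SMMU mapping (producing @iova). A short sketch of the call pattern through the ops table; the wrapper function, buffer size, and domain choice are arbitrary assumptions, while the field names and argument order come from the signature visible in the removed v1 implementation.

/* Sketch (assumed caller): allocate a small shared buffer, then free it. */
static int mdss_alloc_shared_buf_sketch(struct mdss_data_type *mdata,
		struct device *dev)
{
	dma_addr_t phys, iova;
	void *cpu_addr = NULL;
	int rc;

	rc = mdata->smmu_ops.smmu_dma_alloc_coherent(dev, SZ_4K,
			&phys, &iova, cpu_addr, GFP_KERNEL,
			MDSS_IOMMU_DOMAIN_UNSECURE);
	if (rc)
		return rc;

	/* the hardware is handed @iova; teardown mirrors the allocation */
	mdata->smmu_ops.smmu_dma_free_coherent(dev, SZ_4K, cpu_addr,
			phys, iova, MDSS_IOMMU_DOMAIN_UNSECURE);
	return 0;
}
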
@@ -484,14 +316,6 @@ static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
 	return 0;
 }
 
-static void mdss_smmu_dma_free_coherent_v1(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
-{
-	msm_iommu_unmap_contig_buffer(phys, mdss_smmu_get_domain_id(domain),
-			0, size);
-	dma_free_coherent(dev, size, cpu_addr, phys);
-}
-
 static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
 {
@@ -504,25 +328,6 @@ static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
 	dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
 }
 
-/*
- * mdss_smmu_map_v1()
- *
- * Maps the address to the SMMU domain. Both the virtual address and the
- * physical one, as well as the size of the mapping, should be aligned (at
- * least) to the size of the smallest page supported by the hardware.
- */
-static int mdss_smmu_map_v1(int domain, phys_addr_t iova, phys_addr_t phys,
-		int gfp_order, int prot)
-{
-	struct iommu_domain *iommu_domain = msm_get_iommu_domain(
-			mdss_smmu_get_domain_id(domain));
-	if (!iommu_domain) {
-		pr_err("mdss iommu domain get failed in smmu map\n");
-		return -EINVAL;
-	}
-	return iommu_map(iommu_domain, iova, phys, gfp_order, prot);
-}
-
 /*
  * mdss_smmu_map_v2()
 *
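
The alignment note above matters because this file's iommu_map() call takes a page order rather than a byte count: @iova, @phys, and the mapped size must all be multiples of the smallest page the SMMU supports. A sketch of how a caller could derive the order, assuming the order-based iommu_map() signature this file uses; the wrapper function and its checks are assumptions.

/* Sketch: map a physically contiguous buffer of @len bytes (names assumed). */
static int mdss_map_one_sketch(struct mdss_data_type *mdata, int domain,
		unsigned long iova, phys_addr_t phys, size_t len)
{
	/* round @len up to a power-of-two page count for the order argument */
	int order = get_order(PAGE_ALIGN(len));

	if (!IS_ALIGNED(iova, PAGE_SIZE) || !IS_ALIGNED(phys, PAGE_SIZE))
		return -EINVAL;

	return mdata->smmu_ops.smmu_map(domain, iova, phys, order,
			IOMMU_READ | IOMMU_WRITE);
}
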
@@ -542,17 +347,6 @@ static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
 			iova, phys, gfp_order, prot);
 }
 
-static void mdss_smmu_unmap_v1(int domain, unsigned long iova, int gfp_order)
-{
-	struct iommu_domain *iommu_domain = msm_get_iommu_domain(
-			mdss_smmu_get_domain_id(domain));
-	if (!iommu_domain) {
-		pr_err("mdss iommu domain get failed in smmu unmap\n");
-		return;
-	}
-	iommu_unmap(iommu_domain, iova, gfp_order);
-}
-
 static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
 {
 	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
@@ -564,17 +358,6 @@ static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
 	iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
 }
 
-/*
- * mdss_smmu_dsi_alloc_buf_v1()
- *
- * Allocates the buffer; the mapping is handled later.
- */
-static char *mdss_smmu_dsi_alloc_buf_v1(struct device *dev, int size,
-		dma_addr_t *dmap, gfp_t gfp)
-{
-	return dma_alloc_writecombine(dev, size, dmap, GFP_KERNEL);
-}
-
 /*
  * mdss_smmu_dsi_alloc_buf_v2()
 *
@@ -586,24 +369,6 @@ static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
 	return kzalloc(size, GFP_KERNEL);
 }
 
-/*
- * mdss_smmu_dsi_map_buffer_v1()
- *
- * Maps the buffer allocated with mdss_smmu_dsi_alloc_buf_v1 to the SMMU
- * domain.
- */
-static int mdss_smmu_dsi_map_buffer_v1(phys_addr_t phys, unsigned int domain,
-		unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
-		int dir)
-{
-	msm_iommu_map_contig_buffer(phys, mdss_smmu_get_domain_id(domain), 0,
-			size, SZ_4K, 0, dma_addr);
-	if (IS_ERR_VALUE(*dma_addr)) {
-		pr_err("dma map contig buffer failed\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
 /*
  * mdss_smmu_dsi_map_buffer_v2()
 *
@@ -629,14 +394,6 @@ static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
 	return 0;
 }
 
-static void mdss_smmu_dsi_unmap_buffer_v1(dma_addr_t dma_addr, int domain,
-		unsigned long size, int dir)
-{
-	if (is_mdss_iommu_attached())
-		msm_iommu_unmap_contig_buffer(dma_addr,
-			mdss_smmu_get_domain_id(domain), 0, size);
-}
-
 static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
 		unsigned long size, int dir)
 {
@@ -650,33 +407,7 @@ static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
 	dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
 }
 
-static void mdss_smmu_deinit_v1(struct mdss_data_type *mdata)
-{
-	struct iommu_domain *domain;
-	struct mdss_iommu_map_type *iomap;
-	int i;
-
-	if (!mdata->iommu_map) {
-		pr_warn("iommu not initialized\n");
-		return;
-	}
-
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-		if (!mdss_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		iomap = &mdss_iommu_map[i];
-
-		domain = msm_get_iommu_domain(iomap->domain_idx);
-		if (!domain) {
-			pr_err("unable to get iommu domain(%d)\n",
-				iomap->domain_idx);
-			return;
-		}
-		iomap->domain_idx = msm_unregister_domain(domain);
-	}
-	mdata->iommu_map = NULL;
-}
-
 static void mdss_smmu_deinit_v2(struct mdss_data_type *mata)
 {
@@ -690,80 +421,27 @@ static void mdss_smmu_deinit_v2(struct mdss_data_type *mata)
 	}
 }
 
-static void mdss_smmu_ops_init(struct mdss_data_type *mdata, int smmu_version)
+static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
 {
-	switch (smmu_version) {
-	case MDSS_SMMU_V1:
-		mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v1;
-		mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v1;
-		mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v1;
-		mdata->smmu_ops.smmu_dma_buf_attach =
-			mdss_smmu_dma_buf_attach_v1;
-		mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v1;
-		mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v1;
-		mdata->smmu_ops.smmu_dma_alloc_coherent =
-			mdss_smmu_dma_alloc_coherent_v1;
-		mdata->smmu_ops.smmu_dma_free_coherent =
-			mdss_smmu_dma_free_coherent_v1;
-		mdata->smmu_ops.smmu_map = mdss_smmu_map_v1;
-		mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v1;
-		mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v1;
-		mdata->smmu_ops.smmu_dsi_map_buffer =
-			mdss_smmu_dsi_map_buffer_v1;
-		mdata->smmu_ops.smmu_dsi_unmap_buffer =
-			mdss_smmu_dsi_unmap_buffer_v1;
-		mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v1;
-		break;
-
-	case MDSS_SMMU_V2:
-		mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
-		mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
-		mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
-		mdata->smmu_ops.smmu_dma_buf_attach =
-			mdss_smmu_dma_buf_attach_v2;
-		mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
-		mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
-		mdata->smmu_ops.smmu_dma_alloc_coherent =
-			mdss_smmu_dma_alloc_coherent_v2;
-		mdata->smmu_ops.smmu_dma_free_coherent =
-			mdss_smmu_dma_free_coherent_v2;
-		mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
-		mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
-		mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
-		mdata->smmu_ops.smmu_dsi_map_buffer =
-			mdss_smmu_dsi_map_buffer_v2;
-		mdata->smmu_ops.smmu_dsi_unmap_buffer =
-			mdss_smmu_dsi_unmap_buffer_v2;
-		mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
-		break;
-
-	default:
-		pr_err("smmu ops init failed - invalid smmu version:%d",
-			smmu_version);
-	}
-}
-
-/*
- * mdss_smmu_find_version()
- * @dev: mdss_mdp device
- *
- * Parses the child device nodes of the mdss_mdp device passed to this
- * function, looking for SMMU v2 compatible devices. If one exists, the
- * version is MDSS_SMMU_V2; otherwise it is MDSS_SMMU_V1.
- */
-static int mdss_smmu_find_version(struct device *dev)
-{
-	struct device_node *parent, *child;
-	int version = MDSS_SMMU_V1;
-
-	parent = dev->of_node;
-	for_each_child_of_node(parent, child) {
-		if (is_mdss_smmu_compatible_device(child->name)) {
-			version = MDSS_SMMU_V2;
-			break;
-		}
-	}
-	return version;
-}
+	mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
+	mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
+	mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
+	mdata->smmu_ops.smmu_dma_buf_attach =
+		mdss_smmu_dma_buf_attach_v2;
+	mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
+	mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
+	mdata->smmu_ops.smmu_dma_alloc_coherent =
+		mdss_smmu_dma_alloc_coherent_v2;
+	mdata->smmu_ops.smmu_dma_free_coherent =
+		mdss_smmu_dma_free_coherent_v2;
+	mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
+	mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
+	mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
+	mdata->smmu_ops.smmu_dsi_map_buffer =
+		mdss_smmu_dsi_map_buffer_v2;
+	mdata->smmu_ops.smmu_dsi_unmap_buffer =
+		mdss_smmu_dsi_unmap_buffer_v2;
+	mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
+}
 
 /*
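
With this hunk the version switch disappears: mdss_smmu_ops_init() unconditionally installs the v2 implementations, and the rest of the driver only ever dispatches through the function-pointer table in mdata->smmu_ops. A sketch of how the existing mdss_smmu_get_domain_id() wrapper plausibly dispatches; the wrapper body is an assumption, while the field name and the v2 handler appear in this patch.

/* Plausible wrapper body (assumption); dispatch goes through smmu_ops. */
static inline int mdss_smmu_get_domain_id(u32 type)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	return mdata->smmu_ops.smmu_get_domain_id(type);
}
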
@@ -787,56 +465,8 @@ void mdss_smmu_device_create(struct device *dev)
 
 int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
 {
-	struct msm_iova_layout layout;
-	struct iommu_domain *domain;
-	struct mdss_iommu_map_type *iomap;
-	int i, smmu_version;
-
-	smmu_version = mdss_smmu_find_version(dev);
-
-	if (smmu_version == MDSS_SMMU_V2) {
-		mdss_smmu_device_create(dev);
-		goto end;
-	}
-
-	if (mdata->iommu_map) {
-		pr_warn("iommu already initialized\n");
-		return 0;
-	}
-
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-		if (!mdss_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		iomap = &mdss_iommu_map[i];
-
-		layout.client_name = iomap->client_name;
-		layout.partitions = iomap->partitions;
-		layout.npartitions = iomap->npartitions;
-		layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
-
-		iomap->domain_idx = msm_register_domain(&layout);
-		if (IS_ERR_VALUE(iomap->domain_idx))
-			return -EINVAL;
-
-		domain = msm_get_iommu_domain(iomap->domain_idx);
-		if (!domain) {
-			pr_err("unable to get iommu domain(%d)\n",
-				iomap->domain_idx);
-			return -EINVAL;
-		}
-
-		iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
-		if (!iomap->ctx) {
-			pr_warn("unable to get iommu ctx(%s)\n",
-				iomap->ctx_name);
-			return -EINVAL;
-		}
-	}
-	mdata->iommu_map = mdss_iommu_map;
-
-end:
-	mdss_smmu_ops_init(mdata, smmu_version);
+	mdss_smmu_device_create(dev);
+	mdss_smmu_ops_init(mdata);
 	return 0;
 }
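
After the patch, mdss_smmu_init() reduces to two calls: create the SMMU child devices, then install the v2 ops; all of the per-domain registration the removed code did by hand now happens behind those ops. A sketch of the resulting probe-time sequence; the probe function and its error handling are hypothetical, while mdss_smmu_init() and the smmu_ops fields come from this file.

/* Hypothetical probe-path usage of the post-patch init sequence. */
static int mdss_probe_sketch(struct platform_device *pdev,
		struct mdss_data_type *mdata)
{
	int rc;

	rc = mdss_smmu_init(mdata, &pdev->dev);	/* devices + v2 ops table */
	if (rc)
		return rc;

	/* from here on, all SMMU access goes through mdata->smmu_ops */
	return mdata->smmu_ops.smmu_attach(mdata);
}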