From ae94b477afd5d71221964250395d8f109ddb5b06 Mon Sep 17 00:00:00 2001
From: Guchun Chen
Date: Sat, 27 May 2017 17:38:27 +0800
Subject: [PATCH] msm: smmu: add re-routing for caller-specified iova

When an smmu mapping is requested with an iova that the caller has
specified directly, rather than one allocated dynamically in
dma-mapping.c, the smmu driver needs to support it.

This is required for the early display case. In that scenario, LK has
already programmed the display hardware with the physical memory to
fetch from. If the kernel does not map at that exact iova, but instead
uses a fresh one produced by "alloc_iova" in dma-mapping.c, the display
hardware has no way to learn the new iova, and an smmu fault occurs
once iommu stage-1 translation is enabled.

Fix this smmu fault by re-routing the smmu map/unmap functions to the
direct iommu_map_sg()/iommu_unmap() path whenever the caller-specified
iova is non-zero.

Change-Id: I062b04d7eec65af1c106a5caa09ec787b5d26d0d
Signed-off-by: Guchun Chen
---
 drivers/gpu/drm/msm/msm_smmu.c | 54 ++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index c2dd5f96521e..a6efb22b5ed4 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -120,16 +120,30 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
 	int ret;
 
-	if (priv)
-		ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
-				DMA_BIDIRECTIONAL, priv);
-	else
-		ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
-				DMA_BIDIRECTIONAL);
+	if (!client || !sgt)
+		return -EINVAL;
 
-	return (ret != sgt->nents) ? -ENOMEM : 0;
+	if (iova != 0) {
+		if (!client->mmu_mapping || !client->mmu_mapping->domain)
+			return -EINVAL;
+
+		domain = client->mmu_mapping->domain;
+
+		return iommu_map_sg(domain, iova, sgt->sgl,
+				sgt->nents, flags);
+	} else {
+		if (priv)
+			ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl,
+				sgt->nents, DMA_BIDIRECTIONAL, priv);
+		else
+			ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+				DMA_BIDIRECTIONAL);
+
+		return (ret != sgt->nents) ? -ENOMEM : 0;
+	}
 }
 
 static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
@@ -137,13 +151,27 @@ static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain = client->mmu_mapping->domain;
+	struct scatterlist *sg;
+	size_t len = 0;
+	int unmapped, i = 0;
 
-	if (priv)
-		msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
-				DMA_BIDIRECTIONAL, priv);
-	else
-		dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
-				DMA_BIDIRECTIONAL);
+	if (iova != 0) {
+		for_each_sg(sgt->sgl, sg, sgt->nents, i)
+			len += sg->length;
+
+		unmapped = iommu_unmap(domain, iova, len);
+		if (unmapped < len)
+			dev_warn(mmu->dev,
+				"could not unmap iova@%llx\n", iova);
+	} else {
+		if (priv)
+			msm_dma_unmap_sg(client->dev, sgt->sgl,
+				sgt->nents, DMA_BIDIRECTIONAL, priv);
+		else
+			dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+				DMA_BIDIRECTIONAL);
+	}
 }
 
 static void msm_smmu_destroy(struct msm_mmu *mmu)
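
One return-convention detail in the map hunk is worth spelling out:
iommu_map_sg() returns the number of bytes mapped (0 on failure), not
0/-errno, so after this patch msm_smmu_map() returns a positive byte
count on the fixed-iova branch and 0 on the dma-mapping branch. A
caller wanting one uniform convention could normalize the result. A
minimal sketch: msm_smmu_map_checked() is a hypothetical wrapper, not
part of the patch, and the parameter types are assumed to match
msm_smmu_map() above.

/* Hypothetical wrapper, not in the patch: fold both return
 * conventions of msm_smmu_map() into the usual 0 / -errno form.
 */
static int msm_smmu_map_checked(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, int flags, void *priv)
{
	int ret = mmu->funcs->map(mmu, iova, sgt, flags, priv);

	if (ret < 0)			/* -EINVAL from the sanity checks */
		return ret;
	if (iova != 0 && ret == 0)	/* iommu_map_sg() mapped nothing */
		return -ENOMEM;
	return 0;			/* dma path already yields 0 on success */
}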
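
For context, a caller-side sketch of how the fixed-iova branch would
be exercised in the early display case. Everything below is
illustrative and not part of the patch: early_display_map(), the FB_*
constants, and the map-op signature are assumptions; only the rule
that a non-zero iova selects the iommu_map_sg() branch comes from the
diff above.

#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/pfn.h>
#include <linux/iommu.h>

#include "msm_mmu.h"

#define FB_PHYS	0x9d400000UL	/* made-up physical base set by LK */
#define FB_SIZE	SZ_8M		/* made-up framebuffer size */
#define FB_IOVA	0x9d400000ULL	/* must equal what LK programmed */

static int early_display_map(struct msm_mmu *mmu)
{
	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table(&sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_page(sgt.sgl, pfn_to_page(PHYS_PFN(FB_PHYS)), FB_SIZE, 0);

	/* Non-zero iova: msm_smmu_map() re-routes to iommu_map_sg()
	 * instead of allocating a fresh iova via dma-mapping.c, so the
	 * display hardware keeps fetching from the address LK set up.
	 */
	ret = mmu->funcs->map(mmu, FB_IOVA, &sgt, IOMMU_READ, NULL);
	sg_free_table(&sgt);

	if (ret < 0)			/* sanity-check failure */
		return ret;
	return ret ? 0 : -ENOMEM;	/* 0 bytes mapped means failure */
}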