iommu/arm-smmu: Check the return value of map_sg and take appropriate action

We cannot call unmap directly when map_sg fails partway through, because
the TLB invalidate functions need to enable/prepare clocks, which
requires a non-atomic context. Instead, let map_sg report the failure
(along with the size of the partial mapping) and undo the mapping once
we are out of the atomic context.

Change-Id: I6401c1e281850aeda27e32524cae34324045f762
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
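
To illustrate the new calling convention, here is a rough caller-side sketch in C (illustrative only, not the driver code itself; the my_smmu_* names and the simplified domain struct are made up for the example, and the actual change is in the arm-smmu.c hunks below): ops->map_sg() runs under a spinlock, and on partial failure it now returns 0 and reports the mapped size through the extra argument, so the caller unmaps only after dropping the lock, where the TLB-invalidate path may enable/prepare clocks.

#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include "io-pgtable.h"	/* local header declaring struct io_pgtable_ops */

/* Illustrative domain type; the real driver uses struct arm_smmu_domain. */
struct my_smmu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		pgtbl_lock;
};

/* Undoes a mapping; its TLB invalidation may enable/prepare clocks (sleeps). */
static size_t my_smmu_unmap(struct my_smmu_domain *dom, unsigned long iova,
			    size_t size);

static size_t my_smmu_map_sg(struct my_smmu_domain *dom, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int prot)
{
	struct io_pgtable_ops *ops = dom->pgtbl_ops;
	unsigned long flags;
	size_t size = 0;
	int ret;

	/* The page-table update itself stays in atomic context. */
	spin_lock_irqsave(&dom->pgtbl_lock, flags);
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
	spin_unlock_irqrestore(&dom->pgtbl_lock, flags);

	/*
	 * On partial failure map_sg returns 0 and stores the bytes it did
	 * map in 'size'; undo them here, outside the spinlock.
	 */
	if (!ret)
		my_smmu_unmap(dom, iova, size);

	return ret;
}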
Author:    Rohit Vaswani <rvaswani@codeaurora.org>  2015-08-18 17:57:44 -07:00
Committer: David Keitel
commit 599de5c63b (parent 0934068545)
3 changed files with 15 additions and 9 deletions

drivers/iommu/arm-smmu.c

@@ -2073,10 +2073,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			     size_t size);
+
 static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			      struct scatterlist *sg, unsigned int nents, int prot)
 {
 	int ret;
+	size_t size;
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -2085,12 +2088,15 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		return -ENODEV;
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->map_sg(ops, iova, sg, nents, prot);
+	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-	if (ret)
+	if (ret) {
 		if (arm_smmu_assign_table(smmu_domain))
 			return 0;
+	} else {
+		arm_smmu_unmap(domain, iova, size);
+	}
 	return ret;
 }

drivers/iommu/io-pgtable-arm.c

@@ -459,7 +459,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 			   struct scatterlist *sg, unsigned int nents,
-			   int iommu_prot)
+			   int iommu_prot, size_t *size)
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte *ptep = data->pgd;
@@ -469,7 +469,6 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 	size_t mapped = 0;
 	int i, ret;
 	unsigned int min_pagesz;
-	unsigned long orig_iova = iova;
 	struct map_state ms;
 
 	/* If no access, then nothing to do */
@@ -529,10 +528,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 	return mapped;
 
 out_err:
-	/* undo mappings already done */
-	if (mapped)
-		ops->unmap(ops, orig_iova, mapped);
+	/* Return the size of the partial mapping so that they can be undone */
+	*size = mapped;
 	return 0;
 }

drivers/iommu/io-pgtable.h

@@ -77,6 +77,8 @@ struct io_pgtable_cfg {
  * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
  *
  * @map: Map a physically contiguous memory region.
+ * @map_sg: Map a scatterlist. The size parameter contains the size
+ *          of the partial mapping in case of failure.
  * @unmap: Unmap a physically contiguous memory region.
  * @iova_to_phys: Translate iova to physical address.
  *
@@ -87,7 +89,8 @@ struct io_pgtable_ops {
 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
 	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
-		      struct scatterlist *sg, unsigned int nents, int prot);
+		      struct scatterlist *sg, unsigned int nents,
+		      int prot, size_t *size);
 	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
 			size_t size);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
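
Read together with the kernel-doc comment added above, the contract for an io_pgtable map_sg implementation is that it no longer unwinds a partial mapping itself; it records the partially mapped size and returns 0. Below is a rough C sketch of an implementation honouring that contract (illustrative only; the actual arm_lpae_map_sg walks the page tables directly rather than calling ops->map per entry, and my_pgtable_map_sg is a made-up name):

#include <linux/scatterlist.h>
#include "io-pgtable.h"

static int my_pgtable_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int prot, size_t *size)
{
	struct scatterlist *s;
	size_t mapped = 0;
	int i, ret;

	*size = 0;

	for_each_sg(sg, s, nents, i) {
		ret = ops->map(ops, iova + mapped, sg_phys(s), s->length, prot);
		if (ret)
			goto out_err;
		mapped += s->length;
	}

	return mapped;

out_err:
	/*
	 * Do not call ops->unmap() here: the caller may hold a spinlock and
	 * the TLB invalidation triggered by unmap can require clock
	 * enable/prepare. Report the partial size so the caller can undo
	 * the mapping once it is out of atomic context.
	 */
	*size = mapped;
	return 0;
}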