iommu/arm-smmu: Do an ATOS in the fault handler
In order to facilitate debugging of context faults, the result of a
software table walk is currently printed as part of the fault handling.
However, certain bugs and error conditions (such as a missing TLB
invalidation or misbehaving hardware) cannot be caught by a software
table walk alone. Add a hardware table walk (via ATOS) to improve
debugging capabilities.

Change-Id: Ie89019df62f115627359e29b1f6cc5de3a36d1b5
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
parent 9f24ce3582
commit 8f418bc8a3

1 changed file with 104 additions and 11 deletions
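For reference, the "hardware table walk" mentioned above is the SMMU's ATOS
(address translation operation) mechanism, driven by
__arm_smmu_iova_to_phys_hard(); the hunks below only show parts of that
function. Below is a minimal, illustrative sketch of the core ATOS register
sequence. ARM_SMMU_CB_PAR_LO/HI and CB_PAR_F appear in the hunks below;
names such as ARM_SMMU_CB_ATS1PR_LO, ARM_SMMU_CB_ATSR and ATSR_ACTIVE, the
40-bit output-address mask, and the helper name itself are assumptions for
illustration only, and the snippet is not part of this patch.

/*
 * Illustrative sketch only -- not part of this commit. Rough ATOS flow:
 * write the IOVA into ATS1PR, poll ATSR until the translation operation
 * completes, then read the physical address (or fault flag) from PAR.
 */
static phys_addr_t atos_translate_sketch(void __iomem *cb_base, dma_addr_t iova)
{
	u32 tmp;
	u64 par;

	/* start a stage-1 privileged-read translation for this page */
	writel_relaxed(lower_32_bits(iova & ~0xfffULL),
		       cb_base + ARM_SMMU_CB_ATS1PR_LO);

	/* wait for the translation operation to finish */
	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50))
		return 0;	/* ATOS timed out */

	/* PAR holds either the output address or a fault indication */
	par = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	par |= (u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI) << 32;
	if (par & CB_PAR_F)
		return 0;	/* the translation itself faulted */

	/* assume a 40-bit PA; combine the page frame with the page offset */
	return (par & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

The new arm_smmu_verify_fault() wraps a sequence like this with a halt/resume
of the SMMU and a retry after TLBIALL, as the first hunk below shows.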
@@ -943,9 +943,66 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 	.unprepare_pgtable = arm_smmu_unprepare_pgtable,
 };
 
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					       dma_addr_t iova);
+static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
+	struct iommu_domain *domain, dma_addr_t iova);
+static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
+static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
+static void arm_smmu_resume(struct arm_smmu_device *smmu);
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					dma_addr_t iova);
 
+static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
+					 dma_addr_t iova, u32 fsr)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu;
+	void __iomem *cb_base;
+	u64 sctlr, sctlr_orig;
+	phys_addr_t phys;
+
+	smmu = smmu_domain->smmu;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+	arm_smmu_halt_nowait(smmu);
+
+	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+
+	arm_smmu_wait_for_halt(smmu);
+
+	/* clear FSR to allow ATOS to log any faults */
+	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* disable stall mode momentarily */
+	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~SCTLR_CFCFG;
+	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
+
+	if (!phys) {
+		dev_err(smmu->dev,
+			"ATOS failed. Will issue a TLBIALL and try again...\n");
+		arm_smmu_tlb_inv_context(smmu_domain);
+		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
+		if (phys)
+			dev_err(smmu->dev,
+				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
+		else
+			dev_err(smmu->dev,
+				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
+	}
+
+	/* restore SCTLR */
+	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+
+	arm_smmu_resume(smmu);
+
+	return phys;
+}
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret, tmp;
@@ -1016,6 +1073,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		ret = IRQ_HANDLED;
 		resume = ctx_hang_errata ? RESUME_TERMINATE : RESUME_RETRY;
 	} else {
+		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
+							      fsr);
+
 		dev_err(smmu->dev,
 			"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
 			iova, fsr, fsynr, cfg->cbndx);
@@ -1032,6 +1092,8 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 			(fsr & 0x80000000) ? "MULTI " : "");
 		dev_err(smmu->dev,
 			"soft iova-to-phys=%pa\n", &phys_soft);
+		dev_err(smmu->dev,
+			"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
 		dev_err(smmu->dev, "SID=0x%x\n", frsynra & 0x1FF);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
@@ -1841,17 +1903,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 	return ret;
 }
 
-static int arm_smmu_halt(struct arm_smmu_device *smmu)
+static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
 {
 	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
 	u32 tmp;
 
-	writel_relaxed(MICRO_MMU_CTRL_LOCAL_HALT_REQ,
-		impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
-
 	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
-					tmp, (tmp & MICRO_MMU_CTRL_IDLE),
-					0, 30000)) {
+				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
+				      0, 30000)) {
 		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
 		return -EBUSY;
 	}
@@ -1859,6 +1918,26 @@ static int arm_smmu_halt(struct arm_smmu_device *smmu)
 	return 0;
 }
 
+static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+
+	writel_relaxed(MICRO_MMU_CTRL_LOCAL_HALT_REQ,
+		       impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+
+	return wait ? arm_smmu_wait_for_halt(smmu) : 0;
+}
+
+static int arm_smmu_halt(struct arm_smmu_device *smmu)
+{
+	return __arm_smmu_halt(smmu, true);
+}
+
+static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
+{
+	return __arm_smmu_halt(smmu, false);
+}
+
 static void arm_smmu_resume(struct arm_smmu_device *smmu)
 {
 	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
@@ -1869,8 +1948,8 @@ static void arm_smmu_resume(struct arm_smmu_device *smmu)
 	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
 }
 
-static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
-					dma_addr_t iova)
+static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+						dma_addr_t iova, bool do_halt)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1887,7 +1966,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 
 	spin_lock_irqsave(&smmu->atos_lock, flags);
 
-	if (arm_smmu_halt(smmu))
+	if (do_halt && arm_smmu_halt(smmu))
 		goto err_unlock;
 
 	if (smmu->version == 1)
@@ -1905,7 +1984,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
 	phys |= ((u64) readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
 
-	arm_smmu_resume(smmu);
+	if (do_halt)
+		arm_smmu_resume(smmu);
 	spin_unlock_irqrestore(&smmu->atos_lock, flags);
 
 	if (phys & CB_PAR_F) {
@@ -1920,7 +2000,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	return phys;
 
 err_resume:
-	arm_smmu_resume(smmu);
+	if (do_halt)
+		arm_smmu_resume(smmu);
 err_unlock:
 	spin_unlock_irqrestore(&smmu->atos_lock, flags);
 	arm_smmu_disable_clocks(smmu);
@@ -1931,6 +2012,18 @@ err_unlock:
 	return 0;
 }
 
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova)
+{
+	return __arm_smmu_iova_to_phys_hard(domain, iova, true);
+}
+
+static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
+	struct iommu_domain *domain, dma_addr_t iova)
+{
+	return __arm_smmu_iova_to_phys_hard(domain, iova, false);
+}
+
 static bool arm_smmu_capable(enum iommu_cap cap)
 {
 	switch (cap) {