diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
index de88a6eba7a5..b399145ea8a2 100644
--- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
@@ -36,8 +36,6 @@ Optional properties:
 		for secure buffer allocation
 - qcom,secure_align_mask: A mask for determining how secure buffers need to
 		be aligned
-- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should
-		be enabled.
 
 - List of sub nodes, one for each of the translation context banks supported.
   The driver uses the names of these nodes to determine how they are used,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 71b6086423d6..9f35a3197a4c 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 {
 	int ret = 0;
 	struct kgsl_iommu_pt *iommu_pt = NULL;
-	int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
 	unsigned int cb_num;
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
@@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	if (IS_ERR(iommu_pt))
 		return PTR_ERR(iommu_pt);
 
-	iommu_domain_set_attr(iommu_pt->domain,
-		DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
 	if (kgsl_mmu_is_perprocess(mmu)) {
 		ret = iommu_domain_set_attr(iommu_pt->domain,
 				DOMAIN_ATTR_PROCID, &pt->name);
@@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	int ret = 0;
 	struct kgsl_iommu_pt *iommu_pt = NULL;
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
-	int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
 	int secure_vmid = VMID_CP_PIXEL;
 	unsigned int cb_num;
@@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	if (IS_ERR(iommu_pt))
 		return PTR_ERR(iommu_pt);
 
-	iommu_domain_set_attr(iommu_pt->domain,
-		DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
 	ret = iommu_domain_set_attr(iommu_pt->domain,
 		DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
 	if (ret) {
@@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	int dynamic = 1;
 	unsigned int cb_num = ctx->cb_num;
-	int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
 
 	iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
 
@@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 		goto done;
 	}
 
-	iommu_domain_set_attr(iommu_pt->domain,
-		DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
 	ret = _attach_pt(iommu_pt, ctx);
 	if (ret)
 		goto done;
@@ -2492,7 +2480,6 @@ static const struct {
 	{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
 	{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
 	{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
-	{ "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
 };
 
 static int _kgsl_iommu_probe(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index acbc0e784cf2..3e32c25b3dbe 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops {
 #define KGSL_MMU_FORCE_32BIT BIT(5)
 /* 64 bit address is live */
 #define KGSL_MMU_64BIT BIT(6)
-/* MMU can do coherent hardware table walks */
-#define KGSL_MMU_COHERENT_HTW BIT(7)
 /* The MMU supports non-contigious pages */
 #define KGSL_MMU_PAGED BIT(8)
 /* The device requires a guard page */
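
For context on why removing the table entry and the BIT(7) define is sufficient: the driver only ever acts on KGSL_MMU_COHERENT_HTW through MMU_FEATURE() tests in the _init_*_pt() paths, so once the "qcom,coherent-htw" probe entry and the flag definition are gone, no remaining code can observe the bit. The following is a minimal, self-contained sketch of that device-tree-property-to-feature-bit pattern, assuming MMU_FEATURE() is a plain bitmask test; the struct, helper names, and the bit positions of GLOBAL_PAGETABLE/HYP_SECURE_ALLOC are hypothetical stand-ins, with only the property strings, flag names, and FORCE_32BIT = BIT(5) taken from the diff above.

/* feature_flag_sketch.c - illustrative only, not the kgsl driver's real code */
#include <stdio.h>
#include <string.h>

#define BIT(n)				(1u << (n))

/* Flag values; FORCE_32BIT matches the diff, the other two are illustrative. */
#define KGSL_MMU_GLOBAL_PAGETABLE	BIT(2)
#define KGSL_MMU_HYP_SECURE_ALLOC	BIT(4)
#define KGSL_MMU_FORCE_32BIT		BIT(5)

/* Hypothetical stand-in for the driver's MMU object. */
struct fake_mmu {
	unsigned int features;
};

/* Assumed shape of the MMU_FEATURE() test used in the removed hunks. */
#define MMU_FEATURE(_mmu, _bit)		((_mmu)->features & (_bit))

/* Device-tree property -> feature bit, mirroring the qcom table in kgsl_iommu.c. */
static const struct {
	const char *prop;
	unsigned int bit;
} features[] = {
	{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
	{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
	{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
};

/* Hypothetical probe step: set a feature bit for each property present. */
static void probe_features(struct fake_mmu *mmu, const char *const *props, int nprops)
{
	for (int i = 0; i < nprops; i++)
		for (size_t j = 0; j < sizeof(features) / sizeof(features[0]); j++)
			if (!strcmp(props[i], features[j].prop))
				mmu->features |= features[j].bit;
}

int main(void)
{
	const char *props[] = { "qcom,force-32bit" };	/* stand-in for DT contents */
	struct fake_mmu mmu = { 0 };

	probe_features(&mmu, props, 1);

	/* Later code gates behaviour on the bit, as _init_*_pt() did for HTW. */
	if (MMU_FEATURE(&mmu, KGSL_MMU_FORCE_32BIT))
		printf("forcing 32-bit GPU addressing\n");

	return 0;
}

Under this reading, dropping a feature amounts to deleting its table entry, its flag definition, and every MMU_FEATURE() consumer, which is exactly the shape of the patch.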