msm: kgsl: Add macros to facilitate checking MMU and pagetable ops

The MMU code does most of its magic by way of device specific MMU
and pagetable functions.  Add macros to make it easier for developers
to verify that hooks exist before calling them.

Change-Id: Ic0dedbadf74682adebec1a973384e1d3bbf4f79e
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
This commit is contained in:
Jordan Crouse 2015-11-17 15:16:56 -07:00 committed by David Keitel
parent 6fce6a4e6f
commit bdd0368ce0
2 changed files with 61 additions and 42 deletions

View file

@@ -265,7 +265,8 @@ static void kgsl_destroy_pagetable(struct kref *kref)
kgsl_unmap_global_pt_entries(pagetable); kgsl_unmap_global_pt_entries(pagetable);
pagetable->pt_ops->mmu_destroy_pagetable(pagetable); if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
kfree(pagetable); kfree(pagetable);
} }
@@ -601,7 +602,7 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
atomic_long_set(&pagetable->stats.mapped, 0); atomic_long_set(&pagetable->stats.mapped, 0);
atomic_long_set(&pagetable->stats.max_mapped, 0); atomic_long_set(&pagetable->stats.max_mapped, 0);
if (mmu->mmu_ops && mmu->mmu_ops->mmu_init_pt) { if (MMU_OP_VALID(mmu, mmu_init_pt)) {
status = mmu->mmu_ops->mmu_init_pt(mmu, pagetable); status = mmu->mmu_ops->mmu_init_pt(mmu, pagetable);
if (status) if (status)
goto err; goto err;
@@ -620,7 +621,7 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
return pagetable; return pagetable;
err: err:
if (pagetable->priv) if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
pagetable->pt_ops->mmu_destroy_pagetable(pagetable); pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
kfree(pagetable); kfree(pagetable);
@@ -686,9 +687,10 @@ uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
uint64_t start, uint64_t end, uint64_t size, uint64_t start, uint64_t end, uint64_t size,
uint64_t align) uint64_t align)
{ {
BUG_ON(pagetable == NULL || pagetable->pt_ops->find_svm_region == NULL); if (PT_OP_VALID(pagetable, find_svm_region))
return pagetable->pt_ops->find_svm_region(pagetable, start, end, size, return pagetable->pt_ops->find_svm_region(pagetable, start,
align); end, size, align);
return -ENOMEM;
} }
/** /**
@@ -700,8 +702,10 @@ uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr, int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr,
uint64_t size) uint64_t size)
{ {
BUG_ON(pagetable == NULL || pagetable->pt_ops->set_svm_region == NULL); if (PT_OP_VALID(pagetable, set_svm_region))
return pagetable->pt_ops->set_svm_region(pagetable, gpuaddr, size); return pagetable->pt_ops->set_svm_region(pagetable, gpuaddr,
size);
return -ENOMEM;
} }
/** /**
@@ -716,8 +720,10 @@ kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
return _nommu_get_gpuaddr(memdesc); return _nommu_get_gpuaddr(memdesc);
BUG_ON(pagetable == NULL || pagetable->pt_ops->get_gpuaddr == NULL); if (PT_OP_VALID(pagetable, get_gpuaddr))
return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc); return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);
return -ENOMEM;
} }
EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr); EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);
@@ -743,7 +749,8 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
if (kgsl_memdesc_has_guard_page(memdesc)) if (kgsl_memdesc_has_guard_page(memdesc))
size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc); size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);
ret = pagetable->pt_ops->mmu_map(pagetable, memdesc); if (PT_OP_VALID(pagetable, mmu_map))
ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
if (ret == 0) { if (ret == 0) {
KGSL_STATS_ADD(size, &pagetable->stats.mapped, KGSL_STATS_ADD(size, &pagetable->stats.mapped,
@@ -769,7 +776,7 @@ int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
if (memdesc->size == 0 || memdesc->gpuaddr == 0) if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return 0; return 0;
if (pagetable != NULL && pagetable->pt_ops->put_gpuaddr != NULL) if (PT_OP_VALID(pagetable, put_gpuaddr))
pagetable->pt_ops->put_gpuaddr(pagetable, memdesc); pagetable->pt_ops->put_gpuaddr(pagetable, memdesc);
if (!kgsl_memdesc_is_global(memdesc)) if (!kgsl_memdesc_is_global(memdesc))
@@ -789,10 +796,11 @@ EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable, int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
uint64_t *lo, uint64_t *hi, uint64_t memflags) uint64_t *lo, uint64_t *hi, uint64_t memflags)
{ {
if (pagetable == NULL || pagetable->pt_ops->svm_range == NULL) if (PT_OP_VALID(pagetable, svm_range))
return -ENODEV; return pagetable->pt_ops->svm_range(pagetable, lo, hi,
memflags);
return pagetable->pt_ops->svm_range(pagetable, lo, hi, memflags); return -ENODEV;
} }
EXPORT_SYMBOL(kgsl_mmu_svm_range); EXPORT_SYMBOL(kgsl_mmu_svm_range);
@@ -819,7 +827,8 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
start_addr = memdesc->gpuaddr; start_addr = memdesc->gpuaddr;
end_addr = (memdesc->gpuaddr + size); end_addr = (memdesc->gpuaddr + size);
pagetable->pt_ops->mmu_unmap(pagetable, memdesc); if (PT_OP_VALID(pagetable, mmu_unmap))
pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
/* If buffer is unmapped 0 fault addr */ /* If buffer is unmapped 0 fault addr */
if ((pagetable->fault_addr >= start_addr) && if ((pagetable->fault_addr >= start_addr) &&
@@ -844,7 +853,7 @@ int kgsl_mmu_close(struct kgsl_device *device)
kgsl_free_global(&mmu->setstate_memory); kgsl_free_global(&mmu->setstate_memory);
if (mmu->mmu_ops != NULL) if (MMU_OP_VALID(mmu, mmu_close))
ret = mmu->mmu_ops->mmu_close(mmu); ret = mmu->mmu_ops->mmu_close(mmu);
return ret; return ret;
@@ -878,9 +887,9 @@ bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return (gpuaddr != 0); return (gpuaddr != 0);
if (pagetable == NULL || pagetable->pt_ops->addr_in_range == NULL) if (PT_OP_VALID(pagetable, addr_in_range))
return false; return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr);
return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr); return false;
} }
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range); EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);

View file

@@ -196,18 +196,28 @@ int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
* of as wrappers around the actual function * of as wrappers around the actual function
*/ */
#define MMU_OP_VALID(_mmu, _field) \
(((_mmu) != NULL) && \
((_mmu)->mmu_ops != NULL) && \
((_mmu)->mmu_ops->_field != NULL))
#define PT_OP_VALID(_pt, _field) \
(((_pt) != NULL) && \
((_pt)->pt_ops != NULL) && \
((_pt)->pt_ops->_field != NULL))
static inline u64 kgsl_mmu_get_current_ttbr0(struct kgsl_mmu *mmu) static inline u64 kgsl_mmu_get_current_ttbr0(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_current_ttbr0) if (MMU_OP_VALID(mmu, mmu_get_current_ttbr0))
return mmu->mmu_ops->mmu_get_current_ttbr0(mmu); return mmu->mmu_ops->mmu_get_current_ttbr0(mmu);
else
return 0; return 0;
} }
static inline int kgsl_mmu_set_pt(struct kgsl_mmu *mmu, static inline int kgsl_mmu_set_pt(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable) struct kgsl_pagetable *pagetable)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pt) if (MMU_OP_VALID(mmu, mmu_set_pt))
return mmu->mmu_ops->mmu_set_pt(mmu, pagetable); return mmu->mmu_ops->mmu_set_pt(mmu, pagetable);
return 0; return 0;
@@ -215,21 +225,19 @@ static inline int kgsl_mmu_set_pt(struct kgsl_mmu *mmu,
static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu) static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_stop) if (MMU_OP_VALID(mmu, mmu_stop))
mmu->mmu_ops->mmu_stop(mmu); mmu->mmu_ops->mmu_stop(mmu);
} }
static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu) static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk) if (MMU_OP_VALID(mmu, mmu_enable_clk))
mmu->mmu_ops->mmu_enable_clk(mmu); mmu->mmu_ops->mmu_enable_clk(mmu);
else
return;
} }
static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu) static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk) if (MMU_OP_VALID(mmu, mmu_disable_clk))
mmu->mmu_ops->mmu_disable_clk(mmu); mmu->mmu_ops->mmu_disable_clk(mmu);
} }
@@ -246,10 +254,10 @@ static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
enum kgsl_iommu_context_id ctx_id, enum kgsl_iommu_context_id ctx_id,
enum kgsl_iommu_reg_map reg) enum kgsl_iommu_reg_map reg)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_ahbaddr) if (MMU_OP_VALID(mmu, mmu_get_reg_ahbaddr))
return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, ctx_id, reg); return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, ctx_id, reg);
else
return 0; return 0;
} }
/* /*
@@ -281,31 +289,31 @@ static inline int kgsl_mmu_use_cpu_map(struct kgsl_mmu *mmu)
static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu, static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
unsigned long pf_policy) unsigned long pf_policy)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pf_policy) if (MMU_OP_VALID(mmu, mmu_set_pf_policy))
return mmu->mmu_ops->mmu_set_pf_policy(mmu, pf_policy); return mmu->mmu_ops->mmu_set_pf_policy(mmu, pf_policy);
else
return 0; return 0;
} }
static inline void kgsl_mmu_pagefault_resume(struct kgsl_mmu *mmu) static inline void kgsl_mmu_pagefault_resume(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_pagefault_resume) if (MMU_OP_VALID(mmu, mmu_pagefault_resume))
return mmu->mmu_ops->mmu_pagefault_resume(mmu); return mmu->mmu_ops->mmu_pagefault_resume(mmu);
} }
static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu) static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_clear_fsr) if (MMU_OP_VALID(mmu, mmu_clear_fsr))
return mmu->mmu_ops->mmu_clear_fsr(mmu); return mmu->mmu_ops->mmu_clear_fsr(mmu);
} }
static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
(struct kgsl_mmu *mmu) (struct kgsl_mmu *mmu)
{ {
if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_prot_regs) if (MMU_OP_VALID(mmu, mmu_get_prot_regs))
return mmu->mmu_ops->mmu_get_prot_regs(mmu); return mmu->mmu_ops->mmu_get_prot_regs(mmu);
else
return NULL; return NULL;
} }
static inline int kgsl_mmu_is_secured(struct kgsl_mmu *mmu) static inline int kgsl_mmu_is_secured(struct kgsl_mmu *mmu)
@@ -316,16 +324,18 @@ static inline int kgsl_mmu_is_secured(struct kgsl_mmu *mmu)
static inline u64 static inline u64
kgsl_mmu_pagetable_get_ttbr0(struct kgsl_pagetable *pagetable) kgsl_mmu_pagetable_get_ttbr0(struct kgsl_pagetable *pagetable)
{ {
if (pagetable && pagetable->pt_ops->get_ttbr0) if (PT_OP_VALID(pagetable, get_ttbr0))
return pagetable->pt_ops->get_ttbr0(pagetable); return pagetable->pt_ops->get_ttbr0(pagetable);
return 0; return 0;
} }
static inline u32 static inline u32
kgsl_mmu_pagetable_get_contextidr(struct kgsl_pagetable *pagetable) kgsl_mmu_pagetable_get_contextidr(struct kgsl_pagetable *pagetable)
{ {
if (pagetable && pagetable->pt_ops->get_contextidr) if (PT_OP_VALID(pagetable, get_contextidr))
return pagetable->pt_ops->get_contextidr(pagetable); return pagetable->pt_ops->get_contextidr(pagetable);
return 0; return 0;
} }