Merge "drm/msm: Move memptrs to msm_gpu and reorganize"

This commit is contained in:
Linux Build Service Account 2017-06-22 14:00:12 -07:00 committed by Gerrit - the friendly Code Review server
commit 9419a8a284
12 changed files with 175 additions and 225 deletions

@@ -46,7 +46,6 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
         struct msm_gem_address_space *aspace)
 {
-    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     struct msm_mmu *mmu = aspace->mmu;
     struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -75,17 +74,15 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
      * reload the pagetable if the current ring gets preempted out.
      */
     OUT_PKT7(ring, CP_MEM_WRITE, 4);
-    OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
-    OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+    OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+    OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
     OUT_RING(ring, lower_32_bits(iommu->ttbr0));
     OUT_RING(ring, upper_32_bits(iommu->ttbr0));
     /* Also write the current contextidr (ASID) */
     OUT_PKT7(ring, CP_MEM_WRITE, 3);
-    OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
-        contextidr)));
-    OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
-        contextidr)));
+    OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+    OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
     OUT_RING(ring, iommu->contextidr);
     /* Invalidate the draw state so we start off fresh */
@@ -217,8 +214,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     OUT_PKT7(ring, CP_EVENT_WRITE, 4);
     OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
-    OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
-    OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+    OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+    OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
     OUT_RING(ring, submit->fence);
     if (submit->secure) {
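
(Aside: the call-site churn above follows directly from the rbmemptr() rework. The old macro computed an address inside one shared adreno_rbmemptrs block, indexed by ring->id; the new one only adds a member offset to the iova of the ring's own struct msm_memptrs. Both definitions, quoted from the adreno_gpu.h and msm_ringbuffer.h hunks later in this diff:

    /* old: one shared block with per-ring arrays, indexed by ring->id */
    #define _sizeof(member) \
        sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
    #define _base(adreno_gpu, member) \
        ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
    #define rbmemptr(adreno_gpu, index, member) \
        (_base((adreno_gpu), member) + ((index) * _sizeof(member)))

    /* new: each ring carries the iova of its own struct msm_memptrs */
    #define rbmemptr(ring, member) \
        ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
)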
@@ -477,30 +474,14 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
         const struct firmware *fw, u64 *iova)
 {
-    struct drm_device *drm = gpu->dev;
     struct drm_gem_object *bo;
     void *ptr;
-    bo = msm_gem_new(drm, fw->size - 4,
-        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+    ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
-    if (IS_ERR(bo))
-        return bo;
-    ptr = msm_gem_vaddr(bo);
-    if (!ptr) {
-        drm_gem_object_unreference_unlocked(bo);
-        return ERR_PTR(-ENOMEM);
-    }
-    if (iova) {
-        int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-        if (ret) {
-            drm_gem_object_unreference_unlocked(bo);
-            return ERR_PTR(ret);
-        }
-    }
+    if (IS_ERR(ptr))
+        return ERR_CAST(ptr);
     memcpy(ptr, &fw->data[4], fw->size - 4);
     return bo;

@@ -458,18 +458,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
      */
     bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
-    a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
-        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-    if (IS_ERR(a5xx_gpu->gpmu_bo))
-        goto err;
-    if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-            &a5xx_gpu->gpmu_iova))
-        goto err;
-    ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
-    if (!ptr)
+    ptr = msm_gem_kernel_new(drm, bosize,
+        MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+        &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+    if (IS_ERR(ptr))
         goto err;
     while (cmds_size > 0) {

@@ -15,41 +15,6 @@
 #include "msm_iommu.h"
 #include "a5xx_gpu.h"
-static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
-        size_t size, uint32_t flags, struct drm_gem_object **bo,
-        u64 *iova)
-{
-    struct drm_gem_object *_bo;
-    u64 _iova;
-    void *ptr;
-    int ret;
-    _bo = msm_gem_new(drm, size, flags);
-    if (IS_ERR(_bo))
-        return _bo;
-    ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
-    if (ret)
-        goto out;
-    ptr = msm_gem_vaddr(_bo);
-    if (!ptr) {
-        ret = -ENOMEM;
-        goto out;
-    }
-    if (bo)
-        *bo = _bo;
-    if (iova)
-        *iova = _iova;
-    return ptr;
-out:
-    drm_gem_object_unreference_unlocked(_bo);
-    return ERR_PTR(ret);
-}
 /*
  * Try to transition the preemption state from old to new. Return
  * true on success or false if the original state wasn't 'old'
@@ -100,7 +65,6 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 /* Return the highest priority ringbuffer with something in it */
 static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
 {
-    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     unsigned long flags;
     int i;
@@ -109,7 +73,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
         struct msm_ringbuffer *ring = gpu->rb[i];
         spin_lock_irqsave(&ring->lock, flags);
-        empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+        empty = (get_wptr(ring) == ring->memptrs->rptr);
         spin_unlock_irqrestore(&ring->lock, flags);
         if (!empty)
@@ -176,10 +140,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
     /* Set the SMMU info for the preemption */
     if (a5xx_gpu->smmu_info) {
-        a5xx_gpu->smmu_info->ttbr0 =
-            adreno_gpu->memptrs->ttbr0[ring->id];
-        a5xx_gpu->smmu_info->contextidr =
-            adreno_gpu->memptrs->contextidr[ring->id];
+        a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+        a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
     }
     /* Set the address of the incoming preemption record */
@@ -278,10 +240,10 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
     struct drm_gem_object *bo;
     u64 iova;
-    ptr = alloc_kernel_bo(gpu->dev, gpu,
+    ptr = msm_gem_kernel_new(gpu->dev,
         A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
         MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
-        &bo, &iova);
+        gpu->aspace, &bo, &iova);
     if (IS_ERR(ptr))
         return PTR_ERR(ptr);
@@ -296,7 +258,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
     ptr->info = 0;
     ptr->data = 0;
     ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
-    ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+    ptr->rptr_addr = rbmemptr(ring, rptr);
     ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
     return 0;
@@ -352,10 +314,10 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
     }
     if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
-        ptr = alloc_kernel_bo(gpu->dev, gpu,
+        ptr = msm_gem_kernel_new(gpu->dev,
             sizeof(struct a5xx_smmu_info),
             MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
-            &bo, &iova);
+            gpu->aspace, &bo, &iova);
         if (IS_ERR(ptr))
             goto fail;
@@ -214,28 +214,14 @@ struct crashdump {
 static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
 {
-    struct drm_device *drm = gpu->dev;
-    int ret = -ENOMEM;
+    int ret = 0;
-    crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
-        MSM_BO_UNCACHED);
-    if (IS_ERR(crashdump->bo)) {
-        ret = PTR_ERR(crashdump->bo);
-        crashdump->bo = NULL;
-        return ret;
-    }
-    crashdump->ptr = msm_gem_vaddr(crashdump->bo);
-    if (!crashdump->ptr)
-        goto out;
-    ret = msm_gem_get_iova(crashdump->bo, gpu->aspace,
-        &crashdump->iova);
-out:
-    if (ret) {
-        drm_gem_object_unreference(crashdump->bo);
-        crashdump->bo = NULL;
+    crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev,
+        CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED,
+        gpu->aspace, &crashdump->bo, &crashdump->iova);
+    if (IS_ERR(crashdump->ptr)) {
+        ret = PTR_ERR(crashdump->ptr);
+        crashdump->ptr = NULL;
     }
     return ret;
@@ -90,7 +90,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
         REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
     adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-        REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
+        REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));
     return 0;
 }
@@ -106,10 +106,11 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
          * ensure that it won't be. If not then this is why your
          * a430 stopped working.
          */
-        return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
-            adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-    } else
-        return adreno_gpu->memptrs->rptr[ring->id];
+        return ring->memptrs->rptr =
+            adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+    }
+    return ring->memptrs->rptr;
 }
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -128,17 +129,11 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
 uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
-    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-    if (!ring)
-        return 0;
-    return adreno_gpu->memptrs->fence[ring->id];
+    return ring ? ring->memptrs->fence : 0;
 }
 void adreno_recover(struct msm_gpu *gpu)
 {
-    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     struct drm_device *dev = gpu->dev;
     struct msm_ringbuffer *ring;
     int ret, i;
@@ -156,9 +151,8 @@ void adreno_recover(struct msm_gpu *gpu)
         ring->next = ring->start;
         /* reset completed fence seqno, discard anything pending: */
-        adreno_gpu->memptrs->fence[ring->id] =
-            adreno_submitted_fence(gpu, ring);
-        adreno_gpu->memptrs->rptr[ring->id] = 0;
+        ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+        ring->memptrs->rptr = 0;
     }
     gpu->funcs->pm_resume(gpu);
@@ -213,7 +207,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     OUT_PKT3(ring, CP_EVENT_WRITE, 3);
     OUT_RING(ring, CACHE_FLUSH_TS);
-    OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
+    OUT_RING(ring, rbmemptr(ring, fence));
     OUT_RING(ring, submit->fence);
     /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -516,7 +510,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
     struct adreno_platform_config *config = pdev->dev.platform_data;
     struct msm_gpu *gpu = &adreno_gpu->base;
-    struct msm_mmu *mmu;
     int ret;
     adreno_gpu->funcs = funcs;
@@ -541,77 +534,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
     }
     ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
-    if (ret) {
+    if (ret)
         dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
             adreno_gpu->info->pfpfw, ret);
-        return ret;
-    }
-    mmu = gpu->aspace->mmu;
-    if (mmu) {
-        ret = mmu->funcs->attach(mmu, NULL, 0);
-        if (ret)
-            return ret;
-    }
-    if (gpu->secure_aspace) {
-        mmu = gpu->secure_aspace->mmu;
-        if (mmu) {
-            ret = mmu->funcs->attach(mmu, NULL, 0);
-            if (ret)
-                return ret;
-        }
-    }
-    adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-        MSM_BO_UNCACHED);
-    if (IS_ERR(adreno_gpu->memptrs_bo)) {
-        ret = PTR_ERR(adreno_gpu->memptrs_bo);
-        adreno_gpu->memptrs_bo = NULL;
-        dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-        return ret;
-    }
-    adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-    if (!adreno_gpu->memptrs) {
-        dev_err(drm->dev, "could not vmap memptrs\n");
-        return -ENOMEM;
-    }
-    ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-        &adreno_gpu->memptrs_iova);
-    if (ret) {
-        dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-        return ret;
-    }
-    return 0;
+    return ret;
 }
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
-    struct msm_gem_address_space *aspace = gpu->base.aspace;
-    if (gpu->memptrs_bo) {
-        if (gpu->memptrs_iova)
-            msm_gem_put_iova(gpu->memptrs_bo, aspace);
-        drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
-    }
     release_firmware(gpu->pm4);
     release_firmware(gpu->pfp);
     msm_gpu_cleanup(&gpu->base);
-    if (aspace) {
-        aspace->mmu->funcs->detach(aspace->mmu);
-        msm_gem_address_space_put(aspace);
-    }
-    if (gpu->base.secure_aspace) {
-        aspace = gpu->base.secure_aspace;
-        aspace->mmu->funcs->detach(aspace->mmu);
-        msm_gem_address_space_put(aspace);
-    }
 }
 static void adreno_snapshot_os(struct msm_gpu *gpu,

@@ -83,22 +83,6 @@ struct adreno_info {
 const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define _sizeof(member) \
-    sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
-#define _base(adreno_gpu, member) \
-    ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-#define rbmemptr(adreno_gpu, index, member) \
-    (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
-struct adreno_rbmemptrs {
-    volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
-    volatile uint32_t fence[MSM_GPU_MAX_RINGS];
-    volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
-    volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
-};
 struct adreno_counter {
     u32 lo;
     u32 hi;
@@ -137,13 +121,6 @@ struct adreno_gpu {
     /* firmware: */
     const struct firmware *pm4, *pfp;
-    /* ringbuffer rptr/wptr: */
-    // TODO should this be in msm_ringbuffer? I think it would be
-    // different for z180..
-    struct adreno_rbmemptrs *memptrs;
-    struct drm_gem_object *memptrs_bo;
-    uint64_t memptrs_iova;
     /*
      * Register offsets are different between some GPUs.
      * GPU specific offsets will be exported by GPU specific
@@ -494,7 +494,12 @@ int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
 struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
         struct drm_file *file, uint64_t hostptr,
         uint64_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+        uint32_t flags, struct msm_gem_address_space *aspace,
+        struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+        uint32_t flags, struct msm_gem_address_space *aspace,
+        struct drm_gem_object **bo, uint64_t *iova);
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
         struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,

@@ -1285,3 +1285,51 @@ void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
     msm_gem_mn_put(msm_mn);
 }
+/*
+ * Helper function to consolidate in-kernel buffer allocations that usually need
+ * to allocate a buffer object, iova and a virtual address all in one shot
+ */
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+        uint32_t flags, struct msm_gem_address_space *aspace,
+        struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+    void *vaddr;
+    struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+    int ret;
+    if (IS_ERR(obj))
+        return ERR_CAST(obj);
+    ret = msm_gem_get_iova(obj, aspace, iova);
+    if (ret) {
+        drm_gem_object_unreference(obj);
+        return ERR_PTR(ret);
+    }
+    vaddr = msm_gem_vaddr(obj);
+    if (!vaddr) {
+        msm_gem_put_iova(obj, aspace);
+        drm_gem_object_unreference(obj);
+        return ERR_PTR(-ENOMEM);
+    }
+    *bo = obj;
+    return vaddr;
+}
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+        uint32_t flags, struct msm_gem_address_space *aspace,
+        struct drm_gem_object **bo, uint64_t *iova)
+{
+    return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+        false);
+}
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+        uint32_t flags, struct msm_gem_address_space *aspace,
+        struct drm_gem_object **bo, uint64_t *iova)
+{
+    return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+        true);
+}
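
(The helper returns the kernel vaddr, or an ERR_PTR, and hands the BO and iova back through out-parameters, so callers unwind with msm_gem_put_iova() plus drm_gem_object_unreference_unlocked(), exactly as the msm_gpu.c hunks below do for memptrs_bo. A minimal caller sketch; the names my_bo, my_iova and my_vaddr are illustrative, not from the diff:

    struct drm_gem_object *my_bo;
    uint64_t my_iova;
    void *my_vaddr;

    my_vaddr = msm_gem_kernel_new(drm, 4096, MSM_BO_UNCACHED,
        gpu->aspace, &my_bo, &my_iova);
    if (IS_ERR(my_vaddr))
        return PTR_ERR(my_vaddr);

    /* ... CPU writes go through my_vaddr, the GPU reads via my_iova ... */

    /* teardown mirrors msm_gpu_cleanup() below */
    msm_gem_put_iova(my_bo, gpu->aspace);
    drm_gem_object_unreference_unlocked(my_bo);
)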

@@ -810,17 +810,39 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
             gpu->name, name, PTR_ERR(aspace));
         iommu_domain_free(iommu);
-        aspace = NULL;
+        return NULL;
     }
+    if (aspace->mmu) {
+        int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+        if (ret) {
+            dev_err(gpu->dev->dev,
+                "%s: failed to atach IOMMU '%s': %d\n",
+                gpu->name, name, ret);
+            msm_gem_address_space_put(aspace);
+            aspace = ERR_PTR(ret);
+        }
+    }
     return aspace;
 }
+static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace)
+{
+    if (!IS_ERR_OR_NULL(aspace) && aspace->mmu)
+        aspace->mmu->funcs->detach(aspace->mmu);
+    msm_gem_address_space_put(aspace);
+}
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
         const char *name, struct msm_gpu_config *config)
 {
     int i, ret, nr_rings;
+    void *memptrs;
+    uint64_t memptrs_iova;
     if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
         gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -903,10 +925,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         nr_rings = ARRAY_SIZE(gpu->rb);
     }
+    /* Allocate one buffer to hold all the memptr records for the rings */
+    memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings,
+        MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);
+    if (IS_ERR(memptrs)) {
+        ret = PTR_ERR(memptrs);
+        goto fail;
+    }
     /* Create ringbuffer(s): */
     for (i = 0; i < nr_rings; i++) {
-        gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+        gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
         if (IS_ERR(gpu->rb[i])) {
             ret = PTR_ERR(gpu->rb[i]);
             gpu->rb[i] = NULL;
@@ -914,6 +944,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                 "could not create ringbuffer %d: %d\n", i, ret);
             goto fail;
         }
+        memptrs += sizeof(struct msm_memptrs);
+        memptrs_iova += sizeof(struct msm_memptrs);
     }
     gpu->nr_rings = nr_rings;
@@ -935,11 +968,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
     return 0;
 fail:
-    for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
-        if (gpu->rb[i])
-            msm_ringbuffer_destroy(gpu->rb[i]);
-    }
+    for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+        msm_ringbuffer_destroy(gpu->rb[i]);
+    if (gpu->memptrs_bo) {
+        msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+        drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+    }
+    msm_gpu_destroy_address_space(gpu->aspace);
+    msm_gpu_destroy_address_space(gpu->secure_aspace);
     pm_runtime_disable(&pdev->dev);
     return ret;
 }
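
(The layout this produces: one BO holds nr_rings consecutive struct msm_memptrs records, and each ring receives the vaddr/iova pair for its own slice. A self-contained userspace demo of the arithmetic; the struct is copied from the msm_ringbuffer.h hunk, and the base iova is a made-up example value:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct msm_memptrs {            /* mirrored from msm_ringbuffer.h */
        volatile uint32_t rptr;
        volatile uint32_t fence;
        volatile uint64_t ttbr0;
        volatile unsigned int contextidr;
    };

    int main(void)
    {
        uint64_t base_iova = 0x100000;  /* example iova of the shared BO */
        int nr_rings = 4;

        for (int i = 0; i < nr_rings; i++) {
            /* what msm_gpu_init() passes to msm_ringbuffer_new() */
            uint64_t memptrs_iova = base_iova + i * sizeof(struct msm_memptrs);
            /* what rbmemptr(ring, fence) expands to for this ring */
            uint64_t fence_addr =
                memptrs_iova + offsetof(struct msm_memptrs, fence);

            printf("ring %d: memptrs 0x%llx, fence slot 0x%llx\n", i,
                (unsigned long long)memptrs_iova,
                (unsigned long long)fence_addr);
        }
        return 0;
    }
)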
@@ -957,16 +996,17 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
     bs_fini(gpu);
-    for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
-        if (!gpu->rb[i])
-            continue;
-        if (gpu->rb[i]->iova)
-            msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
-        msm_ringbuffer_destroy(gpu->rb[i]);
-    }
+    for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+        msm_ringbuffer_destroy(gpu->rb[i]);
+    if (gpu->memptrs_bo) {
+        msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+        drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+    }
     msm_snapshot_destroy(gpu, gpu->snapshot);
     pm_runtime_disable(&pdev->dev);
+    msm_gpu_destroy_address_space(gpu->aspace);
+    msm_gpu_destroy_address_space(gpu->secure_aspace);
 }

@@ -131,6 +131,8 @@ struct msm_gpu {
     struct pm_qos_request pm_qos_req_dma;
+    struct drm_gem_object *memptrs_bo;
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
     struct msm_bus_scale_pdata *bus_scale_table;
     uint32_t bsc;

@@ -18,7 +18,8 @@
 #include "msm_ringbuffer.h"
 #include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+        struct msm_memptrs *memptrs, uint64_t memptrs_iova)
 {
     struct msm_ringbuffer *ring;
     int ret;
@@ -42,6 +43,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
         goto fail;
     }
+    ring->memptrs = memptrs;
+    ring->memptrs_iova = memptrs_iova;
     ring->start = msm_gem_vaddr(ring->bo);
     ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
     ring->next = ring->start;
@@ -60,7 +65,10 @@ fail:
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
-    if (ring->bo)
+    if (ring && ring->bo) {
+        msm_gem_put_iova(ring->bo, ring->gpu->aspace);
         drm_gem_object_unreference_unlocked(ring->bo);
+    }
     kfree(ring);
 }

@@ -20,6 +20,16 @@
 #include "msm_drv.h"
+#define rbmemptr(ring, member) \
+    ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
+struct msm_memptrs {
+    volatile uint32_t rptr;
+    volatile uint32_t fence;
+    volatile uint64_t ttbr0;
+    volatile unsigned int contextidr;
+};
 struct msm_ringbuffer {
     struct msm_gpu *gpu;
     int id;
@@ -29,9 +39,13 @@ struct msm_ringbuffer {
     uint32_t submitted_fence;
     spinlock_t lock;
     struct list_head submits;
+    struct msm_memptrs *memptrs;
+    uint64_t memptrs_iova;
 };
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+        struct msm_memptrs *memptrs, uint64_t memptrs_iova);
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
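
(With a record per ring, fence bookkeeping needs no ring->id indexing at all; adreno_last_fence() above shrinks to a single read of ring->memptrs->fence. A hypothetical helper in the same style, not part of this commit and ignoring sequence-number wraparound:

    /* Hypothetical: has 'fence' retired on this ring? The CP writes the
     * completed value to rbmemptr(ring, fence), i.e. into
     * ring->memptrs->fence, so the CPU side only reads the shared page. */
    static inline bool ring_fence_completed(struct msm_ringbuffer *ring,
            uint32_t fence)
    {
        return ring ? ring->memptrs->fence >= fence : false;
    }
)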