msm: kgsl: Do not allocate scratch memory for A3xx

A3xx devices get the ring buffer read pointer directly from the
GPU registers, so the scratch memory is never used on them. Don't
allocate scratch memory for A3xx GPU devices.
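
For context, a minimal sketch of the read-pointer lookup this refers to,
assuming helpers such as adreno_readreg(), kgsl_sharedmem_readl(),
ADRENO_RB_DEVICE() and SCRATCH_RPTR_OFFSET() from the surrounding KGSL code:

        /* Sketch only: how the RPTR fetch typically branches per target. */
        static inline unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
        {
                struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
                unsigned int rptr = 0;

                if (adreno_is_a3xx(adreno_dev)) {
                        /* A3xx: read the CP read pointer straight from the register */
                        adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
                } else {
                        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

                        /* Other targets: the CP writes the RPTR into scratch memory */
                        kgsl_sharedmem_readl(&device->scratch, &rptr,
                                        SCRATCH_RPTR_OFFSET(rb->id));
                }

                return rptr;
        }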

Change-Id: I95016dfc169b9fee74e978f5560592740f34515e
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
commit 273f50746c (parent cb8f9c4da7)
Committed 2017-08-22 18:55:50 +05:30 via Gerrit Code Review
3 changed files with 20 additions and 15 deletions

drivers/gpu/msm/adreno.c

@@ -1161,6 +1161,10 @@ static int adreno_init(struct kgsl_device *device)
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	int ret;
 
+	if (!adreno_is_a3xx(adreno_dev))
+		kgsl_sharedmem_set(device, &device->scratch, 0, 0,
+			device->scratch.size);
+
 	ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
 	if (ret)
 		return ret;

drivers/gpu/msm/adreno_ringbuffer.c

@@ -203,8 +203,9 @@ int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
 		kgsl_sharedmem_set(device, &(rb->buffer_desc),
			0, 0xAA, KGSL_RB_SIZE);
-		kgsl_sharedmem_writel(device, &device->scratch,
-			SCRATCH_RPTR_OFFSET(rb->id), 0);
+		if (!adreno_is_a3xx(adreno_dev))
+			kgsl_sharedmem_writel(device, &device->scratch,
+				SCRATCH_RPTR_OFFSET(rb->id), 0);
 		rb->wptr = 0;
 		rb->_wptr = 0;
 		rb->wptr_preempt_end = 0xFFFFFFFF;
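
For reference, SCRATCH_RPTR_OFFSET() used above indexes one 32-bit slot per
ringbuffer id inside the single scratch page. A minimal sketch of that layout,
assuming definitions along the lines of the contemporaneous adreno.h:

        /* Sketch only: assumed scratch layout, one u32 read pointer per RB id. */
        #define SCRATCH_RPTR_OFFSET(id)		((id) * sizeof(unsigned int))
        #define SCRATCH_RPTR_GPU_ADDR(dev, id)	\
                ((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))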
@@ -265,9 +266,16 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
 int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
 {
-	int status = 0;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	int i;
+	int i, status;
+
+	if (!adreno_is_a3xx(adreno_dev)) {
+		status = kgsl_allocate_global(device, &device->scratch,
+			PAGE_SIZE, 0, 0, "scratch");
+		if (status != 0)
+			return status;
+	}
 
 	if (nopreempt == false && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
 		adreno_dev->num_ringbuffers = gpudev->num_prio_levels;
@@ -303,9 +311,13 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
 void adreno_ringbuffer_close(struct adreno_device *adreno_dev)
 {
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_ringbuffer *rb;
 	int i;
 
+	if (!adreno_is_a3xx(adreno_dev))
+		kgsl_free_global(device, &device->scratch);
+
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
 		_adreno_ringbuffer_close(adreno_dev, rb);
 }

drivers/gpu/msm/kgsl.c

@@ -1120,8 +1120,6 @@ static int kgsl_open_device(struct kgsl_device *device)
 	atomic_inc(&device->active_cnt);
 	kgsl_sharedmem_set(device, &device->memstore, 0, 0,
			device->memstore.size);
-	kgsl_sharedmem_set(device, &device->scratch, 0, 0,
-			device->scratch.size);
 
 	result = device->ftbl->init(device);
 	if (result)
@@ -4724,11 +4722,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	if (status != 0)
 		goto error_close_mmu;
 
-	status = kgsl_allocate_global(device, &device->scratch,
-		PAGE_SIZE, 0, 0, "scratch");
-	if (status != 0)
-		goto error_free_memstore;
-
 	/*
	 * The default request type PM_QOS_REQ_ALL_CORES is
	 * applicable to all CPU cores that are online and
@@ -4774,8 +4767,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	return 0;
 
-error_free_memstore:
-	kgsl_free_global(device, &device->memstore);
 error_close_mmu:
 	kgsl_mmu_close(device);
 error_pwrctrl_close:
@@ -4803,8 +4794,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
 	idr_destroy(&device->context_idr);
 
-	kgsl_free_global(device, &device->scratch);
-
 	kgsl_free_global(device, &device->memstore);
 	kgsl_mmu_close(device);