msm: kgsl: Defer issue commands to worker thread

Currently the submit ioctl blocks until the commands are
added to the ringbuffer, whenever the inflight count is less
than the context burst count. If the submission happens while
the GPU is in slumber, the GPU wakeup time is added to the
submit IOCTL, increasing the latency of preparing the next
frame on the CPU side. Defer command submission to the
dispatcher worker if the GPU is in slumber state.

CRs-Fixed: 2055107
Change-Id: I099ba721e02bbcd8ccadb1bc518c7c1ef4fb7e21
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
This commit is contained in:
Hareesh Gundu 2017-06-07 14:50:15 +05:30
parent 85baaeb2e2
commit 71c3b2e17c
4 changed files with 52 additions and 3 deletions

View file

@@ -979,6 +979,13 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
spin_unlock(&dispatcher->plist_lock);
}
/*
 * _decrement_submit_now() - Drop one in-flight inline submission.
 * @device: KGSL device whose submit_now counter is decremented.
 *
 * Counterpart to the submit_now++ taken in adreno_dispatcher_issuecmds().
 * Performed under submit_lock so the idle-check path sees a consistent
 * count when deciding whether the GPU may enter SLUMBER.
 */
static inline void _decrement_submit_now(struct kgsl_device *device)
{
spin_lock(&device->submit_lock);
device->submit_now--;
spin_unlock(&device->submit_lock);
}
/**
* adreno_dispatcher_issuecmds() - Issue commands from pending contexts
* @adreno_dev: Pointer to the adreno device struct
@@ -988,15 +995,29 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	spin_lock(&device->submit_lock);
	/* If the GPU is in SLUMBER, defer the submission to the worker */
	if (device->slumber == true) {
		spin_unlock(&device->submit_lock);
		goto done;
	}
	/* Count this inline submission so idle-check won't slumber under us */
	device->submit_now++;
	spin_unlock(&device->submit_lock);

	/*
	 * If the dispatcher is busy, drop our submit_now reference and
	 * defer to the worker. (The diff-rendered original had the removed
	 * lines "adreno_dispatcher_schedule(); return;" fused in before the
	 * replacement lines, leaving them unreachable and leaking the
	 * submit_now count.)
	 */
	if (!mutex_trylock(&dispatcher->mutex)) {
		_decrement_submit_now(device);
		goto done;
	}

	_adreno_dispatcher_issuecmds(adreno_dev);
	mutex_unlock(&dispatcher->mutex);
	_decrement_submit_now(device);
	return;
done:
	adreno_dispatcher_schedule(device);
}
/**

View file

@@ -4719,6 +4719,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
device->id, device->reg_phys, device->reg_len);
rwlock_init(&device->context_lock);
spin_lock_init(&device->submit_lock);
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);

View file

@@ -256,6 +256,11 @@ struct kgsl_device {
struct kgsl_pwrctrl pwrctrl;
int open_count;
/* For GPU inline submission */
uint32_t submit_now;
spinlock_t submit_lock;
bool slumber;
struct mutex mutex;
uint32_t state;
uint32_t requested_state;

View file

@@ -2347,9 +2347,24 @@ void kgsl_idle_check(struct work_struct *work)
|| device->state == KGSL_STATE_NAP) {
if (!atomic_read(&device->active_cnt)) {
spin_lock(&device->submit_lock);
if (device->submit_now) {
spin_unlock(&device->submit_lock);
goto done;
}
/* Don't allow GPU inline submission in SLUMBER */
if (requested_state == KGSL_STATE_SLUMBER)
device->slumber = true;
spin_unlock(&device->submit_lock);
ret = kgsl_pwrctrl_change_state(device,
device->requested_state);
if (ret == -EBUSY) {
if (requested_state == KGSL_STATE_SLUMBER) {
spin_lock(&device->submit_lock);
device->slumber = false;
spin_unlock(&device->submit_lock);
}
/*
* If the GPU is currently busy, restore
* the requested state and reschedule
@@ -2360,7 +2375,7 @@ void kgsl_idle_check(struct work_struct *work)
kgsl_schedule_work(&device->idle_check_ws);
}
}
done:
if (!ret)
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2789,6 +2804,13 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
trace_kgsl_pwr_set_state(device, state);
device->state = state;
device->requested_state = KGSL_STATE_NONE;
spin_lock(&device->submit_lock);
if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND)
device->slumber = true;
else
device->slumber = false;
spin_unlock(&device->submit_lock);
}
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,