From 7412403004a296beb486d8ec66d5e3995a3ac0c5 Mon Sep 17 00:00:00 2001
From: Tim Murray
Date: Mon, 20 Feb 2017 15:59:32 +0530
Subject: [PATCH] msm: kgsl: convert some workqueues to use kthreads.

adreno_dispatch_work and _kgsl_event_worker are both low-latency
low-runtime functions that are in the critical path of GPU rendering.
Moving them out of workqueues and into a dedicated FIFO kthread avoids
significant jitter.

bug 30342017

Git-commit: 1a7a93bd33f48a369de29f6f2b56251127bf6ab4
Git-repo: https://android.googlesource.com/kernel/msm
Change-Id: I83562f488c34c2ab001c8ea79e7f09b633c658bd
Signed-off-by: Tim Murray
Signed-off-by: Abhilash Kumar
---
 drivers/gpu/msm/adreno_dispatch.c |  6 +++---
 drivers/gpu/msm/adreno_dispatch.h |  2 +-
 drivers/gpu/msm/kgsl.c            | 14 ++++++++++++++
 drivers/gpu/msm/kgsl.h            |  5 ++++-
 drivers/gpu/msm/kgsl_events.c     |  8 ++++----
 5 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b4d0656d062b..8902c3175c79 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2443,7 +2443,7 @@ static void _dispatcher_power_down(struct adreno_device *adreno_dev)
 	mutex_unlock(&device->mutex);
 }
 
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
 {
 	struct adreno_dispatcher *dispatcher =
 		container_of(work, struct adreno_dispatcher, work);
@@ -2503,7 +2503,7 @@ void adreno_dispatcher_schedule(struct kgsl_device *device)
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 
-	kgsl_schedule_work(&dispatcher->work);
+	queue_kthread_work(&kgsl_driver.worker, &dispatcher->work);
 }
 
 /**
@@ -2799,7 +2799,7 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
 	setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
 		(unsigned long) adreno_dev);
 
-	INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+	init_kthread_work(&dispatcher->work, adreno_dispatcher_work);
 
 	init_completion(&dispatcher->idle_gate);
 	complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 72545db12f90..48f0cdc546ff 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -91,7 +91,7 @@ struct adreno_dispatcher {
 	atomic_t fault;
 	struct plist_head pending;
 	spinlock_t plist_lock;
-	struct work_struct work;
+	struct kthread_work work;
 	struct kobject kobj;
 	struct completion idle_gate;
 	unsigned int disp_preempt_fair_sched;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 7584811f388a..01b8a4d587e9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4864,6 +4864,8 @@ static void kgsl_core_exit(void)
 static int __init kgsl_core_init(void)
 {
 	int result = 0;
+	struct sched_param param = { .sched_priority = 2 };
+
 	/* alloc major and minor device numbers */
 	result = alloc_chrdev_region(&kgsl_driver.major, 0,
 		KGSL_DEVICE_MAX, "kgsl");
@@ -4929,6 +4931,18 @@ static int __init kgsl_core_init(void)
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
 		WQ_MEM_RECLAIM, 0);
 
+	init_kthread_worker(&kgsl_driver.worker);
+
+	kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+		&kgsl_driver.worker, "kgsl_worker_thread");
+
+	if (IS_ERR(kgsl_driver.worker_thread)) {
+		pr_err("unable to start kgsl thread\n");
+		goto err;
+	}
+
+	sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
+
 	kgsl_events_init();
 
 	result = kgsl_drawobjs_cache_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 2a9ac899725c..faf38d1d2293 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <linux/kthread.h>
 #include
 
 /*
@@ -152,6 +153,8 @@ struct kgsl_driver {
 	unsigned int full_cache_threshold;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *mem_workqueue;
+	struct kthread_worker worker;
+	struct task_struct *worker_thread;
 };
 
 extern struct kgsl_driver kgsl_driver;
@@ -301,7 +304,7 @@ struct kgsl_event {
 	void *priv;
 	struct list_head node;
 	unsigned int created;
-	struct work_struct work;
+	struct kthread_work work;
 	int result;
 	struct kgsl_event_group *group;
 };
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 6e8abf36c50f..859511baba12 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@ static inline void signal_event(struct kgsl_device *device,
 {
 	list_del(&event->node);
 	event->result = result;
-	queue_work(device->events_wq, &event->work);
+	queue_kthread_work(&kgsl_driver.worker, &event->work);
 }
 
 /**
@@ -42,7 +42,7 @@ static inline void signal_event(struct kgsl_device *device,
  * Each event callback has its own work struct and is run on a event specific
  * workqeuue. This is the worker that queues up the event callback function.
  */
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
 {
 	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
 	int id = KGSL_CONTEXT_ID(event->context);
@@ -282,7 +282,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
 	event->created = jiffies;
 	event->group = group;
 
-	INIT_WORK(&event->work, _kgsl_event_worker);
+	init_kthread_work(&event->work, _kgsl_event_worker);
 
 	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
 
@@ -297,7 +297,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
 	if (timestamp_cmp(retired, timestamp) >= 0) {
 		event->result = KGSL_EVENT_RETIRED;
-		queue_work(device->events_wq, &event->work);
+		queue_kthread_work(&kgsl_driver.worker, &event->work);
 		spin_unlock(&group->lock);
 		return 0;
 	}
 
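
Note (not part of the patch): every hunk above applies the same pattern, replacing a work_struct queued on the shared workqueue pool with a kthread_work item queued onto one dedicated worker thread running at SCHED_FIFO. The sketch below is a minimal, self-contained module illustrating that pattern against the same kernel generation this driver targets; it is not KGSL code. All demo_* names are invented for illustration, the priority of 2 simply mirrors the patch, and the legacy spellings init_kthread_worker()/init_kthread_work()/queue_kthread_work()/flush_kthread_worker() are the ones the patch itself uses (later kernels rename them to kthread_init_worker(), kthread_init_work(), kthread_queue_work() and kthread_flush_worker()).

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/err.h>

static struct kthread_worker demo_worker;
static struct task_struct *demo_thread;
static struct kthread_work demo_work;

/* Runs on the dedicated kthread, not on a workqueue worker. */
static void demo_work_fn(struct kthread_work *work)
{
	pr_info("demo: work ran on the dedicated kthread\n");
}

static int __init demo_init(void)
{
	struct sched_param param = { .sched_priority = 2 };

	/* Set up the worker and spawn the thread that services its queue. */
	init_kthread_worker(&demo_worker);
	demo_thread = kthread_run(kthread_worker_fn, &demo_worker,
			"demo_worker_thread");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);

	/* Same priority the patch uses; SCHED_FIFO keeps the thread out of CFS. */
	sched_setscheduler(demo_thread, SCHED_FIFO, &param);

	/* Mirrors the init_kthread_work()/queue_kthread_work() calls in the patch. */
	init_kthread_work(&demo_work, demo_work_fn);
	queue_kthread_work(&demo_worker, &demo_work);

	return 0;
}

static void __exit demo_exit(void)
{
	/* Drain any pending work, then stop the dedicated thread. */
	flush_kthread_worker(&demo_worker);
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off relative to queue_work() is that all items now share a single thread, which is why only short, non-blocking callbacks (the low-latency, low-runtime work the commit message describes) are moved onto it.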