ANDROID: sched/rt: schedtune: Add boost retention to RT

Boosted RT tasks can be deboosted quickly, which makes boost useless
for RT tasks and causes a lot of glitching. Use a timer to hold off
the deboost long enough that, if the task is enqueued again within a
threshold, the boost is retained (see the sketch below).

While this could also be solved in the governor, this approach has the
following advantages:
- It is governor-independent.
- It reduces boost-group lock contention for frequent sleepers/wakers.

Note:
Fixed a build breakage due to the schedfreq dependency, which is no
longer used for RT.
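
Illustration (not part of the patch): the intended lifecycle can be
modeled as a tiny self-contained userspace program. All names here are
hypothetical; boosted stands in for rt_se->schedtune_enqueued,
timer_armed for the queued hrtimer, and the on_* helpers for the
enqueue/dequeue/callback paths below.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool boosted;     /* stands in for rt_se->schedtune_enqueued */
static bool timer_armed; /* stands in for the queued hrtimer */

static void on_enqueue(void)
{
	if (timer_armed)        /* enqueue won the race: cancel the deboost */
		timer_armed = false;
	if (boosted)
		return;
	boosted = true;         /* schedtune_enqueue_task() */
}

static void on_dequeue_sleep(void)
{
	if (boosted)
		timer_armed = true; /* defer the deboost by the threshold */
}

static void on_timer_fire(void)
{
	if (!timer_armed)       /* was cancelled: keep the boost */
		return;
	timer_armed = false;
	boosted = false;        /* schedtune_dequeue_task() */
}

int main(void)
{
	on_enqueue();           /* task wakes: boost applied */
	on_dequeue_sleep();     /* short sleep: deboost deferred */
	on_enqueue();           /* wakes again before the timeout */
	assert(boosted && !timer_armed); /* boost retained */

	on_dequeue_sleep();     /* long sleep this time */
	on_timer_fire();        /* threshold elapses: boost drops */
	assert(!boosted);
	puts("boost retained across short sleep, dropped after timeout");
	return 0;
}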

Bug: 30210506

Change-Id: I428a2695cac06cc3458cdde0dea72315e4e66c00
Signed-off-by: Joel Fernandes <joelaf@google.com>

include/linux/sched.h

@@ -1434,6 +1434,10 @@ struct sched_rt_entity {
unsigned long watchdog_stamp;
unsigned int time_slice;

/* Accesses for these must be guarded by rq->lock of the task's rq */
bool schedtune_enqueued;
struct hrtimer schedtune_timer;

struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;

kernel/sched/core.c

@@ -2200,6 +2200,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
init_dl_task_timer(&p->dl);
__dl_clear_params(p);

init_rt_schedtune_timer(&p->rt);
INIT_LIST_HEAD(&p->rt.run_list);
#ifdef CONFIG_PREEMPT_NOTIFIERS

kernel/sched/rt.c

@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/hrtimer.h>

#include "walt.h"
#include "tune.h"
@@ -975,6 +976,70 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}

#define RT_SCHEDTUNE_INTERVAL 50000000ULL /* 50 ms */

static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
{
struct sched_rt_entity *rt_se = container_of(timer,
struct sched_rt_entity,
schedtune_timer);
struct task_struct *p = rt_task_of(rt_se);
struct rq *rq = task_rq(p);

raw_spin_lock(&rq->lock);

/*
* Nothing to do if:
* - task has switched runqueues
* - task isn't RT anymore
*/
if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
goto out;
/*
 * If the task got enqueued back while the callback was pending, we
 * raced with an enqueue on another CPU; that's OK, just do nothing,
 * as the enqueue path would have tried to cancel us and we should
 * not run. Also check the schedtune_enqueued flag, as a class switch
 * on a sleeping task may already have canceled the timer and done
 * the dequeue.
 */
if (p->on_rq || !rt_se->schedtune_enqueued)
goto out;

/*
* RT task is no longer active, cancel boost
*/
rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
out:
raw_spin_unlock(&rq->lock);

/*
 * This can free the task_struct if there are no more references.
 */
put_task_struct(p);

return HRTIMER_NORESTART;
}

void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
{
struct hrtimer *timer = &rt_se->schedtune_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = rt_schedtune_timer;
rt_se->schedtune_enqueued = false;
}

static void start_schedtune_timer(struct sched_rt_entity *rt_se)
{
struct hrtimer *timer = &rt_se->schedtune_timer;
hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
HRTIMER_MODE_REL_PINNED);
}

/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1312,7 +1377,32 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);

if (!schedtune_task_boost(p))
return;

/*
 * If the schedtune timer is active, a boost was already applied;
 * just cancel the timer so that the deboost does not happen.
 * Otherwise, apply the boost below. If an armed timer was
 * cancelled, drop the task reference it held.
 */
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
put_task_struct(p);

/*
 * schedtune_enqueued can be true in the following situation:
 * enqueue_task_rt() grabbed the rq lock before the timer fired,
 * or before its callback acquired the rq lock.
 * schedtune_enqueued can be false if the timer callback is running
 * and has just released the rq lock, or if the timer finished
 * running and canceled the boost.
 */
if (rt_se->schedtune_enqueued)
return;

rt_se->schedtune_enqueued = true;
schedtune_enqueue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1324,7 +1414,19 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
if (!rt_se->schedtune_enqueued)
return;

if (flags == DEQUEUE_SLEEP) {
get_task_struct(p);
start_schedtune_timer(rt_se);
return;
}

rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
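
The get_task_struct()/put_task_struct() pairing above follows one rule:
whoever arms the timer takes a task reference, and exactly one of the
timer callback or a successful hrtimer_try_to_cancel() drops it. A
minimal userspace model of that invariant (hypothetical names; a plain
counter stands in for the kernel's task refcount):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refs;   /* stands in for the task_struct usage count */
	bool armed; /* stands in for the queued hrtimer */
};

/* mirrors get_task_struct() + start_schedtune_timer() */
static void timer_arm(struct obj *o)
{
	o->refs++;
	o->armed = true;
}

/* mirrors hrtimer_try_to_cancel() == 1 followed by put_task_struct() */
static bool timer_cancel(struct obj *o)
{
	if (!o->armed)
		return false; /* nothing queued, so no reference to drop */
	o->armed = false;
	o->refs--;
	return true;
}

/* mirrors the callback's unconditional put_task_struct() */
static void timer_fire(struct obj *o)
{
	o->armed = false;
	o->refs--;
}

int main(void)
{
	struct obj o = { .refs = 1, .armed = false }; /* baseline ref */

	timer_arm(&o);    /* dequeue-to-sleep path */
	timer_fire(&o);   /* timer expired: deboost path */
	assert(o.refs == 1);

	timer_arm(&o);
	timer_cancel(&o); /* re-enqueue cancelled in time */
	assert(o.refs == 1);
	puts("reference balanced on both the fire and the cancel path");
	return 0;
}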
/*
@@ -1364,6 +1466,32 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

/*
 * Perform a schedtune dequeue and cancellation of boost timers if needed.
 * Should be called only with the rq->lock held.
 */
static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
{
struct sched_rt_entity *rt_se = &p->rt;

BUG_ON(!raw_spin_is_locked(&rq->lock));

if (!rt_se->schedtune_enqueued)
return;

/*
 * In case of a class change, cancel any active timers. If an armed
 * timer was cancelled, drop the task reference it held.
 */
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
put_task_struct(p);

/* schedtune_enqueued is true, deboost it */
rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, task_cpu(p));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
@@ -1418,6 +1546,19 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
rcu_read_unlock();

out:
/*
 * If the previous CPU was different, make sure to cancel any active
 * schedtune timers and deboost.
 */
if (task_cpu(p) != cpu) {
unsigned long fl;
struct rq *prq = task_rq(p);

raw_spin_lock_irqsave(&prq->lock, fl);
schedtune_dequeue_rt(prq, p);
raw_spin_unlock_irqrestore(&prq->lock, fl);
}

return cpu;
}
@@ -2162,6 +2303,13 @@ static void rq_offline_rt(struct rq *rq)
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
 * On a class switch away from RT, always cancel any active schedtune
 * timers; this handles the case where we switch class for a task that
 * is already rt-dequeued but still has a running timer.
 */
schedtune_dequeue_rt(rq, p);

/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle

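For readers unfamiliar with the hrtimer calls used above, the following
is a minimal out-of-tree module sketch of the same one-shot pattern,
assuming a 4.x-era kernel like the one this patch targets (hrtimer_init()
plus assigning timer->function; newer kernels use hrtimer_setup()). It is
not part of the patch:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Same arming pattern as start_schedtune_timer() above. */
#define DEMO_INTERVAL_NS 50000000ULL /* 50 ms, like RT_SCHEDTUNE_INTERVAL */

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	pr_info("hrtimer_demo: fired, the deboost would happen here\n");
	return HRTIMER_NORESTART; /* one-shot, like the schedtune timer */
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ns_to_ktime(DEMO_INTERVAL_NS),
		      HRTIMER_MODE_REL_PINNED);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * hrtimer_try_to_cancel() returns 1 if an armed timer was removed,
	 * 0 if it was not queued, and -1 if the callback is running; the
	 * patch drops the task reference only in the first case. On module
	 * unload we must wait for the callback, so use the sleeping variant.
	 */
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
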
kernel/sched/sched.h

@@ -1408,6 +1408,7 @@ extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);