sched: break the indefinite prev_cpu selection preference

The select_best_cpu() algorithm picks the previous CPU as the target
CPU when the task has not slept for more than 2 msec (controlled by
/proc/sys/kernel/sched_select_prev_cpu_us). As a result, the complete
CPU search is never run for a task that sleeps only briefly between
long execution slices. Enforce a 100 msec threshold since the last
complete selection, after which the full algorithm is run again.

CRs-Fixed: 984463
Change-Id: I329eecc6bae8f130cd5598f6cee8ca5a01391cca
[joonwoop@codeaurora.org: fixed conflict in bias_to_prev_cpu() and sched.h
 where CONFIG_SCHED_QHMP used to be.]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
commit a68e39b7fd (parent 7d1fabcef8)
Author:    Pavankumar Kondeti
Date:      2016-03-04 13:36:43 +05:30
Committer: Joonwoo Park
3 changed files, 11 insertions(+), 0 deletions(-)
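For orientation before the diffs, here is a minimal sketch of the
wakeup-path gating this commit introduces. It is not the kernel code
itself: struct task, may_bias_to_prev_cpu(), and the two constants
below are simplified stand-ins for task_struct's ravg.mark_start
bookkeeping and the real bias_to_prev_cpu().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants: the kernel derives the 2 msec window from
 * sysctl_sched_select_prev_cpu_us and hard-codes the 100 msec cap as
 * sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC. */
#define SHORT_SLEEP_NS (2000ULL * 1000ULL)          /* 2 msec in ns */
#define LONG_SELECT_NS (100ULL * 1000ULL * 1000ULL) /* 100 msec in ns */

struct task {
        uint64_t mark_start;           /* wallclock stamped at wakeup */
        uint64_t last_switch_out_ts;   /* when the task last slept */
        uint64_t last_cpu_selected_ts; /* last complete CPU search */
};

/* Return true when the wakeup may be biased to the previous CPU. */
static bool may_bias_to_prev_cpu(const struct task *t)
{
        /* New in this commit: after 100 msec without a complete
         * selection, force the full search no matter how short the
         * intervening sleeps were. */
        if (t->mark_start - t->last_cpu_selected_ts >= LONG_SELECT_NS)
                return false;

        /* Pre-existing rule: only short sleepers keep their CPU. */
        return t->mark_start - t->last_switch_out_ts < SHORT_SLEEP_NS;
}

int main(void)
{
        /* Task woke 1 msec after sleeping, but its last full search
         * was 110 msec ago, so the bias is refused. */
        struct task t = {
                .mark_start           = 150000000ULL,
                .last_switch_out_ts   = 149000000ULL,
                .last_cpu_selected_ts =  40000000ULL,
        };
        printf("bias to prev cpu: %s\n",
               may_bias_to_prev_cpu(&t) ? "yes" : "no"); /* "no" */
        return 0;
}

Under this scheme a task that, say, runs for 10 msec and sleeps for
1 msec always passes the short-sleep test, yet still falls back to the
complete search at least once every 100 msec, because
last_cpu_selected_ts is refreshed only when the full algorithm runs.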

@@ -1482,6 +1482,7 @@ struct task_struct {
 	u32 init_load_pct;
 	u64 last_wake_ts;
 	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
 	struct related_thread_group *grp;
 	struct list_head grp_list;
 	u64 cpu_cycles;

@@ -3056,6 +3056,7 @@ static inline void mark_task_starting(struct task_struct *p)
 	wallclock = sched_ktime_clock();
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
+	p->last_cpu_selected_ts = wallclock;
 	p->last_switch_out_ts = 0;
 	update_task_cpu_cycles(p, cpu_of(rq));
 }
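
Worth noting: stamping last_cpu_selected_ts when the task first starts
gives the new field a defined baseline, so a fresh task is never
compared against a zero timestamp and is forced into its first complete
search at most 100 msec after it begins running.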

@@ -2678,6 +2678,9 @@ static unsigned int __read_mostly
 sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;

 unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;

+static unsigned int __read_mostly
+sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
+
 unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
 void update_up_down_migrate(void)
@@ -3455,6 +3458,7 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
 	struct sched_cluster *cluster;

 	if (env->boost_type != SCHED_BOOST_NONE || env->reason ||
+	    !task->ravg.mark_start ||
 	    env->need_idle || !sched_short_sleep_task_threshold)
 		return false;
@@ -3463,6 +3467,10 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
 	    unlikely(!cpu_active(prev_cpu)))
 		return false;

+	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+					sched_long_cpu_selection_threshold)
+		return false;
+
 	/*
 	 * This function should be used by task wake up path only as it's
 	 * assuming p->last_switch_out_ts as last sleep time.
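
Note that the new check uses ravg.mark_start, the wallclock stamped at
wakeup, as "now"; that is also why the previous hunk bails out of the
bias path entirely while mark_start is still zero.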
@@ -3604,6 +3612,7 @@ retry:
 		if (stats.best_capacity_cpu >= 0)
 			target = stats.best_capacity_cpu;
 	}
+	p->last_cpu_selected_ts = sched_ktime_clock();

 out:
 	rcu_read_unlock();