sched: remove the notion of small tasks and small task packing

Task packing will now be determined solely on the basis of the power
cost of task placement. All tasks are eligible for packing. Remove the
notion of "small" tasks from the scheduler.

Change-Id: I72d52d04b2677c6a8d0bc6aa7d50ff0f1a4f5ebb
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>

parent f2ea07a155
commit d590f25153
7 changed files with 72 additions and 257 deletions
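For orientation before reading the hunks: the classification being deleted is the one visible in the is_small_task() and set_hmp_defaults() hunks later in this diff. Below is a condensed, out-of-tree sketch of that removed test; the free-standing helper and its parameters are illustrative only (in the kernel these were the globals max_load_scale_factor and sched_small_task plus task_load(p)).

/*
 * Condensed illustration of the "small task" test this commit removes,
 * mirroring the is_small_task() body deleted further down.
 */
static inline int was_small_task(u64 task_load, u64 max_load_scale_factor,
				 u64 sched_small_task)
{
	u64 load = task_load;

	/* scale the task's demand to the minimum-capacity CPU */
	load *= max_load_scale_factor;
	load /= 1024;

	/* threshold was pct_to_real(sysctl_sched_small_task_pct), default 10 */
	return load < sched_small_task;
}

After this commit no such threshold exists: every task is eligible for packing, and placement is decided by the power_cost() comparisons in the scheduler hunks below.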
@@ -62,7 +62,6 @@ extern int sysctl_sched_freq_dec_notify;
 #ifdef CONFIG_SCHED_HMP
 extern unsigned int sysctl_sched_spill_nr_run;
 extern unsigned int sysctl_sched_spill_load_pct;
-extern unsigned int sysctl_sched_small_task_pct;
 extern unsigned int sysctl_sched_upmigrate_pct;
 extern unsigned int sysctl_sched_downmigrate_pct;
 extern int sysctl_sched_upmigrate_min_nice;
@@ -115,10 +115,10 @@ TRACE_EVENT(sched_enq_deq_task,
 
 TRACE_EVENT(sched_task_load,
 
-	TP_PROTO(struct task_struct *p, int small_task, int boost, int reason,
+	TP_PROTO(struct task_struct *p, int boost, int reason,
		 int sync, int prefer_idle),
 
-	TP_ARGS(p, small_task, boost, reason, sync, prefer_idle),
+	TP_ARGS(p, boost, reason, sync, prefer_idle),
 
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
@@ -127,7 +127,6 @@ TRACE_EVENT(sched_task_load,
		__field(unsigned int, sum_scaled )
		__field(unsigned int, period )
		__field(unsigned int, demand )
-		__field( int, small_task )
		__field( int, boost )
		__field( int, reason )
		__field( int, sync )
@@ -141,18 +140,17 @@ TRACE_EVENT(sched_task_load,
		__entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
		__entry->period = p->se.avg.runnable_avg_period;
		__entry->demand = p->ravg.demand;
-		__entry->small_task = small_task;
		__entry->boost = boost;
		__entry->reason = reason;
		__entry->sync = sync;
		__entry->prefer_idle = prefer_idle;
	),
 
-	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u small=%d boost=%d reason=%d sync=%d prefer_idle=%d",
+	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d prefer_idle=%d",
		__entry->pid, __entry->comm, __entry->sum,
		__entry->sum_scaled, __entry->period, __entry->demand,
-		__entry->small_task, __entry->boost, __entry->reason,
-		__entry->sync, __entry->prefer_idle)
+		__entry->boost, __entry->reason, __entry->sync,
+		__entry->prefer_idle)
 );
 
 TRACE_EVENT(sched_cpu_load,
@@ -168,7 +166,6 @@ TRACE_EVENT(sched_cpu_load,
		__field(unsigned int, mostly_idle )
		__field(unsigned int, nr_running )
		__field(unsigned int, nr_big_tasks )
-		__field(unsigned int, nr_small_tasks )
		__field(unsigned int, load_scale_factor )
		__field(unsigned int, capacity )
		__field( u64, cumulative_runnable_avg )
@@ -186,7 +183,6 @@ TRACE_EVENT(sched_cpu_load,
		__entry->mostly_idle = mostly_idle;
		__entry->nr_running = rq->nr_running;
		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
-		__entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
		__entry->load_scale_factor = rq->load_scale_factor;
		__entry->capacity = rq->capacity;
		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
@@ -198,13 +194,11 @@ TRACE_EVENT(sched_cpu_load,
		__entry->temp = temp;
	),
 
-	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u nr_small %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
+	TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
		__entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
-		__entry->nr_big_tasks, __entry->nr_small_tasks,
-		__entry->load_scale_factor, __entry->capacity,
-		__entry->cumulative_runnable_avg, __entry->irqload,
-		__entry->cur_freq, __entry->max_freq,
-		__entry->power_cost, __entry->cstate, __entry->temp)
+		__entry->nr_big_tasks, __entry->load_scale_factor, __entry->capacity,
+		__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
+		__entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
 );
 
 TRACE_EVENT(sched_set_boost,
@@ -310,7 +304,6 @@ TRACE_EVENT(sched_update_history,
		__field(unsigned int, demand )
		__array( u32, hist, RAVG_HIST_SIZE_MAX)
		__field(unsigned int, nr_big_tasks )
-		__field(unsigned int, nr_small_tasks )
		__field( int, cpu )
	),
 
@@ -324,18 +317,16 @@ TRACE_EVENT(sched_update_history,
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
-		__entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
		__entry->cpu = rq->cpu;
	),
 
-	TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u nr_small %u",
+	TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->samples,
		task_event_names[__entry->evt],
		__entry->demand, __entry->hist[0],
		__entry->hist[1], __entry->hist[2], __entry->hist[3],
-		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks,
-		__entry->nr_small_tasks)
+		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
 );
 
 TRACE_EVENT(sched_reset_all_window_stats,
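The tracepoint hunks above only drop the small-task fields; everything else in the formats is unchanged. Purely as an illustration (all values invented), a sched_task_load record emitted through the reworked TP_printk now renders along these lines, with the old small=%d column simply gone:

/*
 * Illustrative rendering of the new sched_task_load format string
 * (numbers invented):
 *
 *   1234 (foo): sum=812, sum_scaled=790, period=10000000 demand=950000
 *   boost=0 reason=0 sync=1 prefer_idle=0
 *
 * Consumers that parsed the old output positionally need to drop the
 * small= column; the same applies to nr_small in sched_cpu_load and
 * sched_update_history.
 */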
@@ -2100,7 +2100,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 #endif
		reset_cpu_hmp_stats(cpu, 1);
 
-		fixup_nr_big_small_task(cpu, 0);
+		fixup_nr_big_task(cpu, 0);
	}
 
	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
@@ -2522,23 +2522,23 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
	 * A changed min_max_freq or max_possible_freq (possible during bootup)
	 * needs to trigger re-computation of load_scale_factor and capacity for
	 * all possible cpus (even those offline). It also needs to trigger
-	 * re-computation of nr_big/small_task count on all online cpus.
+	 * re-computation of nr_big_task count on all online cpus.
	 *
	 * A changed rq->max_freq otoh needs to trigger re-computation of
	 * load_scale_factor and capacity for just the cluster of cpus involved.
	 * Since small task definition depends on max_load_scale_factor, a
-	 * changed load_scale_factor of one cluster could influence small_task
+	 * changed load_scale_factor of one cluster could influence
	 * classification of tasks in another cluster. Hence a changed
-	 * rq->max_freq will need to trigger re-computation of nr_big/small_task
+	 * rq->max_freq will need to trigger re-computation of nr_big_task
	 * count on all online cpus.
	 *
-	 * While it should be sufficient for nr_big/small_tasks to be
+	 * While it should be sufficient for nr_big_tasks to be
	 * re-computed for only online cpus, we have inadequate context
	 * information here (in policy notifier) with regard to hotplug-safety
	 * context in which notification is issued. As a result, we can't use
	 * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
	 * fixed up to issue notification always in hotplug-safe context,
-	 * re-compute nr_big/small_task for all possible cpus.
+	 * re-compute nr_big_task for all possible cpus.
	 */
 
	if (orig_min_max_freq != min_max_freq ||
@@ -2552,7 +2552,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
	 * big or small. Make this change "atomic" so that tasks are accounted
	 * properly due to changed load_scale_factor
	 */
-	pre_big_small_task_count_change(cpu_possible_mask);
+	pre_big_task_count_change(cpu_possible_mask);
	for_each_cpu(i, cpus) {
		struct rq *rq = cpu_rq(i);
 
@@ -2589,7 +2589,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
 
	__update_min_max_capacity();
	check_for_up_down_migrate_update(policy->related_cpus);
-	post_big_small_task_count_change(cpu_possible_mask);
+	post_big_task_count_change(cpu_possible_mask);
 
	return 0;
 }
@@ -9300,7 +9300,7 @@ void __init sched_init(void)
		rq->capacity = 1024;
		rq->load_scale_factor = 1024;
		rq->window_start = 0;
-		rq->hmp_stats.nr_small_tasks = rq->hmp_stats.nr_big_tasks = 0;
+		rq->hmp_stats.nr_big_tasks = 0;
		rq->hmp_flags = 0;
		rq->mostly_idle_load = pct_to_real(20);
		rq->mostly_idle_nr_run = 3;
@@ -10167,11 +10167,11 @@ static int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
	 * classification.
	 */
	get_online_cpus();
-	pre_big_small_task_count_change(cpu_online_mask);
+	pre_big_task_count_change(cpu_online_mask);
 
	tg->upmigrate_discouraged = discourage;
 
-	post_big_small_task_count_change(cpu_online_mask);
+	post_big_task_count_change(cpu_online_mask);
	put_online_cpus();
 
	return 0;
@@ -232,8 +232,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SCHED_HMP
	SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
			cfs_rq->hmp_stats.nr_big_tasks);
-	SEQ_printf(m, " .%-30s: %d\n", "nr_small_tasks",
-			cfs_rq->hmp_stats.nr_small_tasks);
	SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
			cfs_rq->hmp_stats.cumulative_runnable_avg);
 #endif
@@ -331,7 +329,6 @@ do { \
 #endif
 #ifdef CONFIG_SCHED_HMP
	P(hmp_stats.nr_big_tasks);
-	P(hmp_stats.nr_small_tasks);
	SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
			rq->hmp_stats.cumulative_runnable_avg);
 #endif
@@ -416,7 +413,6 @@ static void sched_debug_header(struct seq_file *m)
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
 #ifdef CONFIG_SCHED_HMP
-	P(sched_small_task);
	P(sched_upmigrate);
	P(sched_downmigrate);
	P(sched_init_task_load_windows);
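A practical side effect of the three debug hunks above: the scheduler debug output loses its small-task rows. The exact rendering depends on the surrounding SEQ_printf/P() macros, so the lines below are only an approximation of what disappears:

/*
 * Entries that no longer show up in the scheduler debug dump after this
 * change (approximate rendering, values invented):
 *
 *   .nr_small_tasks    : 2      (per cfs_rq and per rq HMP stats)
 *   sched_small_task   : ...    (tunable line from sched_debug_header())
 */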
@@ -2726,13 +2726,6 @@ unsigned int __read_mostly sysctl_sched_powerband_limit_pct;
 unsigned int __read_mostly sched_spill_load;
 unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
 
-/*
- * Tasks whose bandwidth consumption on a cpu is less than
- * sched_small_task are considered as small tasks.
- */
-unsigned int __read_mostly sched_small_task;
-unsigned int __read_mostly sysctl_sched_small_task_pct = 10;
-
 /*
  * Tasks with demand >= sched_heavy_task will have their
  * window-based demand added to the previous window's CPU
@@ -2826,9 +2819,6 @@ void set_hmp_defaults(void)
	sched_spill_load =
		pct_to_real(sysctl_sched_spill_load_pct);
 
-	sched_small_task =
-		pct_to_real(sysctl_sched_small_task_pct);
-
	update_up_down_migrate();
 
 #ifdef CONFIG_SCHED_FREQ_INPUT
@@ -2965,15 +2955,6 @@ static inline int is_big_task(struct task_struct *p)
	return load > sched_upmigrate;
 }
 
-/* Is a task "small" on the minimum capacity CPU */
-static inline int is_small_task(struct task_struct *p)
-{
-	u64 load = task_load(p);
-	load *= (u64)max_load_scale_factor;
-	load /= 1024;
-	return load < sched_small_task;
-}
-
 static inline u64 cpu_load(int cpu)
 {
	struct rq *rq = cpu_rq(cpu);
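With the hunk above, the only demand-based class left here is "big"; the surviving context line shows its test, load > sched_upmigrate. A short restatement of what remains versus what is gone:

/*
 * After this patch:
 *   - is_big_task() still compares the task's scaled load against
 *     sched_upmigrate (derived from sysctl_sched_upmigrate_pct);
 *   - the parallel comparison against sched_small_task, and the
 *     sched_small_task/sysctl_sched_small_task_pct variables themselves,
 *     no longer exist.
 */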
@@ -3246,105 +3227,6 @@ static unsigned int power_cost(u64 total_load, int cpu)
	return power_cost_at_freq(cpu, task_freq);
 }
 
-static int best_small_task_cpu(struct task_struct *p, int sync)
-{
-	int best_busy_cpu = -1, fallback_cpu = -1;
-	int min_cstate_cpu = -1;
-	int min_cstate = INT_MAX;
-	int cpu_cost, min_cost = INT_MAX;
-	int i = task_cpu(p), prev_cpu;
-	int hmp_capable;
-	u64 tload, cpu_load, min_load = ULLONG_MAX;
-	cpumask_t temp;
-	cpumask_t search_cpu;
-	cpumask_t fb_search_cpu = CPU_MASK_NONE;
-	struct rq *rq;
-
-	cpumask_and(&temp, &mpc_mask, cpu_possible_mask);
-	hmp_capable = !cpumask_full(&temp);
-
-	cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
-	if (unlikely(!cpumask_test_cpu(i, &search_cpu)))
-		i = cpumask_first(&search_cpu);
-
-	do {
-		rq = cpu_rq(i);
-
-		cpumask_clear_cpu(i, &search_cpu);
-
-		trace_sched_cpu_load(rq, idle_cpu(i),
-			mostly_idle_cpu_sync(i, cpu_load_sync(i, sync), sync),
-			sched_irqload(i), power_cost(scale_load_to_cpu(task_load(p),
-			i) + cpu_load_sync(i, sync), i), cpu_temp(i));
-
-		if (rq->max_possible_capacity == max_possible_capacity &&
-		    hmp_capable) {
-			cpumask_and(&fb_search_cpu, &search_cpu,
-				    &rq->freq_domain_cpumask);
-			cpumask_andnot(&search_cpu, &search_cpu,
-				       &rq->freq_domain_cpumask);
-			continue;
-		}
-
-		if (sched_cpu_high_irqload(i))
-			continue;
-
-		if (idle_cpu(i) && rq->cstate) {
-			if (rq->cstate < min_cstate) {
-				min_cstate_cpu = i;
-				min_cstate = rq->cstate;
-			}
-			continue;
-		}
-
-		cpu_load = cpu_load_sync(i, sync);
-		if (mostly_idle_cpu_sync(i, cpu_load, sync))
-			return i;
-	} while ((i = cpumask_first(&search_cpu)) < nr_cpu_ids);
-
-	if (min_cstate_cpu != -1)
-		return min_cstate_cpu;
-
-	cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
-	cpumask_andnot(&search_cpu, &search_cpu, &fb_search_cpu);
-	for_each_cpu(i, &search_cpu) {
-		rq = cpu_rq(i);
-		prev_cpu = (i == task_cpu(p));
-
-		if (sched_cpu_high_irqload(i))
-			continue;
-
-		tload = scale_load_to_cpu(task_load(p), i);
-		cpu_load = cpu_load_sync(i, sync);
-		if (!spill_threshold_crossed(tload, cpu_load, rq)) {
-			if (cpu_load < min_load ||
-			    (prev_cpu && cpu_load == min_load)) {
-				min_load = cpu_load;
-				best_busy_cpu = i;
-			}
-		}
-	}
-
-	if (best_busy_cpu != -1)
-		return best_busy_cpu;
-
-	for_each_cpu(i, &fb_search_cpu) {
-		rq = cpu_rq(i);
-		prev_cpu = (i == task_cpu(p));
-
-		tload = scale_load_to_cpu(task_load(p), i);
-		cpu_load = cpu_load_sync(i, sync);
-		cpu_cost = power_cost(tload + cpu_load, i);
-		if (cpu_cost < min_cost ||
-		    (prev_cpu && cpu_cost == min_cost)) {
-			fallback_cpu = i;
-			min_cost = cpu_cost;
-		}
-	}
-
-	return fallback_cpu;
-}
-
 #define UP_MIGRATION 1
 #define DOWN_MIGRATION 2
 #define IRQLOAD_MIGRATION 4
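The roughly one hundred lines removed above were the dedicated placement path for small tasks. For readers skimming the hunk, this is the order of preference that best_small_task_cpu() implemented, restated as a comment outline (the helpers named are the ones visible in the removed code):

/*
 * Outline of the removed best_small_task_cpu() policy:
 *
 *  1. Scan the task's allowed online CPUs, setting the max-capacity
 *     cluster aside as a fallback set on HMP systems; return the first
 *     mostly-idle CPU (mostly_idle_cpu_sync()), remembering the idle CPU
 *     in the shallowest C-state along the way.
 *  2. Failing that, use the shallowest-C-state idle CPU, if one was seen.
 *  3. Failing that, pick the least-loaded busy CPU for which
 *     spill_threshold_crossed() stays false.
 *  4. Only then fall back to the max-capacity cluster, choosing the CPU
 *     with the lowest power_cost() there.
 */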
@@ -3458,7 +3340,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
	int min_idle_cost = INT_MAX, min_busy_cost = INT_MAX;
	u64 tload, cpu_load;
	u64 min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
-	int small_task = is_small_task(p);
	int boost = sched_boost();
	int cstate, min_cstate = INT_MAX;
	int prefer_idle = -1;
@@ -3474,7 +3355,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
	if (wake_to_idle(p)) {
		prefer_idle = 1;
		prefer_idle_override = 1;
-		small_task = 0;
		/*
		 * If wake to idle and sync are both set prefer wake to idle
		 * since sync is a weak hint that might not always be correct.
@@ -3482,12 +3362,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
		sync = 0;
	}
 
-	if (small_task && !boost) {
-		best_cpu = best_small_task_cpu(p, sync);
-		prefer_idle = 0; /* For sched_task_load tracepoint */
-		goto done;
-	}
-
	trq = task_rq(p);
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
	for_each_cpu(i, &search_cpus) {
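With the shortcut above gone, every fair-class wakeup goes through the common candidate loop in select_best_cpu(), where power_cost() already weighs each CPU, so no placement bypasses the power comparison any more. The helper below is illustrative only and is not part of the patch; it assumes the power_cost(u64 total_load, int cpu) helper named in the earlier hunk header and elides the per-CPU load scaling done by scale_load_to_cpu():

/*
 * Illustrative comparison of two candidate CPUs by projected power cost,
 * in the spirit of the checks select_best_cpu() performs.
 */
static int cheaper_cpu(u64 tload, int cpu_a, u64 busy_a, int cpu_b, u64 busy_b)
{
	unsigned int cost_a = power_cost(tload + busy_a, cpu_a);
	unsigned int cost_b = power_cost(tload + busy_b, cpu_b);

	return (cost_b < cost_a) ? cpu_b : cpu_a;
}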
@@ -3632,7 +3506,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
	if (min_cstate_cpu >= 0 && (prefer_idle > 0 || best_cpu < 0 ||
	    !mostly_idle_cpu_sync(best_cpu, min_load, sync)))
		best_cpu = min_cstate_cpu;
-done:
	if (best_cpu < 0) {
		if (unlikely(fallback_idle_cpu < 0))
			/*
@@ -3653,41 +3527,37 @@ done:
	 * tracepoint towards end to capture prefer_idle flag used for this
	 * instance of wakeup.
	 */
-	trace_sched_task_load(p, small_task, boost, reason, sync, prefer_idle);
+	trace_sched_task_load(p, boost, reason, sync, prefer_idle);
 
	return best_cpu;
 }
 
 static void
-inc_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
	if (!sched_enable_hmp || sched_disable_window_stats)
		return;
 
	if (is_big_task(p))
		stats->nr_big_tasks++;
-	else if (is_small_task(p))
-		stats->nr_small_tasks++;
 }
 
 static void
-dec_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
	if (!sched_enable_hmp || sched_disable_window_stats)
		return;
 
	if (is_big_task(p))
		stats->nr_big_tasks--;
-	else if (is_small_task(p))
-		stats->nr_small_tasks--;
 
-	BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0);
+	BUG_ON(stats->nr_big_tasks < 0);
 }
 
 static void
 inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 {
-	inc_nr_big_small_task(&rq->hmp_stats, p);
+	inc_nr_big_task(&rq->hmp_stats, p);
	if (change_cra)
		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
@@ -3695,14 +3565,14 @@ inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 static void
 dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 {
-	dec_nr_big_small_task(&rq->hmp_stats, p);
+	dec_nr_big_task(&rq->hmp_stats, p);
	if (change_cra)
		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
 static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
 {
-	stats->nr_big_tasks = stats->nr_small_tasks = 0;
+	stats->nr_big_tasks = 0;
	if (reset_cra)
		stats->cumulative_runnable_avg = 0;
 }
@@ -3745,29 +3615,23 @@ static inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
  * Return total number of tasks "eligible" to run on highest capacity cpu
  *
  * This is simply nr_big_tasks for cpus which are not of max_capacity and
- * (nr_running - nr_small_tasks) for cpus of max_capacity
+ * nr_running for cpus of max_capacity
  */
 unsigned int nr_eligible_big_tasks(int cpu)
 {
	struct rq *rq = cpu_rq(cpu);
	int nr_big = rq->hmp_stats.nr_big_tasks;
	int nr = rq->nr_running;
-	int nr_small = rq->hmp_stats.nr_small_tasks;
 
	if (rq->max_possible_capacity != max_possible_capacity)
		return nr_big;
 
-	/* Consider all (except small) tasks on max_capacity cpu as big tasks */
-	nr_big = nr - nr_small;
-	if (nr_big < 0)
-		nr_big = 0;
-
-	return nr_big;
+	return nr;
 }
 
 /*
  * reset_cpu_hmp_stats - reset HMP stats for a cpu
- *	nr_big_tasks, nr_small_tasks
+ *	nr_big_tasks
  *	cumulative_runnable_avg (iff reset_cra is true)
  */
 void reset_cpu_hmp_stats(int cpu, int reset_cra)
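The nr_eligible_big_tasks() change above is the behavioural core of the counter rework: on max-capacity CPUs every runnable task now counts as eligible. A worked comparison with invented numbers:

/*
 * nr_eligible_big_tasks(), worked example (numbers invented):
 *
 *   max-capacity CPU, nr_running = 5, of which 2 were formerly "small":
 *     before: nr_running - nr_small_tasks = 5 - 2 = 3
 *     after:  nr_running                  = 5
 *
 *   any other CPU with nr_big_tasks = 1:
 *     before and after: 1 (this branch is unchanged)
 */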
@@ -3794,7 +3658,7 @@ _inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
 
	/*
	 * Although below check is not strictly required (as
-	 * inc/dec_nr_big_small_task and inc/dec_cumulative_runnable_avg called
+	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
	 * efficiency by short-circuiting for_each_sched_entity() loop when
	 * !sched_enable_hmp || sched_disable_window_stats
@@ -3857,10 +3721,10 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
 
-		dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
+		dec_nr_big_task(&cfs_rq->hmp_stats, p);
		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
					      new_task_load);
-		inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
+		inc_nr_big_task(&cfs_rq->hmp_stats, p);
		if (cfs_rq_throttled(cfs_rq))
			break;
		/*
@@ -3872,9 +3736,9 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
 
	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
	if (!se) {
-		dec_nr_big_small_task(&rq->hmp_stats, p);
+		dec_nr_big_task(&rq->hmp_stats, p);
		fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-		inc_nr_big_small_task(&rq->hmp_stats, p);
+		inc_nr_big_task(&rq->hmp_stats, p);
	}
 }
 
@@ -3885,14 +3749,14 @@ static int task_will_be_throttled(struct task_struct *p);
 static void
 inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
 {
-	inc_nr_big_small_task(&rq->hmp_stats, p);
+	inc_nr_big_task(&rq->hmp_stats, p);
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
 static void
 dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
 {
-	dec_nr_big_small_task(&rq->hmp_stats, p);
+	dec_nr_big_task(&rq->hmp_stats, p);
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
@@ -3900,9 +3764,9 @@ static void
 fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
			   u32 new_task_load)
 {
-	dec_nr_big_small_task(&rq->hmp_stats, p);
+	dec_nr_big_task(&rq->hmp_stats, p);
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-	inc_nr_big_small_task(&rq->hmp_stats, p);
+	inc_nr_big_task(&rq->hmp_stats, p);
 }
 
 static inline int task_will_be_throttled(struct task_struct *p)
@@ -3913,21 +3777,20 @@ static inline int task_will_be_throttled(struct task_struct *p)
 static void
 _inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
 {
-	inc_nr_big_small_task(&rq->hmp_stats, p);
+	inc_nr_big_task(&rq->hmp_stats, p);
 }
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
 /*
- * Walk runqueue of cpu and re-initialize 'nr_big_tasks' and 'nr_small_tasks'
- * counters.
+ * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
  */
-void fixup_nr_big_small_task(int cpu, int reset_stats)
+void fixup_nr_big_task(int cpu, int reset_stats)
 {
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *p;
 
-	/* fixup_nr_big_small_task() is called from two functions. In one of
+	/* fixup_nr_big_task() is called from two functions. In one of
	 * them stats are already reset, don't waste time resetting them again
	 */
	if (reset_stats) {
@@ -3940,7 +3803,7 @@ void fixup_nr_big_small_task(int cpu, int reset_stats)
 }
 
 /* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
-void pre_big_small_task_count_change(const struct cpumask *cpus)
+void pre_big_task_count_change(const struct cpumask *cpus)
 {
	int i;
 
@@ -3951,16 +3814,15 @@ void pre_big_small_task_count_change(const struct cpumask *cpus)
 }
 
 /*
- * Reinitialize 'nr_big_tasks' and 'nr_small_tasks' counters on all affected
- * cpus
+ * Reinitialize 'nr_big_tasks' counters on all affected cpus
  */
-void post_big_small_task_count_change(const struct cpumask *cpus)
+void post_big_task_count_change(const struct cpumask *cpus)
 {
	int i;
 
	/* Assumes local_irq_disable() keeps online cpumap stable */
	for_each_cpu(i, cpus)
-		fixup_nr_big_small_task(i, 1);
+		fixup_nr_big_task(i, 1);
 
	for_each_cpu(i, cpus)
		raw_spin_unlock(&cpu_rq(i)->lock);
@@ -4085,24 +3947,22 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	}
 
	/*
-	 * Big/Small task tunable change will need to re-classify tasks on
-	 * runqueue as big and small and set their counters appropriately.
+	 * Big task tunable change will need to re-classify tasks on
+	 * runqueue as big and set their counters appropriately.
	 * sysctl interface affects secondary variables (*_pct), which is then
	 * "atomically" carried over to the primary variables. Atomic change
	 * includes taking runqueue lock of all online cpus and re-initiatizing
-	 * their big/small counter values based on changed criteria.
+	 * their big counter values based on changed criteria.
	 */
-	if ((data == &sysctl_sched_upmigrate_pct ||
-	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
+	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
		get_online_cpus();
-		pre_big_small_task_count_change(cpu_online_mask);
+		pre_big_task_count_change(cpu_online_mask);
	}
 
	set_hmp_defaults();
 
-	if ((data == &sysctl_sched_upmigrate_pct ||
-	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
-		post_big_small_task_count_change(cpu_online_mask);
+	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+		post_big_task_count_change(cpu_online_mask);
		put_online_cpus();
	}
 
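The handler above also shows the update protocol that survives the patch: a tunable write updates the *_pct secondary variable, then the primary values are recomputed while every online runqueue lock is held, so the per-rq nr_big_tasks counters never disagree with the new thresholds. A stripped-down sketch of that bracket, using only functions visible in these hunks (the tunable check and update_min_nice handling are folded away, so this is illustrative rather than the full handler):

/*
 * Sketch of the "atomic" retune bracket in sched_hmp_proc_update_handler()
 * after this patch.
 */
static void retune_upmigrate_sketch(void)
{
	get_online_cpus();
	pre_big_task_count_change(cpu_online_mask);	/* lock all online rqs */

	set_hmp_defaults();	/* *_pct values -> sched_upmigrate and friends */

	/* recount nr_big_tasks on every rq, then unlock */
	post_big_task_count_change(cpu_online_mask);
	put_online_cpus();
}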
@@ -4190,9 +4050,6 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
		return 0;
	}
 
-	if (is_small_task(p))
-		return 0;
-
	if (sched_cpu_high_irqload(cpu_of(rq)))
		return IRQLOAD_MIGRATION;
 
@@ -4313,11 +4170,6 @@ static inline int sched_boost(void)
	return 0;
 }
 
-static inline int is_small_task(struct task_struct *p)
-{
-	return 0;
-}
-
 static inline int is_big_task(struct task_struct *p)
 {
	return 0;
@@ -4828,14 +4680,13 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
 static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
 {
	cfs_rq->hmp_stats.nr_big_tasks = 0;
-	cfs_rq->hmp_stats.nr_small_tasks = 0;
	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
 }
 
 static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
				 struct task_struct *p, int change_cra)
 {
-	inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
+	inc_nr_big_task(&cfs_rq->hmp_stats, p);
	if (change_cra)
		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
 }
@@ -4843,7 +4694,7 @@ static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
 static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
				 struct task_struct *p, int change_cra)
 {
-	dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
+	dec_nr_big_task(&cfs_rq->hmp_stats, p);
	if (change_cra)
		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
 }
@@ -4852,7 +4703,6 @@ static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
					   struct cfs_rq *cfs_rq)
 {
	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
-	stats->nr_small_tasks += cfs_rq->hmp_stats.nr_small_tasks;
	stats->cumulative_runnable_avg +=
		cfs_rq->hmp_stats.cumulative_runnable_avg;
 }
@@ -4861,11 +4711,10 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
					   struct cfs_rq *cfs_rq)
 {
	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
-	stats->nr_small_tasks -= cfs_rq->hmp_stats.nr_small_tasks;
	stats->cumulative_runnable_avg -=
		cfs_rq->hmp_stats.cumulative_runnable_avg;
 
-	BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0 ||
+	BUG_ON(stats->nr_big_tasks < 0 ||
		(s64)stats->cumulative_runnable_avg < 0);
 }
 
@@ -7604,7 +7453,6 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK 0x02
 #define LBF_DST_PINNED 0x04
 #define LBF_SOME_PINNED 0x08
-#define LBF_IGNORE_SMALL_TASKS 0x10
 #define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x40
 #define LBF_BIG_TASK_ACTIVE_BALANCE 0x80
 #define LBF_HMP_ACTIVE_BALANCE (LBF_SCHED_BOOST_ACTIVE_BALANCE | \
@@ -7789,9 +7637,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	    !is_big_task(p))
		return 0;
 
-	if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
-		return 0;
-
	twf = task_will_fit(p, env->dst_cpu);
 
	/*
@@ -7908,9 +7753,7 @@ static int detach_tasks(struct lb_env *env)
	if (env->imbalance <= 0)
		return 0;
 
-	if (capacity(env->dst_rq) > capacity(env->src_rq))
-		env->flags |= LBF_IGNORE_SMALL_TASKS;
-	else if (capacity(env->dst_rq) < capacity(env->src_rq) &&
+	if (capacity(env->dst_rq) < capacity(env->src_rq) &&
						!sched_boost())
		env->flags |= LBF_IGNORE_BIG_TASKS;
 
@@ -7977,10 +7820,9 @@ next:
		list_move_tail(&p->se.group_node, tasks);
	}
 
-	if (env->flags & (LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS)
-	    && !detached) {
+	if (env->flags & LBF_IGNORE_BIG_TASKS && !detached) {
		tasks = &env->src_rq->cfs_tasks;
-		env->flags &= ~(LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS);
+		env->flags &= ~LBF_IGNORE_BIG_TASKS;
		env->loop = orig_loop;
		goto redo;
	}
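On the load-balance side only one biased pass is left: a scan toward a lower-capacity destination skips big tasks (LBF_IGNORE_BIG_TASKS), and if that pass detaches nothing the flag is cleared and the source list is rescanned. A minimal outline of that flow as it stands after the patch (the scan body is elided and the wrapper itself is illustrative):

/*
 * Outline of detach_tasks()'s biased pass and retry after this patch.
 */
static int detach_tasks_outline(struct lb_env *env)
{
	int detached = 0;
	int orig_loop = env->loop;

	if (capacity(env->dst_rq) < capacity(env->src_rq) && !sched_boost())
		env->flags |= LBF_IGNORE_BIG_TASKS;	/* biased first pass */

redo:
	/* ... walk env->src_rq->cfs_tasks and detach eligible tasks ... */

	if (env->flags & LBF_IGNORE_BIG_TASKS && !detached) {
		env->flags &= ~LBF_IGNORE_BIG_TASKS;	/* unbiased retry */
		env->loop = orig_loop;
		goto redo;
	}

	return detached;
}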
@@ -8152,7 +7994,7 @@ struct sg_lb_stats {
	unsigned long group_util; /* Total utilization of the group */
	unsigned int sum_nr_running; /* Nr tasks running in the group */
 #ifdef CONFIG_SCHED_HMP
-	unsigned long sum_nr_big_tasks, sum_nr_small_tasks;
+	unsigned long sum_nr_big_tasks;
	u64 group_cpu_load; /* Scaled load of all CPUs of the group */
 #endif
	unsigned int idle_cpus;
@@ -8516,7 +8358,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 #ifdef CONFIG_SCHED_HMP
	sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
-	sgs->sum_nr_small_tasks += rq->hmp_stats.nr_small_tasks;
	sgs->group_cpu_load += cpu_load(i);
 #endif
 
@@ -10153,11 +9994,7 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
	    && rq->max_freq > rq->mostly_idle_freq)
		return 0;
 
-	if (rq->nr_running >= 2 &&
-		(rq->nr_running - rq->hmp_stats.nr_small_tasks >= 2 ||
-		rq->nr_running > rq->mostly_idle_nr_run ||
-		cpu_load(cpu) > rq->mostly_idle_load)) {
-
+	if (rq->nr_running >= 2) {
		if (rq->capacity == max_capacity)
			return 1;
 
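The nohz-kick test above collapses from a compound condition into a plain runnable count. A worked comparison, using the default mostly_idle_nr_run of 3 set in sched_init() earlier in this diff (other numbers invented):

/*
 * _nohz_kick_needed_hmp(), worked comparison:
 *
 *   rq->nr_running = 2, both tasks formerly "small",
 *   mostly_idle_nr_run = 3, cpu_load(cpu) <= mostly_idle_load:
 *
 *     before: (2 - 2) < 2, 2 <= 3, load low  -> condition false, no kick
 *     after:  nr_running >= 2                -> kick is considered,
 *             subject to the capacity checks that follow in the function
 */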
@@ -351,7 +351,7 @@ struct cfs_bandwidth { };
 #ifdef CONFIG_SCHED_HMP
 
 struct hmp_sched_stats {
-	int nr_big_tasks, nr_small_tasks;
+	int nr_big_tasks;
	u64 cumulative_runnable_avg;
 };
 
@@ -996,7 +996,6 @@ extern cpumask_t mpc_mask;
 extern unsigned long capacity_scale_cpu_efficiency(int cpu);
 extern unsigned long capacity_scale_cpu_freq(int cpu);
 extern unsigned int sched_mostly_idle_load;
-extern unsigned int sched_small_task;
 extern unsigned int sched_upmigrate;
 extern unsigned int sched_downmigrate;
 extern unsigned int sched_init_task_load_pelt;
@@ -1004,7 +1003,7 @@ extern unsigned int sched_init_task_load_windows;
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern void fixup_nr_big_small_task(int cpu, int reset_stats);
+extern void fixup_nr_big_task(int cpu, int reset_stats);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				  u64 delta, u64 wallclock);
@@ -1117,7 +1116,7 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 struct hmp_sched_stats;
 
-static inline void fixup_nr_big_small_task(int cpu, int reset_stats)
+static inline void fixup_nr_big_task(int cpu, int reset_stats)
 {
 }
 
@@ -1223,8 +1222,8 @@ static inline void clear_reserved(int cpu)
 
 int mostly_idle_cpu(int cpu);
 extern void check_for_migration(struct rq *rq, struct task_struct *p);
-extern void pre_big_small_task_count_change(const struct cpumask *cpus);
-extern void post_big_small_task_count_change(const struct cpumask *cpus);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
 extern void set_hmp_defaults(void);
 extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
 extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
@@ -1238,8 +1237,8 @@ extern int sched_boost(void);
 #define sched_freq_legacy_mode 1
 
 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline void pre_big_small_task_count_change(void) { }
-static inline void post_big_small_task_count_change(void) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }
 
 static inline void clear_reserved(int cpu) { }
@@ -367,13 +367,6 @@ static struct ctl_table kern_table[] = {
		.mode = 0644,
		.proc_handler = sched_window_update_handler,
	},
-	{
-		.procname = "sched_small_task",
-		.data = &sysctl_sched_small_task_pct,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = sched_hmp_proc_update_handler,
-	},
	{
		.procname = "sched_spill_load",
		.data = &sysctl_sched_spill_load_pct,