sched: avoid CPUs with high irq activity

CPUs with significant IRQ activity will not be able to serve tasks
quickly. Avoid them where possible by disqualifying such CPUs from
being recognized as mostly idle, and by skipping them during
small-task placement, task packing and RT CPU selection.

Change-Id: I2c09272a4f259f0283b272455147d288fce11982
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
commit d3abb1dd6b (parent 4006da6ec4)
Authored by Steve Muckle on 2014-11-13 14:58:10 -08:00; committed by David Keitel.
3 changed files with 29 additions and 8 deletions
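
As a quick illustration of the semantics, here is a minimal user-space sketch of the new predicate. The 10ms threshold and the 3-jiffy staleness window come from the sched.h hunk in this commit; the struct rq fields, the NSEC_PER_MSEC definition and the jiffies plumbing are simplified stand-ins for the kernel's, so this is a model of the behavior, not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC		1000000ULL
#define SCHED_HIGH_IRQ_NS	(10 * NSEC_PER_MSEC)	/* >= 10ms of irqload is "high" */
#define SCHED_HIGH_IRQ_TIMEOUT	3			/* sample is stale after 3 jiffies */

struct rq {
	uint64_t avg_irqload;	/* decayed IRQ time on this CPU, in ns */
	uint64_t irqload_ts;	/* jiffies timestamp of the last irqload update */
};

/* Report zero IRQ load once the last sample is older than the timeout. */
static uint64_t sched_irqload(const struct rq *rq, uint64_t now_jiffies)
{
	if (now_jiffies - rq->irqload_ts < SCHED_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	return 0;
}

static int sched_cpu_high_irqload(const struct rq *rq, uint64_t now_jiffies)
{
	return sched_irqload(rq, now_jiffies) >= SCHED_HIGH_IRQ_NS;
}

int main(void)
{
	struct rq rq = { .avg_irqload = 12 * NSEC_PER_MSEC, .irqload_ts = 100 };

	/* Fresh 12ms sample is above the 10ms line: CPU is disqualified. */
	printf("high irqload now?   %d\n", sched_cpu_high_irqload(&rq, 101));

	/* Same sample 3+ jiffies later is stale: CPU qualifies again. */
	printf("high irqload later? %d\n", sched_cpu_high_irqload(&rq, 104));
	return 0;
}

A burst of interrupts therefore only disqualifies a CPU while its irqload data is fresh; once no update has landed for SCHED_HIGH_IRQ_TIMEOUT jiffies, the CPU is treated as having no IRQ load again.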

@@ -2809,12 +2809,10 @@ spill_threshold_crossed(struct task_struct *p, struct rq *rq, int cpu)
 int mostly_idle_cpu(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	int mostly_idle;
-
-	mostly_idle = (cpu_load(cpu) <= rq->mostly_idle_load
-		&& rq->nr_running <= rq->mostly_idle_nr_run);
-
-	return mostly_idle;
+
+	return cpu_load(cpu) <= rq->mostly_idle_load
+		&& rq->nr_running <= rq->mostly_idle_nr_run
+		&& !sched_cpu_high_irqload(cpu);
 }
 
 static int boost_refcount;
@@ -3066,7 +3064,7 @@ static int best_small_task_cpu(struct task_struct *p)
 			continue;
 		}
 
-		if (idle_cpu(i) && cstate) {
+		if (idle_cpu(i) && cstate && !sched_cpu_high_irqload(i)) {
 			if (cstate < min_cstate) {
 				min_cstate_cpu = i;
 				min_cstate = cstate;
@@ -3154,7 +3152,7 @@ static int select_packing_target(struct task_struct *p, int best_cpu)
 	for_each_cpu(i, &search_cpus) {
 		int cost = power_cost(p, i);
 
-		if (cost < min_cost) {
+		if (cost < min_cost && !sched_cpu_high_irqload(i)) {
 			target = i;
 			min_cost = cost;
 		}

@@ -1663,7 +1663,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 		if (sched_boost() && capacity(rq) != max_capacity)
 			continue;
 
-		if (cpu_cost < min_cost) {
+		if (cpu_cost < min_cost && !sched_cpu_high_irqload(i)) {
 			min_cost = cpu_cost;
 			best_cpu = i;
 		}

@@ -1031,6 +1031,27 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 #define real_to_pct(tunable) \
 		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
 
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	s64 delta;
+
+	delta = get_jiffies_64() - rq->irqload_ts;
+	BUG_ON(delta < 0);
+
+	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+		return rq->avg_irqload;
+	else
+		return 0;
+}
+
+#define SCHED_HIGH_IRQ_NS (10 * NSEC_PER_MSEC)
+static inline int sched_cpu_high_irqload(int cpu)
+{
+	return sched_irqload(cpu) >= SCHED_HIGH_IRQ_NS;
+}
+
 #else	/* CONFIG_SCHED_HMP */
 
 static inline int pct_task_load(struct task_struct *p) { return 0; }
@@ -1065,6 +1086,8 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
 {
 }
 
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
 #endif	/* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_FREQ_INPUT