sched: Add CONFIG_SCHED_HMP Kconfig option
Add a compile-time flag to enable or disable scheduler features for HMP (heterogeneous multi-processor) systems. The main feature deals with optimizing task placement for the best power/performance tradeoff. Also extend the features currently dependent on CONFIG_SCHED_FREQ_INPUT so that they are enabled under CONFIG_SCHED_HMP as well.

Change-Id: I03b3942709a80cc19f7b934a8089e1d84c14d72d
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor ifdeffery conflict.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 025dedac36
commit 551f83f5d6

6 changed files with 28 additions and 18 deletions
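Every C hunk below applies the same mechanical change: guards that previously tested only CONFIG_SCHED_FREQ_INPUT are widened so the same code also builds when CONFIG_SCHED_HMP is set. As a hedged aside (not part of this patch), the repeated compound condition could be evaluated once in a shared header and each site guarded by a single symbol, along these lines:

/*
 * Hypothetical consolidation, not in this patch: evaluate the compound
 * condition once, then test one symbol at every site that currently
 * repeats "defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)".
 */
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#define SCHED_WINDOW_STATS_ENABLED 1
#else
#define SCHED_WINDOW_STATS_ENABLED 0
#endif

#if SCHED_WINDOW_STATS_ENABLED
/* window-based load tracking declarations would live here */
#endif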
@@ -1203,7 +1203,7 @@ struct sched_avg {
 	u64 last_update_time, load_sum;
 	u32 util_sum, period_contrib;
 	unsigned long load_avg, util_avg;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 	u32 runnable_avg_sum_scaled;
 #endif
 };
@@ -1427,7 +1427,7 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 	struct ravg ravg;
 #endif
 #ifdef CONFIG_CGROUP_SCHED
@@ -1153,6 +1153,15 @@ config CGROUP_WRITEBACK
 
 endif # CGROUPS
 
+config SCHED_HMP
+	bool "Scheduler support for heterogenous multi-processor systems"
+	depends on SMP && FAIR_GROUP_SCHED
+	help
+	  This feature will let the scheduler optimize task placement on
+	  systems made of heterogeneous cpus i.e cpus that differ either
+	  in their instructions per-cycle capability or the maximum
+	  frequency they can attain.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
 	select PROC_CHILDREN
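The new entry is only offered when SMP and FAIR_GROUP_SCHED are already enabled; selecting it defines CONFIG_SCHED_HMP for the preprocessor, which is what the widened guards in the C hunks test. A minimal sketch of checking the option from C (the function name is invented; IS_ENABLED() is the standard kconfig helper available in kernels of this vintage):

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustrative only: true when the kernel was built with CONFIG_SCHED_HMP=y. */
static inline bool sched_hmp_enabled(void)
{
	return IS_ENABLED(CONFIG_SCHED_HMP);
}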
@@ -1746,7 +1746,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 		wq_worker_waking_up(p, cpu_of(rq));
 }
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 /* Window size (in ns) */
 __read_mostly unsigned int sched_ravg_window = 10000000;
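For reference, the default of 10000000 ns corresponds to a 10 ms accounting window. A hedged sanity check of that arithmetic (NSEC_PER_MSEC is a standard kernel constant; the variable name is illustrative):

#include <linux/time.h>	/* NSEC_PER_MSEC == 1000000L */

/* 10,000,000 ns / 1,000,000 ns per ms = 10 ms per window */
static const unsigned int sched_ravg_window_ms = 10000000 / NSEC_PER_MSEC;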
@@ -1893,7 +1893,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 	p->ravg.mark_start = wallclock;
 }
 
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 /*
  * Mark the task runnable and perform wakeup-preemption.
@@ -7573,7 +7573,8 @@ void __init sched_init_smp(void)
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 /*
  * Maximum possible frequency across all cpus. Task demand and cpu
  * capacity (cpu_power) metrics are scaled in reference to it.
@@ -7660,7 +7661,7 @@ static int register_sched_callback(void)
  */
 core_initcall(register_sched_callback);
 
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 int in_sched_functions(unsigned long addr)
 {
@@ -7799,7 +7800,7 @@ void __init sched_init(void)
 		rq->online = 0;
 		rq->idle_stamp = 0;
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 		rq->cur_freq = 1;
 		rq->max_freq = 1;
 		rq->min_freq = 1;
@@ -2911,7 +2911,7 @@ static inline int idle_balance(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 static inline unsigned int task_load(struct task_struct *p)
 {
@@ -2974,7 +2974,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
 						periods);
 }
 
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 static inline void
 add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
@@ -2985,7 +2985,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
 {
 }
 
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -643,7 +643,7 @@ struct rq {
 	u64 max_idle_balance_cost;
 #endif
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 	/*
 	 * max_freq = user or thermal defined maximum
 	 * max_possible_freq = maximum supported by hardware
@@ -917,7 +917,7 @@ static inline void sched_ttwu_pending(void) { }
 #include "stats.h"
 #include "auto_group.h"
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 extern unsigned int sched_ravg_window;
 extern unsigned int max_possible_freq;
@@ -938,7 +938,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
 }
 
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
@@ -954,7 +954,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 
 static inline void init_new_task_load(struct task_struct *p) { }
 
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_CGROUP_SCHED
 
@@ -1289,15 +1289,15 @@ struct sched_class {
 #endif
 };
 
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 extern void
 update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum);
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 static inline void
 update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 {
 }
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
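The #else branch keeps call sites free of conditional compilation: when neither option is set, update_task_ravg() collapses to an empty static inline and the compiler drops the calls. The same idiom in standalone form (all names here are invented for illustration):

/* Extern-vs-empty-stub idiom; CONFIG_EXAMPLE_FEATURE is a placeholder symbol. */
#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_hook(int cpu);
#else
static inline void example_hook(int cpu) { }	/* compiles away when disabled */
#endif

static void example_caller(int cpu)
{
	example_hook(cpu);	/* call site needs no #ifdef */
}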
@@ -292,7 +292,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 	{
 		.procname	= "sched_window_stats_policy",
 		.data		= &sysctl_sched_window_stats_policy,
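The hunk is cut off mid-entry by the diff context. For orientation, an integer sysctl entry of this kind generally has the shape sketched below (generic placeholder names, not the patch's actual table); because the entry lives in kern_table, the knob surfaces under /proc/sys/kernel/.

#include <linux/sysctl.h>

/* Generic proc_dointvec-backed entry; names are placeholders. */
static int example_policy;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_policy",
		.data		= &example_policy,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* zeroed terminator ends the table */
};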