Merge "sched: Add multiple load reporting policies for cpu frequency"
commit 707cff6b1d
4 changed files with 42 additions and 1 deletion

@@ -44,6 +44,7 @@ extern unsigned int sysctl_sched_wake_to_idle;
 #ifdef CONFIG_SCHED_HMP
 extern int sysctl_sched_freq_inc_notify;
 extern int sysctl_sched_freq_dec_notify;
+extern unsigned int sysctl_sched_freq_reporting_policy;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_hist_size;
 extern unsigned int sysctl_sched_cpu_high_irqload;

@@ -789,6 +789,12 @@ __read_mostly unsigned int sysctl_sched_new_task_windows = 5;
 
 #define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
 
+/*
+ * This governs what load needs to be used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
+
 /*
  * For increase, send notification if
  * freq_required - cur_freq > sysctl_sched_freq_inc_notify

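Note: as declared above, sysctl_sched_freq_reporting_policy has no initializer, so it starts out zero-initialized; the default reporting policy is therefore 0, which a later hunk names FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK.
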
@@ -2198,7 +2204,7 @@ void clear_top_tasks_bitmap(unsigned long *bitmap)
  * Note that sched_load_granule can change underneath us if we are not
  * holding any runqueue locks while calling the two functions below.
  */
-static u32 __maybe_unused top_task_load(struct rq *rq)
+static u32 top_task_load(struct rq *rq)
 {
         int index = rq->prev_top;
         u8 prev = 1 - rq->curr_table;

@@ -3268,6 +3274,26 @@ static inline void account_load_subtractions(struct rq *rq)
         BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
 }
 
+static inline u64 freq_policy_load(struct rq *rq, u64 load)
+{
+        unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+
+        switch (reporting_policy) {
+        case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+                load = max_t(u64, load, top_task_load(rq));
+                break;
+        case FREQ_REPORT_TOP_TASK:
+                load = top_task_load(rq);
+                break;
+        case FREQ_REPORT_CPU_LOAD:
+                break;
+        default:
+                WARN_ON_ONCE(1);
+        }
+
+        return load;
+}
+
 static inline void
 sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);

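Reviewer note: freq_policy_load() only chooses which per-window signal is handed to the governor: the load already computed by the caller (FREQ_REPORT_CPU_LOAD), the heaviest (top) task's load (FREQ_REPORT_TOP_TASK), or the larger of the two (FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK); unknown policy values trigger WARN_ON_ONCE() and leave the passed-in load untouched. A minimal user-space sketch of that selection, using made-up sample loads (the helper name and the numbers are illustrative assumptions, not part of the patch):

/* Illustrative only: mirrors the policy switch above in plain user-space C. */
#include <stdio.h>
#include <stdint.h>

#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
#define FREQ_REPORT_CPU_LOAD 1
#define FREQ_REPORT_TOP_TASK 2

static uint64_t pick_reported_load(unsigned int policy, uint64_t cpu_load,
                                   uint64_t top_task)
{
        switch (policy) {
        case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
                return cpu_load > top_task ? cpu_load : top_task;
        case FREQ_REPORT_TOP_TASK:
                return top_task;
        case FREQ_REPORT_CPU_LOAD:
        default:        /* the kernel version also warns once for unknown values */
                return cpu_load;
        }
}

int main(void)
{
        /* Sample window signals: aggregate CPU load 700, heaviest task 900. */
        uint64_t cpu_load = 700, top_task = 900;
        unsigned int policy;

        for (policy = 0; policy <= 2; policy++)
                printf("policy %u -> reported load %llu\n", policy,
                       (unsigned long long)pick_reported_load(policy, cpu_load, top_task));
        return 0;
}

With these sample numbers, policies 0 and 2 report 900 while policy 1 reports 700; that selection is the entire behavioural difference the new sysctl toggles before the load is scaled and passed on to cpufreq.
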
@@ -3385,6 +3411,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
 
                 load[i] += group_load[i];
                 nload[i] += ngload[i];
+
+                load[i] = freq_policy_load(rq, load[i]);
                 /*
                  * Scale load in reference to cluster max_possible_freq.
                  *

@@ -1034,6 +1034,10 @@ static inline void sched_ttwu_pending(void) { }
 #define WINDOW_STATS_AVG 3
 #define WINDOW_STATS_INVALID_POLICY 4
 
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
+#define FREQ_REPORT_CPU_LOAD 1
+#define FREQ_REPORT_TOP_TASK 2
+
 #define MAJOR_TASK_PCT 85
 #define SCHED_UPMIGRATE_MIN_NICE 15
 #define EXITING_TASK_MARKER 0xdeaddead

@@ -296,6 +296,14 @@ static struct ctl_table kern_table[] = {
                 .proc_handler = proc_dointvec,
         },
 #ifdef CONFIG_SCHED_HMP
+        {
+                .procname = "sched_freq_reporting_policy",
+                .data = &sysctl_sched_freq_reporting_policy,
+                .maxlen = sizeof(unsigned int),
+                .mode = 0644,
+                .proc_handler = proc_dointvec_minmax,
+                .extra1 = &zero,
+        },
         {
                 .procname = "sched_freq_inc_notify",
                 .data = &sysctl_sched_freq_inc_notify,

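Reviewer note: because the new ctl_table entry sits in kern_table, it should surface as /proc/sys/kernel/sched_freq_reporting_policy. proc_dointvec_minmax with only .extra1 = &zero enforces just the lower bound; values above 2 are not rejected here and instead hit the WARN_ON_ONCE() default case in freq_policy_load(). A hedged user-space sketch of switching the policy at run time (the proc path and the helper are assumptions based on that table placement, not something shipped with this patch):

/* Illustrative only: flip the reporting policy via the new sysctl. */
#include <stdio.h>

#define POLICY_PATH "/proc/sys/kernel/sched_freq_reporting_policy"

/* 0: max(CPU load, top task), 1: CPU load only, 2: top task only */
static int set_reporting_policy(unsigned int policy)
{
        FILE *f = fopen(POLICY_PATH, "w");

        if (!f)
                return -1;
        fprintf(f, "%u\n", policy);
        return fclose(f);
}

int main(void)
{
        /* Request top-task based reporting (FREQ_REPORT_TOP_TASK == 2). */
        if (set_reporting_policy(2))
                perror(POLICY_PATH);
        return 0;
}

Reading the same proc file back returns the currently active policy, since the entry is created with mode 0644 and a plain integer handler.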