sched: Remove all existence of CONFIG_SCHED_FREQ_INPUT

CONFIG_SCHED_FREQ_INPUT was created to keep the parts of the scheduler that
deal with frequency guidance separate from the parts that deal with task
placement. Over time, however, the two features have become intricately
linked: SCHED_FREQ_INPUT cannot be turned on without SCHED_HMP also being
turned on. Given this inter-dependency, and the fact that all old, existing
and future targets use both config options, remove the unnecessary feature
separation. This will make kernel upgrades simpler and faster.

Change-Id: Ia20e40d8a088d50909cc28f5be758fa3e9a4af6f
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent 9b6911e89a
commit 62f2600ce9
9 changed files with 147 additions and 345 deletions
@@ -189,9 +189,6 @@ struct ravg {
        u64 mark_start;
        u32 sum, demand;
        u32 sum_history[RAVG_HIST_SIZE];
#ifdef CONFIG_SCHED_FREQ_INPUT
        u32 curr_window, prev_window;
#endif
};

struct task_struct {

@@ -842,8 +839,7 @@ As mentioned in the introduction section the scheduler is in a unique
position to assist with the determination of CPU frequency. Because
the scheduler now maintains an estimate of per-task CPU demand, task
activity can be tracked, aggregated and provided to the CPUfreq
governor as a replacement for simple CPU busy time. CONFIG_SCHED_FREQ_INPUT
kernel configuration variable needs to be enabled for this feature to be active.
governor as a replacement for simple CPU busy time.

Two of the most popular CPUfreq governors, interactive and ondemand,
utilize a window-based approach for measuring CPU busy time. This

@@ -1384,8 +1380,7 @@ cpus are being reset. Changes to below attributes result in such a reset:

*** 8.7 sched_migration_update_sum

Logged when CONFIG_SCHED_FREQ_INPUT feature is enabled and a task is migrating
to another cpu.
Logged when a task is migrating to another cpu.

<task>-0 [000] d..8 5020.404137: sched_migration_update_sum: cpu 0: cs 471278 ps 902463 nt_cs 0 nt_ps 0 pid 2645
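The documentation change above captures the model: per-task demand is
aggregated per window and handed to the CPUfreq governor in place of raw
busy time. As a rough, hypothetical sketch (not code from this patch), a
window-based governor could consume the scheduler input roughly as follows;
sched_get_busy() is the interface declared later in this diff, while the
helper and callback names here are assumptions for illustration only:

/* Hypothetical governor-side sampling, using the scheduler's busy input. */
extern unsigned long sched_get_busy(int cpu);   /* ns of busy time, last window */

static unsigned int busy_to_load_pct(unsigned long busy_ns,
                                     unsigned long window_ns)
{
        if (!window_ns)
                return 0;
        if (busy_ns > window_ns)
                busy_ns = window_ns;
        return (unsigned int)((busy_ns * 100UL) / window_ns);
}

/* Assumed per-window hook: derive a load figure from the scheduler input. */
static unsigned int sample_cpu_load(int cpu, unsigned long window_ns)
{
        return busy_to_load_pct(sched_get_busy(cpu), window_ns);
}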
@@ -25,16 +25,6 @@ config CPU_FREQ_BOOST_SW
        bool
        depends on THERMAL

config SCHED_FREQ_INPUT
        bool "Scheduler inputs to cpufreq governor"
        depends on SCHED_HMP
        help
          This option enables support for scheduler based CPU utilization
          calculations which may then be used by any cpufreq governor. The
          scheduler keeps track of "recent" cpu demand of tasks, which can
          help determine need for changing frequency well in advance of what
          a governor would have been able to detect on its own.

config CPU_FREQ_STAT
        tristate "CPU frequency translation statistics"
        default y
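The Kconfig entry removed above is the only user-visible switch this commit
deletes; its functionality is folded into CONFIG_SCHED_HMP, which it already
depended on. The before/after guards below are an illustrative summary of the
mechanical change applied throughout the rest of this diff, not literal lines
from any one file:

/* Before: frequency-input code carried its own (HMP-dependent) guard. */
#if defined(CONFIG_SCHED_FREQ_INPUT)
/* scheduler-guided frequency input */
#endif

/* After: the same code is guarded by SCHED_HMP alone. */
#ifdef CONFIG_SCHED_HMP
/* scheduler-guided frequency input */
#endif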
@@ -1305,12 +1305,10 @@ struct ravg {
        u64 mark_start;
        u32 sum, demand;
        u32 sum_history[RAVG_HIST_SIZE_MAX];
#ifdef CONFIG_SCHED_FREQ_INPUT
        u32 curr_window, prev_window;
        u16 active_windows;
        u32 pred_demand;
        u8 busy_buckets[NUM_BUSY_BUCKETS];
#endif
};

struct sched_entity {

@@ -2152,32 +2150,6 @@ static inline cputime_t task_gtime(struct task_struct *t)
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);

struct sched_load {
        unsigned long prev_load;
        unsigned long new_task_load;
        unsigned long predicted_load;
};

#if defined(CONFIG_SCHED_FREQ_INPUT)
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
                                const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
#else
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
        return -EINVAL;
}
static inline unsigned long sched_get_busy(int cpu)
{
        return 0;
}
static inline void sched_get_cpus_busy(struct sched_load *busy,
                                       const struct cpumask *query_cpus) {};
static inline void sched_set_io_is_busy(int val) {};
#endif

/*
 * Per process flags
 */

@@ -2360,13 +2332,23 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
}
#endif

struct sched_load {
        unsigned long prev_load;
        unsigned long new_task_load;
        unsigned long predicted_load;
};

extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);

#ifdef CONFIG_SCHED_HMP
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
                                const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);

@@ -2383,6 +2365,19 @@ extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
        int wakeup_energy, int wakeup_latency);

#else /* CONFIG_SCHED_HMP */
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
        return -EINVAL;
}
static inline unsigned long sched_get_busy(int cpu)
{
        return 0;
}
static inline void sched_get_cpus_busy(struct sched_load *busy,
                                       const struct cpumask *query_cpus) {};

static inline void sched_set_io_is_busy(int val) {};

static inline int sched_set_boost(int enable)
{
        return -EINVAL;
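After this hunk, sched_set_window(), sched_get_busy(), sched_get_cpus_busy()
and sched_set_io_is_busy() are available whenever CONFIG_SCHED_HMP is set,
with the same stub fallbacks otherwise. A minimal, hypothetical caller on the
governor side might look like the sketch below; the function name, the fixed
array size and the error handling are assumptions, and only the four
scheduler interfaces are taken from the declarations above:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical: align the governor's window with the scheduler, then poll. */
static int governor_attach(const struct cpumask *policy_cpus,
                           u64 window_start, unsigned int window_size)
{
        struct sched_load load[8];      /* assumed large enough for one cluster */
        int ret;

        ret = sched_set_window(window_start, window_size);
        if (ret)                        /* -EINVAL when window stats are unavailable */
                return ret;

        sched_set_io_is_busy(1);        /* optionally count iowait as busy time */

        /* Fills one sched_load entry per CPU present in policy_cpus. */
        sched_get_cpus_busy(load, policy_cpus);

        return 0;
}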
@@ -40,20 +40,14 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_wake_to_idle;

#ifdef CONFIG_SCHED_HMP
extern int sysctl_sched_freq_inc_notify;
extern int sysctl_sched_freq_dec_notify;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_cpu_high_irqload;

#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;
#endif

#ifdef CONFIG_SCHED_FREQ_INPUT
extern int sysctl_sched_freq_inc_notify;
extern int sysctl_sched_freq_dec_notify;
#endif

#ifdef CONFIG_SCHED_HMP
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;

@@ -65,11 +59,9 @@ extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_enable_colocation;
extern unsigned int sysctl_sched_restrict_cluster_spill;
#if defined(CONFIG_SCHED_FREQ_INPUT)
extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
#endif

#else /* CONFIG_SCHED_HMP */
@@ -76,9 +76,7 @@ TRACE_EVENT(sched_enq_deq_task,
                __field(unsigned int, cpus_allowed )
#ifdef CONFIG_SCHED_HMP
                __field(unsigned int, demand )
#ifdef CONFIG_SCHED_FREQ_INPUT
                __field(unsigned int, pred_demand )
#endif
#endif
        ),

@@ -94,18 +92,13 @@ TRACE_EVENT(sched_enq_deq_task,
                __entry->cpus_allowed = cpus_allowed;
#ifdef CONFIG_SCHED_HMP
                __entry->demand = p->ravg.demand;
#ifdef CONFIG_SCHED_FREQ_INPUT
                __entry->pred_demand = p->ravg.pred_demand;
#endif
#endif
        ),

        TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
#ifdef CONFIG_SCHED_HMP
                " demand=%u"
#ifdef CONFIG_SCHED_FREQ_INPUT
                " pred_demand=%u"
#endif
                " demand=%u pred_demand=%u"
#endif
                , __entry->cpu,
                __entry->enqueue ? "enqueue" : "dequeue",

@@ -113,10 +106,7 @@ TRACE_EVENT(sched_enq_deq_task,
                __entry->prio, __entry->nr_running,
                __entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
#ifdef CONFIG_SCHED_HMP
                , __entry->demand
#ifdef CONFIG_SCHED_FREQ_INPUT
                , __entry->pred_demand
#endif
                , __entry->demand, __entry->pred_demand
#endif
                )
);

@@ -291,7 +281,6 @@ TRACE_EVENT(sched_update_task_ravg,
                __field(unsigned int, demand )
                __field(unsigned int, sum )
                __field( int, cpu )
#ifdef CONFIG_SCHED_FREQ_INPUT
                __field(unsigned int, pred_demand )
                __field( u64, rq_cs )
                __field( u64, rq_ps )

@@ -304,7 +293,6 @@ TRACE_EVENT(sched_update_task_ravg,
                __field( u64, nt_cs )
                __field( u64, nt_ps )
                __field( u32, active_windows )
#endif
        ),

        TP_fast_assign(

@@ -322,7 +310,6 @@ TRACE_EVENT(sched_update_task_ravg,
                __entry->demand = p->ravg.demand;
                __entry->sum = p->ravg.sum;
                __entry->irqtime = irqtime;
#ifdef CONFIG_SCHED_FREQ_INPUT
                __entry->pred_demand = p->ravg.pred_demand;
                __entry->rq_cs = rq->curr_runnable_sum;
                __entry->rq_ps = rq->prev_runnable_sum;

@@ -335,28 +322,19 @@ TRACE_EVENT(sched_update_task_ravg,
                __entry->nt_cs = rq->nt_curr_runnable_sum;
                __entry->nt_ps = rq->nt_prev_runnable_sum;
                __entry->active_windows = p->ravg.active_windows;
#endif
        ),

        TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
#ifdef CONFIG_SCHED_FREQ_INPUT
                " pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
#endif
        TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
                , __entry->wallclock, __entry->win_start, __entry->delta,
                task_event_names[__entry->evt], __entry->cpu,
                __entry->cur_freq, __entry->cur_pid,
                __entry->pid, __entry->comm, __entry->mark_start,
                __entry->delta_m, __entry->demand,
                __entry->sum, __entry->irqtime
#ifdef CONFIG_SCHED_FREQ_INPUT
                , __entry->pred_demand, __entry->rq_cs, __entry->rq_ps,
                __entry->curr_window, __entry->prev_window,
                __entry->nt_cs, __entry->nt_ps,
                __entry->active_windows,
                __entry->grp_cs, __entry->grp_ps,
                __entry->grp_nt_cs, __entry->grp_nt_ps
#endif
                )
                __entry->sum, __entry->irqtime, __entry->pred_demand,
                __entry->rq_cs, __entry->rq_ps, __entry->curr_window,
                __entry->prev_window, __entry->nt_cs, __entry->nt_ps,
                __entry->active_windows, __entry->grp_cs,
                __entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps)
);

TRACE_EVENT(sched_get_task_cpu_cycles,

@@ -402,9 +380,7 @@ TRACE_EVENT(sched_update_history,
                __field( int, samples )
                __field(enum task_event, evt )
                __field(unsigned int, demand )
#ifdef CONFIG_SCHED_FREQ_INPUT
                __field(unsigned int, pred_demand )
#endif
                __array( u32, hist, RAVG_HIST_SIZE_MAX)
                __field(unsigned int, nr_big_tasks )
                __field( int, cpu )

@@ -417,27 +393,19 @@ TRACE_EVENT(sched_update_history,
                __entry->samples = samples;
                __entry->evt = evt;
                __entry->demand = p->ravg.demand;
#ifdef CONFIG_SCHED_FREQ_INPUT
                __entry->pred_demand = p->ravg.pred_demand;
#endif
                memcpy(__entry->hist, p->ravg.sum_history,
                        RAVG_HIST_SIZE_MAX * sizeof(u32));
                __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
                __entry->cpu = rq->cpu;
        ),

        TP_printk("%d (%s): runtime %u samples %d event %s demand %u"
#ifdef CONFIG_SCHED_FREQ_INPUT
                " pred_demand %u"
#endif
        TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
                " (hist: %u %u %u %u %u) cpu %d nr_big %u",
                __entry->pid, __entry->comm,
                __entry->runtime, __entry->samples,
                task_event_names[__entry->evt],
                __entry->demand,
#ifdef CONFIG_SCHED_FREQ_INPUT
                __entry->pred_demand,
#endif
                __entry->demand, __entry->pred_demand,
                __entry->hist[0], __entry->hist[1],
                __entry->hist[2], __entry->hist[3],
                __entry->hist[4], __entry->cpu, __entry->nr_big_tasks)

@@ -476,8 +444,6 @@ TRACE_EVENT(sched_reset_all_window_stats,
                __entry->old_val, __entry->new_val)
);

#ifdef CONFIG_SCHED_FREQ_INPUT

TRACE_EVENT(sched_update_pred_demand,

        TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,

@@ -637,8 +603,6 @@ TRACE_EVENT(sched_freq_alert,
                __entry->old_pred, __entry->new_pred)
);

#endif /* CONFIG_SCHED_FREQ_INPUT */

#endif /* CONFIG_SCHED_HMP */

/*
@@ -1777,8 +1777,6 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);

unsigned int __read_mostly sysctl_sched_enable_colocation = 1;

#ifdef CONFIG_SCHED_FREQ_INPUT

__read_mostly unsigned int sysctl_sched_new_task_windows = 5;

#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0

@@ -1799,8 +1797,6 @@ static __read_mostly unsigned int sched_io_is_busy;

__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;

#endif /* CONFIG_SCHED_FREQ_INPUT */

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.

@@ -1838,9 +1834,7 @@ unsigned int __read_mostly sched_disable_window_stats;
 * in a window, it's considered to be generating majority of workload
 * for this window. Prediction could be adjusted for such tasks.
 */
#ifdef CONFIG_SCHED_FREQ_INPUT
__read_mostly unsigned int sched_major_task_runtime = 10000000;
#endif

static unsigned int sync_cpu;

@@ -1953,8 +1947,6 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
        return delta;
}

#ifdef CONFIG_SCHED_FREQ_INPUT

static inline int cpu_is_waiting_on_io(struct rq *rq)
{
        if (!sched_io_is_busy)

@@ -2671,28 +2663,6 @@ static inline u32 predict_and_update_buckets(struct rq *rq,

        return pred_demand;
}
#define assign_ravg_pred_demand(x) (p->ravg.pred_demand = x)

#else /* CONFIG_SCHED_FREQ_INPUT */

static inline void
update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
{
}

static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
                                        int event, u64 wallclock, u64 irqtime)
{
}

static inline u32 predict_and_update_buckets(struct rq *rq,
                        struct task_struct *p, u32 runtime)
{
        return 0;
}
#define assign_ravg_pred_demand(x)

#endif /* CONFIG_SCHED_FREQ_INPUT */

static void update_task_cpu_cycles(struct task_struct *p, int cpu)
{

@@ -2831,7 +2801,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
                        pred_demand);

        p->ravg.demand = demand;
        assign_ravg_pred_demand(pred_demand);
        p->ravg.pred_demand = pred_demand;

done:
        trace_sched_update_history(rq, p, runtime, samples, event);

@@ -3074,10 +3044,8 @@ static inline void set_window_start(struct rq *rq)
                raw_spin_unlock(&rq->lock);
                double_rq_lock(rq, sync_rq);
                rq->window_start = cpu_rq(sync_cpu)->window_start;
#ifdef CONFIG_SCHED_FREQ_INPUT
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
#endif
                raw_spin_unlock(&sync_rq->lock);
        }

@@ -3222,10 +3190,8 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)

                if (window_start)
                        rq->window_start = window_start;
#ifdef CONFIG_SCHED_FREQ_INPUT
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
#endif
                reset_cpu_hmp_stats(cpu, 1);
        }

@@ -3240,7 +3206,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
                new = sysctl_sched_ravg_hist_size;
                sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
        }
#ifdef CONFIG_SCHED_FREQ_INPUT
        else if (sched_freq_aggregate !=
                        sysctl_sched_freq_aggregate) {
                reason = FREQ_AGGREGATE_CHANGE;

@@ -3248,7 +3213,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
                new = sysctl_sched_freq_aggregate;
                sched_freq_aggregate = sysctl_sched_freq_aggregate;
        }
#endif

        for_each_possible_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);

@@ -3263,8 +3227,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
                        sched_ktime_clock() - start_ts, reason, old, new);
}

#ifdef CONFIG_SCHED_FREQ_INPUT

static inline void
sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);

@@ -3585,12 +3547,6 @@ done:
        double_rq_unlock(src_rq, dest_rq);
}

#else

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }

#endif /* CONFIG_SCHED_FREQ_INPUT */

#define sched_up_down_migrate_auto_update 1
static void check_for_up_down_migrate_update(const struct cpumask *cpus)
{

@@ -3663,8 +3619,6 @@ static void set_preferred_cluster(struct related_thread_group *grp)
#define ADD_TASK 0
#define REM_TASK 1

#ifdef CONFIG_SCHED_FREQ_INPUT

static void
update_task_ravg(struct task_struct *p, struct rq *rq,
                 int event, u64 wallclock, u64 irqtime);

@@ -3825,34 +3779,6 @@ _group_cpu_time(struct related_thread_group *grp, int cpu)
        return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
}

#else /* CONFIG_SCHED_FREQ_INPUT */

static inline void free_group_cputime(struct related_thread_group *grp) { }

static inline int alloc_group_cputime(struct related_thread_group *grp)
{
        return 0;
}

static inline void transfer_busy_time(struct rq *rq,
                struct related_thread_group *grp, struct task_struct *p, int event)
{
}

static struct group_cpu_time *
task_group_cpu_time(struct task_struct *p, int cpu)
{
        return NULL;
}

static inline struct group_cpu_time *
_group_cpu_time(struct related_thread_group *grp, int cpu)
{
        return NULL;
}

#endif

struct related_thread_group *alloc_related_thread_group(int group_id)
{
        struct related_thread_group *grp;

@@ -10951,7 +10877,6 @@ void __init sched_init(void)
                 * like select_best_cpu()
                 */
                rq->cluster = &init_cluster;
#ifdef CONFIG_SCHED_FREQ_INPUT
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
                rq->old_busy_time = 0;

@@ -10959,7 +10884,6 @@ void __init sched_init(void)
                rq->old_busy_time_group = 0;
                rq->notifier_sent = 0;
                rq->hmp_stats.pred_demands_sum = 0;
#endif
#endif
                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
@@ -2712,10 +2712,9 @@ void set_hmp_defaults(void)

        update_up_down_migrate();

#ifdef CONFIG_SCHED_FREQ_INPUT
        sched_major_task_runtime =
                mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
#endif

        sched_init_task_load_windows =
                div64_u64((u64)sysctl_sched_init_task_load_pct *
                          (u64)sched_ravg_window, 100);

@@ -3652,7 +3651,7 @@ static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
        stats->nr_big_tasks = 0;
        if (reset_cra) {
                stats->cumulative_runnable_avg = 0;
                set_pred_demands_sum(stats, 0);
                stats->pred_demands_sum = 0;
        }
}

@@ -3928,7 +3927,6 @@ void post_big_task_count_change(const struct cpumask *cpus)

DEFINE_MUTEX(policy_mutex);

#ifdef CONFIG_SCHED_FREQ_INPUT
static inline int invalid_value_freq_input(unsigned int *data)
{
        if (data == &sysctl_sched_freq_aggregate)

@@ -3936,12 +3934,6 @@ static inline int invalid_value_freq_input(unsigned int *data)

        return 0;
}
#else
static inline int invalid_value_freq_input(unsigned int *data)
{
        return 0;
}
#endif

static inline int invalid_value(unsigned int *data)
{

@@ -4641,12 +4633,6 @@ dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }

#ifdef CONFIG_SCHED_HMP

#ifdef CONFIG_SCHED_FREQ_INPUT
#define clear_ravg_pred_demand() (p->ravg.pred_demand = 0)
#else
#define clear_ravg_pred_demand()
#endif

void init_new_task_load(struct task_struct *p)
{
        int i;

@@ -4664,7 +4650,7 @@ void init_new_task_load(struct task_struct *p)
                                  (u64)sched_ravg_window, 100);

        p->ravg.demand = init_load_windows;
        clear_ravg_pred_demand();
        p->ravg.pred_demand = 0;
        for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
                p->ravg.sum_history[i] = init_load_windows;
}

@@ -4695,7 +4681,7 @@ static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
{
        cfs_rq->hmp_stats.nr_big_tasks = 0;
        cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
        set_pred_demands_sum(&cfs_rq->hmp_stats, 0);
        cfs_rq->hmp_stats.pred_demands_sum = 0;
}

static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,

@@ -4720,8 +4706,7 @@ static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
        stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
        stats->cumulative_runnable_avg +=
                cfs_rq->hmp_stats.cumulative_runnable_avg;
        set_pred_demands_sum(stats, stats->pred_demands_sum +
                             cfs_rq->hmp_stats.pred_demands_sum);
        stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
}

static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,

@@ -4730,8 +4715,7 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
        stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
        stats->cumulative_runnable_avg -=
                cfs_rq->hmp_stats.cumulative_runnable_avg;
        set_pred_demands_sum(stats, stats->pred_demands_sum -
                             cfs_rq->hmp_stats.pred_demands_sum);
        stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;

        BUG_ON(stats->nr_big_tasks < 0 ||
                (s64)stats->cumulative_runnable_avg < 0);
@@ -366,9 +366,7 @@ struct cfs_bandwidth { };
struct hmp_sched_stats {
        int nr_big_tasks;
        u64 cumulative_runnable_avg;
#ifdef CONFIG_SCHED_FREQ_INPUT
        u64 pred_demands_sum;
#endif
};

struct sched_cluster {

@@ -409,16 +407,12 @@ struct related_thread_group {
        struct sched_cluster *preferred_cluster;
        struct rcu_head rcu;
        u64 last_update;
#ifdef CONFIG_SCHED_FREQ_INPUT
        struct group_cpu_time __percpu *cpu_time;       /* one per cluster */
#endif
};

struct migration_sum_data {
        struct rq *src_rq, *dst_rq;
#ifdef CONFIG_SCHED_FREQ_INPUT
        struct group_cpu_time *src_cpu_time, *dst_cpu_time;
#endif
};

extern struct list_head cluster_head;

@@ -435,7 +429,7 @@ struct cpu_cycle {
#define for_each_sched_cluster(cluster) \
        list_for_each_entry_rcu(cluster, &cluster_head, list)

#endif
#endif /* CONFIG_SCHED_HMP */

/* CFS-related fields in a runqueue */
struct cfs_rq {

@@ -755,15 +749,9 @@ struct rq {
        unsigned int static_cpu_pwr_cost;
        struct task_struct *ed_task;
        struct cpu_cycle cc;

#ifdef CONFIG_SCHED_FREQ_INPUT
        u64 old_busy_time, old_busy_time_group;
        int notifier_sent;
        u64 old_estimated_time;
#endif
#endif

#ifdef CONFIG_SCHED_FREQ_INPUT
        u64 curr_runnable_sum;
        u64 prev_runnable_sum;
        u64 nt_curr_runnable_sum;

@@ -1045,6 +1033,8 @@ extern void init_new_task_load(struct task_struct *p);
#define WINDOW_STATS_AVG 3
#define WINDOW_STATS_INVALID_POLICY 4

#define MAJOR_TASK_PCT 85

extern struct mutex policy_mutex;
extern unsigned int sched_ravg_window;
extern unsigned int sched_disable_window_stats;

@@ -1065,11 +1055,7 @@ extern unsigned int sched_init_task_load_windows;
extern unsigned int up_down_migrate_scale_factor;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sched_pred_alert_load;

#ifdef CONFIG_SCHED_FREQ_INPUT
#define MAJOR_TASK_PCT 85
extern unsigned int sched_major_task_runtime;
#endif

extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern unsigned int max_task_load(void);

@@ -1180,14 +1166,6 @@ static inline unsigned int task_load(struct task_struct *p)
        return p->ravg.demand;
}

#ifdef CONFIG_SCHED_FREQ_INPUT
#define set_pred_demands_sum(stats, x) ((stats)->pred_demands_sum = (x))
#define verify_pred_demands_sum(stat) BUG_ON((s64)(stat)->pred_demands_sum < 0)
#else
#define set_pred_demands_sum(stats, x)
#define verify_pred_demands_sum(stat)
#endif

static inline void
inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
                            struct task_struct *p)

@@ -1200,13 +1178,12 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
        task_load = sched_disable_window_stats ? 0 : p->ravg.demand;

        stats->cumulative_runnable_avg += task_load;
        set_pred_demands_sum(stats, stats->pred_demands_sum +
                             p->ravg.pred_demand);
        stats->pred_demands_sum += p->ravg.pred_demand;
}

static inline void
dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
                            struct task_struct *p)
                            struct task_struct *p)
{
        u32 task_load;

@@ -1219,9 +1196,8 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,

        BUG_ON((s64)stats->cumulative_runnable_avg < 0);

        set_pred_demands_sum(stats, stats->pred_demands_sum -
                             p->ravg.pred_demand);
        verify_pred_demands_sum(stats);
        stats->pred_demands_sum -= p->ravg.pred_demand;
        BUG_ON((s64)stats->pred_demands_sum < 0);
}

static inline void

@@ -1235,12 +1211,10 @@ fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
        stats->cumulative_runnable_avg += task_load_delta;
        BUG_ON((s64)stats->cumulative_runnable_avg < 0);

        set_pred_demands_sum(stats, stats->pred_demands_sum +
                             pred_demand_delta);
        verify_pred_demands_sum(stats);
        stats->pred_demands_sum += pred_demand_delta;
        BUG_ON((s64)stats->pred_demands_sum < 0);
}

#define pct_to_real(tunable) \
        (div64_u64((u64)tunable * (u64)max_task_load(), 100))
@@ -1278,6 +1252,87 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
        return rcu_dereference(p->grp);
}

#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)

extern void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);

struct group_cpu_time {
        u64 curr_runnable_sum;
        u64 prev_runnable_sum;
        u64 nt_curr_runnable_sum;
        u64 nt_prev_runnable_sum;
        u64 window_start;
};

/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
        struct rq *rq = cpu_rq(src_cpu);

        if (src_cpu == dst_cpu)
                return 1;

        return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

#define BOOST_KICK 0
#define CPU_RESERVED 1

static inline int is_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        return test_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline int mark_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /* Name boost_flags as hmp_flags? */
        return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline void clear_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        clear_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline u64 cpu_cravg_sync(int cpu, int sync)
{
        struct rq *rq = cpu_rq(cpu);
        u64 load;

        load = rq->hmp_stats.cumulative_runnable_avg;

        /*
         * If load is being checked in a sync wakeup environment,
         * we may want to discount the load of the currently running
         * task.
         */
        if (sync && cpu == smp_processor_id()) {
                if (load > rq->curr->ravg.demand)
                        load -= rq->curr->ravg.demand;
                else
                        load = 0;
        }

        return load;
}

extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
extern unsigned int power_cost(int cpu, u64 demand);
extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern void boost_kick(int cpu);
extern int sched_boost(void);

#else /* CONFIG_SCHED_HMP */

struct hmp_sched_stats;
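One detail worth noting in the block that moves above: PRED_DEMAND_DELTA
deliberately expands to an expression using the local names new_pred_demand
and p, so it only compiles at call sites where both are in scope. A
hypothetical caller (not code from this patch) that applies the delta to the
per-rq aggregate and then commits the new prediction might look like this:

/*
 * Illustrative only: names other than PRED_DEMAND_DELTA and pred_demands_sum
 * are assumptions. Relies on the macro capturing `new_pred_demand` and `p`.
 */
static inline void set_task_pred_demand(struct rq *rq, struct task_struct *p,
                                        u32 new_pred_demand)
{
        s64 delta = PRED_DEMAND_DELTA;  /* new minus old predicted demand */

        rq->hmp_stats.pred_demands_sum += delta;
        BUG_ON((s64)rq->hmp_stats.pred_demands_sum < 0);

        p->ravg.pred_demand = new_pred_demand;
}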
@@ -1340,41 +1395,8 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
        return 0;
}

#endif /* CONFIG_SCHED_HMP */

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
 */
#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))

#ifdef CONFIG_SCHED_FREQ_INPUT
#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)

extern void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);

struct group_cpu_time {
        u64 curr_runnable_sum;
        u64 prev_runnable_sum;
        u64 nt_curr_runnable_sum;
        u64 nt_prev_runnable_sum;
        u64 window_start;
};

/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
        struct rq *rq = cpu_rq(src_cpu);

        if (src_cpu == dst_cpu)
                return 1;

        return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

#else /* CONFIG_SCHED_FREQ_INPUT */

#define sched_enable_hmp 0
#define sched_freq_legacy_mode 1
#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)

@@ -1386,72 +1408,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
        return 1;
}

#endif /* CONFIG_SCHED_FREQ_INPUT */

#ifdef CONFIG_SCHED_HMP

#define BOOST_KICK 0
#define CPU_RESERVED 1

static inline int is_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        return test_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline int mark_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /* Name boost_flags as hmp_flags? */
        return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline void clear_reserved(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        clear_bit(CPU_RESERVED, &rq->hmp_flags);
}

static inline u64 cpu_cravg_sync(int cpu, int sync)
{
        struct rq *rq = cpu_rq(cpu);
        u64 load;

        load = rq->hmp_stats.cumulative_runnable_avg;

        /*
         * If load is being checked in a sync wakeup environment,
         * we may want to discount the load of the currently running
         * task.
         */
        if (sync && cpu == smp_processor_id()) {
                if (load > rq->curr->ravg.demand)
                        load -= rq->curr->ravg.demand;
                else
                        load = 0;
        }

        return load;
}

extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
extern unsigned int power_cost(int cpu, u64 demand);
extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern void boost_kick(int cpu);
extern int sched_boost(void);

#else /* CONFIG_SCHED_HMP */

#define sched_enable_hmp 0
#define sched_freq_legacy_mode 1

static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_task_count_change(void) { }
static inline void post_big_task_count_change(void) { }

@@ -1464,7 +1420,13 @@ static inline void clear_reserved(int cpu) { }
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)

#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_HMP */

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
 */
#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))

#ifdef CONFIG_CGROUP_SCHED
@@ -292,7 +292,7 @@ static struct ctl_table kern_table[] = {
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
        {
                .procname = "sched_freq_inc_notify",
                .data = &sysctl_sched_freq_inc_notify,

@@ -309,8 +309,6 @@ static struct ctl_table kern_table[] = {
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
        },
#endif
#ifdef CONFIG_SCHED_HMP
        {
                .procname = "sched_cpu_high_irqload",
                .data = &sysctl_sched_cpu_high_irqload,

@@ -407,7 +405,6 @@ static struct ctl_table kern_table[] = {
                .mode = 0644,
                .proc_handler = sched_hmp_proc_update_handler,
        },
#ifdef CONFIG_SCHED_FREQ_INPUT
        {
                .procname = "sched_new_task_windows",
                .data = &sysctl_sched_new_task_windows,

@@ -430,7 +427,6 @@ static struct ctl_table kern_table[] = {
                .mode = 0644,
                .proc_handler = sched_window_update_handler,
        },
#endif
        {
                .procname = "sched_boost",
                .data = &sysctl_sched_boost,