sched: remove sysctl control for HMP and power-aware task placement
There is no real need to control HMP and power-aware task placement at
runtime after the kernel has booted; boot-time control should be
sufficient. Dropping runtime (sysctl) support also simplifies the code
quite a bit. In addition, rename sysctl_sched_enable_hmp_task_placement
to something shorter.

Change-Id: I60cae51a173c6f73b79cbf90c50ddd41a27604aa
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict; the p->nr_cpus_allowed == 1
check has moved to core.c]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent ad25ca2afb
commit f27b626521
6 changed files with 60 additions and 46 deletions
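Usage note: the sysctl knobs removed below are replaced by early_param()
handlers, so the two flags can now only be set on the kernel command line
at boot. For example (illustrative; the parameter names come from the
early_param() calls added in this commit), booting with

    sched_enable_hmp=0 sched_enable_power_aware=0

disables HMP task placement and power-aware placement respectively. Both
flags default to 1 (enabled), and each handler normalizes the integer
parsed by get_option() to 0 or 1 via !!.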
@@ -53,7 +53,6 @@ extern int sysctl_sched_freq_dec_notify_slack_pct;
 #endif
 
 #ifdef CONFIG_SCHED_HMP
-extern unsigned int sysctl_sched_enable_hmp_task_placement;
 extern unsigned int sysctl_sched_spill_nr_run;
 extern unsigned int sysctl_sched_mostly_idle_nr_run;
 extern unsigned int sysctl_sched_spill_load_pct;
@@ -62,7 +61,6 @@ extern unsigned int sysctl_sched_small_task_pct;
 extern unsigned int sysctl_sched_upmigrate_pct;
 extern unsigned int sysctl_sched_downmigrate_pct;
 extern int sysctl_sched_upmigrate_min_nice;
-extern unsigned int sysctl_sched_enable_power_aware;
 extern unsigned int sysctl_sched_powerband_limit_pct;
 extern unsigned int sysctl_sched_boost;
 
@@ -1075,6 +1075,36 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+static int __init set_sched_enable_hmp(char *str)
+{
+	int enable_hmp = 0;
+
+	get_option(&str, &enable_hmp);
+
+	sched_enable_hmp = !!enable_hmp;
+
+	return 0;
+}
+
+early_param("sched_enable_hmp", set_sched_enable_hmp);
+
+static int __init set_sched_enable_power_aware(char *str)
+{
+	int enable_power_aware = 0;
+
+	get_option(&str, &enable_power_aware);
+
+	sched_enable_power_aware = !!enable_power_aware;
+
+	return 0;
+}
+
+early_param("sched_enable_power_aware", set_sched_enable_power_aware);
+
+#endif /* CONFIG_SCHED_HMP */
+
 #if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 /* Window size (in ns) */
@@ -1135,7 +1165,7 @@ int rq_freq_margin(struct rq *rq)
 	int margin;
 	u64 demand;
 
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return INT_MAX;
 
 	demand = scale_load_to_cpu(rq->prev_runnable_sum, rq->cpu);
@@ -1393,7 +1423,7 @@ static void init_cpu_efficiency(void)
 	int i, efficiency;
 	unsigned int max = 0, min = UINT_MAX;
 
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return;
 
 	for_each_possible_cpu(i) {
@@ -1436,7 +1466,7 @@ static inline void set_window_start(struct rq *rq)
 	int cpu = cpu_of(rq);
 	struct rq *sync_rq = cpu_rq(sync_cpu);
 
-	if (rq->window_start || !sysctl_sched_enable_hmp_task_placement)
+	if (rq->window_start || !sched_enable_hmp)
 		return;
 
 	if (cpu == sync_cpu) {
@@ -1720,7 +1750,7 @@ static int register_sched_callback(void)
 {
 	int ret;
 
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return 0;
 
 	ret = cpufreq_register_notifier(&notifier_policy_block,
@@ -2112,8 +2142,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.nr_migrations++;
 		perf_event_task_migrate(p);
 
-		if (sysctl_sched_enable_hmp_task_placement &&
-		    (p->on_rq || p->state == TASK_WAKING))
+		if (sched_enable_hmp && (p->on_rq || p->state == TASK_WAKING))
 			fixup_busy_time(p, new_cpu);
 	}
 
@@ -3672,7 +3701,7 @@ void sched_exec(void)
 	unsigned long flags;
 	int dest_cpu;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -2564,7 +2564,7 @@ unsigned int max_task_load(void)
 #ifdef CONFIG_SCHED_HMP
 
 /* Use this knob to turn on or off HMP-aware task placement logic */
-unsigned int __read_mostly sysctl_sched_enable_hmp_task_placement = 1;
+unsigned int __read_mostly sched_enable_hmp = 1;
 
 /* A cpu can no longer accomodate more tasks if:
  *
@@ -2585,7 +2585,7 @@ unsigned int __read_mostly sysctl_sched_mostly_idle_nr_run = 3;
  * Control whether or not individual CPU power consumption is used to
  * guide task placement.
  */
-unsigned int __read_mostly sysctl_sched_enable_power_aware = 1;
+unsigned int __read_mostly sched_enable_power_aware = 1;
 
 /*
  * This specifies the maximum percent power difference between 2
@@ -2767,7 +2767,7 @@ int sched_set_boost(int enable)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return -EINVAL;
 
 	spin_lock_irqsave(&boost_lock, flags);
@@ -2887,7 +2887,7 @@ unsigned int power_cost_at_freq(int cpu, unsigned int freq)
 	struct cpu_pstate_pwr *costs;
 
 	if (!per_cpu_info || !per_cpu_info[cpu].ptable ||
-	    !sysctl_sched_enable_power_aware)
+	    !sched_enable_power_aware)
 		/* When power aware scheduling is not in use, or CPU
 		 * power data is not available, just use the CPU
 		 * capacity as a rough stand-in for real CPU power
@@ -2918,7 +2918,7 @@ static unsigned int power_cost(struct task_struct *p, int cpu)
 	unsigned int task_freq;
 	unsigned int cur_freq = cpu_rq(cpu)->cur_freq;
 
-	if (!sysctl_sched_enable_power_aware)
+	if (!sched_enable_power_aware)
 		return cpu_rq(cpu)->max_possible_capacity;
 
 	/* calculate % of max freq needed */
@@ -3075,7 +3075,7 @@ done:
 
 void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
 {
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return;
 
 	if (is_big_task(p))
@@ -3086,7 +3086,7 @@ void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
 
 void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
 {
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return;
 
 	if (is_big_task(p))
@@ -3154,7 +3154,7 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	unsigned int old_val = *data;
 
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write || !sysctl_sched_enable_hmp_task_placement)
+	if (ret || !write || !sched_enable_hmp)
 		return ret;
 
 	if ((sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct) ||
@@ -3278,7 +3278,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 	int nice = task_nice(p);
 
 	if (is_small_task(p) || p->state != TASK_RUNNING ||
-	    !sysctl_sched_enable_hmp_task_placement)
+	    !sched_enable_hmp)
 		return 0;
 
 	/* Todo: cgroup-based control? */
@@ -3289,7 +3289,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 	if (!task_will_fit(p, cpu_of(rq)))
 		return 1;
 
-	if (sysctl_sched_enable_power_aware &&
+	if (sched_enable_power_aware &&
 	    lower_power_cpu_available(p, cpu_of(rq)))
 		return 1;
 
@@ -3342,7 +3342,7 @@ static inline int nr_big_tasks(struct rq *rq)
 
 #else	/* CONFIG_SCHED_HMP */
 
-#define sysctl_sched_enable_power_aware 0
+#define sched_enable_power_aware 0
 
 static inline int select_best_cpu(struct task_struct *p, int target)
 {
@@ -3819,7 +3819,7 @@ add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
 	u64 scaled_delta;
 	int sf;
 
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return;
 
 	if (unlikely(cur_freq > max_possible_freq ||
@@ -3836,7 +3836,7 @@ add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
 
 static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
 {
-	if (!sysctl_sched_enable_hmp_task_placement)
+	if (!sched_enable_hmp)
 		return;
 
 	sa->runnable_avg_sum_scaled =
@@ -5908,7 +5908,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return select_best_cpu(p, prev_cpu);
 
 	if (sd_flag & SD_BALANCE_WAKE)
@@ -7919,7 +7919,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return find_busiest_queue_hmp(env, group);
 
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
@@ -8422,7 +8422,7 @@ static int idle_balance(struct rq *this_rq)
 	 */
 	rcu_read_lock();
 	sd = rcu_dereference_check_sched_domain(this_rq->sd);
-	if (sd && sysctl_sched_enable_power_aware) {
+	if (sd && sched_enable_power_aware) {
 		for_each_cpu(i, sched_domain_span(sd)) {
 			if (i == this_cpu || idle_cpu(i)) {
 				cost = power_cost_at_freq(i, 0);
@@ -8648,7 +8648,7 @@ static inline int find_new_ilb(int type)
 {
 	int ilb;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return find_new_hmp_ilb(type);
 
 	ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -8902,7 +8902,7 @@ static int select_lowest_power_cpu(struct cpumask *cpus)
 	int lowest_power_cpu = -1;
 	int lowest_power = INT_MAX;
 
-	if (sysctl_sched_enable_power_aware) {
+	if (sched_enable_power_aware) {
 		for_each_cpu(i, cpus) {
 			cost = power_cost_at_freq(i, 0);
 			if (cost < lowest_power) {
@@ -9029,7 +9029,7 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
 {
 	unsigned long now = jiffies;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return _nohz_kick_needed_hmp(rq, cpu, type);
 
 	/*
@@ -1385,7 +1385,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	struct task_struct *curr;
 	struct rq *rq;
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
 
 	/* For anything but wake ups, just return the task_cpu */
@@ -1680,7 +1680,7 @@ static int find_lowest_rq(struct task_struct *task)
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
-	if (sysctl_sched_enable_hmp_task_placement)
+	if (sched_enable_hmp)
 		return find_lowest_rq_hmp(task);
 
 	/* Make sure the mask is initialized first */
@@ -1016,7 +1016,8 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
 
 #ifdef CONFIG_SCHED_HMP
 
-extern unsigned int sysctl_sched_enable_hmp_task_placement;
+extern unsigned int sched_enable_hmp;
+extern unsigned int sched_enable_power_aware;
 
 int mostly_idle_cpu(int cpu);
 extern void check_for_migration(struct rq *rq, struct task_struct *p);
@@ -1029,7 +1030,7 @@ extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
 
 #else /* CONFIG_SCHED_HMP */
 
-#define sysctl_sched_enable_hmp_task_placement 0
+#define sched_enable_hmp 0
 
 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 static inline void pre_big_small_task_count_change(void) { }
@@ -325,13 +325,6 @@ static struct ctl_table kern_table[] = {
 	},
 #endif
 #ifdef CONFIG_SCHED_HMP
-	{
-		.procname	= "sched_enable_hmp_task_placement",
-		.data		= &sysctl_sched_enable_hmp_task_placement,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 	{
 		.procname	= "sched_small_task",
 		.data		= &sysctl_sched_small_task_pct,
@@ -395,13 +388,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_hmp_proc_update_handler,
 	},
-	{
-		.procname	= "sched_enable_power_aware",
-		.data		= &sysctl_sched_enable_power_aware,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 	{
 		.procname	= "sched_power_band_limit",
 		.data		= &sysctl_sched_powerband_limit_pct,