sched: eliminate sched_freq_account_wait_time knob
Kill unused scheduler knob sched_freq_account_wait_time.

Change-Id: Ib74123ebd69dfa3f86cf7335099f50c12a6e93c3
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 5160d93b6d
commit 462213d1ac
5 changed files with 23 additions and 54 deletions
@@ -920,7 +920,7 @@ The above counters are resued for nt_curr_runnable_sum and
 nt_prev_runnable_sum.
 
 "cpu demand" of a task includes its execution time and can also include its
-wait time. 'sched_freq_account_wait_time' tunable controls whether task's wait
+wait time. 'SCHED_FREQ_ACCOUNT_WAIT_TIME' controls whether task's wait
 time is included in its 'curr_window' and 'prev_window' counters or not.
 
 Needless to say, curr_runnable_sum counter of a cpu is derived from curr_window
@@ -934,7 +934,7 @@ PICK_NEXT_TASK
 This represents beginning of execution for a task. Provided the task
 refers to a non-idle task, a portion of task's wait time that
 corresponds to the current window being tracked on a cpu is added to
-task's curr_window counter, provided sched_freq_account_wait_time is
+task's curr_window counter, provided SCHED_FREQ_ACCOUNT_WAIT_TIME is
 set. The same quantum is also added to cpu's curr_runnable_sum counter.
 The remaining portion, which corresponds to task's wait time in previous
 window is added to task's prev_window and cpu's prev_runnable_sum
@@ -969,7 +969,7 @@ TASK_MIGRATE
 this event reflects actions taken under PICK_NEXT_TASK (i.e its
 wait time is added to task's curr/prev_window counters as well
 as src_cpu's curr/prev_runnable_sum counters, provided
-sched_freq_account_wait_time tunable is non-zero). After that update,
+SCHED_FREQ_ACCOUNT_WAIT_TIME is non-zero). After that update,
 src_cpu's curr_runnable_sum is reduced by task's curr_window value
 and dst_cpu's curr_runnable_sum is increased by task's curr_window
 value, provided sched_migration_fixup = 1. Similarly, src_cpu's
@@ -1113,17 +1113,7 @@ tracking mechanism maintains per task. If default values are used for
 both this and sched_ravg_window then a total of 50ms of task history
 would be maintained in 5 10ms windows.
 
-*** 7.11 sched_freq_account_wait_time
-
-Appears at: /proc/sys/kernel/sched_freq_account_wait_time
-
-Default value: 0
-
-This controls whether a task's wait time is accounted in its curr_window and
-prev_window attributes and thus in a cpu's curr_runnable_sum and
-prev_runnable_sum counters.
-
-*** 7.12 sched_migration_fixup
+*** 7.11 sched_migration_fixup
 
 Appears at: /proc/sys/kernel/sched_migration_fixup
 
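The PICK_NEXT_TASK and TASK_MIGRATE paragraphs above describe how a waiting task's time is split around the window boundary before being added to its curr_window and prev_window counters. A minimal standalone sketch of that split, assuming nanosecond timestamps, a simplified task structure, and a wait that crosses at most one window boundary; none of these names come from the kernel source:

```c
#include <stdio.h>

struct demo_task {
	unsigned long long curr_window;	/* time accounted to the current window */
	unsigned long long prev_window;	/* time accounted to the previous window */
};

/*
 * Split the wait interval [wait_start, now) around window_start: the portion
 * inside the current window goes to curr_window, the remainder to prev_window,
 * mirroring the PICK_NEXT_TASK description above.
 */
static void account_wait_time(struct demo_task *p, unsigned long long wait_start,
			      unsigned long long window_start, unsigned long long now)
{
	if (wait_start >= window_start) {
		p->curr_window += now - wait_start;
	} else {
		p->prev_window += window_start - wait_start;
		p->curr_window += now - window_start;
	}
}

int main(void)
{
	struct demo_task p = { 0, 0 };

	/* With the documented 10ms default window: the task started waiting
	 * 4ms before the current window began and is picked 3ms into it. */
	account_wait_time(&p, 6000000ULL, 10000000ULL, 13000000ULL);
	printf("prev_window=%llu ns curr_window=%llu ns\n",
	       p.prev_window, p.curr_window);
	return 0;
}
```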
@@ -1132,7 +1122,7 @@ Default value: 1
 This controls whether a cpu's busy time counters are adjusted during task
 migration.
 
-*** 7.13 sched_freq_inc_notify
+*** 7.12 sched_freq_inc_notify
 
 Appears at: /proc/sys/kernel/sched_freq_inc_notify
 
@@ -1144,7 +1134,7 @@ exceeds sched_freq_inc_notify, where freq_required is the frequency calculated
 by scheduler to meet current task demand. Note that sched_freq_inc_notify is
 specified in kHz units.
 
-*** 7.14 sched_freq_dec_notify
+*** 7.13 sched_freq_dec_notify
 
 Appears at: /proc/sys/kernel/sched_freq_dec_notify
 
@@ -1157,7 +1147,7 @@ exceeds sched_freq_dec_notify, where freq_required is the frequency calculated
 by scheduler to meet current task demand. Note that sched_freq_dec_notify is
 specified in kHz units.
 
-*** 7.15 sched_heavy_task
+*** 7.14 sched_heavy_task
 
 Appears at: /proc/sys/kernel/sched_heavy_task
 
@@ -1169,7 +1159,7 @@ comparison. Scheduler will request a raise in cpu frequency when heavy tasks
 wakeup after at least one window of sleep, where window size is defined by
 sched_ravg_window. Value 0 will disable this feature.
 
-*** 7.16 sched_cpu_high_irqload
+*** 7.15 sched_cpu_high_irqload
 
 Appears at: /proc/sys/kernel/sched_cpu_high_irqload
 
@@ -1187,7 +1177,7 @@ longer eligible for placement. This will affect the task placement logic
 described above, causing the scheduler to try and steer tasks away from
 the CPU.
 
-*** 7.17 cpu.upmigrate_discourage
+*** 7.16 cpu.upmigrate_discourage
 
 Default value : 0
 
@@ -1203,7 +1193,7 @@ overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
 how overcommitment threshold is defined and also notes on
 'sched_upmigrate_min_nice' tunable.
 
-*** 7.18 sched_static_cpu_pwr_cost
+*** 7.17 sched_static_cpu_pwr_cost
 
 Default value: 0
 
@@ -1218,7 +1208,7 @@ within a cluster and possibly have differing value between clusters as
 needed.
 
 
-*** 7.19 sched_static_cluster_pwr_cost
+*** 7.18 sched_static_cluster_pwr_cost
 
 Default value: 0
 
@@ -1229,7 +1219,7 @@ power mode. It ignores the actual D-state that a cluster may be in and assumes
 the worst case power cost of the highest D-state. It is means of biasing task
 placement away from idle clusters when necessary.
 
-*** 7.20 sched_early_detection_duration
+*** 7.19 sched_early_detection_duration
 
 Default value: 9500000
 
@@ -1240,7 +1230,7 @@ tick for it to be eligible for the scheduler's early detection feature
 under scheduler boost. For more information on the feature itself please
 refer to section 5.2.1.
 
-*** 7.21 sched_restrict_cluster_spill
+*** 7.20 sched_restrict_cluster_spill
 
 Default value: 0
 
@@ -1259,7 +1249,7 @@ CPU across all clusters. When this tunable is enabled, the RT tasks are
 restricted to the lowest possible power cluster.
 
 
-*** 7.22 sched_downmigrate
+*** 7.21 sched_downmigrate
 
 Appears at: /proc/sys/kernel/sched_downmigrate
 
@@ -1272,7 +1262,7 @@ its demand *in reference to the power-efficient cpu* drops less than 60%
 (sched_downmigrate).
 
 
-*** 7.23 sched_small_wakee_task_load
+*** 7.22 sched_small_wakee_task_load
 
 Appears at: /proc/sys/kernel/sched_small_wakee_task_load
 
@@ -1284,7 +1274,7 @@ categorized as small wakee tasks. Scheduler places small wakee tasks on the
 waker's cluster.
 
 
-*** 7.24 sched_big_waker_task_load
+*** 7.23 sched_big_waker_task_load
 
 Appears at: /proc/sys/kernel/sched_big_waker_task_load
 
@@ -1443,7 +1433,6 @@ cpus are being reset. Changes to below attributes result in such a reset:
 * sched_window_stats_policy (See Sec 2.4)
 * sched_ravg_hist_size (See Sec 7.11)
 * sched_migration_fixup (See Sec 7.12)
-* sched_freq_account_wait_time (See Sec 7.11)
 
 <task>-0 [004] d.h4 12700.711489: sched_reset_all_windows_stats: time_taken 1123 window_start 0 window_size 0 reason POLICY_CHANGE old_val 0 new_val 1
 
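All of the tunables renumbered above remain exposed as files under /proc/sys/kernel. A rough userspace illustration of that interface, using sched_migration_fixup purely as an example path (after this commit, sched_freq_account_wait_time no longer appears there, so opening it would fail):

```c
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_migration_fixup";
	FILE *f = fopen(path, "r");
	unsigned int val;

	if (!f) {
		/* Kernel without this knob (or without the HMP patches). */
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &val) == 1)
		printf("%s = %u\n", path, val);
	fclose(f);
	return 0;
}
```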
@@ -44,7 +44,6 @@ extern unsigned int sysctl_sched_wakeup_load_threshold;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_hist_size;
 extern unsigned int sysctl_sched_cpu_high_irqload;
-extern unsigned int sysctl_sched_freq_account_wait_time;
 extern unsigned int sysctl_sched_migration_fixup;
 extern unsigned int sysctl_sched_heavy_task_pct;
 extern unsigned int sysctl_sched_enable_power_aware;
@@ -1764,7 +1764,7 @@ struct cpu_cycle {
 
 /*
 * sched_window_stats_policy, sched_ravg_hist_size,
-* sched_migration_fixup, sched_freq_account_wait_time have a 'sysctl' copy
+* sched_migration_fixup have a 'sysctl' copy
 * associated with them. This is required for atomic update of those variables
 * when being modifed via sysctl interface.
 *
@@ -1799,8 +1799,7 @@ __read_mostly unsigned int sysctl_sched_new_task_windows = 5;
 static __read_mostly unsigned int sched_migration_fixup = 1;
 __read_mostly unsigned int sysctl_sched_migration_fixup = 1;
 
-static __read_mostly unsigned int sched_freq_account_wait_time;
-__read_mostly unsigned int sysctl_sched_freq_account_wait_time;
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
 
 /*
 * For increase, send notification if
@@ -2166,11 +2165,11 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
 if (rq->curr == p)
 return 1;
 
-return p->on_rq ? sched_freq_account_wait_time : 0;
+return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
 }
 
 /* TASK_MIGRATE, PICK_NEXT_TASK left */
-return sched_freq_account_wait_time;
+return SCHED_FREQ_ACCOUNT_WAIT_TIME;
 }
 
 static inline int
@@ -2391,7 +2390,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
 return;
 
 if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
-(!sched_freq_account_wait_time ||
+(!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
 (event != TASK_MIGRATE &&
 event != PICK_NEXT_TASK)))
 return;
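The two hunks above replace reads of the deleted sched_freq_account_wait_time variable with the SCHED_FREQ_ACCOUNT_WAIT_TIME constant introduced earlier in the diff, so the wait-time accounting policy is now fixed at build time. A standalone sketch of what that change amounts to; the struct, the before/after helpers, and main() are illustrative, not kernel code:

```c
#include <stdio.h>

/* After the change: a compile-time constant, no sysctl backing it. */
#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0

/* Before the change: a runtime knob behind
 * /proc/sys/kernel/sched_freq_account_wait_time, default 0. */
static unsigned int sched_freq_account_wait_time;

struct demo_task {
	int on_rq;	/* runnable, possibly waiting on a runqueue */
	int is_curr;	/* currently running on the cpu */
};

/* Shape of the check in account_busy_for_cpu_time(): a waiting task only
 * contributes to cpu busy time when the knob/constant is non-zero. */
static int account_busy_before(const struct demo_task *p)
{
	if (p->is_curr)
		return 1;
	return p->on_rq ? sched_freq_account_wait_time : 0;
}

static int account_busy_after(const struct demo_task *p)
{
	if (p->is_curr)
		return 1;
	/* Constant 0: the compiler can discard the wait-time path entirely. */
	return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
}

int main(void)
{
	struct demo_task waiting = { .on_rq = 1, .is_curr = 0 };

	printf("before: %d, after: %d\n",
	       account_busy_before(&waiting), account_busy_after(&waiting));
	return 0;
}
```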
@@ -2401,7 +2400,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
 * related groups
 */
 if (event == TASK_UPDATE) {
-if (!p->on_rq && !sched_freq_account_wait_time)
+if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
 return;
 }
 
@@ -3186,7 +3185,6 @@ enum reset_reason_code {
 POLICY_CHANGE,
 HIST_SIZE_CHANGE,
 MIGRATION_FIXUP_CHANGE,
-FREQ_ACCOUNT_WAIT_TIME_CHANGE,
 FREQ_AGGREGATE_CHANGE,
 };
 
@@ -3195,7 +3193,7 @@ const char *sched_window_reset_reasons[] = {
 "POLICY_CHANGE",
 "HIST_SIZE_CHANGE",
 "MIGRATION_FIXUP_CHANGE",
-"FREQ_ACCOUNT_WAIT_TIME_CHANGE"};
+};
 
 /* Called with IRQs enabled */
 void reset_all_window_stats(u64 window_start, unsigned int window_size)
@@ -3269,13 +3267,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 old = sched_migration_fixup;
 new = sysctl_sched_migration_fixup;
 sched_migration_fixup = sysctl_sched_migration_fixup;
-} else if (sched_freq_account_wait_time !=
-sysctl_sched_freq_account_wait_time) {
-reason = FREQ_ACCOUNT_WAIT_TIME_CHANGE;
-old = sched_freq_account_wait_time;
-new = sysctl_sched_freq_account_wait_time;
-sched_freq_account_wait_time =
-sysctl_sched_freq_account_wait_time;
 } else if (sched_freq_aggregate !=
 sysctl_sched_freq_aggregate) {
 reason = FREQ_AGGREGATE_CHANGE;
@@ -4055,9 +4055,6 @@ static inline int invalid_value_freq_input(unsigned int *data)
 if (data == &sysctl_sched_migration_fixup)
 return !(*data == 0 || *data == 1);
 
-if (data == &sysctl_sched_freq_account_wait_time)
-return !(*data == 0 || *data == 1);
-
 if (data == &sysctl_sched_freq_aggregate)
 return !(*data == 0 || *data == 1);
 
@@ -323,13 +323,6 @@ static struct ctl_table kern_table[] = {
 .mode = 0644,
 .proc_handler = sched_window_update_handler,
 },
-{
-.procname = "sched_freq_account_wait_time",
-.data = &sysctl_sched_freq_account_wait_time,
-.maxlen = sizeof(unsigned int),
-.mode = 0644,
-.proc_handler = sched_window_update_handler,
-},
 {
 .procname = "sched_heavy_task",
 .data = &sysctl_sched_heavy_task_pct,