FIXUP: sched: fix build for non-SMP target

Currently the build for a single-core (e.g. user-mode) Linux target is broken,
and this configuration is required (at least) to run some network tests.

The main issues with the current code on single-core systems are:
1. {se,rq}::sched_avg is neither available nor maintained on !SMP systems.
   This means that load and utilisation signals are NOT available on
   single-core systems. All the EAS code depends on these signals.
2. sched_group_energy is also SMP dependent. Again, this means that all the
   EAS setup and preparation code (energy model initialisation) has to be
   properly guarded/disabled for !SMP systems (see the sketch after this
   list for the guarding idiom used).
3. SchedFreq depends on the utilisation signal, which is not available on
   !SMP systems.
4. SchedTune is useless on single-core systems if SchedFreq is not available.
5. WALT machinery is not required on single-core systems.
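
The common fix for issues 1-3 is the usual CONFIG_SMP guarding idiom: the
SMP-only implementation is left alone, and !SMP builds get a trivial fallback
so call sites need no #ifdefs of their own. A minimal sketch (illustrative
only, the helper name is made up; the real hunks are in the diff below):

#ifdef CONFIG_SMP
/* Real implementation, free to rely on PELT load/utilisation signals. */
unsigned long demo_cpu_boosted_util(int cpu);
#else
/* UP fallback: the PELT signals do not exist, return a fixed value. */
static inline unsigned long demo_cpu_boosted_util(int cpu)
{
	return 0;
}
#endif /* CONFIG_SMP */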

This patch addresses all these issues by enforcing some constraints for
single-core systems:
a) WALT, SchedTune and SchedFreq are now dependent on SMP
b) The default governor for !SMP systems is INTERACTIVE
c) The energy model initialisation/build functions are built only for SMP
   systems (and stubbed out otherwise)
d) Other minor code re-arrangements and CONFIG_SMP guarding to enable
   single-core builds (the resulting !SMP fallbacks are summarised below).
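
On !SMP builds the net effect of (c) and (d) is that the EAS entry points
collapse to trivial fallbacks. Collecting the definitions introduced by the
hunks below in one place (for readability only, this block is not itself part
of the patch):

#ifndef CONFIG_SMP
/* include/linux/sched_energy.h: energy model setup becomes a no-op. */
#define init_sched_energy_costs()	do { } while (0)
/* kernel/sched/fair.c: no boost signal, fall back to plain cpu_util(). */
#define boosted_cpu_util(cpu)		cpu_util(cpu)
/* kernel/sched/fair.c: every task is assumed to fit on every CPU. */
#define task_fits_max(p, cpu)		true
#endif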

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Patrick Bellasi, 2016-07-22 11:35:59 +01:00, committed by Amit Pundir
parent 13a60dc148
commit f0ba6a5d0c
7 changed files with 46 additions and 8 deletions

drivers/cpufreq/Kconfig

@@ -218,6 +218,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
 config CPU_FREQ_GOV_SCHED
 	bool "'sched' cpufreq governor"
 	depends on CPU_FREQ
+	depends on SMP
 	select CPU_FREQ_GOV_COMMON
 	help
 	  'sched' - this governor scales cpu frequency from the

include/linux/sched_energy.h

@@ -29,8 +29,16 @@
 #define for_each_possible_sd_level(level) \
 	for (level = 0; level < NR_SD_LEVELS; level++)
+#ifdef CONFIG_SMP
 extern struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
 void init_sched_energy_costs(void);
+#else
+#define init_sched_energy_costs() do { } while (0)
+#endif /* CONFIG_SMP */
 #endif
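
With the !SMP stub in place, code that sets up the energy model can call the
initialiser unconditionally. An assumed call site, not part of this diff
(function name and registration are illustrative):

#include <linux/init.h>
#include <linux/sched_energy.h>

static int __init example_energy_setup(void)
{
	init_sched_energy_costs();	/* compiles to a no-op when !CONFIG_SMP */
	return 0;
}
core_initcall(example_energy_setup);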

include/trace/events/sched.h

@@ -636,6 +636,8 @@ TRACE_EVENT(sched_contrib_scale_f,
 		__entry->cpu_scale_factor)
 );
+#ifdef CONFIG_SMP
 /*
  * Tracepoint for accounting sched averages for tasks.
  */
@@ -935,6 +937,8 @@ TRACE_EVENT(sched_tune_filter,
 		__entry->payoff, __entry->region)
 );
+#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 /* This part must be outside protection */

init/Kconfig

@@ -1256,6 +1256,7 @@ config SCHED_AUTOGROUP
 config SCHED_TUNE
 	bool "Boosting for CFS tasks (EXPERIMENTAL)"
+	depends on SMP
 	help
 	  This option enables the system-wide support for task boosting.
 	  When this support is enabled a new sysctl interface is exposed to

kernel/sched/Makefile

@@ -12,9 +12,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o energy.o
+obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o

kernel/sched/fair.c

@@ -4166,8 +4166,14 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
+#ifdef CONFIG_SMP
+static bool cpu_overutilized(int cpu);
+static inline unsigned long boosted_cpu_util(int cpu);
+#else
+#define boosted_cpu_util(cpu) cpu_util(cpu)
+#endif
 #ifdef CONFIG_SMP
 static void update_capacity_of(int cpu)
 {
 	unsigned long req_cap;
@@ -4180,8 +4186,7 @@ static void update_capacity_of(int cpu)
 	req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, req_cap);
 }
-static bool cpu_overutilized(int cpu);
 #endif
 /*
  * The enqueue_task method is called before nr_running is
@@ -4193,8 +4198,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+#ifdef CONFIG_SMP
 	int task_new = flags & ENQUEUE_WAKEUP_NEW;
 	int task_wakeup = flags & ENQUEUE_WAKEUP;
+#endif
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -4226,8 +4233,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}
-	if (!se) {
+	if (!se)
 		add_nr_running(rq, 1);
+#ifdef CONFIG_SMP
+	if (!se) {
 		if (!task_new && !rq->rd->overutilized &&
 		    cpu_overutilized(rq->cpu))
 			rq->rd->overutilized = true;
@@ -4244,6 +4255,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (task_new || task_wakeup)
 			update_capacity_of(cpu_of(rq));
 	}
+#endif /* CONFIG_SMP */
 	hrtick_update(rq);
 }
@@ -4301,8 +4314,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}
-	if (!se) {
+	if (!se)
 		sub_nr_running(rq, 1);
+#ifdef CONFIG_SMP
+	if (!se) {
 		schedtune_dequeue_task(p, cpu_of(rq));
 		/*
@@ -4320,6 +4337,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 				set_cfs_cpu_capacity(cpu_of(rq), false, 0);
 		}
 	}
+#endif /* CONFIG_SMP */
 	hrtick_update(rq);
 }
@@ -5713,6 +5733,8 @@ static void task_dead_fair(struct task_struct *p)
 {
 	remove_entity_load_avg(&p->se);
 }
+#else
+#define task_fits_max(p, cpu) true
 #endif /* CONFIG_SMP */
 static unsigned long
@@ -8737,10 +8759,13 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 	if (static_branch_unlikely(&sched_numa_balancing))
 		task_tick_numa(rq, curr);
+#ifdef CONFIG_SMP
 	if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
 		rq->rd->overutilized = true;
 	rq->misfit_task = !task_fits_max(curr, rq->cpu);
+#endif
 }
 /*

kernel/sched/sched.h

@@ -1277,6 +1277,7 @@ extern const struct sched_class idle_sched_class;
 #ifdef CONFIG_SMP
+extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq);
@@ -1359,8 +1360,6 @@ unsigned long to_ratio(u64 period, u64 runtime);
 extern void init_entity_runnable_average(struct sched_entity *se);
-extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 static inline void __add_nr_running(struct rq *rq, unsigned count)
 {
 	unsigned prev_nr = rq->nr_running;