* refs/heads/tmp-ceee5bd:
  BACKPORT: arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
  sched/core: fix have_sched_energy_data build warning
  sched/core: Warn if ENERGY_AWARE is enabled but data is missing
  sched: walt: Correct WALT window size initialization
  FROMLIST: sched/fair: Use wake_q length as a hint for wake_wide
  sched: WALT: account cumulative window demand
  sched/fair: remove useless variable in find_best_target
  sched/tune: access schedtune_initialized under CGROUP_SCHEDTUNE
  sched/fair: consider task utilization in group_max_util()
  sched/fair: consider task utilization in group_norm_util()
  sched/fair: enforce EAS mode
  sched/fair: ignore backup CPU when not valid
  sched/fair: trace energy_diff for non boosted tasks
  UPSTREAM: sched/fair: Sync task util before slow-path wakeup
  UPSTREAM: sched/fair: Fix usage of find_idlest_group() when the local group is idlest
  UPSTREAM: sched/fair: Fix usage of find_idlest_group() when no groups are allowed
  BACKPORT: sched/fair: Fix find_idlest_group when local group is not allowed
  UPSTREAM: sched/fair: Remove unnecessary comparison with -1
  BACKPORT: sched/fair: Move select_task_rq_fair slow-path into its own function
  UPSTREAM: sched/fair: Force balancing on nohz balance if local group has capacity
  UPSTREAM: sched/core: Add missing update_rq_clock() call in set_user_nice()
  UPSTREAM: sched/core: Add missing update_rq_clock() call for task_hot()
  UPSTREAM: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()
  UPSTREAM: sched/core: Add missing update_rq_clock() in post_init_entity_util_avg()
  UPSTREAM: sched/core: Fix find_idlest_group() for fork
  BACKPORT: sched/fair: Fix PELT integrity for new tasks
  BACKPORT: sched/cgroup: Fix cpu_cgroup_fork() handling
  UPSTREAM: sched/fair: Fix and optimize the fork() path
  BACKPORT: sched/fair: Make it possible to account fair load avg consistently
  cpufreq/sched: Consider max cpu capacity when choosing frequencies
  Linux 4.4.95
  FS-Cache: fix dereference of NULL user_key_payload
  fscrypto: require write access to mount to set encryption policy
  KEYS: Fix race between updating and finding a negative key
  fscrypt: fix dereference of NULL user_key_payload
  f2fs crypto: add missing locking for keyring_key access
  f2fs crypto: replace some BUG_ON()'s with error checks
  sched/autogroup: Fix autogroup_move_group() to never skip sched_move_task()
  parisc: Fix double-word compare and exchange in LWS code on 32-bit kernels
  parisc: Avoid trashing sr2 and sr3 in LWS code
  pkcs7: Prevent NULL pointer dereference, since sinfo is not always set.
  KEYS: don't let add_key() update an uninstantiated key
  lib/digsig: fix dereference of NULL user_key_payload
  KEYS: encrypted: fix dereference of NULL user_key_payload
  rtlwifi: rtl8821ae: Fix connection lost problem
  clockevents/drivers/cs5535: Improve resilience to spurious interrupts
  bus: mbus: fix window size calculation for 4GB windows
  brcmsmac: make some local variables 'static const' to reduce stack size
  i2c: ismt: Separate I2C block read from SMBus block read
  ALSA: hda: Remove superfluous '-' added by printk conversion
  ALSA: seq: Enable 'use' locking in all configurations
  drm/nouveau/mmu: flush tlbs before deleting page tables
  drm/nouveau/bsp/g92: disable by default
  can: esd_usb2: Fix can_dlc value for received RTR, frames
  usb: musb: Check for host-mode using is_host_active() on reset interrupt
  usb: musb: sunxi: Explicitly release USB PHY on exit
  can: gs_usb: fix busy loop if no more TX context is available
  ALSA: usb-audio: Add native DSD support for Pro-Ject Pre Box S2 Digital
  usb: hub: Allow reset retry for USB2 devices on connect bounce
  usb: quirks: add quirk for WORLDE MINI MIDI keyboard
  usb: cdc_acm: Add quirk for Elatec TWN3
  USB: serial: metro-usb: add MS7820 device id
  USB: core: fix out-of-bounds access bug in usb_get_bos_descriptor()
  USB: devio: Revert "USB: devio: Don't corrupt user memory"
  ANDROID: binder: show high watermark of alloc->pages.
  ANDROID: binder: Add thread->process_todo flag.
  UPSTREAM: arm64: compat: Remove leftover variable declaration
  ANDROID: sched/fair: Select correct capacity state for energy_diff
  Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub"
  cpufreq: schedutil: clamp util to CPU maximum capacity
  FROMLIST: android: binder: Fix null ptr dereference in debug msg
  FROMLIST: android: binder: Change binder_shrinker to static
  cpufreq/sched: Use cpu max freq rather than policy max

Conflicts:
	include/linux/sched.h
	kernel/sched/core.c
	kernel/sched/fair.c

Change-Id: I2751f851df741f00e797deaf2119872b3dced655
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags,
		    int sibling_count_hint)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */
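
/*
 * HMP/WALT load-tracking hooks: even the stop class must keep the per-rq
 * cumulative runnable average in sync when its task is enqueued, dequeued,
 * or has its demand re-estimated, so the window-based load statistics stay
 * consistent across all scheduling classes. These callbacks are wired into
 * stop_sched_class below on CONFIG_SCHED_HMP builds.
 */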
|
|
|
|
#ifdef CONFIG_SCHED_HMP
|
|
|
|
static void
|
|
inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
|
|
{
|
|
inc_cumulative_runnable_avg(&rq->hmp_stats, p);
|
|
}
|
|
|
|
static void
|
|
dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
|
|
{
|
|
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
|
|
}
|
|
|
|
static void
|
|
fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
|
|
u32 new_task_load, u32 new_pred_demand)
|
|
{
|
|
s64 task_load_delta = (s64)new_task_load - task_load(p);
|
|
s64 pred_demand_delta = PRED_DEMAND_DELTA;
|
|
|
|
fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
|
|
pred_demand_delta);
|
|
}
|
|
|
|
#else /* CONFIG_SCHED_HMP */
|
|
|
|
static inline void
|
|
inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
|
|
|
|
static inline void
|
|
dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
|
|
|
|
#endif /* CONFIG_SCHED_HMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}
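
/*
 * The stop class keeps no runqueue of its own: enqueue and dequeue only
 * maintain rq->nr_running (plus the HMP statistics above) so the rest of
 * the scheduler sees an accurate task count.
 */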
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
	inc_hmp_sched_stats_stop(rq, p);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
	dec_hmp_sched_stats_stop(rq, p);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
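
/*
 * The stop class has no tick-driven update_curr() accounting of its own, so
 * runtime is charged when the stop task is switched out: compute the delta
 * since exec_start, fold it into sum_exec_runtime, and charge cpuacct.
 */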
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}
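
/*
 * Ticks are a no-op for the stop class: nothing can preempt it and it has
 * no time slice, so all runtime accounting happens in put_prev_task_stop()
 * above.
 */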

static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}

static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
#ifdef CONFIG_SCHED_HMP
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_stop,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_stop,
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_stop,
#endif
};