use a window based view of time in order to track task demand and CPU utilization in the scheduler. Window Assisted Load Tracking (WALT) implementation credits: Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park, Pavan Kumar Kondeti, Olav Haugan 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla and Todd Kjos Change-Id: I21408236836625d4e7d7de1843d20ed5ff36c708 Includes fixes for issues: eas/walt: Use walt_ktime_clock() instead of ktime_get_ns() to avoid a race resulting in watchdog resets BUG: 29353986 Change-Id: Ic1820e22a136f7c7ebd6f42e15f14d470f6bbbdb Handle walt accounting anomaly during resume During resume, there is a corner case where on wakeup, a task's prev_runnable_sum can go negative. This is a workaround that fixes the condition and warns (instead of crashing). BUG: 29464099 Change-Id: I173e7874324b31a3584435530281708145773508 Signed-off-by: Todd Kjos <tkjos@google.com> Signed-off-by: Srinath Sridharan <srinathsr@google.com> Signed-off-by: Juri Lelli <juri.lelli@arm.com> [jstultz: fwdported to 4.4] Signed-off-by: John Stultz <john.stultz@linaro.org>
57 lines
2.4 KiB
C
57 lines
2.4 KiB
C
/*
|
|
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 and
|
|
* only version 2 as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*/
|
|
|
|
#ifndef __WALT_H
|
|
#define __WALT_H
|
|
|
|
#ifdef CONFIG_SCHED_WALT
|
|
|
|
void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
|
|
u64 wallclock, u64 irqtime);
|
|
void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
|
|
void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
|
|
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
|
|
struct task_struct *p);
|
|
void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
|
|
struct task_struct *p);
|
|
void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
|
|
void walt_init_new_task_load(struct task_struct *p);
|
|
void walt_mark_task_starting(struct task_struct *p);
|
|
void walt_set_window_start(struct rq *rq);
|
|
void walt_migrate_sync_cpu(int cpu);
|
|
void walt_init_cpu_efficiency(void);
|
|
u64 walt_ktime_clock(void);
|
|
|
|
#else /* CONFIG_SCHED_WALT */
|
|
|
|
static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
|
|
int event, u64 wallclock, u64 irqtime) { }
|
|
static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
|
|
static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
|
|
static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
|
|
struct task_struct *p) { }
|
|
static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
|
|
struct task_struct *p) { }
|
|
static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
|
|
static inline void walt_init_new_task_load(struct task_struct *p) { }
|
|
static inline void walt_mark_task_starting(struct task_struct *p) { }
|
|
static inline void walt_set_window_start(struct rq *rq) { }
|
|
static inline void walt_migrate_sync_cpu(int cpu) { }
|
|
static inline void walt_init_cpu_efficiency(void) { }
|
|
static inline u64 walt_ktime_clock(void) { return 0; }
|
|
|
|
#endif /* CONFIG_SCHED_WALT */
|
|
|
|
extern unsigned int walt_disabled;
|
|
|
|
#endif
|