/*
 * Scheduler sysctl tunables: declarations for the scheduler knobs exported
 * under /proc/sys/kernel/ (and /proc/sys/vm/ for max_map_count) together
 * with their proc handlers.
 */
#ifndef _SCHED_SYSCTL_H
#define _SCHED_SYSCTL_H
#ifdef CONFIG_DETECT_HUNG_TASK
/* Max number of tasks examined in one hung-task watchdog pass. */
extern int sysctl_hung_task_check_count;
/* Non-zero: panic instead of only warning when a hung task is detected. */
extern unsigned int sysctl_hung_task_panic;
/* Seconds of uninterruptible sleep before a task is reported as hung. */
extern unsigned long sysctl_hung_task_timeout_secs;
/* Budget of hung-task warnings still allowed to be printed. */
extern int sysctl_hung_task_warnings;
/* proc handler for writes to hung_task_timeout_secs. */
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per a vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to a image of program at
 * generating coredump, we need some margin. The number of extra sections is
 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 *
 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 * not a hard limit any more. Although some userspace tools can be surprised by
 * that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

/* Current vma-per-mm limit; tunable via /proc/sys/vm/max_map_count. */
extern int sysctl_max_map_count;
/*
 * CFS scheduler tunables; defaults and exact semantics live in
 * kernel/sched/fair.c (latency/granularity values are in nanoseconds).
 */
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
/* Non-zero: schedule a freshly fork()ed child before its parent. */
extern unsigned int sysctl_sched_child_runs_first;
/*
 * Energy-aware scheduling hints (Android/EAS additions); presumably
 * consumed by the wakeup path — see kernel/sched/fair.c for usage.
 */
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_WALT
/* Window-Assisted Load Tracking (WALT) knobs. */
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
#endif
/*
 * How the CFS latency/granularity tunables above are scaled with the
 * number of online CPUs.
 */
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,	/* fixed, no scaling */
	SCHED_TUNABLESCALING_LOG,	/* scale logarithmically with ncpus */
	SCHED_TUNABLESCALING_LINEAR,	/* scale linearly with ncpus */
	SCHED_TUNABLESCALING_END,	/* upper bound / number of modes */
};
/* Currently selected scaling mode (sched_tunable_scaling sysctl). */
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
/*
 * Automatic NUMA balancing scan parameters; delay/periods in ms,
 * scan size in MB (see Documentation sysctl/kernel docs).
 */
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
/* Extra tunables exposed only on CONFIG_SCHED_DEBUG kernels. */
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_sched_shares_window;

/* Shared proc handler for the debug scheduler tunables above. */
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
/* Signed: -1 means "no RT throttling" (unlimited runtime). */
extern int sysctl_sched_rt_runtime;

#ifdef CONFIG_CFS_BANDWIDTH
/* Amount of CFS runtime handed out per bandwidth slice. */
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif
2015-06-22 18:11:44 +01:00
|
|
|
#ifdef CONFIG_SCHED_TUNE
|
|
|
|
extern unsigned int sysctl_sched_cfs_boost;
|
|
|
|
int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
|
|
|
|
void __user *buffer, size_t *length,
|
|
|
|
loff_t *ppos);
|
|
|
|
static inline unsigned int get_sysctl_sched_cfs_boost(void)
|
|
|
|
{
|
|
|
|
return sysctl_sched_cfs_boost;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline unsigned int get_sysctl_sched_cfs_boost(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-02-07 09:46:59 -06:00
|
|
|
#ifdef CONFIG_SCHED_AUTOGROUP
|
|
|
|
extern unsigned int sysctl_sched_autogroup_enabled;
|
|
|
|
#endif
|
|
|
|
|
2013-02-07 09:47:04 -06:00
|
|
|
extern int sched_rr_timeslice;
|
|
|
|
|
|
|
|
extern int sched_rr_handler(struct ctl_table *table, int write,
|
|
|
|
void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
|
|
|
|
extern int sched_rt_handler(struct ctl_table *table, int write,
|
2013-02-07 09:46:59 -06:00
|
|
|
void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
|
2014-01-23 15:53:13 -08:00
|
|
|
extern int sysctl_numa_balancing(struct ctl_table *table, int write,
|
|
|
|
void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
|
2013-02-07 09:46:59 -06:00
|
|
|
#endif /* _SCHED_SYSCTL_H */
|