softirq: defer softirq processing to ksoftirqd if CPU is busy with RT
Defer softirq processing to ksoftirqd if an RT task is running or queued on the current CPU. This complements the RT task placement algorithm, which tries to find a CPU that is not currently busy with softirqs. Currently only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are deferred, as they can potentially run for a long time.

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent ca652b3d74, commit a66b3eb5af
3 changed files with 20 additions and 1 deletion
@@ -2481,6 +2481,7 @@ extern void do_set_cpus_allowed(struct task_struct *p,
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				       const struct cpumask *new_mask)
@@ -2493,6 +2494,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif
 
 struct sched_load {
@@ -279,3 +279,14 @@ void cpupri_cleanup(struct cpupri *cp)
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+/*
+ * cpupri_check_rt - check if CPU has a RT task
+ * should be called from rcu-sched read section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
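In cpupri, cpu_to_pri[cpu] tracks the priority class of the highest-priority task on that CPU: CPUPRI_INVALID (-1), CPUPRI_IDLE (0), CPUPRI_NORMAL (1) for fair tasks, and higher values for RT priorities, so the comparison above is true exactly when an RT task is running or queued on the current CPU. The "should be called from rcu-sched read section" note is there because the root domain (rq->rd) is protected by RCU-sched; hardirq/softirq context satisfies that implicitly, while a process-context caller would take the read section explicitly. A minimal usage sketch under that assumption (should_defer_to_ksoftirqd() is a hypothetical helper, not part of the patch):

	static bool should_defer_to_ksoftirqd(void)
	{
		bool rt_busy;

		/* rq->rd and its embedded cpupri are protected by RCU-sched */
		rcu_read_lock_sched();
		rt_busy = cpupri_check_rt();
		rcu_read_unlock_sched();

		return rt_busy;
	}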
@@ -234,6 +234,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
 asmlinkage __visible void __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
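Both helpers are cheap macro checks: long_softirq_pending() narrows the pending bitmap to the potentially long-running vectors, and defer_for_rt() additionally asks cpupri whether an RT task owns this CPU. LONG_SOFTIRQ_MASK itself is not defined in these hunks; going by the commit message, it plausibly groups the named vectors from the standard softirq enum, roughly like the sketch below (an assumption, not the patch's actual definition):

	/*
	 * Sketch only: the softirq vectors the commit message calls out as
	 * potentially long-running. The real mask is defined outside this diff.
	 */
	#define LONG_SOFTIRQ_MASK	((1 << NET_TX_SOFTIRQ)	| \
					 (1 << NET_RX_SOFTIRQ)	| \
					 (1 << BLOCK_SOFTIRQ)	| \
					 (1 << TASKLET_SOFTIRQ))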
@@ -297,6 +299,7 @@ restart:
 	pending = local_softirq_pending();
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
+		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
 
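With !defer_for_rt() added to the restart condition, __do_softirq() stops iterating as soon as an RT task shows up on this CPU, even if time and restart budget remain. In the surrounding upstream code (paraphrased from memory for context; only the !defer_for_rt() line is added by this patch), the branch then falls through to wake ksoftirqd, which is what actually hands the remaining vectors to the per-CPU thread:

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

		/* out of budget, rescheduling needed, or deferring for RT */
		wakeup_softirqd();
	}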
@@ -349,7 +352,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_for_rt()) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
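This is the hardirq-exit path: when defer_for_rt() is true, invoke_softirq() now takes the same branch as force_irqthreads and simply wakes ksoftirqd instead of running the handlers inline. A context sketch paraphrased from the surrounding upstream function (only the condition change belongs to this patch):

	static inline void invoke_softirq(void)
	{
		if (!force_irqthreads && !defer_for_rt()) {
	#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
			/* the irq stack is nearly empty here, run inline on it */
			__do_softirq();
	#else
			/* avoid deep task stacks: switch to the softirq stack */
			do_softirq_own_stack();
	#endif
		} else {
			/* defer to the per-CPU ksoftirqd thread */
			wakeup_softirqd();
		}
	}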