rcu: Switch synchronize_sched_expedited() to IPI

This commit switches synchronize_sched_expedited() from stop_one_cpu_nowait()
to smp_call_function_single(), thus moving from an IPI and a pair of
context switches to an IPI and a single pass through the scheduler.
Of course, if the scheduler actually does decide to switch to a different
task, there will still be a pair of context switches, but there would
likely have been a pair of context switches anyway, just a bit later.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Author: Paul E. McKenney
Date:   2015-08-06 16:50:39 -07:00
Parent: 5b74c45890
Commit: 6587a23b6b
2 changed files with 20 additions and 15 deletions
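For reference, the two dispatch primitives this commit trades between have roughly the following shapes. This is a minimal sketch, not part of the patch; kick_cpu_stopper(), kick_cpu_ipi(), and the do_work_*() handlers are hypothetical names used only to illustrate the call patterns of stop_one_cpu_nowait() and smp_call_function_single():

#include <linux/smp.h>
#include <linux/stop_machine.h>

/* Old-path handler: runs in the per-CPU stopper kthread. */
static int do_work_stopper(void *arg)
{
	return 0;
}

/* New-path handler: runs directly from the IPI, in hard-IRQ context. */
static void do_work_ipi(void *arg)
{
}

/* Old pattern: queue work for the stopper kthread (IPI plus context switches). */
static void kick_cpu_stopper(int cpu, struct cpu_stop_work *work)
{
	stop_one_cpu_nowait(cpu, do_work_stopper, NULL, work);
}

/* New pattern: run the handler from an IPI; a nonzero return (e.g. the CPU is offline) means the handler did not run. */
static int kick_cpu_ipi(int cpu)
{
	return smp_call_function_single(cpu, do_work_ipi, NULL, 0);
}

In the patch itself, the IPI handler does not report the quiescent state directly: it sets rcu_sched_data.cpu_no_qs.b.exp and calls resched_cpu(), and the resulting pass through the scheduler invokes rcu_sched_qs(), which sees the flag and reports the expedited quiescent state via rcu_report_exp_rdp().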

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c

@@ -161,6 +161,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_state *rsp,
+			       struct rcu_data *rdp, bool wake);
 
 /* rcuc/rcub kthread realtime priority */
 #ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -250,6 +252,12 @@ void rcu_sched_qs(void)
 			    __this_cpu_read(rcu_sched_data.gpnum),
 			    TPS("cpuqs"));
 		__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+		if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
+			__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+			rcu_report_exp_rdp(&rcu_sched_state,
+					   this_cpu_ptr(&rcu_sched_data),
+					   true);
+		}
 	}
 }
@@ -3555,8 +3563,8 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
  * Report expedited quiescent state for specified rcu_data (CPU).
  * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
-static void __maybe_unused rcu_report_exp_rdp(struct rcu_state *rsp,
-					      struct rcu_data *rdp, bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+			       bool wake)
 {
 	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
 }
@@ -3637,14 +3645,10 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 }
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static int synchronize_sched_expedited_cpu_stop(void *data)
+static void synchronize_sched_expedited_cpu_stop(void *data)
 {
-	struct rcu_data *rdp = data;
-	struct rcu_state *rsp = rdp->rsp;
-
-	/* Report the quiescent state. */
-	rcu_report_exp_rdp(rsp, rdp, true);
-	return 0;
+	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+	resched_cpu(smp_processor_id());
 }
 
 /*
@@ -3659,6 +3663,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
 	unsigned long mask_ofl_test;
 	unsigned long mask_ofl_ipi;
 	struct rcu_data *rdp;
+	int ret;
 	struct rcu_node *rnp;
 
 	sync_exp_reset_tree(rsp);
@@ -3694,9 +3699,9 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
 			if (!(mask_ofl_ipi & mask))
 				continue;
 			rdp = per_cpu_ptr(rsp->rda, cpu);
-			stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
-					    rdp, &rdp->exp_stop_work);
-			mask_ofl_ipi &= ~mask;
+			ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
+			if (!ret)
+				mask_ofl_ipi &= ~mask;
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4201,6 +4206,9 @@ int rcu_cpu_notify(struct notifier_block *self,
 		rcu_cleanup_dying_cpu(rsp);
 		break;
 	case CPU_DYING_IDLE:
+		/* QS for any half-done expedited RCU-sched GP. */
+		rcu_sched_qs();
+
 		for_each_rcu_flavor(rsp) {
 			rcu_cleanup_dying_idle_cpu(cpu, rsp);
 		}

--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h

@@ -324,9 +324,6 @@ struct rcu_data {
 					/* ticks this CPU has handled */
 					/* during and after the last grace */
 					/* period it is aware of. */
-	struct cpu_stop_work exp_stop_work;
-					/* Expedited grace-period control */
-					/*  for CPU stopping. */
 
 	/* 2) batch handling */
 	/*