timer: Don't wait for running timers when migrating during isolation

A CPU that is isolated needs to have its timers migrated off to
another CPU. If a timer is running while the migration takes
place, acquiring the timer base lock after marking the CPU as
isolated ensures that:

1) No more timers can be queued on to the isolated CPU, and
2) A running timer will finish execution on the to-be-isolated
   CPU, and so will any just-expired timers, since they are all
   taken off of the CPU's tv1 in one go while the base lock
   is held.

Therefore there is no apparent reason to wait for the expired
timers to finish execution, and isolation can proceed to migrate
non-expired timers even when the expired ones are running
concurrently.
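
For reference, the guarantee in (2) comes from how the timer softirq
drains the wheel: the whole expired bucket is spliced off tv1 in one
operation while the base lock is held. Roughly, in 3.18-era terms
(a simplified sketch of __run_timers(), not code from this commit;
the cascading of tv2..tv5 is omitted):

	static void run_expired_timers_sketch(struct tvec_base *base)
	{
		spin_lock_irq(&base->lock);
		while (time_after_eq(jiffies, base->timer_jiffies)) {
			struct list_head work_list;
			int index = base->timer_jiffies & TVR_MASK;

			/*
			 * Splice the entire expired bucket off the wheel
			 * atomically; once a handler runs, its timer is no
			 * longer on the tv1 that migration walks.
			 */
			list_replace_init(base->tv1.vec + index, &work_list);
			base->timer_jiffies++;
			/* handlers then run from work_list, with the lock
			 * dropped around each callback */
		}
		spin_unlock_irq(&base->lock);
	}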

While we're here, also add a delay to the wait-loop inside
migrate_hrtimer_list to allow for store-exclusive fairness
when run_hrtimer is attempting to grab the hrtimer base
lock.

Change-Id: Ib697476c93c60e3d213aaa8fff0a2bcc2985bfce
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
Author: Vikram Mulukutla
Date:   2016-11-08 15:21:41 -08:00
Commit: 4142e30898
Parent: 85d7e134cc

2 files changed, 17 insertions(+), 14 deletions(-)

--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,6 +49,7 @@
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
+#include <linux/delay.h>
 #include <asm/uaccess.h>
@@ -1648,6 +1649,12 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
 		raw_spin_unlock(&old_base->lock);
 		raw_spin_unlock(&new_base->lock);
 		cpu_relax();
+		/*
+		 * cpu_relax may just be a barrier. Grant the
+		 * run_hrtimer_list code some time to obtain the
+		 * spinlock.
+		 */
+		udelay(2);
 		raw_spin_lock(&new_base->lock);
 		raw_spin_lock_nested(&old_base->lock,
 					SINGLE_DEPTH_NESTING);
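
For context, the unlock/relock sequence patched above sits inside a
retry loop that spins for as long as the hrtimer callback is running;
reconstructed roughly below (assuming this tree's wait-capable
migrate_hrtimer_list() and the mainline hrtimer_callback_running()
helper; not verbatim source):

	while (hrtimer_callback_running(timer)) {
		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock(&new_base->lock);
		cpu_relax();
		/* See the hunk above: give the CPU running the
		 * callback a real window to take the base lock. */
		udelay(2);
		raw_spin_lock(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
	}

Without the udelay(), cpu_relax() can compile down to a plain barrier
on some architectures, so this loop could re-acquire the locks before
the CPU spinning in the hrtimer code ever wins the store-exclusive,
livelocking the migration.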

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1640,7 +1640,7 @@ static void migrate_timer_list(struct tvec_base *new_base,
 	}
 }
 
-static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
+static void __migrate_timers(int cpu, bool remove_pinned)
 {
 	struct tvec_base *old_base;
 	struct tvec_base *new_base;
@@ -1656,18 +1656,14 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
 	spin_lock_irqsave(&new_base->lock, flags);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
-	if (wait) {
-		/* Ensure timers are done running before continuing */
-		while (old_base->running_timer) {
-			spin_unlock(&old_base->lock);
-			spin_unlock_irqrestore(&new_base->lock, flags);
-			cpu_relax();
-			spin_lock_irqsave(&new_base->lock, flags);
-			spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-		}
-	} else {
+	/*
+	 * If we're in the hotplug path, kill the system if there's a running
+	 * timer. It's ok to have a running timer in the isolation case - the
+	 * currently running or just expired timers are off of the timer wheel
+	 * and so everything else can be migrated off.
+	 */
+	if (!cpu_online(cpu))
 		BUG_ON(old_base->running_timer);
-	}
 
 	for (i = 0; i < TVR_SIZE; i++)
 		migrate_timer_list(new_base, old_base->tv1.vec + i,
@@ -1692,12 +1688,12 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
 static void migrate_timers(int cpu)
 {
 	BUG_ON(cpu_online(cpu));
-	__migrate_timers(cpu, false, true);
+	__migrate_timers(cpu, true);
 }
 
 void timer_quiesce_cpu(void *cpup)
 {
-	__migrate_timers(*(int *)cpup, true, false);
+	__migrate_timers(*(int *)cpup, false);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
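
The two remaining callers map onto the two policies: migrate_timers()
runs from the hotplug notifier after the CPU is already offline, where
a running timer would be a bug, while timer_quiesce_cpu() is the
isolation entry point and tolerates one. Its void * signature suggests
invocation via an smp_call_function-style helper; a hypothetical
caller (illustrative only, not part of this commit) might look like:

	/* Hypothetical isolation-path caller; not from this commit. */
	static int isolate_cpu_timers(int cpu)
	{
		/*
		 * Drain the timer wheel of the CPU being isolated;
		 * timer_quiesce_cpu() matches smp_call_func_t.
		 */
		return smp_call_function_single(cpu, timer_quiesce_cpu,
						&cpu, 1);
	}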