hrtimer: Don't drop the base lock when migrating timers during isolation

The current code drops the base lock and waits for the running
hrtimer's expiry event to be processed on the isolated CPU. This
leaves a window where the running hrtimer can migrate to a different
CPU or even get freed. The pinned hrtimers maintained on a temporary
list can also get freed while the lock is dropped. The only reason
for waiting on the running hrtimer is to make sure it is migrated
away from the isolated CPU, which matters only if the hrtimer is
rearmed from its own callback. As this race is very rare, it is
better to accept that limitation than to fix the above mentioned
bugs with more intrusive changes.
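
For illustration, here is a minimal sketch (hypothetical names, not
part of this patch) of the self-rearming callback pattern that the
new limitation concerns: if such a callback is running on the CPU
being isolated when migration happens, the timer re-enqueues itself
on that CPU, because the code no longer waits for the callback to
finish.

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    /* Hypothetical example timer; any driver-owned hrtimer works. */
    static struct hrtimer poll_timer;

    static enum hrtimer_restart poll_fn(struct hrtimer *timer)
    {
            /* ... periodic work ... */

            /*
             * Rearming from the callback re-enqueues the timer on the
             * CPU that ran it, which may be the CPU being isolated if
             * isolation raced with this callback.
             */
            hrtimer_forward_now(timer, ms_to_ktime(10));
            return HRTIMER_RESTART;
    }

    static void poll_start(void)
    {
            hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            poll_timer.function = poll_fn;
            hrtimer_start(&poll_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
    }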

Change-Id: I14ba67cacb321d8b561195935592bb9979996a27
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Author: Pavankumar Kondeti <pkondeti@codeaurora.org>
Date:   2017-05-16 15:29:04 +05:30
Commit: 3b8631c0d5
Parent: 60be71604a

@@ -49,7 +49,6 @@
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
-#include <linux/delay.h>
 
 #include <asm/uaccess.h>
 
@@ -1631,42 +1630,22 @@ static void init_hrtimers_cpu(int cpu)
 }
 
 #if defined(CONFIG_HOTPLUG_CPU)
-static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
-                                 struct hrtimer_cpu_base *new_base,
-                                 unsigned int i,
-                                 bool wait,
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                                 struct hrtimer_clock_base *new_base,
                                  bool remove_pinned)
 {
         struct hrtimer *timer;
         struct timerqueue_node *node;
         struct timerqueue_head pinned;
         int is_pinned;
-        struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
-        struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
+        bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
 
         timerqueue_init_head(&pinned);
 
-        while ((node = timerqueue_getnext(&old_c_base->active))) {
+        while ((node = timerqueue_getnext(&old_base->active))) {
                 timer = container_of(node, struct hrtimer, node);
-                if (wait) {
-                        /* Ensure timers are done running before continuing */
-                        while (hrtimer_callback_running(timer)) {
-                                raw_spin_unlock(&old_base->lock);
-                                raw_spin_unlock(&new_base->lock);
-                                cpu_relax();
-                                /*
-                                 * cpu_relax may just be a barrier. Grant the
-                                 * run_hrtimer_list code some time to obtain the
-                                 * spinlock.
-                                 */
-                                udelay(2);
-                                raw_spin_lock(&new_base->lock);
-                                raw_spin_lock_nested(&old_base->lock,
-                                                        SINGLE_DEPTH_NESTING);
-                        }
-                } else {
-                        BUG_ON(hrtimer_callback_running(timer));
-                }
+                if (is_hotplug)
+                        BUG_ON(hrtimer_callback_running(timer));
                 debug_deactivate(timer);
 
                 /*
@@ -1674,7 +1653,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
                  * timer could be seen as !active and just vanish away
                  * under us on another CPU
                  */
-                __remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
+                __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
                 is_pinned = timer->state & HRTIMER_STATE_PINNED;
 
                 if (!remove_pinned && is_pinned) {
@@ -1682,7 +1661,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
                         continue;
                 }
 
-                timer->base = new_c_base;
+                timer->base = new_base;
                 /*
                  * Enqueue the timers on the new cpu. This does not
                  * reprogram the event device in case the timer
@@ -1691,7 +1670,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
                  * sort out already expired timers and reprogram the
                  * event device.
                  */
-                enqueue_hrtimer(timer, new_c_base);
+                enqueue_hrtimer(timer, new_base);
         }
 
         /* Re-queue pinned timers for non-hotplug usecase */
@@ -1699,11 +1678,11 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
                 timer = container_of(node, struct hrtimer, node);
                 timerqueue_del(&pinned, &timer->node);
-                enqueue_hrtimer(timer, old_c_base);
+                enqueue_hrtimer(timer, old_base);
         }
 }
 
-static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
+static void __migrate_hrtimers(int scpu, bool remove_pinned)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
         unsigned long flags;
         int i;
@@ -1720,8 +1699,8 @@ static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
         raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-                migrate_hrtimer_list(old_base, new_base, i, wait,
-                                        remove_pinned);
+                migrate_hrtimer_list(&old_base->clock_base[i],
+                                &new_base->clock_base[i], remove_pinned);
         }
 
         raw_spin_unlock(&old_base->lock);
@@ -1737,12 +1716,12 @@ static void migrate_hrtimers(int scpu)
         BUG_ON(cpu_online(scpu));
         tick_cancel_sched_timer(scpu);
 
-        __migrate_hrtimers(scpu, false, true);
+        __migrate_hrtimers(scpu, true);
 }
 
 void hrtimer_quiesce_cpu(void *cpup)
 {
-        __migrate_hrtimers(*(int *)cpup, true, false);
+        __migrate_hrtimers(*(int *)cpup, false);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */