clocksource: add API to force re-selection of the best clocksource

The best clocksource is not selected until core boot has completed, so
until then only the periodic tick timer runs and jiffies is advanced by
one on every tick. If interrupts stay disabled for longer than one tick
(10ms), tick interrupts are missed, jiffies is no longer updated every
10ms, and the system time falls behind real time. Add an API to force
re-selection of the best clocksource among the registered clocksources,
so that the best clocksource can be selected as soon as it becomes
available.

Change-Id: I481de3cdf1df8f0e35ed10aee7ab3882bf7a35b3
Signed-off-by: Se Wang (Patrick) Oh <sewango@codeaurora.org>
Signed-off-by: Prasad Sodagudi <psodagud@codeaurora.org>
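
As a rough illustration of how a caller might use the new API (not part of
this patch; the driver name, counter frequency, mask width, and read
callback below are made up), a driver that registers a high-rating
clocksource from an early initcall could force re-selection right after
registration instead of waiting for clocksource_done_booting():

#include <linux/clocksource.h>
#include <linux/init.h>

/* Placeholder read callback; a real driver would read its hardware counter. */
static u64 example_timer_read(struct clocksource *cs)
{
	return 0;
}

static struct clocksource example_timer_cs = {
	.name	= "example-timer",
	.rating	= 400,
	.read	= example_timer_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_timer_init(void)
{
	/* Made-up 19.2 MHz counter frequency, for illustration only. */
	int ret = clocksource_register_hz(&example_timer_cs, 19200000);

	if (ret)
		return ret;

	/*
	 * Before core boot completes, clocksource_select() ignores newly
	 * registered clocksources; force re-selection so this one is
	 * picked up immediately.
	 */
	clocksource_select_force();
	return 0;
}
early_initcall(example_timer_init);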

include/linux/clocksource.h

@@ -186,6 +186,7 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern void clocksource_select_force(void);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);

kernel/time/clocksource.c

@@ -108,7 +108,7 @@ static int finished_booting;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);
static void clocksource_select(bool force);
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
@@ -415,7 +415,7 @@ static int clocksource_watchdog_kthread(void *data)
{
mutex_lock(&clocksource_mutex);
if (__clocksource_watchdog_kthread())
clocksource_select();
clocksource_select(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -555,11 +555,12 @@ static inline void clocksource_update_max_deferment(struct clocksource *cs)
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur,
bool force)
{
struct clocksource *cs;
if (!finished_booting || list_empty(&clocksource_list))
if ((!finished_booting && !force) || list_empty(&clocksource_list))
return NULL;
/*
@@ -577,13 +578,13 @@ static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
return NULL;
}
static void __clocksource_select(bool skipcur)
static void __clocksource_select(bool skipcur, bool force)
{
bool oneshot = tick_oneshot_mode_active();
struct clocksource *best, *cs;
/* Find the best suitable clocksource */
best = clocksource_find_best(oneshot, skipcur);
best = clocksource_find_best(oneshot, skipcur, force);
if (!best)
return;
@@ -623,22 +624,40 @@ static void __clocksource_select(bool skipcur)
* Select the clocksource with the best rating, or the clocksource,
* which is selected by userspace override.
*/
static void clocksource_select(void)
static void clocksource_select(bool force)
{
__clocksource_select(false);
return __clocksource_select(false, force);
}
static void clocksource_select_fallback(void)
{
__clocksource_select(true);
__clocksource_select(true, false);
}
#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select(bool force) { }
static inline void clocksource_select_fallback(void) { }
#endif
/**
* clocksource_select_force - Force re-selection of the best clocksource
* among registered clocksources
*
* clocksource_select() cannot pick the best clocksource before
* clocksource_done_booting() has run, and it must be called with
* clocksource_mutex held, so provide a new API that other files can
* call to select the best clocksource irrespective of the
* finished_booting flag.
*/
void clocksource_select_force(void)
{
mutex_lock(&clocksource_mutex);
clocksource_select(true);
mutex_unlock(&clocksource_mutex);
}
/*
* clocksource_done_booting - Called near the end of core bootup
*
@@ -655,7 +674,7 @@ static int __init clocksource_done_booting(void)
* Run the watchdog first to eliminate unstable clock sources
*/
__clocksource_watchdog_kthread();
clocksource_select();
clocksource_select(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -744,6 +763,7 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
/**
* __clocksource_register_scale - Used to install new clocksources
* @cs: clocksource to be registered
@@ -765,7 +785,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
clocksource_select();
clocksource_select(false);
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
return 0;
@@ -788,7 +808,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
{
mutex_lock(&clocksource_mutex);
__clocksource_change_rating(cs, rating);
clocksource_select();
clocksource_select(false);
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
}
@@ -892,7 +912,7 @@ static ssize_t sysfs_override_clocksource(struct device *dev,
ret = sysfs_get_uname(buf, override_name, count);
if (ret >= 0)
clocksource_select();
clocksource_select(false);
mutex_unlock(&clocksource_mutex);