Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "This pull request has a lot of work done.  The main thing is the
  changes to the ftrace function callback infrastructure.  It introduces
  a way to allow different functions to directly call different
  trampolines instead of all calling the same "mcount" one.

  The only user of this for now is the function graph tracer, which
  always had a different trampoline, but previously the function tracer
  trampoline was called first and did basically nothing, and then the
  function graph tracer trampoline was called.  The difference now is
  that the function graph tracer trampoline can be called directly if a
  function is only being traced by the function graph tracer.  If
  function tracing is also happening on the same function, the old way
  is still done.

  The accounting for this takes up more memory when function graph
  tracing is activated, as it needs to keep track of which functions it
  uses.  I have a new way that won't take as much memory, but it's not
  ready yet for this merge window and will have to wait for the next one.

  Another big change was the removal of the ftrace_start/stop() calls
  that were used by the suspend/resume code to stop function tracing
  when entering the suspend and resume paths.  The stop of ftrace was
  done because there was some function that would crash the system if
  one called smp_processor_id()!  The stop/start was a big hammer to
  solve the issue at the time, which was when ftrace was first
  introduced into Linux.  Now ftrace has better infrastructure to debug
  such issues; I found the problem function and labeled it with
  "notrace", so function tracing can now safely be activated all the
  way down into the guts of suspend and resume.

  Other changes include clean-ups of the uprobe code, clean-up of the
  trace_seq() code, and various other small fixes and clean-ups to
  ftrace and tracing"

* tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
  ftrace: Add warning if tramp hash does not match nr_trampolines
  ftrace: Fix trampoline hash update check on rec->flags
  ring-buffer: Use rb_page_size() instead of open coded head_page size
  ftrace: Rename ftrace_ops field from trampolines to nr_trampolines
  tracing: Convert local function_graph functions to static
  ftrace: Do not copy old hash when resetting
  tracing: let user specify tracing_thresh after selecting function_graph
  ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
  tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST
  s390/ftrace: remove check of obsolete variable function_trace_stop
  arm64, ftrace: Remove check of obsolete variable function_trace_stop
  Blackfin: ftrace: Remove check of obsolete variable function_trace_stop
  metag: ftrace: Remove check of obsolete variable function_trace_stop
  microblaze: ftrace: Remove check of obsolete variable function_trace_stop
  MIPS: ftrace: Remove check of obsolete variable function_trace_stop
  parisc: ftrace: Remove check of obsolete variable function_trace_stop
  sh: ftrace: Remove check of obsolete variable function_trace_stop
  sparc64,ftrace: Remove check of obsolete variable function_trace_stop
  tile: ftrace: Remove check of obsolete variable function_trace_stop
  ftrace: x86: Remove check of obsolete variable function_trace_stop
  ...
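
The "notrace" fix mentioned above works because ftrace only traces functions whose mcount/fentry call sites it has patched; annotating a function with notrace excludes it entirely. A minimal sketch of the idea (the helper name and scenario here are hypothetical; the series does not name the actual function in this message):

	/*
	 * Hypothetical example: a helper that runs while per-CPU state is
	 * not yet valid during suspend/resume must not be traced, since
	 * the tracer's own use of smp_processor_id() would crash.  The
	 * notrace annotation keeps ftrace away from it.
	 */
	static void notrace fragile_resume_helper(void)
	{
		/* ... work that cannot tolerate a traced entry ... */
	}

Below is kernel/power/suspend.c, part of the suspend path discussed above, as of this merge.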
/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>

#include "power.h"

struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
	[PM_SUSPEND_STANDBY] = { .label = "standby", },
	[PM_SUSPEND_MEM] = { .label = "mem", },
};

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
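
/* Platform suspend_ops are only required for states deeper than freeze. */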
static bool need_suspend_ops(suspend_state_t state)
{
	return state > PM_SUSPEND_FREEZE;
}

static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;

void freeze_set_ops(const struct platform_freeze_ops *ops)
{
	lock_system_sleep();
	freeze_ops = ops;
	unlock_system_sleep();
}

static void freeze_begin(void)
{
	suspend_freeze_wake = false;
}
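
/*
 * Idle all CPUs, preferring the deepest available idle state, until a
 * wakeup event arrives and freeze_wake() is called.
 */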
static void freeze_enter(void)
{
	cpuidle_use_deepest_state(true);
	cpuidle_resume();
	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
	cpuidle_pause();
	cpuidle_use_deepest_state(false);
}
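
/* Called from the wakeup machinery to bring the system out of freeze. */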
void freeze_wake(void)
{
	suspend_freeze_wake = true;
	wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);

static bool valid_state(suspend_state_t state)
{
	/*
	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low-level
	 * support and need to be valid to the low-level implementation;
	 * no ->valid() callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/*
 * If this is set, the "mem" label always corresponds to the deepest sleep state
 * available, the "standby" label corresponds to the second deepest sleep state
 * available (if any), and the "freeze" label corresponds to the remaining
 * available sleep state (if there is one).
 */
static bool relative_states;

static int __init sleep_states_setup(char *str)
{
	relative_states = !strncmp(str, "1", 1);
	if (relative_states) {
		pm_states[PM_SUSPEND_MEM].state = PM_SUSPEND_FREEZE;
		pm_states[PM_SUSPEND_FREEZE].state = 0;
	}
	return 1;
}

__setup("relative_sleep_states=", sleep_states_setup);

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	suspend_state_t i;
	int j = PM_SUSPEND_MAX - 1;

	lock_system_sleep();

	suspend_ops = ops;
	for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
		if (valid_state(i))
			pm_states[j--].state = i;
		else if (!relative_states)
			pm_states[j--].state = 0;

	pm_states[j--].state = PM_SUSPEND_FREEZE;
	while (j >= PM_SUSPEND_MIN)
		pm_states[j--].state = 0;

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
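
/*
 * Illustration (hypothetical platform): with "relative_sleep_states=1" on
 * the kernel command line and a platform whose deepest valid state is
 * standby, the loop above maps the "mem" label to standby, the "standby"
 * label to freeze, and leaves the "freeze" label unused.
 */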
/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
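
/*
 * Illustration (hypothetical driver, not part of this file): a platform
 * that supports only suspend-to-RAM can use suspend_valid_only_mem() as
 * its .valid() callback, where foo_enter is a made-up enter handler:
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= foo_enter,
 *	};
 *	suspend_set_ops(&foo_suspend_ops);
 */

/*
 * In CONFIG_PM_DEBUG kernels, pause for five seconds when the suspend
 * sequence reaches the user-selected pm_test level; a non-zero return
 * tells the caller to back out instead of suspending further.
 */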
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* CONFIG_PM_DEBUG */
	return 0;
}
/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0;

	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}

/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		trace_suspend_resume(TPS("machine_suspend"), state, true);
		freeze_enter();
		trace_suspend_resume(TPS("machine_suspend"), state, false);
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (need_suspend_ops(state) && !suspend_ops)
		return -ENOSYS;

	if (need_suspend_ops(state) && suspend_ops->begin) {
		error = suspend_ops->begin(state);
		if (error)
			goto Close;
	} else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
		error = freeze_ops->begin();
		if (error)
			goto Close;
	}
	suspend_console();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	resume_console();
 Close:
	if (need_suspend_ops(state) && suspend_ops->end)
		suspend_ops->end();
	else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
		freeze_ops->end();

	return error;

 Recover_platform:
	if (need_suspend_ops(state) && suspend_ops->recover)
		suspend_ops->recover();
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warning("PM: Unsupported test mode for freeze state, "
				   "please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");
	trace_suspend_resume(TPS("sync_filesystems"), 0, false);

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	return error;
}
EXPORT_SYMBOL(pm_suspend);
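
/*
 * Example caller (elsewhere in the kernel, shown here only for context):
 * writing "mem" to /sys/power/state reaches pm_suspend(PM_SUSPEND_MEM)
 * via the sysfs state attribute handler.
 */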