commit 53d8f67082

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PCI PM: Make pci_prepare_to_sleep() disable wake-up if needed
  radeonfb: Use __pci_complete_power_transition()
  PCI PM: Introduce __pci_[start|complete]_power_transition() (rev. 2)
  PCI PM: Restore config spaces of all devices during early resume
  PCI PM: Make pci_set_power_state() handle devices with no PM support
  PCI PM: Put devices into low power states during late suspend (rev. 2)
  PCI PM: Move pci_restore_standard_config to pci-driver.c
  PCI PM: Use pci_set_power_state during early resume
  PCI PM: Consistently use variable name "error" for pm call return values
  kexec: Change kexec jump code ordering
  PM: Change hibernation code ordering
  PM: Change suspend code ordering
  PM: Rework handling of interrupts during suspend-resume
  PM: Introduce functions for suspending and resuming device interrupts

18 changed files with 474 additions and 269 deletions
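Taken together, these patches pull device_power_down()/device_power_up() out of the interrupts-off section: the PM core now disables device interrupts itself at the chip level (suspend_device_irqs()), and local_irq_disable()/sysdev_suspend() happen later, after nonboot CPUs are taken down. The snippet below is only an illustrative userspace model of that ordering, not kernel code; the stub names mirror the kernel calls that appear in the hunks that follow.

/* Illustrative model only: prints one representative suspend ordering. */
#include <stdio.h>

static void device_suspend(void)       { puts("device_suspend()"); }
static void device_power_down(void)    { puts("device_power_down()  /* also disables device IRQs via suspend_device_irqs() */"); }
static void disable_nonboot_cpus(void) { puts("disable_nonboot_cpus()"); }
static void local_irq_disable(void)    { puts("local_irq_disable()"); }
static void sysdev_suspend(void)       { puts("sysdev_suspend()  /* bails out if a wake-up IRQ is pending */"); }

int main(void)
{
	device_suspend();
	device_power_down();
	disable_nonboot_cpus();
	local_irq_disable();
	sysdev_suspend();
	return 0;
}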
arch/x86/kernel/apm_32.c:

@@ -1190,8 +1190,10 @@ static int suspend(int vetoable)
 	struct apm_user	*as;

 	device_suspend(PMSG_SUSPEND);
-	local_irq_disable();
+
 	device_power_down(PMSG_SUSPEND);
+
+	local_irq_disable();
 	sysdev_suspend(PMSG_SUSPEND);

 	local_irq_enable();
@@ -1209,9 +1211,12 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
+
 	sysdev_resume();
-	device_power_up(PMSG_RESUME);
 	local_irq_enable();
+
+	device_power_up(PMSG_RESUME);
+
 	device_resume(PMSG_RESUME);
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
@@ -1228,8 +1233,9 @@ static void standby(void)
 {
 	int err;

-	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+
+	local_irq_disable();
 	sysdev_suspend(PMSG_SUSPEND);
 	local_irq_enable();

@@ -1239,8 +1245,9 @@ static void standby(void)

 	local_irq_disable();
 	sysdev_resume();
-	device_power_up(PMSG_RESUME);
 	local_irq_enable();
+
+	device_power_up(PMSG_RESUME);
 }

 static apm_event_t get_event(void)
drivers/base/power/main.c:

@@ -23,6 +23,7 @@
 #include <linux/pm.h>
 #include <linux/resume-trace.h>
 #include <linux/rwsem.h>
+#include <linux/interrupt.h>

 #include "../base.h"
 #include "power.h"
@@ -349,7 +350,8 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
  * Execute the appropriate "noirq resume" callback for all devices marked
  * as DPM_OFF_IRQ.
  *
- * Must be called with interrupts disabled and only one CPU running.
+ * Must be called under dpm_list_mtx. Device drivers should not receive
+ * interrupts while it's being executed.
  */
 static void dpm_power_up(pm_message_t state)
 {
@@ -370,14 +372,13 @@ static void dpm_power_up(pm_message_t state)
 * device_power_up - Turn on all devices that need special attention.
 * @state: PM transition of the system being carried out.
 *
- * Power on system devices, then devices that required we shut them down
- * with interrupts disabled.
- *
- * Must be called with interrupts disabled.
+ * Call the "early" resume handlers and enable device drivers to receive
+ * interrupts.
 */
 void device_power_up(pm_message_t state)
 {
 	dpm_power_up(state);
+	resume_device_irqs();
 }
 EXPORT_SYMBOL_GPL(device_power_up);

@@ -602,16 +603,17 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
 * device_power_down - Shut down special devices.
 * @state: PM transition of the system being carried out.
 *
- * Power down devices that require interrupts to be disabled.
- * Then power down system devices.
+ * Prevent device drivers from receiving interrupts and call the "late"
+ * suspend handlers.
 *
- * Must be called with interrupts disabled and only one CPU running.
+ * Must be called under dpm_list_mtx.
 */
 int device_power_down(pm_message_t state)
 {
 	struct device *dev;
 	int error = 0;

+	suspend_device_irqs();
 	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
 		error = suspend_device_noirq(dev, state);
 		if (error) {
@@ -621,7 +623,7 @@ int device_power_down(pm_message_t state)
 			dev->power.status = DPM_OFF_IRQ;
 	}
 	if (error)
-		dpm_power_up(resume_event(state));
+		device_power_up(resume_event(state));
 	return error;
 }
 EXPORT_SYMBOL_GPL(device_power_down);
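Because device_power_down() now calls suspend_device_irqs() before running the "noirq" callbacks, and device_power_up() re-enables the lines only after them, a driver's noirq hooks are guaranteed not to race with its own interrupt handler even though local interrupts stay enabled. A hypothetical driver sketch (the foo_* names are invented; only struct dev_pm_ops and its noirq fields come from the code above):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
	/*
	 * Called from device_power_down(): suspend_device_irqs() has already
	 * masked this device's interrupt line, but local interrupts on the
	 * calling CPU are still enabled.
	 */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* Called from device_power_up(), before resume_device_irqs(). */
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
};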
drivers/base/sys.c:

@@ -22,6 +22,7 @@
 #include <linux/pm.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
+#include <linux/interrupt.h>

 #include "base.h"

@@ -369,6 +370,13 @@ int sysdev_suspend(pm_message_t state)
 	struct sysdev_driver *drv, *err_drv;
 	int ret;

+	pr_debug("Checking wake-up interrupts\n");
+
+	/* Return error code if there are any wake-up interrupts pending */
+	ret = check_wakeup_irqs();
+	if (ret)
+		return ret;
+
 	pr_debug("Suspending System Devices\n");

 	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
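sysdev_suspend() now refuses to proceed when a wake-up interrupt is already pending, so the transition unwinds instead of sleeping through the event. The toy program below models that check in ordinary userspace C; the flag values and the -16 stand-in for -EBUSY are assumptions made for the example, not the kernel's definitions.

/* Userspace model (illustration only) of the new wake-up IRQ gate. */
#include <stdio.h>

#define IRQ_WAKEUP  0x1	/* stand-in flag values */
#define IRQ_PENDING 0x2

struct irq_desc { unsigned int status; };

static int check_wakeup_irqs(const struct irq_desc *descs, int n)
{
	for (int i = 0; i < n; i++)
		if ((descs[i].status & IRQ_WAKEUP) && (descs[i].status & IRQ_PENDING))
			return -16;	/* stands in for -EBUSY: abort the suspend */
	return 0;
}

int main(void)
{
	struct irq_desc irqs[] = { { 0 }, { IRQ_WAKEUP | IRQ_PENDING } };

	printf("check_wakeup_irqs() = %d\n", check_wakeup_irqs(irqs, 2));
	return 0;
}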
drivers/pci/pci-driver.c:

@@ -352,53 +352,60 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 {
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
-	int i = 0;
+
+	pci_dev->state_saved = false;

 	if (drv && drv->suspend) {
 		pci_power_t prev = pci_dev->current_state;
+		int error;

-		pci_dev->state_saved = false;
+		error = drv->suspend(pci_dev, state);
+		suspend_report_result(drv->suspend, error);
+		if (error)
+			return error;

-		i = drv->suspend(pci_dev, state);
-		suspend_report_result(drv->suspend, i);
-		if (i)
-			return i;
-
-		if (pci_dev->state_saved)
-			goto Fixup;
-
-		if (pci_dev->current_state != PCI_D0
+		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
 		    && pci_dev->current_state != PCI_UNKNOWN) {
 			WARN_ONCE(pci_dev->current_state != prev,
 				"PCI PM: Device state not saved by %pF\n",
 				drv->suspend);
-			goto Fixup;
 		}
 	}

-	pci_save_state(pci_dev);
-	/*
-	 * This is for compatibility with existing code with legacy PM support.
-	 */
-	pci_pm_set_unknown_state(pci_dev);
-
- Fixup:
 	pci_fixup_device(pci_fixup_suspend, pci_dev);

-	return i;
+	return 0;
 }

 static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
 {
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
-	int i = 0;

 	if (drv && drv->suspend_late) {
-		i = drv->suspend_late(pci_dev, state);
-		suspend_report_result(drv->suspend_late, i);
+		pci_power_t prev = pci_dev->current_state;
+		int error;
+
+		error = drv->suspend_late(pci_dev, state);
+		suspend_report_result(drv->suspend_late, error);
+		if (error)
+			return error;
+
+		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: Device state not saved by %pF\n",
+				drv->suspend_late);
+			return 0;
+		}
 	}
-	return i;
+
+	if (!pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
+	pci_pm_set_unknown_state(pci_dev);
+
+	return 0;
 }

 static int pci_legacy_resume_early(struct device *dev)

@@ -423,6 +430,23 @@ static int pci_legacy_resume(struct device *dev)

 /* Auxiliary functions used by the new power management framework */

+/**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @pci_dev: PCI device to handle
+ */
+static int pci_restore_standard_config(struct pci_dev *pci_dev)
+{
+	pci_update_current_state(pci_dev, PCI_UNKNOWN);
+
+	if (pci_dev->current_state != PCI_D0) {
+		int error = pci_set_power_state(pci_dev, PCI_D0);
+		if (error)
+			return error;
+	}
+
+	return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0;
+}
+
 static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
 {
 	pci_restore_standard_config(pci_dev);

@@ -443,7 +467,6 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 	/* Disable non-bridge devices without PM support */
 	if (!pci_is_bridge(pci_dev))
 		pci_disable_enabled_device(pci_dev);
-	pci_save_state(pci_dev);
 }

 static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)

@@ -493,13 +516,13 @@ static int pci_pm_suspend(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_SUSPEND);

+	pci_dev->state_saved = false;
+
 	if (!pm) {
 		pci_pm_default_suspend(pci_dev);
 		goto Fixup;
 	}

-	pci_dev->state_saved = false;
-
 	if (pm->suspend) {
 		pci_power_t prev = pci_dev->current_state;
 		int error;

@@ -509,24 +532,14 @@ static int pci_pm_suspend(struct device *dev)
 		if (error)
 			return error;

-		if (pci_dev->state_saved)
-			goto Fixup;
-
-		if (pci_dev->current_state != PCI_D0
+		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
 		    && pci_dev->current_state != PCI_UNKNOWN) {
 			WARN_ONCE(pci_dev->current_state != prev,
 				"PCI PM: State of device not saved by %pF\n",
 				pm->suspend);
-			goto Fixup;
 		}
 	}

-	if (!pci_dev->state_saved) {
-		pci_save_state(pci_dev);
-		if (!pci_is_bridge(pci_dev))
-			pci_prepare_to_sleep(pci_dev);
-	}
-
 Fixup:
 	pci_fixup_device(pci_fixup_suspend, pci_dev);

@@ -536,21 +549,43 @@ static int pci_pm_suspend(struct device *dev)
 static int pci_pm_suspend_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

-	if (drv && drv->pm && drv->pm->suspend_noirq) {
-		error = drv->pm->suspend_noirq(dev);
-		suspend_report_result(drv->pm->suspend_noirq, error);
+	if (!pm) {
+		pci_save_state(pci_dev);
+		return 0;
 	}

-	if (!error)
-		pci_pm_set_unknown_state(pci_dev);
+	if (pm->suspend_noirq) {
+		pci_power_t prev = pci_dev->current_state;
+		int error;

-	return error;
+		error = pm->suspend_noirq(dev);
+		suspend_report_result(pm->suspend_noirq, error);
+		if (error)
+			return error;
+
+		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: State of device not saved by %pF\n",
+				pm->suspend_noirq);
+			return 0;
+		}
+	}
+
+	if (!pci_dev->state_saved) {
+		pci_save_state(pci_dev);
+		if (!pci_is_bridge(pci_dev))
+			pci_prepare_to_sleep(pci_dev);
+	}
+
+	pci_pm_set_unknown_state(pci_dev);
+
+	return 0;
 }

 static int pci_pm_resume_noirq(struct device *dev)

@@ -617,13 +652,13 @@ static int pci_pm_freeze(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_FREEZE);

+	pci_dev->state_saved = false;
+
 	if (!pm) {
 		pci_pm_default_suspend(pci_dev);
 		return 0;
 	}

-	pci_dev->state_saved = false;
-
 	if (pm->freeze) {
 		int error;

@@ -633,9 +668,6 @@ static int pci_pm_freeze(struct device *dev)
 		return error;
 	}

-	if (!pci_dev->state_saved)
-		pci_save_state(pci_dev);
-
 	return 0;
 }

@@ -643,20 +675,25 @@ static int pci_pm_freeze_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct device_driver *drv = dev->driver;
-	int error = 0;

 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

 	if (drv && drv->pm && drv->pm->freeze_noirq) {
+		int error;
+
 		error = drv->pm->freeze_noirq(dev);
 		suspend_report_result(drv->pm->freeze_noirq, error);
+		if (error)
+			return error;
 	}

-	if (!error)
-		pci_pm_set_unknown_state(pci_dev);
+	if (!pci_dev->state_saved)
+		pci_save_state(pci_dev);

-	return error;
+	pci_pm_set_unknown_state(pci_dev);
+
+	return 0;
 }

 static int pci_pm_thaw_noirq(struct device *dev)

@@ -699,46 +736,56 @@ static int pci_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int error = 0;

 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

+	pci_dev->state_saved = false;
+
 	if (!pm) {
 		pci_pm_default_suspend(pci_dev);
 		goto Fixup;
 	}

-	pci_dev->state_saved = false;
-
 	if (pm->poweroff) {
+		int error;
+
 		error = pm->poweroff(dev);
 		suspend_report_result(pm->poweroff, error);
+		if (error)
+			return error;
+	}
+
+ Fixup:
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+	return 0;
+}
+
+static int pci_pm_poweroff_noirq(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct device_driver *drv = dev->driver;
+
+	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
+
+	if (!drv || !drv->pm)
+		return 0;
+
+	if (drv->pm->poweroff_noirq) {
+		int error;
+
+		error = drv->pm->poweroff_noirq(dev);
+		suspend_report_result(drv->pm->poweroff_noirq, error);
+		if (error)
+			return error;
 	}

 	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
 		pci_prepare_to_sleep(pci_dev);

- Fixup:
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
-
-	return error;
-}
-
-static int pci_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int error = 0;
-
-	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
-		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
-
-	if (drv && drv->pm && drv->pm->poweroff_noirq) {
-		error = drv->pm->poweroff_noirq(dev);
-		suspend_report_result(drv->pm->poweroff_noirq, error);
-	}
-
-	return error;
+	return 0;
 }

 static int pci_pm_restore_noirq(struct device *dev)
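Under the reworked PCI PM code a driver that uses struct dev_pm_ops can leave config-space saving and the choice of low-power state to the PCI core: if state_saved is still false after the driver's callbacks, the core calls pci_save_state() and, for non-bridges, pci_prepare_to_sleep() in the noirq phase. A hypothetical sketch of such a driver (the foo_* names are invented):

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/*
	 * Quiesce the hardware only (stop DMA, mask device interrupts);
	 * the PCI core saves config space and picks a sleep state in the
	 * late/noirq phase if the driver did not do it here.
	 */
	(void)pdev;
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Config space has already been restored during early resume. */
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct pci_driver foo_driver = {
	.name   = "foo",
	.driver = { .pm = &foo_pm_ops },
	/* .id_table, .probe and .remove omitted for brevity */
};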
drivers/pci/pci.c:

@@ -426,7 +426,6 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 * given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- * @wait: If 'true', wait for the device to change its power state
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.

@@ -435,12 +434,15 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
-static int
-pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
+static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
 	u16 pmcsr;
 	bool need_restore = false;

+	/* Check if we're already there */
+	if (dev->current_state == state)
+		return 0;
+
 	if (!dev->pm_cap)
 		return -EIO;

@@ -451,10 +453,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	 * Can enter D0 from any state, but if we can only go deeper
 	 * to sleep if we're already in a low power state
 	 */
-	if (dev->current_state == state) {
-		/* we're already there */
-		return 0;
-	} else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
+	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 	    && dev->current_state > state) {
 		dev_err(&dev->dev, "invalid power transition "
 			"(from state %d to %d)\n", dev->current_state, state);

@@ -481,10 +480,8 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 		break;
 	case PCI_UNKNOWN: /* Boot-up */
 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
-		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
+		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 			need_restore = true;
-			wait = true;
-		}
 		/* Fall-through: force to D0 */
 	default:
 		pmcsr = 0;

@@ -494,9 +491,6 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* enter specified state */
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

-	if (!wait)
-		return 0;
-
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)

@@ -521,7 +515,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	if (need_restore)
 		pci_restore_bars(dev);

-	if (wait && dev->bus->self)
+	if (dev->bus->self)
 		pcie_aspm_pm_state_change(dev->bus->self);

 	return 0;

@@ -545,6 +539,53 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 	}
 }

+/**
+ * pci_platform_power_transition - Use platform to change device power state
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+	int error;
+
+	if (platform_pci_power_manageable(dev)) {
+		error = platform_pci_set_power_state(dev, state);
+		if (!error)
+			pci_update_current_state(dev, state);
+	} else {
+		error = -ENODEV;
+		/* Fall back to PCI_D0 if native PM is not supported */
+		pci_update_current_state(dev, PCI_D0);
+	}
+
+	return error;
+}
+
+/**
+ * __pci_start_power_transition - Start power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+	if (state == PCI_D0)
+		pci_platform_power_transition(dev, PCI_D0);
+}
+
+/**
+ * __pci_complete_power_transition - Complete power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ *
+ * This function should not be called directly by device drivers.
+ */
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+	return state > PCI_D0 ?
+		pci_platform_power_transition(dev, state) : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
+
 /**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.

@@ -577,30 +618,21 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
	 */
 		return 0;

-	if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
-		/*
-		 * Allow the platform to change the state, for example via ACPI
-		 * _PR0, _PS0 and some such, but do not trust it.
-		 */
-		int ret = platform_pci_set_power_state(dev, PCI_D0);
-		if (!ret)
-			pci_update_current_state(dev, PCI_D0);
-	}
+	/* Check if we're already there */
+	if (dev->current_state == state)
+		return 0;
+
+	__pci_start_power_transition(dev, state);
+
 	/* This device is quirked not to be put into D3, so
 	   don't put it in D3 */
 	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 		return 0;

-	error = pci_raw_set_power_state(dev, state, true);
+	error = pci_raw_set_power_state(dev, state);

-	if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
-		/* Allow the platform to finalize the transition */
-		int ret = platform_pci_set_power_state(dev, state);
-		if (!ret) {
-			pci_update_current_state(dev, state);
-			error = 0;
-		}
-	}
+	if (!__pci_complete_power_transition(dev, state))
+		error = 0;

 	return error;
 }

@@ -1231,7 +1263,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;

-	pci_enable_wake(dev, target_state, true);
+	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

 	error = pci_set_power_state(dev, target_state);

@@ -1380,50 +1412,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 			"unable to preallocate PCI-X save buffer\n");
 }

-/**
- * pci_restore_standard_config - restore standard config registers of PCI device
- * @dev: PCI device to handle
- *
- * This function assumes that the device's configuration space is accessible.
- * If the device needs to be powered up, the function will wait for it to
- * change the state.
- */
-int pci_restore_standard_config(struct pci_dev *dev)
-{
-	pci_power_t prev_state;
-	int error;
-
-	pci_update_current_state(dev, PCI_D0);
-
-	prev_state = dev->current_state;
-	if (prev_state == PCI_D0)
-		goto Restore;
-
-	error = pci_raw_set_power_state(dev, PCI_D0, false);
-	if (error)
-		return error;
-
-	/*
-	 * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
-	 * we've made this assumption forever and it appears to be universally
-	 * satisfied.
-	 */
-	switch(prev_state) {
-	case PCI_D3cold:
-	case PCI_D3hot:
-		mdelay(pci_pm_d3_delay);
-		break;
-	case PCI_D2:
-		udelay(PCI_PM_D2_DELAY);
-		break;
-	}
-
-	pci_update_current_state(dev, PCI_D0);
-
- Restore:
-	return dev->state_saved ? pci_restore_state(dev) : 0;
-}
-
 /**
 * pci_enable_ari - enable ARI forwarding if hardware support it
 * @dev: the PCI device
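The split into __pci_start_power_transition()/__pci_complete_power_transition() lets pci_set_power_state() involve the platform (for example ACPI) on both sides of the raw PCI PM register write, and it gives special cases such as radeonfb a way to request only the platform half of a transition; the new kerneldoc warns ordinary drivers off calling it directly. A hedged usage sketch (foo_enter_low_power() and its flag are invented; pdev is assumed to be a valid, bound PCI device):

#include <linux/pci.h>

static int foo_enter_low_power(struct pci_dev *pdev, bool chip_already_programmed)
{
	if (!chip_already_programmed)
		/* Usual path: raw PCI PM write plus platform follow-up. */
		return pci_set_power_state(pdev, PCI_D3hot);

	/*
	 * radeonfb-style path: the driver has programmed the device for D2
	 * by hand, so only the platform half of the transition is requested.
	 */
	return __pci_complete_power_transition(pdev, PCI_D2);
}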
drivers/pci/pci.h:

@@ -49,7 +49,6 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
-extern int pci_restore_standard_config(struct pci_dev *dev);

 static inline bool pci_is_bridge(struct pci_dev *pci_dev)
 {
drivers/video/aty/radeon_pm.c:

@@ -2582,7 +2582,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
		 * calling pci_set_power_state()
		 */
 		radeonfb_whack_power_state(rinfo, PCI_D2);
-		pci_set_power_state(rinfo->pdev, PCI_D2);
+		__pci_complete_power_transition(rinfo->pdev, PCI_D2);
 	} else {
 		printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
 		       pci_name(rinfo->pdev));
drivers/xen/manage.c:

@@ -39,12 +39,6 @@ static int xen_suspend(void *data)

 	BUG_ON(!irqs_disabled());

-	err = device_power_down(PMSG_SUSPEND);
-	if (err) {
-		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
-		       err);
-		return err;
-	}
 	err = sysdev_suspend(PMSG_SUSPEND);
 	if (err) {
 		printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",

@@ -69,7 +63,6 @@ static int xen_suspend(void *data)
 	xen_mm_unpin_all();

 	sysdev_resume();
-	device_power_up(PMSG_RESUME);

 	if (!*cancelled) {
 		xen_irq_resume();

@@ -108,6 +101,12 @@ static void do_suspend(void)
 	/* XXX use normal device tree? */
 	xenbus_suspend();

+	err = device_power_down(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "device_power_down failed: %d\n", err);
+		goto resume_devices;
+	}
+
 	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);

@@ -120,6 +119,9 @@ static void do_suspend(void)
 	} else
 		xenbus_suspend_cancel();

+	device_power_up(PMSG_RESUME);
+
+resume_devices:
 	device_resume(PMSG_RESUME);

 	/* Make sure timer events get retriggered on all CPUs */
include/linux/interrupt.h:

@@ -117,6 +117,15 @@ extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);

+/* The following three functions are for the core kernel use only. */
+extern void suspend_device_irqs(void);
+extern void resume_device_irqs(void);
+#ifdef CONFIG_PM_SLEEP
+extern int check_wakeup_irqs(void);
+#else
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
+
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

 extern cpumask_var_t irq_default_affinity;
include/linux/irq.h:

@@ -67,6 +67,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
 #define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
 #define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
+#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */

 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
include/linux/pci.h:

@@ -689,6 +689,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
 /* Power management related routines */
 int pci_save_state(struct pci_dev *dev);
 int pci_restore_state(struct pci_dev *dev);
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
kernel/irq/Makefile:

@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
kernel/irq/internals.h:

@@ -12,6 +12,8 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
kernel/irq/manage.c:

@@ -162,6 +162,20 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 }
 #endif

+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+	if (suspend) {
+		if (!desc->action || (desc->action->flags & IRQF_TIMER))
+			return;
+		desc->status |= IRQ_SUSPENDED;
+	}
+
+	if (!desc->depth++) {
+		desc->status |= IRQ_DISABLED;
+		desc->chip->disable(irq);
+	}
+}
+
 /**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable

@@ -182,10 +196,7 @@ void disable_irq_nosync(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->chip->disable(irq);
-	}
+	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);

@@ -215,15 +226,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);

-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+	if (resume)
+		desc->status &= ~IRQ_SUSPENDED;
+
 	switch (desc->depth) {
 	case 0:
+ err_out:
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;

+		if (desc->status & IRQ_SUSPENDED)
+			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
 		check_irq_resend(desc, irq);

@@ -253,7 +270,7 @@ void enable_irq(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	__enable_irq(desc, irq);
+	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);

@@ -511,7 +528,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	 */
 	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
 		desc->status &= ~IRQ_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq);
+		__enable_irq(desc, irq, false);
 	}

 	spin_unlock_irqrestore(&desc->lock, flags);
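The __disable_irq()/__enable_irq() pair keeps the existing per-descriptor depth counter but adds the IRQ_SUSPENDED flag, so an enable_irq() issued by a driver cannot re-enable a line the PM core has suspended. The standalone program below is a simplified model of that behaviour, not kernel code; the flag values, the missing WARN and the collapsed switch are deliberate simplifications.

#include <stdio.h>
#include <stdbool.h>

#define IRQ_DISABLED  0x1	/* stand-in flag values */
#define IRQ_SUSPENDED 0x2

struct irq_desc { unsigned int depth, status; };

static void model_disable_irq(struct irq_desc *d, bool suspend)
{
	if (suspend)
		d->status |= IRQ_SUSPENDED;
	if (!d->depth++)
		d->status |= IRQ_DISABLED;	/* chip->disable() in the kernel */
}

static void model_enable_irq(struct irq_desc *d, bool resume)
{
	if (resume)
		d->status &= ~IRQ_SUSPENDED;
	if (d->depth == 1) {
		if (d->status & IRQ_SUSPENDED)
			return;			/* refuse: line still suspended */
		d->status &= ~IRQ_DISABLED;	/* chip is unmasked here */
	}
	if (d->depth)
		d->depth--;
}

int main(void)
{
	struct irq_desc d = { 0, 0 };

	model_disable_irq(&d, true);	/* suspend_device_irqs(): mask + flag */
	model_enable_irq(&d, false);	/* stray enable_irq(): line stays masked */
	model_enable_irq(&d, true);	/* resume_device_irqs(): really unmasks */
	printf("status=%#x depth=%u\n", d.status, d.depth);
	return 0;
}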
kernel/irq/pm.c (new file, 79 lines):

@@ -0,0 +1,79 @@
/*
 * linux/kernel/irq/pm.c
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file contains power management functions related to interrupts.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * suspend_device_irqs - disable all currently enabled interrupt lines
 *
 * During system-wide suspend or hibernation device interrupts need to be
 * disabled at the chip level and this function is provided for this purpose.
 * It disables all interrupt lines that are enabled at the moment and sets the
 * IRQ_SUSPENDED flag for them.
 */
void suspend_device_irqs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc) {
		unsigned long flags;

		spin_lock_irqsave(&desc->lock, flags);
		__disable_irq(desc, irq, true);
		spin_unlock_irqrestore(&desc->lock, flags);
	}

	for_each_irq_desc(irq, desc)
		if (desc->status & IRQ_SUSPENDED)
			synchronize_irq(irq);
}
EXPORT_SYMBOL_GPL(suspend_device_irqs);

/**
 * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
 *
 * Enable all interrupt lines previously disabled by suspend_device_irqs() that
 * have the IRQ_SUSPENDED flag set.
 */
void resume_device_irqs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc) {
		unsigned long flags;

		if (!(desc->status & IRQ_SUSPENDED))
			continue;

		spin_lock_irqsave(&desc->lock, flags);
		__enable_irq(desc, irq, true);
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);

/**
 * check_wakeup_irqs - check if any wake-up interrupts are pending
 */
int check_wakeup_irqs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc)
		if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
			return -EBUSY;

	return 0;
}
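check_wakeup_irqs() only reports lines that were armed as wake-up sources, so a driver that wants its interrupt to abort or wake the transition is expected to arm it, typically from its suspend path, and disarm it on resume. A hypothetical sketch (the foo_* names and struct are invented; enable_irq_wake()/disable_irq_wake() are the existing interrupt.h helpers):

#include <linux/interrupt.h>

/* Hypothetical per-device structure; only the IRQ number matters here. */
struct foo_device { int irq; };

static int foo_suspend(struct foo_device *foo)
{
	/*
	 * Arm the line as a wake-up source so that a pending interrupt makes
	 * check_wakeup_irqs() return -EBUSY and the suspend is aborted
	 * instead of the event being lost.
	 */
	enable_irq_wake(foo->irq);
	return 0;
}

static int foo_resume(struct foo_device *foo)
{
	disable_irq_wake(foo->irq);
	return 0;
}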
kernel/kexec.c:

@@ -1450,11 +1450,7 @@ int kernel_kexec(void)
 		error = device_suspend(PMSG_FREEZE);
 		if (error)
 			goto Resume_console;
-		error = disable_nonboot_cpus();
-		if (error)
-			goto Resume_devices;
 		device_pm_lock();
-		local_irq_disable();
 		/* At this point, device_suspend() has been called,
		 * but *not* device_power_down(). We *must*
		 * device_power_down() now. Otherwise, drivers for

@@ -1464,12 +1460,15 @@ int kernel_kexec(void)
		 */
 		error = device_power_down(PMSG_FREEZE);
 		if (error)
-			goto Enable_irqs;
+			goto Resume_devices;
+		error = disable_nonboot_cpus();
+		if (error)
+			goto Enable_cpus;
+		local_irq_disable();
 		/* Suspend system devices */
 		error = sysdev_suspend(PMSG_FREEZE);
 		if (error)
-			goto Power_up_devices;
+			goto Enable_irqs;
 	} else
 #endif
 	{

@@ -1483,13 +1482,13 @@ int kernel_kexec(void)
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
 		sysdev_resume();
- Power_up_devices:
-		device_power_up(PMSG_RESTORE);
 Enable_irqs:
 		local_irq_enable();
-		device_pm_unlock();
+ Enable_cpus:
 		enable_nonboot_cpus();
+		device_power_up(PMSG_RESTORE);
 Resume_devices:
+		device_pm_unlock();
 		device_resume(PMSG_RESTORE);
 Resume_console:
 		resume_console();
kernel/power/disk.c:

@@ -214,7 +214,7 @@ static int create_image(int platform_mode)
 		return error;

 	device_pm_lock();
-	local_irq_disable();
+
 	/* At this point, device_suspend() has been called, but *not*
	 * device_power_down(). We *must* call device_power_down() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)

@@ -225,13 +225,25 @@ static int create_image(int platform_mode)
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting hibernation\n");
-		goto Enable_irqs;
+		goto Unlock;
 	}

+	error = platform_pre_snapshot(platform_mode);
+	if (error || hibernation_test(TEST_PLATFORM))
+		goto Platform_finish;
+
+	error = disable_nonboot_cpus();
+	if (error || hibernation_test(TEST_CPUS)
+	    || hibernation_testmode(HIBERNATION_TEST))
+		goto Enable_cpus;
+
+	local_irq_disable();
+
 	sysdev_suspend(PMSG_FREEZE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting hibernation\n");
-		goto Power_up_devices;
+		goto Enable_irqs;
 	}

 	if (hibernation_test(TEST_CORE))

@@ -247,17 +259,28 @@ static int create_image(int platform_mode)
 	restore_processor_state();
 	if (!in_suspend)
 		platform_leave(platform_mode);

 Power_up:
 	sysdev_resume();
 	/* NOTE: device_power_up() is just a resume() for devices
	 * that suspended with irqs off ... no overall powerup.
	 */
- Power_up_devices:
-	device_power_up(in_suspend ?
-		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
 Enable_irqs:
 	local_irq_enable();
+
+ Enable_cpus:
+	enable_nonboot_cpus();
+
+ Platform_finish:
+	platform_finish(platform_mode);
+
+	device_power_up(in_suspend ?
+		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
+ Unlock:
 	device_pm_unlock();

 	return error;
 }

@@ -291,25 +314,9 @@ int hibernation_snapshot(int platform_mode)
 	if (hibernation_test(TEST_DEVICES))
 		goto Recover_platform;

-	error = platform_pre_snapshot(platform_mode);
-	if (error || hibernation_test(TEST_PLATFORM))
-		goto Finish;
-
-	error = disable_nonboot_cpus();
-	if (!error) {
-		if (hibernation_test(TEST_CPUS))
-			goto Enable_cpus;
-
-		if (hibernation_testmode(HIBERNATION_TEST))
-			goto Enable_cpus;
-
-		error = create_image(platform_mode);
-		/* Control returns here after successful restore */
-	}
- Enable_cpus:
-	enable_nonboot_cpus();
- Finish:
-	platform_finish(platform_mode);
+	error = create_image(platform_mode);
+	/* Control returns here after successful restore */

 Resume_devices:
 	device_resume(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);

@@ -331,19 +338,33 @@ int hibernation_snapshot(int platform_mode)
 * kernel.
 */
-static int resume_target_kernel(void)
+static int resume_target_kernel(bool platform_mode)
 {
 	int error;

 	device_pm_lock();
-	local_irq_disable();
+
 	error = device_power_down(PMSG_QUIESCE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting resume\n");
-		goto Enable_irqs;
+		goto Unlock;
 	}
-	sysdev_suspend(PMSG_QUIESCE);
+
+	error = platform_pre_restore(platform_mode);
+	if (error)
+		goto Cleanup;
+
+	error = disable_nonboot_cpus();
+	if (error)
+		goto Enable_cpus;
+
+	local_irq_disable();
+
+	error = sysdev_suspend(PMSG_QUIESCE);
+	if (error)
+		goto Enable_irqs;
+
 	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
 	error = restore_highmem();

@@ -366,11 +387,23 @@ static int resume_target_kernel(bool platform_mode)
 	swsusp_free();
 	restore_processor_state();
 	touch_softlockup_watchdog();

 	sysdev_resume();
-	device_power_up(PMSG_RECOVER);
+
 Enable_irqs:
 	local_irq_enable();
+
+ Enable_cpus:
+	enable_nonboot_cpus();
+
+ Cleanup:
+	platform_restore_cleanup(platform_mode);
+
+	device_power_up(PMSG_RECOVER);
+
+ Unlock:
 	device_pm_unlock();

 	return error;
 }

@@ -390,19 +423,10 @@ int hibernation_restore(int platform_mode)
 	pm_prepare_console();
 	suspend_console();
 	error = device_suspend(PMSG_QUIESCE);
-	if (error)
-		goto Finish;
-
-	error = platform_pre_restore(platform_mode);
 	if (!error) {
-		error = disable_nonboot_cpus();
-		if (!error)
-			error = resume_target_kernel();
-		enable_nonboot_cpus();
+		error = resume_target_kernel(platform_mode);
+		device_resume(PMSG_RECOVER);
 	}
-	platform_restore_cleanup(platform_mode);
-	device_resume(PMSG_RECOVER);
- Finish:
 	resume_console();
 	pm_restore_console();
 	return error;

@@ -438,38 +462,46 @@ int hibernation_platform_enter(void)
 		goto Resume_devices;
 	}

+	device_pm_lock();
+
+	error = device_power_down(PMSG_HIBERNATE);
+	if (error)
+		goto Unlock;
+
 	error = hibernation_ops->prepare();
 	if (error)
-		goto Resume_devices;
+		goto Platofrm_finish;

 	error = disable_nonboot_cpus();
 	if (error)
-		goto Finish;
+		goto Platofrm_finish;

-	device_pm_lock();
 	local_irq_disable();
-	error = device_power_down(PMSG_HIBERNATE);
-	if (!error) {
-		sysdev_suspend(PMSG_HIBERNATE);
-		hibernation_ops->enter();
-		/* We should never get here */
-		while (1);
-	}
-	local_irq_enable();
-	device_pm_unlock();
+	sysdev_suspend(PMSG_HIBERNATE);
+	hibernation_ops->enter();
+	/* We should never get here */
+	while (1);

 	/*
	 * We don't need to reenable the nonboot CPUs or resume consoles, since
	 * the system is going to be halted anyway.
	 */
- Finish:
+ Platofrm_finish:
 	hibernation_ops->finish();

+	device_power_up(PMSG_RESTORE);
+
+ Unlock:
+	device_pm_unlock();
+
 Resume_devices:
 	entering_platform_hibernation = false;
 	device_resume(PMSG_RESTORE);
 	resume_console();

 Close:
 	hibernation_ops->end();

 	return error;
 }
kernel/power/main.c:

@@ -287,17 +287,32 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
 */
 static int suspend_enter(suspend_state_t state)
 {
-	int error = 0;
+	int error;

 	device_pm_lock();
-	arch_suspend_disable_irqs();
-	BUG_ON(!irqs_disabled());

-	if ((error = device_power_down(PMSG_SUSPEND))) {
+	error = device_power_down(PMSG_SUSPEND);
+	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
 		goto Done;
 	}

+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Power_up_devices;
+	}
+
+	if (suspend_test(TEST_PLATFORM))
+		goto Platfrom_finish;
+
+	error = disable_nonboot_cpus();
+	if (error || suspend_test(TEST_CPUS))
+		goto Enable_cpus;
+
+	arch_suspend_disable_irqs();
+	BUG_ON(!irqs_disabled());
+
 	error = sysdev_suspend(PMSG_SUSPEND);
 	if (!error) {
 		if (!suspend_test(TEST_CORE))

@@ -305,11 +320,22 @@ static int suspend_enter(suspend_state_t state)
 			sysdev_resume();
 	}

-	device_power_up(PMSG_RESUME);
- Done:
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());

+ Enable_cpus:
+	enable_nonboot_cpus();
+
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
+ Power_up_devices:
+	device_power_up(PMSG_RESUME);
+
+ Done:
 	device_pm_unlock();

 	return error;
 }

@@ -341,23 +367,8 @@ int suspend_devices_and_enter(suspend_state_t state)
 	if (suspend_test(TEST_DEVICES))
 		goto Recover_platform;

-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
-		if (error)
-			goto Resume_devices;
-	}
-
-	if (suspend_test(TEST_PLATFORM))
-		goto Finish;
-
-	error = disable_nonboot_cpus();
-	if (!error && !suspend_test(TEST_CPUS))
-		suspend_enter(state);
-
-	enable_nonboot_cpus();
- Finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+	suspend_enter(state);
+
 Resume_devices:
 	suspend_test_start();
 	device_resume(PMSG_RESUME);
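suspend_enter() now owns the whole enter path and unwinds it with labels in strict reverse order of setup, which is what lets the error paths above stay short. The standalone program below is only an illustrative model of that shape (three representative failure points, invented step() helper), not the kernel function.

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("%s%s\n", name, fail ? " -> failed" : "");
	return fail ? -1 : 0;
}

/* Mirrors the shape of the reworked suspend_enter(): every setup step has
 * a matching teardown label, and an error jumps to exactly the right depth. */
static int suspend_enter_model(int fail_at)
{
	int error;

	error = step("device_power_down", fail_at == 1);
	if (error)
		goto Done;
	error = step("suspend_ops->prepare", fail_at == 2);
	if (error)
		goto Power_up_devices;
	error = step("disable_nonboot_cpus", fail_at == 3);
	if (error)
		goto Enable_cpus;
	step("sysdev_suspend + enter", 0);

 Enable_cpus:
	step("enable_nonboot_cpus", 0);
	step("suspend_ops->finish", 0);
 Power_up_devices:
	step("device_power_up", 0);
 Done:
	return error;
}

int main(void)
{
	return suspend_enter_model(2) ? 1 : 0;
}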