Merge "mhi: core: add support for MHI to operate on slave mode"
commit 5bd64ca62c
6 changed files with 375 additions and 53 deletions
@@ -580,11 +580,15 @@ struct mhi_device_ctxt {
        void *mhi_ipc_log;

        /* Shadow functions since not all devices support runtime pm */
        int (*bus_master_rt_get)(struct pci_dev *pci_dev);
        void (*bus_master_rt_put)(struct pci_dev *pci_dev);
        void (*runtime_get)(struct mhi_device_ctxt *mhi_dev_ctxt);
        void (*runtime_put)(struct mhi_device_ctxt *mhi_dev_ctxt);
        void (*assert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt,
                            bool force_set);
        void (*deassert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt);

        struct completion cmd_complete;
};

struct mhi_device_driver {
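For orientation, a minimal sketch (not part of the patch; the function name is hypothetical) of how a transfer path goes through these shadow pointers, so the same call site works whether MHI owns PCIe runtime PM (master mode) or an external bus master does (slave mode):

/* Illustration only: vote the link active around a transfer. */
static void example_start_xfer(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);        /* master- or slave-mode variant */
        mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);

        /* ... queue ring elements and ring the channel doorbell ... */

        mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
        mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
}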
@@ -687,8 +691,10 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
                        enum MHI_CB_REASON reason);
-void mhi_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
-void mhi_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_master_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_master_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_slave_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_slave_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
                            bool force_set);
@@ -196,7 +196,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
        tasklet_init(&mhi_dev_ctxt->ev_task,
                     mhi_ctrl_ev_task,
                     (unsigned long)mhi_dev_ctxt);

        init_completion(&mhi_dev_ctxt->cmd_complete);
        mhi_dev_ctxt->flags.link_up = 1;

        /* Setup bus scale */
@@ -277,8 +277,8 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
        /* setup shadow pm functions */
        mhi_dev_ctxt->assert_wake = mhi_assert_device_wake;
        mhi_dev_ctxt->deassert_wake = mhi_deassert_device_wake;
-       mhi_dev_ctxt->runtime_get = mhi_runtime_get;
-       mhi_dev_ctxt->runtime_put = mhi_runtime_put;
+       mhi_dev_ctxt->runtime_get = mhi_master_mode_runtime_get;
+       mhi_dev_ctxt->runtime_put = mhi_master_mode_runtime_put;

        mutex_lock(&mhi_dev_ctxt->pm_lock);
        write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -27,6 +27,7 @@
#include "mhi.h"
#include "mhi_hwio.h"
#include "mhi_macros.h"
#include "mhi_bhi.h"
#include "mhi_trace.h"

static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1560,17 +1561,27 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
        return MHI_EPID;
}

-void mhi_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_master_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        pm_runtime_get(&mhi_dev_ctxt->pcie_device->dev);
}

-void mhi_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_master_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
        pm_runtime_put_noidle(&mhi_dev_ctxt->pcie_device->dev);
}

+void mhi_slave_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+       mhi_dev_ctxt->bus_master_rt_get(mhi_dev_ctxt->pcie_device);
+}
+
+void mhi_slave_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+       mhi_dev_ctxt->bus_master_rt_put(mhi_dev_ctxt->pcie_device);
+}
+
/*
 * mhi_assert_device_wake - Set WAKE_DB register
 * force_set - if true, will set bit regardless of counts
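In slave mode the two new helpers simply forward MHI's vote to the bus master through the callbacks supplied at registration. A hedged sketch of what that bus-master side could look like (the names are assumptions; only the signatures are dictated by bus_master_rt_get/bus_master_rt_put):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Hypothetical bus-master callbacks that end up behind bus_master_rt_get/put. */
static int example_bus_master_rt_get(struct pci_dev *pci_dev)
{
        return pm_runtime_get(&pci_dev->dev);
}

static void example_bus_master_rt_noidle(struct pci_dev *pci_dev)
{
        pm_runtime_mark_last_busy(&pci_dev->dev);
        pm_runtime_put_noidle(&pci_dev->dev);
}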
@@ -1675,6 +1686,120 @@ int mhi_deregister_channel(struct mhi_client_handle *client_handle)
}
EXPORT_SYMBOL(mhi_deregister_channel);

int mhi_register_device(struct mhi_device *mhi_device,
                        const char *node_name,
                        unsigned long user_data)
{
        const struct device_node *of_node;
        struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
        struct pcie_core_info *core_info;
        struct pci_dev *pci_dev = mhi_device->pci_dev;
        u32 domain = pci_domain_nr(pci_dev->bus);
        u32 bus = pci_dev->bus->number;
        u32 dev_id = pci_dev->device;
        u32 slot = PCI_SLOT(pci_dev->devfn);
        int ret, i;

        of_node = of_parse_phandle(mhi_device->dev->of_node, node_name, 0);
        if (!of_node)
                return -EINVAL;

        if (!mhi_device_drv)
                return -EPROBE_DEFER;

        /* Traverse thru the list */
        mutex_lock(&mhi_device_drv->lock);
        list_for_each_entry(itr, &mhi_device_drv->head, node) {
                struct platform_device *pdev = itr->plat_dev;
                struct pcie_core_info *core = &itr->core;

                if (pdev->dev.of_node == of_node &&
                    core->domain == domain &&
                    core->bus == bus &&
                    core->dev_id == dev_id &&
                    core->slot == slot) {
                        mhi_dev_ctxt = itr;
                        break;
                }
        }
        mutex_unlock(&mhi_device_drv->lock);

        /* perhaps we've not probed yet */
        if (!mhi_dev_ctxt)
                return -EPROBE_DEFER;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                "Registering Domain:%02u Bus:%04u dev:0x%04x slot:%04u\n",
                domain, bus, dev_id, slot);

        /* Set up pcie dev info */
        mhi_dev_ctxt->pcie_device = pci_dev;
        mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
        INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
        mutex_init(&mhi_dev_ctxt->pm_lock);
        rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
        spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
        tasklet_init(&mhi_dev_ctxt->ev_task, mhi_ctrl_ev_task,
                     (unsigned long)mhi_dev_ctxt);
        init_completion(&mhi_dev_ctxt->cmd_complete);
        mhi_dev_ctxt->flags.link_up = 1;
        core_info = &mhi_dev_ctxt->core;
        core_info->manufact_id = pci_dev->vendor;
        core_info->pci_master = false;

        /* Go thru resources and set up */
        for (i = 0; i < ARRAY_SIZE(mhi_device->resources); i++) {
                const struct resource *res = &mhi_device->resources[i];

                switch (resource_type(res)) {
                case IORESOURCE_MEM:
                        /* bus master already mapped it */
                        core_info->bar0_base = (void __iomem *)res->start;
                        core_info->bar0_end = (void __iomem *)res->end;
                        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                                "bar mapped to:0x%llx - 0x%llx (virtual)\n",
                                res->start, res->end);
                        break;
                case IORESOURCE_IRQ:
                        core_info->irq_base = (u32)res->start;
                        core_info->max_nr_msis = (u32)resource_size(res);
                        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                                "irq mapped to: %u size:%u\n",
                                core_info->irq_base,
                                core_info->max_nr_msis);
                        break;
                };
        }

        if (!core_info->bar0_base || !core_info->irq_base)
                return -EINVAL;

        mhi_dev_ctxt->bus_master_rt_get = mhi_device->pm_runtime_get;
        mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_noidle;
        if (!mhi_dev_ctxt->bus_master_rt_get ||
            !mhi_dev_ctxt->bus_master_rt_put)
                return -EINVAL;

        ret = mhi_ctxt_init(mhi_dev_ctxt);
        if (ret) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "MHI Initialization failed, ret %d\n", ret);
                return ret;
        }
        mhi_init_debugfs(mhi_dev_ctxt);

        /* setup shadow pm functions */
        mhi_dev_ctxt->assert_wake = mhi_assert_device_wake;
        mhi_dev_ctxt->deassert_wake = mhi_deassert_device_wake;
        mhi_dev_ctxt->runtime_get = mhi_slave_mode_runtime_get;
        mhi_dev_ctxt->runtime_put = mhi_slave_mode_runtime_put;
        mhi_device->mhi_dev_ctxt = mhi_dev_ctxt;
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");

        return 0;
}
EXPORT_SYMBOL(mhi_register_device);

void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
                             void __iomem *io_addr,
                             uintptr_t chan,
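A hedged usage sketch, not taken from the patch, of how an external bus master might fill in struct mhi_device and call mhi_register_device(). The wrapper name, the phandle name "example-mhi", and the parameter values are assumptions, and the callbacks reuse the hypothetical ones sketched earlier:

/* Illustration only: describe already-mapped BAR space, the MSI range, and
 * the runtime-PM callbacks, then hand everything to the MHI core. */
static int example_bus_master_register(struct platform_device *pdev,
                                       struct pci_dev *pci_dev,
                                       void __iomem *bar0, u32 bar0_len,
                                       u32 msi_base, u32 nr_msis)
{
        static struct mhi_device example_mhi_dev;

        example_mhi_dev.dev = &pdev->dev;
        example_mhi_dev.pci_dev = pci_dev;

        /* BAR is passed as virtual addresses: "bus master already mapped it". */
        example_mhi_dev.resources[0].start = (resource_size_t)(uintptr_t)bar0;
        example_mhi_dev.resources[0].end = (resource_size_t)(uintptr_t)bar0 + bar0_len - 1;
        example_mhi_dev.resources[0].flags = IORESOURCE_MEM;

        /* MSI range: base vector plus how many vectors MHI may use. */
        example_mhi_dev.resources[1].start = msi_base;
        example_mhi_dev.resources[1].end = msi_base + nr_msis - 1;
        example_mhi_dev.resources[1].flags = IORESOURCE_IRQ;

        /* Runtime-PM votes from MHI are routed back to the bus master. */
        example_mhi_dev.pm_runtime_get = example_bus_master_rt_get;
        example_mhi_dev.pm_runtime_noidle = example_bus_master_rt_noidle;

        /* "example-mhi" names the DT phandle pointing at MHI's node. */
        return mhi_register_device(&example_mhi_dev, "example-mhi", 0);
}

A -EPROBE_DEFER return simply means MHI's own platform device has not probed yet, so the bus master can retry from its probe path.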
@@ -20,6 +20,7 @@
#include "mhi_sys.h"
#include "mhi.h"
#include "mhi_hwio.h"
+#include "mhi_bhi.h"

/* Write only sysfs attributes */
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
@@ -57,14 +58,12 @@ int mhi_pci_suspend(struct device *dev)
        return r;
}

-int mhi_runtime_suspend(struct device *dev)
+static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
+                             bool force_m3)
{
        int r = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);

        mutex_lock(&mhi_dev_ctxt->pm_lock);
        read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                "Entered with State:0x%x %s\n",
                mhi_dev_ctxt->mhi_pm_state,
@@ -74,17 +73,16 @@ int mhi_runtime_suspend(struct device *dev)
        if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
            mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-                       "Already in active state, exiting\n");
+                       "Already in M3 State\n");
                read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
                mutex_unlock(&mhi_dev_ctxt->pm_lock);
                return 0;
        }

-       if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+       if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake) &&
+           force_m3 == false)) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-                       "Busy, Aborting Runtime Suspend\n");
+                       "Busy, Aborting M3\n");
                read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
                mutex_unlock(&mhi_dev_ctxt->pm_lock);
                return -EBUSY;
        }
@@ -98,8 +96,7 @@ int mhi_runtime_suspend(struct device *dev)
                mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
                        "Failed to get M0||M1 event, timeout, current state:%s\n",
                        TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-               r = -EIO;
-               goto rpm_suspend_exit;
+               return -EIO;
        }

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Allowing M3 State\n");
@@ -116,20 +113,66 @@ int mhi_runtime_suspend(struct device *dev)
                mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
                        "Failed to get M3 event, timeout, current state:%s\n",
                        TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-               r = -EIO;
-               goto rpm_suspend_exit;
+               return -EIO;
        }

        return 0;
}

static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        int r;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                "Entered with State:0x%x %s\n",
                mhi_dev_ctxt->mhi_pm_state,
                TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));

        write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
        write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

        /* Set and wait for M0 Event */
        write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
        write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
                               mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
                               mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
                               msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
        if (!r) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "Failed to get M0 event, timeout\n");
                r = -EIO;
        } else
                r = 0;

        return r;
}

int mhi_runtime_suspend(struct device *dev)
{
        int r = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");

        mutex_lock(&mhi_dev_ctxt->pm_lock);
        r = mhi_pm_initiate_m3(mhi_dev_ctxt, false);
        if (r) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "abort due to ret:%d\n", r);
                mutex_unlock(&mhi_dev_ctxt->pm_lock);
                return r;
        }
        r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
        if (r) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-                       "Failed to Turn off link ret:%d\n",
-                       r);
+                       "Failed to Turn off link ret:%d\n", r);
        }

-rpm_suspend_exit:
-       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
        mutex_unlock(&mhi_dev_ctxt->pm_lock);
+       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited with ret:%d\n", r);

        return r;
}
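For contrast with the slave-mode path, a sketch of how the master-mode callbacks above would typically be wired into the PCI driver's dev_pm_ops; this wiring is assumed here, not shown in the patch:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical wiring: runtime PM drives mhi_runtime_suspend/resume in
 * master mode, while system sleep goes through mhi_pci_suspend/resume. */
static const struct dev_pm_ops example_mhi_pm_ops = {
        SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};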
@@ -167,32 +210,10 @@ int mhi_runtime_resume(struct device *dev)

        /* turn on link */
        r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
-       if (r) {
-               mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
-                       "Failed to resume link\n");
+       if (r)
                goto rpm_resume_exit;
-       }
-
-       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
-       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
-       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
-
-       /* Set and wait for M0 Event */
-       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
-       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
-       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
-       r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-                              mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-                              msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-       if (!r) {
-               mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-                       "Failed to get M0 event, timeout\n");
-               r = -EIO;
-               goto rpm_resume_exit;
-       }
-       r = 0; /* no errors */

+       r = mhi_pm_initiate_m0(mhi_dev_ctxt);
rpm_resume_exit:
        mutex_unlock(&mhi_dev_ctxt->pm_lock);
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited with :%d\n", r);
@@ -216,6 +237,97 @@ int mhi_pci_resume(struct device *dev)
        return r;
}

static int mhi_pm_slave_mode_power_on(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        int ret_val;
        u32 timeout = mhi_dev_ctxt->poll_reset_timeout_ms;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
        mutex_lock(&mhi_dev_ctxt->pm_lock);
        write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
        ret_val = set_mhi_base_state(mhi_dev_ctxt);
        write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

        if (ret_val) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "Error Setting MHI Base State %d\n", ret_val);
                goto unlock_pm_lock;
        }

        if (mhi_dev_ctxt->base_state != STATE_TRANSITION_BHI) {
                ret_val = -EIO;
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "Invalid Base State, cur_state:%s\n",
                        state_transition_str(mhi_dev_ctxt->base_state));
                goto unlock_pm_lock;
        }

        reinit_completion(&mhi_dev_ctxt->cmd_complete);
        init_mhi_base_state(mhi_dev_ctxt);

        /*
         * Keep wake in Active until AMSS, @ AMSS we will
         * decrement counts
         */
        read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
        read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

        ret_val = wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
                                              msecs_to_jiffies(timeout));
        if (!ret_val || mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
                ret_val = -EIO;
        else
                ret_val = 0;

        if (ret_val) {
                read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
                mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
                read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        }

unlock_pm_lock:

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", ret_val);
        mutex_unlock(&mhi_dev_ctxt->pm_lock);
        return ret_val;
}

static int mhi_pm_slave_mode_suspend(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        int r;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
        mutex_lock(&mhi_dev_ctxt->pm_lock);

        r = mhi_pm_initiate_m3(mhi_dev_ctxt, false);
        if (r)
                mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "abort due to ret:%d\n", r);
        mutex_unlock(&mhi_dev_ctxt->pm_lock);

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", r);

        return r;
}

static int mhi_pm_slave_mode_resume(struct mhi_device_ctxt *mhi_dev_ctxt)
{
        int r;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
        mutex_lock(&mhi_dev_ctxt->pm_lock);

        r = mhi_pm_initiate_m0(mhi_dev_ctxt);
        if (r)
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "M3 exit failed ret:%d\n", r);
        mutex_unlock(&mhi_dev_ctxt->pm_lock);
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", r);

        return r;
}

int mhi_init_pm_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &mhi_attribute_group);
@@ -344,3 +456,30 @@ exit:
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited...\n");
        return r;
}

int mhi_pm_control_device(struct mhi_device *mhi_device,
                          enum mhi_dev_ctrl ctrl)
{
        struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;

        if (!mhi_dev_ctxt)
                return -EINVAL;

        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                "Entered with cmd:%d\n", ctrl);

        switch (ctrl) {
        case MHI_DEV_CTRL_INIT:
                return bhi_probe(mhi_dev_ctxt);
        case MHI_DEV_CTRL_POWER_ON:
                return mhi_pm_slave_mode_power_on(mhi_dev_ctxt);
        case MHI_DEV_CTRL_SUSPEND:
                return mhi_pm_slave_mode_suspend(mhi_dev_ctxt);
        case MHI_DEV_CTRL_RESUME:
                return mhi_pm_slave_mode_resume(mhi_dev_ctxt);
        default:
                break;
        }
        return -EINVAL;
}
EXPORT_SYMBOL(mhi_pm_control_device);
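A hedged sketch of the call ordering a slave-mode bus master is expected to follow with this control API; the wrapper functions are hypothetical and error handling is trimmed:

/* Illustration only: bring the device to AMSS under bus-master control,
 * then cycle it through M3/M0 around the bus master's own suspend/resume. */
static int example_slave_mode_boot(struct mhi_device *mhi_dev)
{
        int ret;

        ret = mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_INIT);      /* BHI probe */
        if (ret)
                return ret;
        return mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_POWER_ON); /* waits for AMSS */
}

static int example_slave_mode_suspend(struct mhi_device *mhi_dev)
{
        return mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_SUSPEND);  /* initiate M3 */
}

static int example_slave_mode_resume(struct mhi_device *mhi_dev)
{
        return mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_RESUME);   /* back to M0 */
}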
@@ -524,6 +524,7 @@ static int process_amss_transition(
        read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        ring_all_ev_dbs(mhi_dev_ctxt);
        read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       complete(&mhi_dev_ctxt->cmd_complete);

        /*
         * runtime_allow will decrement usage_count, counts were
@@ -18,6 +18,7 @@
#define MHI_MAX_MTU 0xFFFF

struct mhi_client_config;
struct mhi_device_ctxt;

enum MHI_CLIENT_CHANNEL {
        MHI_CLIENT_LOOPBACK_OUT = 0,

@@ -71,11 +72,11 @@ enum MHI_CLIENT_CHANNEL {
};

enum MHI_CB_REASON {
-       MHI_CB_XFER = 0x0,
-       MHI_CB_MHI_DISABLED = 0x4,
-       MHI_CB_MHI_ENABLED = 0x8,
-       MHI_CB_CHAN_RESET_COMPLETE = 0x10,
-       MHI_CB_reserved = 0x80000000,
+       MHI_CB_XFER,
+       MHI_CB_MHI_DISABLED,
+       MHI_CB_MHI_ENABLED,
+       MHI_CB_MHI_SHUTDOWN,
+       MHI_CB_SYS_ERROR,
};

enum MHI_FLAGS {
@@ -122,6 +123,35 @@ struct __packed bhi_vec_entry {
        u64 size;
};

/**
 * struct mhi_device - IO resources for MHI
 * @dev: device node points to of_node
 * @pci_dev: pci device node
 * @resources: bar memory space and IRQ resources
 * @pm_runtime_get: fp for bus master's rpm pm_runtime_get
 * @pm_runtime_noidle: fp for bus master's rpm pm_runtime_noidle
 * @mhi_dev_ctxt: private data for host
 */
struct mhi_device {
        struct device *dev;
        struct pci_dev *pci_dev;
        struct resource resources[2];
        int (*pm_runtime_get)(struct pci_dev *pci_dev);
        void (*pm_runtime_noidle)(struct pci_dev *pci_dev);
        struct mhi_device_ctxt *mhi_dev_ctxt;
};

enum mhi_dev_ctrl {
        MHI_DEV_CTRL_INIT,
        MHI_DEV_CTRL_DE_INIT,
        MHI_DEV_CTRL_SUSPEND,
        MHI_DEV_CTRL_RESUME,
        MHI_DEV_CTRL_POWER_OFF,
        MHI_DEV_CTRL_POWER_ON,
        MHI_DEV_CTRL_RAM_DUMP,
        MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
};

/**
 * mhi_is_device_ready - Check if MHI is ready to register clients
 *
@@ -133,6 +163,27 @@ struct __packed bhi_vec_entry {
bool mhi_is_device_ready(const struct device * const dev,
                         const char *node_name);

/**
 * mhi_register_device - register hardware resources with MHI
 *
 * @mhi_device: resources to be used
 * @node_name: DT node name
 * @user_data: cb data for client
 * @Return 0 on success
 */
int mhi_register_device(struct mhi_device *mhi_device,
                        const char *node_name,
                        unsigned long user_data);

/**
 * mhi_pm_control_device - power management control api
 * @mhi_device: registered device structure
 * @ctrl: specific command
 * @Return 0 on success
 */
int mhi_pm_control_device(struct mhi_device *mhi_device,
                          enum mhi_dev_ctrl ctrl);

/**
 * mhi_deregister_channel - de-register callbacks from MHI
 *