scsi: ufs: add support for hibern8 on idle

In order to save power, the UFS link should be put into hibern8 as soon
as it is idle. Power measurements of active use cases (like audio/video
playback/recording) show that putting the UFS link into hibern8 after
10ms of idle time (if not earlier) would save significant power.

The currently available solution is to enter hibern8 along with clock
gating at an idle timeout of 150ms. As clock gating has large latencies
(7ms each for enter and exit), we cannot bring the idle timeout down to
<=10ms without degrading UFS throughput. Hence this change adds support
for entering hibern8 via a separate idle timer.

Change-Id: I5a31f18fc21015d4a68236da9fd94f3f016e1d44
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
[subhashj@codeaurora.org: resolved trivial merge conflicts]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Authored by Subhash Jadavani on 2014-10-08 19:08:10 -07:00; committed by David Keitel
parent cfd27bd4b7
commit ab18ee44ce
4 changed files with 376 additions and 31 deletions
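
For readers skimming the diff, here is a minimal userspace sketch (not part of
this patch) of the mechanism described above: a count of in-flight requests
plus an idle timer that requests hibern8 entry once the host has been idle for
about 10ms. The helper names (h8_hold, h8_release, idle_timer_expired) are
hypothetical; the real driver implements this with delayed work, the host lock
and UIC commands, as the hunks below show.

/*
 * Hypothetical standalone model of "hibern8 on idle"; compiles and runs
 * in userspace, no kernel APIs involved.
 */
#include <stdio.h>
#include <stdbool.h>

enum h8_state { HIBERN8_ENTERED, HIBERN8_EXITED, REQ_HIBERN8_ENTER, REQ_HIBERN8_EXIT };

static enum h8_state state = HIBERN8_EXITED;
static int active_reqs;
static bool timer_armed;		/* stands in for the delayed enter_work */

static void h8_hold(void)		/* called before issuing a request */
{
	active_reqs++;
	if (state == REQ_HIBERN8_ENTER) {
		timer_armed = false;	/* cancel the pending hibern8 enter */
		state = HIBERN8_EXITED;
	} else if (state == HIBERN8_ENTERED || state == REQ_HIBERN8_EXIT) {
		/* exit work: bring the link out of hibern8 first */
		state = HIBERN8_EXITED;
	}
}

static void h8_release(void)		/* called when a request completes */
{
	if (--active_reqs == 0) {
		state = REQ_HIBERN8_ENTER;	/* arm the ~10ms idle timer */
		timer_armed = true;
	}
}

static void idle_timer_expired(void)	/* the idle timer fired */
{
	if (timer_armed && state == REQ_HIBERN8_ENTER && active_reqs == 0) {
		timer_armed = false;
		state = HIBERN8_ENTERED;	/* UIC hibern8 enter */
	}
}

int main(void)
{
	h8_hold();
	h8_release();			/* last request done, link is idle */
	idle_timer_expired();		/* ~10ms later */
	printf("idle: state=%d (HIBERN8_ENTERED=%d)\n", state, HIBERN8_ENTERED);
	h8_hold();			/* new I/O wakes the link before use */
	printf("new request: state=%d (HIBERN8_EXITED=%d)\n", state, HIBERN8_EXITED);
	h8_release();
	return 0;
}

The patch additionally keeps the idle delay at no less than 2 jiffies and
flushes a pending hibern8-enter work before clock gating turns the clocks off
(see ufshcd_gate_work() and __ufshcd_hibern8_release() below).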

@@ -1024,7 +1024,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_GATING |
UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+hba->caps |= UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
ufs_qcom_setup_clocks(hba, true);
goto out;

@@ -746,6 +746,18 @@ static const char *ufschd_clk_gating_state_to_string(
}
}
static const char *ufshcd_hibern8_on_idle_state_to_string(
enum ufshcd_hibern8_on_idle_state state)
{
switch (state) {
case HIBERN8_ENTERED: return "HIBERN8_ENTERED";
case HIBERN8_EXITED: return "HIBERN8_EXITED";
case REQ_HIBERN8_ENTER: return "REQ_HIBERN8_ENTER";
case REQ_HIBERN8_EXIT: return "REQ_HIBERN8_EXIT";
default: return "UNKNOWN_STATE";
}
}
static u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI version < 2.0 supports UniPro 1.41 */
@@ -900,6 +912,13 @@ static void ufshcd_gate_work(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ufshcd_is_hibern8_on_idle_allowed(hba))
/*
* Hibern8 enter work (on Idle) needs clocks to be ON hence
* make sure that it is flushed before turning off the clocks.
*/
flush_delayed_work(&hba->hibern8_on_idle.enter_work);
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
if (ufshcd_uic_hibern8_enter(hba)) {
@@ -1070,6 +1089,246 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/**
* ufshcd_hibern8_hold - Make sure that link is not in hibern8.
*
* @hba: per adapter instance
* @async: This indicates whether caller wants to exit hibern8 asynchronously.
*
* Exit from hibern8 mode and set the link as active.
*
* Return 0 on success, non-zero on failure.
*/
int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
unsigned long flags;
if (!ufshcd_is_hibern8_on_idle_allowed(hba))
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->hibern8_on_idle.active_reqs++;
start:
switch (hba->hibern8_on_idle.state) {
case HIBERN8_EXITED:
break;
case REQ_HIBERN8_ENTER:
if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
hba->hibern8_on_idle.state = HIBERN8_EXITED;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
break;
}
/*
* If we are here, it means the hibern8 enter work is either done or
* currently running. Hence, fall through to cancel hibern8
* work and exit hibern8.
*/
case HIBERN8_ENTERED:
scsi_block_requests(hba->host);
hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
schedule_work(&hba->hibern8_on_idle.exit_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
case REQ_HIBERN8_EXIT:
if (async) {
rc = -EAGAIN;
hba->hibern8_on_idle.active_reqs--;
break;
} else {
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->hibern8_on_idle.exit_work);
/* Make sure state is HIBERN8_EXITED before returning */
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
default:
dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
__func__, hba->hibern8_on_idle.state);
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return rc;
}
/* host lock must be held before calling this variant */
static void __ufshcd_hibern8_release(struct ufs_hba *hba)
{
unsigned long delay_in_jiffies;
if (!ufshcd_is_hibern8_on_idle_allowed(hba))
return;
hba->hibern8_on_idle.active_reqs--;
BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
if (hba->hibern8_on_idle.active_reqs
|| hba->hibern8_on_idle.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
return;
hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
/*
* Scheduling the delayed work after 1 jiffy may cause it to run any
* time between 0 ms and 1000/HZ ms, which is not desirable for the
* hibern8 enter work as performance may suffer if it gets scheduled
* almost immediately. Hence make sure that the hibern8 enter work
* gets scheduled at least 2 jiffies later (any time between
* 1000/HZ ms and 2000/HZ ms).
*/
delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
if (delay_in_jiffies == 1)
delay_in_jiffies++;
schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
delay_in_jiffies);
}
void ufshcd_hibern8_release(struct ufs_hba *hba)
{
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
__ufshcd_hibern8_release(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_hibern8_enter_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
hibern8_on_idle.enter_work.work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->hibern8_on_idle.is_suspended) {
hba->hibern8_on_idle.state = HIBERN8_EXITED;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
goto rel_lock;
}
if (hba->hibern8_on_idle.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
/* Enter failed */
hba->hibern8_on_idle.state = HIBERN8_EXITED;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
goto out;
}
ufshcd_set_link_hibern8(hba);
/*
* If a request to cancel this work arrived while it was running,
* hibern8_on_idle.state will have been marked as REQ_HIBERN8_EXIT.
* In that case keep the state as REQ_HIBERN8_EXIT, which anyway
* implies that the link is in hibern8 and a request to exit from it
* is pending. This keeps the state machine intact and ultimately
* prevents the cancel work from being run multiple times when new
* requests arrive before the current cancel work is done.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
hba->hibern8_on_idle.state = HIBERN8_ENTERED;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
}
rel_lock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return;
}
static void ufshcd_hibern8_exit_work(struct work_struct *work)
{
int ret;
unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
hibern8_on_idle.exit_work);
cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
|| ufshcd_is_link_active(hba)) {
hba->hibern8_on_idle.state = HIBERN8_EXITED;
spin_unlock_irqrestore(hba->host->host_lock, flags);
goto unblock_reqs;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Exit from hibern8 */
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_set_link_active(hba);
hba->hibern8_on_idle.state = HIBERN8_EXITED;
trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
ufshcd_hibern8_on_idle_state_to_string(
hba->hibern8_on_idle.state));
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
}
unblock_reqs:
scsi_unblock_requests(hba->host);
}
static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
{
if (!ufshcd_is_hibern8_on_idle_allowed(hba))
return;
INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
ufshcd_hibern8_enter_work);
INIT_WORK(&hba->hibern8_on_idle.exit_work, ufshcd_hibern8_exit_work);
hba->hibern8_on_idle.delay_ms = 10;
hba->hibern8_on_idle.state = HIBERN8_EXITED;
}
static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
{
if (!ufshcd_is_hibern8_on_idle_allowed(hba))
return;
/* Don't have anything to do for now */
}
static void ufshcd_hold_all(struct ufs_hba *hba)
{
ufshcd_hold(hba, false);
ufshcd_hibern8_hold(hba, false);
}
static void ufshcd_release_all(struct ufs_hba *hba)
{
ufshcd_hibern8_release(hba);
ufshcd_release(hba);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
@@ -1297,7 +1556,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -1308,7 +1567,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
mutex_unlock(&hba->uic_cmd_mutex);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return ret;
}
@@ -1652,6 +1911,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
err = ufshcd_hibern8_hold(hba, true);
if (err) {
clear_bit_unlock(tag, &hba->lrb_in_use);
err = SCSI_MLQUEUE_HOST_BUSY;
ufshcd_release(hba);
goto out;
}
WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -1669,6 +1936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
goto out;
}
@@ -1927,7 +2195,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -1971,7 +2239,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -1995,7 +2263,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
@@ -2035,7 +2303,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -2100,7 +2368,7 @@ int ufshcd_query_descriptor(struct ufs_hba *hba,
BUG_ON(!hba);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
@@ -2150,7 +2418,7 @@ int ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -2827,7 +3095,7 @@ int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
bool timeout = false;
ktime_t start = ktime_get();
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
ret = -EBUSY;
@@ -2862,7 +3130,7 @@ int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return ret;
}
@@ -2892,10 +3160,9 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
out:
return ret;
}
@@ -3417,7 +3684,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -3429,7 +3696,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
mutex_unlock(&hba->dev_cmd.lock);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -3835,6 +4102,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
__ufshcd_hibern8_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete)
complete(hba->dev_cmd.complete);
@@ -4125,7 +4393,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
pm_runtime_get_sync(hba->dev);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -4204,7 +4472,7 @@ static void ufshcd_err_handler(struct work_struct *work)
out:
scsi_unblock_requests(hba->host);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
pm_runtime_put_sync(hba->dev);
}
@@ -4401,7 +4669,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
spin_lock_irqsave(host->host_lock, flags);
task_req_descp = hba->utmrdl_base_addr;
@@ -4453,7 +4721,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -4549,7 +4817,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
return ufshcd_eh_host_reset_handler(cmd);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
/* If command is already aborted/completed, return SUCCESS */
if (!(test_bit(tag, &hba->outstanding_reqs)))
goto out;
@@ -4630,10 +4898,10 @@ out:
}
/*
-* This ufshcd_release() corresponds to the original scsi cmd that got
-* aborted here (as we won't get any IRQ for it).
+* This ufshcd_release_all() corresponds to the original scsi cmd that
+* got aborted here (as we won't get any IRQ for it).
*/
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -4718,7 +4986,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(cmd->device->host);
-ufshcd_hold(hba, false);
+ufshcd_hold_all(hba);
/*
* Check if there is any race with fatal error handling.
* If so, wait for it to complete. Even though fatal error
@@ -4753,7 +5021,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
-ufshcd_release(hba);
+ufshcd_release_all(hba);
return err;
}
@@ -5169,7 +5437,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
/*
* Don't allow clock gating and hibern8 enter for faster device
* detection.
*/
ufshcd_hold_all(hba);
ufshcd_probe_hba(hba);
ufshcd_release_all(hba);
}
/** /**
@@ -6048,8 +6322,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
-ufshcd_hold(hba, false);
+WARN_ON(hba->hibern8_on_idle.active_reqs);
ufshcd_hold_all(hba);
hba->clk_gating.is_suspended = true;
hba->hibern8_on_idle.is_suspended = true;
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -6096,6 +6372,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
if (ufshcd_is_link_hibern8(hba))
hba->hibern8_on_idle.state = HIBERN8_ENTERED;
ufshcd_vreg_set_lpm(hba);
disable_clks:
@@ -6150,8 +6429,9 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
hba->hibern8_on_idle.is_suspended = false;
hba->clk_gating.is_suspended = false;
-ufshcd_release(hba);
+ufshcd_release_all(hba);
out:
hba->pm_op_in_progress = 0;
@@ -6205,10 +6485,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
-if (!ret)
+if (!ret) {
ufshcd_set_link_active(hba);
+hba->hibern8_on_idle.state = HIBERN8_EXITED;
-else
+} else {
goto vendor_suspend;
+}
} else if (ufshcd_is_link_off(hba)) {
ret = ufshcd_host_reset_and_restore(hba);
/*
@@ -6231,16 +6513,19 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
hba->hibern8_on_idle.is_suspended = false;
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
-ufshcd_release(hba);
+ufshcd_release_all(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
if (ufshcd_is_link_hibern8(hba))
hba->hibern8_on_idle.state = HIBERN8_ENTERED;
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
@@ -6583,6 +6868,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_hba_stop(hba, true);
ufshcd_exit_clk_gating(hba);
ufshcd_exit_hibern8_on_idle(hba);
if (ufshcd_is_clkscaling_supported(hba)) {
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
devfreq_remove_device(hba->devfreq);
@@ -6931,6 +7217,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_waitqueue_head(&hba->dev_cmd.tag_wq);
ufshcd_init_clk_gating(hba);
ufshcd_init_hibern8_on_idle(hba);
/*
* In order to avoid any spurious interrupt immediately after

@@ -343,6 +343,35 @@ struct ufs_clk_gating {
int active_reqs;
};
/* Hibern8 state */
enum ufshcd_hibern8_on_idle_state {
HIBERN8_ENTERED,
HIBERN8_EXITED,
REQ_HIBERN8_ENTER,
REQ_HIBERN8_EXIT,
};
/**
* struct ufs_hibern8_on_idle - UFS Hibern8 on idle related data
* @enter_work: worker to put UFS link in hibern8 after some delay as
* specified in delay_ms
* @exit_work: worker to bring UFS link out of hibern8
* @state: the current hibern8 state
* @delay_ms: hibern8 enter delay in ms
* @is_suspended: hibern8 enter is suspended when set to 1 which can be used
* during suspend/resume
* @active_reqs: number of requests that are pending and should be waited for
* completion before scheduling delayed "enter_work".
*/
struct ufs_hibern8_on_idle {
struct delayed_work enter_work;
struct work_struct exit_work;
enum ufshcd_hibern8_on_idle_state state;
unsigned long delay_ms;
bool is_suspended;
int active_reqs;
};
struct ufs_clk_scaling {
ktime_t busy_start_t;
bool is_busy_started;
@@ -450,6 +479,7 @@ enum ts_types {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
* @hibern8_on_idle: UFS Hibern8 on idle related data
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -591,6 +621,8 @@ struct ufs_hba {
struct ufs_pwr_mode_info max_pwr_info;
struct ufs_clk_gating clk_gating;
struct ufs_hibern8_on_idle hibern8_on_idle;
/* Control to enable/disable host capabilities */
u32 caps;
/* Allow dynamic clk gating */
@@ -607,6 +639,8 @@ struct ufs_hba {
* CAUTION: Enabling this might reduce overall UFS throughput.
*/
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
/* Allow standalone Hibern8 enter on idle */
#define UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE (1 << 5)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -630,6 +664,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_hibern8_on_idle_allowed(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{

@@ -39,6 +39,26 @@ TRACE_EVENT(ufshcd_clk_gating,
__get_str(dev_name), __get_str(state))
);
TRACE_EVENT(ufshcd_hibern8_on_idle,
TP_PROTO(const char *dev_name, const char *state),
TP_ARGS(dev_name, state),
TP_STRUCT__entry(
__string(dev_name, dev_name)
__string(state, state)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__assign_str(state, state);
),
TP_printk("%s: state changed to %s",
__get_str(dev_name), __get_str(state))
);
TRACE_EVENT(ufshcd_clk_scaling,
TP_PROTO(const char *dev_name, const char *state, const char *clk,