scsi: ufs: optimize clock, pm_qos, hibern8 handling in queuecommand
ufshcd_queuecommand() votes for resources in this order: clocks, pm_qos latency, hibern8 exit. If any of these votes is not already applied, it has to be applied asynchronously, and in that case we release all of the previously applied resource votes (for example, if hibern8 exit has to be completed asynchronously, we release the votes for pm_qos and clocks as well). This is not an optimal solution; instead, we should skip scheduling the unvoting work for resources that are already voted. Change-Id: Ie700d9b3bf64370a5885787f7313d41adb5b3566 Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
This commit is contained in:
parent
c991cc0db0
commit
7248753f68
3 changed files with 26 additions and 26 deletions
|
@ -462,7 +462,7 @@ static int ufsdbg_host_regs_show(struct seq_file *file, void *data)
|
||||||
ufsdbg_pr_buf_to_std(file, hba->mmio_base, UFSHCI_REG_SPACE_SIZE,
|
ufsdbg_pr_buf_to_std(file, hba->mmio_base, UFSHCI_REG_SPACE_SIZE,
|
||||||
"host regs");
|
"host regs");
|
||||||
pm_runtime_put_sync(hba->dev);
|
pm_runtime_put_sync(hba->dev);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, false);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1148,7 +1148,7 @@ out:
|
||||||
}
|
}
|
||||||
|
|
||||||
/* host lock must be held before calling this variant */
|
/* host lock must be held before calling this variant */
|
||||||
static void __ufshcd_release(struct ufs_hba *hba)
|
static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
if (!ufshcd_is_clkgating_allowed(hba))
|
if (!ufshcd_is_clkgating_allowed(hba))
|
||||||
return;
|
return;
|
||||||
|
@ -1159,7 +1159,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
|
||||||
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|
||||||
|| hba->lrb_in_use || hba->outstanding_tasks
|
|| hba->lrb_in_use || hba->outstanding_tasks
|
||||||
|| hba->active_uic_cmd || hba->uic_async_done
|
|| hba->active_uic_cmd || hba->uic_async_done
|
||||||
|| ufshcd_eh_in_progress(hba))
|
|| ufshcd_eh_in_progress(hba) || no_sched)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
hba->clk_gating.state = REQ_CLKS_OFF;
|
hba->clk_gating.state = REQ_CLKS_OFF;
|
||||||
|
@ -1170,12 +1170,12 @@ static void __ufshcd_release(struct ufs_hba *hba)
|
||||||
msecs_to_jiffies(hba->clk_gating.delay_ms));
|
msecs_to_jiffies(hba->clk_gating.delay_ms));
|
||||||
}
|
}
|
||||||
|
|
||||||
void ufshcd_release(struct ufs_hba *hba)
|
void ufshcd_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
__ufshcd_release(hba);
|
__ufshcd_release(hba, no_sched);
|
||||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(ufshcd_release);
|
EXPORT_SYMBOL_GPL(ufshcd_release);
|
||||||
|
@ -1226,7 +1226,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (value) {
|
if (value) {
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, false);
|
||||||
} else {
|
} else {
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
hba->clk_gating.active_reqs++;
|
hba->clk_gating.active_reqs++;
|
||||||
|
@ -1353,7 +1353,7 @@ out:
|
||||||
}
|
}
|
||||||
|
|
||||||
/* host lock must be held before calling this variant */
|
/* host lock must be held before calling this variant */
|
||||||
static void __ufshcd_hibern8_release(struct ufs_hba *hba)
|
static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
unsigned long delay_in_jiffies;
|
unsigned long delay_in_jiffies;
|
||||||
|
|
||||||
|
@ -1368,7 +1368,7 @@ static void __ufshcd_hibern8_release(struct ufs_hba *hba)
|
||||||
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|
||||||
|| hba->lrb_in_use || hba->outstanding_tasks
|
|| hba->lrb_in_use || hba->outstanding_tasks
|
||||||
|| hba->active_uic_cmd || hba->uic_async_done
|
|| hba->active_uic_cmd || hba->uic_async_done
|
||||||
|| ufshcd_eh_in_progress(hba))
|
|| ufshcd_eh_in_progress(hba) || no_sched)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
|
hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
|
||||||
|
@ -1391,12 +1391,12 @@ static void __ufshcd_hibern8_release(struct ufs_hba *hba)
|
||||||
delay_in_jiffies);
|
delay_in_jiffies);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ufshcd_hibern8_release(struct ufs_hba *hba)
|
void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
__ufshcd_hibern8_release(hba);
|
__ufshcd_hibern8_release(hba, no_sched);
|
||||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1477,7 +1477,7 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
|
||||||
if (ufshcd_is_link_hibern8(hba)) {
|
if (ufshcd_is_link_hibern8(hba)) {
|
||||||
ufshcd_hold(hba, false);
|
ufshcd_hold(hba, false);
|
||||||
ret = ufshcd_uic_hibern8_exit(hba);
|
ret = ufshcd_uic_hibern8_exit(hba);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, false);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
ufshcd_set_link_active(hba);
|
ufshcd_set_link_active(hba);
|
||||||
|
@ -1662,24 +1662,24 @@ static int ufshcd_pm_qos_hold(struct ufs_hba *hba, bool async)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Host lock is assumed to be held by caller */
|
/* Host lock is assumed to be held by caller */
|
||||||
static void __ufshcd_pm_qos_release(struct ufs_hba *hba)
|
static void __ufshcd_pm_qos_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
if (!hba->pm_qos.cpu_dma_latency_us)
|
if (!hba->pm_qos.cpu_dma_latency_us)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (--hba->pm_qos.active_reqs)
|
if (--hba->pm_qos.active_reqs || no_sched)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
hba->pm_qos.state = PM_QOS_REQ_UNVOTE;
|
hba->pm_qos.state = PM_QOS_REQ_UNVOTE;
|
||||||
schedule_work(&hba->pm_qos.unvote_work);
|
schedule_work(&hba->pm_qos.unvote_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ufshcd_pm_qos_release(struct ufs_hba *hba)
|
static void ufshcd_pm_qos_release(struct ufs_hba *hba, bool no_sched)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
__ufshcd_pm_qos_release(hba);
|
__ufshcd_pm_qos_release(hba, no_sched);
|
||||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1785,9 +1785,9 @@ static void ufshcd_hold_all(struct ufs_hba *hba)
|
||||||
|
|
||||||
static void ufshcd_release_all(struct ufs_hba *hba)
|
static void ufshcd_release_all(struct ufs_hba *hba)
|
||||||
{
|
{
|
||||||
ufshcd_hibern8_release(hba);
|
ufshcd_hibern8_release(hba, false);
|
||||||
ufshcd_pm_qos_release(hba);
|
ufshcd_pm_qos_release(hba, false);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Must be called with host lock acquired */
|
/* Must be called with host lock acquired */
|
||||||
|
@ -2412,7 +2412,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||||
if (err) {
|
if (err) {
|
||||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||||
clear_bit_unlock(tag, &hba->lrb_in_use);
|
clear_bit_unlock(tag, &hba->lrb_in_use);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, true);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2420,8 +2420,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||||
if (err) {
|
if (err) {
|
||||||
clear_bit_unlock(tag, &hba->lrb_in_use);
|
clear_bit_unlock(tag, &hba->lrb_in_use);
|
||||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||||
ufshcd_pm_qos_release(hba);
|
ufshcd_pm_qos_release(hba, true);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, true);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
|
WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
|
||||||
|
@ -4707,9 +4707,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
|
||||||
clear_bit_unlock(index, &hba->lrb_in_use);
|
clear_bit_unlock(index, &hba->lrb_in_use);
|
||||||
/* Do not touch lrbp after scsi done */
|
/* Do not touch lrbp after scsi done */
|
||||||
cmd->scsi_done(cmd);
|
cmd->scsi_done(cmd);
|
||||||
__ufshcd_release(hba);
|
__ufshcd_release(hba, false);
|
||||||
__ufshcd_pm_qos_release(hba);
|
__ufshcd_pm_qos_release(hba, false);
|
||||||
__ufshcd_hibern8_release(hba);
|
__ufshcd_hibern8_release(hba, false);
|
||||||
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
|
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
|
||||||
if (hba->dev_cmd.complete) {
|
if (hba->dev_cmd.complete) {
|
||||||
ufshcd_cond_add_cmd_trace(hba, index,
|
ufshcd_cond_add_cmd_trace(hba, index,
|
||||||
|
@ -8076,7 +8076,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
|
||||||
}
|
}
|
||||||
hba->clk_scaling.is_allowed = value;
|
hba->clk_scaling.is_allowed = value;
|
||||||
|
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba, false);
|
||||||
pm_runtime_put_sync(hba->dev);
|
pm_runtime_put_sync(hba->dev);
|
||||||
out:
|
out:
|
||||||
return count;
|
return count;
|
||||||
|
|
|
@ -972,7 +972,7 @@ int ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode,
|
||||||
enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len);
|
enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len);
|
||||||
|
|
||||||
int ufshcd_hold(struct ufs_hba *hba, bool async);
|
int ufshcd_hold(struct ufs_hba *hba, bool async);
|
||||||
void ufshcd_release(struct ufs_hba *hba);
|
void ufshcd_release(struct ufs_hba *hba, bool no_sched);
|
||||||
int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
|
int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
|
||||||
int ufshcd_change_power_mode(struct ufs_hba *hba,
|
int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||||
struct ufs_pa_layer_attr *pwr_mode);
|
struct ufs_pa_layer_attr *pwr_mode);
|
||||||
|
|
Loading…
Add table
Reference in a new issue