scsi: ufs: block requests during clock scaling
UFS clock scaling may involve several operations, such as scaling the UFS controller clock frequencies and scaling the UFS interface gear. To ensure these operations complete successfully, the driver must make sure that no regular transfer commands are allowed until clock scaling is finished. Currently we block SCSI command requests only during gear scaling, not during clock frequency scaling (and related operations).

The first fix is therefore to block SCSI requests for the entire clock scaling operation, by invoking scsi_block_requests() and then waiting for all pending requests (doorbells) to complete. However, this approach alone cannot guarantee that no new request is issued while clock scaling is in progress. Consider a scenario where a request has just been queued via ->queuecommand() (but has not yet set its doorbell) while no doorbells are pending, and the clock scaling function starts executing in parallel. The clock scaling function blocks new SCSI requests and waits for all pending doorbells to clear; since none were pending when it checked, it proceeds with clock scaling, but the command that was just accepted via ->queuecommand() then sets its doorbell and is issued by the controller.

This change closes that race by using a read-write semaphore: clock scaling takes the semaphore for writing, which blocks all new requests until the scaling work finishes, while request submission takes it for reading, so multiple requests can still be queued in parallel whenever clock scaling is not running.

Change-Id: I99beed27bbc5b768fb3cee8b84cb5392619cace7
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
parent 289b6c51a1
commit 39c7c51bec
2 changed files with 46 additions and 15 deletions
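As an illustration of the locking scheme described in the commit message above, here is a minimal user-space sketch. It is not driver code: POSIX rwlocks stand in for the kernel rw_semaphore, an atomic counter stands in for the doorbell register, and all names and timings are illustrative only.

/*
 * Minimal user-space model of the patch's locking scheme (illustrative only):
 * submission takes the lock for reading and reports "busy" if it cannot,
 * while the scaling path takes it for writing and then drains doorbells.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t clk_scaling_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int outstanding_reqs;		/* models the doorbell register */

/* Submission path: like ->queuecommand(), try-lock for read or report busy. */
static int queue_request(int id)
{
	if (pthread_rwlock_tryrdlock(&clk_scaling_lock) != 0)
		return -1;			/* SCSI_MLQUEUE_HOST_BUSY equivalent */
	atomic_fetch_add(&outstanding_reqs, 1);	/* doorbell set under the read lock */
	pthread_rwlock_unlock(&clk_scaling_lock);
	printf("request %d issued\n", id);

	usleep(1000);				/* pretend the device processes it */
	atomic_fetch_sub(&outstanding_reqs, 1);	/* completion clears the doorbell */
	return 0;
}

/* Scaling path: take the writer lock first, then wait for doorbells to clear. */
static void *scale_clocks(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&clk_scaling_lock);	/* no new submissions from here on */
	while (atomic_load(&outstanding_reqs))		/* drain in-flight requests */
		usleep(100);
	usleep(200000);					/* pretend scaling takes a while */
	printf("clocks/gear scaled with no requests in flight\n");
	pthread_rwlock_unlock(&clk_scaling_lock);
	return NULL;
}

int main(void)
{
	pthread_t scaler;

	queue_request(1);
	pthread_create(&scaler, NULL, scale_clocks, NULL);
	usleep(100000);			/* let the scaler grab the write lock */
	if (queue_request(2) < 0)
		printf("request 2 rejected while scaling is in progress\n");
	pthread_join(scaler, NULL);
	queue_request(3);		/* accepted again once scaling completes */
	return 0;
}

Build with cc -pthread. The essential point is that taking the semaphore for reading in the submission path closes the window between the "no doorbells pending" check and a freshly queued command setting its doorbell.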
drivers/scsi/ufs/ufshcd.c

@@ -2492,6 +2492,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		BUG();
 	}
 
+	if (!down_read_trylock(&hba->clk_scaling_lock))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
@@ -2601,6 +2604,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	up_read(&hba->clk_scaling_lock);
 	return err;
 }
 
@@ -2789,6 +2793,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	struct completion wait;
 	unsigned long flags;
 
+	down_read(&hba->clk_scaling_lock);
+
 	/*
 	 * Get free slot, sleep if slots are unavailable.
 	 * Even though we use wait_event() which sleeps indefinitely,
@@ -2820,6 +2826,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
+	up_read(&hba->clk_scaling_lock);
 	return err;
 }
 
@@ -8369,7 +8376,6 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 {
 	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G2
-	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
 	int ret = 0;
 	struct ufs_pa_layer_attr new_pwr_info;
 
@@ -8397,21 +8403,8 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 
 	/* check if the power mode needs to be changed or not? */
 	if (memcmp(&new_pwr_info, &hba->pwr_info,
-		   sizeof(struct ufs_pa_layer_attr))) {
-		/*
-		 * make sure that there are no outstanding requests when
-		 * power mode change is requested
-		 */
-		scsi_block_requests(hba->host);
-		if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
-			ret = -EBUSY;
-			goto out_unblock;
-		}
-
+		   sizeof(struct ufs_pa_layer_attr)))
 		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-out_unblock:
-	scsi_unblock_requests(hba->host);
-	}
 
 	if (ret)
 		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
@@ -8422,6 +8415,31 @@ out_unblock:
 	return ret;
 }
 
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
+	int ret = 0;
+	/*
+	 * make sure that there are no outstanding requests when
+	 * clock scaling is in progress
+	 */
+	scsi_block_requests(hba->host);
+	down_write(&hba->clk_scaling_lock);
+	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+		ret = -EBUSY;
+		up_write(&hba->clk_scaling_lock);
+		scsi_unblock_requests(hba->host);
+	}
+
+	return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+	up_write(&hba->clk_scaling_lock);
+	scsi_unblock_requests(hba->host);
+}
+
 /**
  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
  * @hba: per adapter instance
@@ -8435,6 +8453,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
 	int ret = 0;
 
+	ret = ufshcd_clock_scaling_prepare(hba);
+	if (ret)
+		return ret;
+
+	/* let's not get into low power until clock scaling is completed */
+	ufshcd_hibern8_hold(hba, false);
 	/* scale down the gear before scaling down clocks */
 	if (!scale_up) {
 		ret = ufshcd_scale_gear(hba, false);
@@ -8460,6 +8484,8 @@ scale_up_gear:
 	if (!scale_up)
 		ufshcd_scale_gear(hba, true);
 out:
+	ufshcd_clock_scaling_unprepare(hba);
+	ufshcd_hibern8_release(hba, false);
 	return ret;
 }
 
@@ -8764,6 +8790,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Initialize mutex for device management commands */
 	mutex_init(&hba->dev_cmd.lock);
 
+	init_rwsem(&hba->clk_scaling_lock);
+
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
drivers/scsi/ufs/ufshcd.h

@@ -45,6 +45,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -851,6 +852,8 @@ struct ufs_hba {
 
 	enum bkops_status urgent_bkops_lvl;
 	bool is_urgent_bkops_lvl_checked;
+
+	struct rw_semaphore clk_scaling_lock;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */