scsi: ufs: add reference counting for scsi block requests

Currently we call scsi_block_requests()/scsi_unblock_requests() whenever
we want to block/unblock SCSI requests, but as there is no reference
counting, nested calls can leave us in an undesired state. Consider the
following call sequence:
1. func1() calls scsi_block_requests() but calls func2() before
   calling scsi_unblock_requests()
2. func2() calls scsi_block_requests()
3. func2() calls scsi_unblock_requests()
4. func1() calls scsi_unblock_requests()

Without reference counting, SCSI requests are unblocked after step #3
instead of only after step #4. Although we have not seen failures from
this yet, we might run into them in the future. Fix this by adding
reference counting around the block/unblock calls.

Change-Id: I1eaa39c0716cf41120269cf389ebb322e3006da1
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
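
To make the refcounting semantics concrete, here is a minimal standalone C sketch
(not part of the patch) that mirrors the scenario above; the do_block()/do_unblock()
helpers and the block_reqs_cnt counter are hypothetical stand-ins for
scsi_block_requests()/scsi_unblock_requests() and hba->scsi_block_reqs_cnt, and the
real driver code additionally holds the host lock, as the diff below shows:

#include <stdio.h>

/* Hypothetical stand-ins for scsi_block_requests()/scsi_unblock_requests(). */
static void do_block(void)   { printf("requests blocked\n"); }
static void do_unblock(void) { printf("requests unblocked\n"); }

/* Reference count, playing the role of hba->scsi_block_reqs_cnt. */
static int block_reqs_cnt;

/* Only the first caller actually blocks the queue... */
static void block_requests(void)
{
	if (!block_reqs_cnt++)
		do_block();
}

/* ...and only the last caller actually unblocks it. */
static void unblock_requests(void)
{
	block_reqs_cnt--;
	if (!block_reqs_cnt)
		do_unblock();
}

int main(void)
{
	block_requests();	/* 1. func1() blocks   (count 0 -> 1, queue blocked)       */
	block_requests();	/* 2. func2() blocks   (count 1 -> 2, no-op)                */
	unblock_requests();	/* 3. func2() unblocks (count 2 -> 1, queue stays blocked)  */
	unblock_requests();	/* 4. func1() unblocks (count 1 -> 0, queue unblocked)      */
	return 0;
}

With the counter in place, step 3 only drops the count from 2 to 1, so requests stay
blocked until step 4 brings it back to 0.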
Subhash Jadavani 2015-07-20 10:31:04 -07:00 committed by David Keitel
parent dc63550fa3
commit 00d5062d03
4 changed files with 52 additions and 16 deletions


@@ -1217,11 +1217,11 @@ static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
 	int ret;
 
 	pm_runtime_get_sync(hba->dev);
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
 	if (!ret)
 		ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 
 	return ret;
@@ -1312,7 +1312,7 @@ static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
 	attr_id = peer ? hba->debugfs_files.dme_peer_attr_id :
 			hba->debugfs_files.dme_local_attr_id;
 	pm_runtime_get_sync(hba->dev);
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
 	if (!ret) {
 		if ((attr_id >= MPHY_RX_ATTR_ADDR_START)
@@ -1324,7 +1324,7 @@ static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
 		ret = read_func(hba, attr_sel, &read_val);
 	}
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 	if (!ret)


@@ -733,7 +733,7 @@ static int ufs_qcom_crypto_engine_eh(struct ufs_hba *hba)
 	 * Host reset will be handled in a seperate workqueue
 	 * and will be triggered from ufshcd_check_errors.
 	 */
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	ufshcd_abort_outstanding_transfer_requests(hba,
 			DID_TARGET_FAILURE);


@@ -397,6 +397,36 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool unblock = false;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->scsi_block_reqs_cnt--;
+	unblock = !hba->scsi_block_reqs_cnt;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (unblock)
+		scsi_unblock_requests(hba->host);
+}
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
+
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (!hba->scsi_block_reqs_cnt++)
+		scsi_block_requests(hba->host);
+}
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_scsi_block_requests(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {
@@ -1156,7 +1186,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 unblock_reqs:
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -1197,7 +1227,7 @@ start:
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 				hba->clk_gating.state);
@@ -1472,7 +1502,7 @@ start:
 		 * work and exit hibern8.
 		 */
 	case HIBERN8_ENTERED:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
 		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 			hba->hibern8_on_idle.state);
@@ -1635,7 +1665,7 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
 		}
 	}
 unblock_reqs:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
@@ -5115,7 +5145,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eeh_work);
 
 	pm_runtime_get_sync(hba->dev);
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	err = ufshcd_get_ee_status(hba, &status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5129,7 +5159,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		ufshcd_bkops_exception_event_handler(hba);
 
 out:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 	return;
 }
@@ -5392,7 +5422,7 @@ skip_err_handling:
 	ufshcd_clear_eh_in_progress(hba);
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release_all(hba);
 	pm_runtime_put_sync(hba->dev);
 }
@@ -5498,7 +5528,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 			/* block commands from scsi mid-layer */
-			scsi_block_requests(hba->host);
+			__ufshcd_scsi_block_requests(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 			schedule_work(&hba->eh_work);
@@ -8232,12 +8262,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
-		scsi_unblock_requests(hba->host);
+		ufshcd_scsi_unblock_requests(hba);
 	}
 
 	return ret;
@@ -8246,7 +8276,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
 	up_write(&hba->clk_scaling_lock);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**


@@ -648,6 +648,7 @@ struct ufs_stats {
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -844,6 +845,8 @@ struct ufs_hba {
 	/* If set, don't gate device ref_clk during clock gating */
 	bool no_ref_clk_gating;
+
+	int scsi_block_reqs_cnt;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -1038,6 +1041,9 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
 		int result);
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
+void ufshcd_scsi_block_requests(struct ufs_hba *hba);
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba);
+
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {