scsi: ufs: convert statistics macros to functions

Kernel coding style requires using actual functions rather than
function-like or lengthy macros. The statistics macros fall into this
category, so replace them with functions.

Change-Id: I464768ccb7ba5bba4149dc45adacd777c08a2413
Signed-off-by: Gilad Broner <gbroner@codeaurora.org>

Author:    Gilad Broner <gbroner@codeaurora.org>
Date:      2015-03-02 10:15:05 +02:00
Committer: David Keitel
Parent:    1478bf7fa3
Commit:    cd64baf8a1

@@ -69,34 +69,39 @@ static int ufshcd_tag_req_type(struct request *rq)
 	return rq_type;
 }
 
-#define UFSHCD_UPDATE_ERROR_STATS(hba, type)	\
-	do {	\
-		if (type < UFS_ERR_MAX)	\
-			hba->ufs_stats.err_stats[type]++;	\
-	} while (0)
+static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+	if (type < UFS_ERR_MAX)
+		hba->ufs_stats.err_stats[type]++;
+}
 
-#define UFSHCD_UPDATE_TAG_STATS(hba, tag)	\
-	do {	\
-		struct request *rq = hba->lrb[task_tag].cmd ?	\
-			hba->lrb[task_tag].cmd->request : NULL;	\
-		u64 **tag_stats = hba->ufs_stats.tag_stats;	\
-		int rq_type;	\
-		if (!hba->ufs_stats.enabled)	\
-			break;	\
-		tag_stats[tag][TS_TAG]++;	\
-		if (!rq || !(rq->cmd_type & REQ_TYPE_FS))	\
-			break;	\
-		WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);	\
-		rq_type = ufshcd_tag_req_type(rq);	\
-		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;	\
-	} while (0)
+static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+	struct request *rq =
+		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
+	u64 **tag_stats = hba->ufs_stats.tag_stats;
+	int rq_type;
+
+	if (!hba->ufs_stats.enabled)
+		return;
+
+	tag_stats[tag][TS_TAG]++;
+	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+		return;
+
+	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
+	rq_type = ufshcd_tag_req_type(rq);
+	tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
+}
 
-#define UFSHCD_UPDATE_TAG_STATS_COMPLETION(hba, cmd)	\
-	do {	\
-		struct request *rq = cmd ? cmd->request : NULL;	\
-		if (rq && rq->cmd_type & REQ_TYPE_FS)	\
-			hba->ufs_stats.q_depth--;	\
-	} while (0)
+static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd ? cmd->request : NULL;
+
+	if (rq && rq->cmd_type & REQ_TYPE_FS)
+		hba->ufs_stats.q_depth--;
+}
 
 static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
@@ -131,15 +136,23 @@ static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 }
 
 #else
-#define UFSHCD_UPDATE_TAG_STATS(hba, tag)
-#define UFSHCD_UPDATE_TAG_STATS_COMPLETION(hba, cmd)
-#define UFSDBG_ADD_DEBUGFS(hba)
-#define UFSDBG_REMOVE_DEBUGFS(hba)
-#define UFSHCD_UPDATE_ERROR_STATS(hba, type)
+static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+}
+
+static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+}
+
+static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+}
 
 static inline
 void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{}
+{
+}
 #endif
 
 #define UFSHCD_REQ_SENSE_SIZE	18
@@ -2023,7 +2036,7 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 	/* Make sure that doorbell is committed immediately */
 	wmb();
 	ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
-	UFSHCD_UPDATE_TAG_STATS(hba, task_tag);
+	ufshcd_update_tag_stats(hba, task_tag);
 
 	return ret;
 }
@@ -3910,7 +3923,7 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 	if (ret) {
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_HIBERN8_ENTER);
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 			__func__, ret);
 		/*
@@ -3952,7 +3965,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 	if (ret) {
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_HIBERN8_EXIT);
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
 		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
@@ -4097,7 +4110,7 @@ int ufshcd_change_power_mode(struct ufs_hba *hba,
 			| pwr_mode->pwr_tx);
 
 	if (ret) {
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_POWER_MODE_CHANGE);
+		ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
 		dev_err(hba->dev,
 			"%s: power mode change failed %d\n", __func__, ret);
 	} else {
@@ -4384,11 +4397,11 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 	ret = ufshcd_dme_link_startup(hba);
 	if (ret)
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_LINKSTARTUP);
+		ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 
 	/* check if device is detected by inter-connect layer */
 	if (!ret && !ufshcd_is_device_present(hba)) {
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_LINKSTARTUP);
+		ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 		dev_err(hba->dev, "%s: Device not present\n", __func__);
 		ret = -ENXIO;
 		goto out;
@@ -4869,7 +4882,7 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 		cmd = lrbp->cmd;
 		if (cmd) {
 			ufshcd_cond_add_cmd_trace(hba, index, "failed");
-			UFSHCD_UPDATE_ERROR_STATS(hba,
+			ufshcd_update_error_stats(hba,
 				UFS_ERR_INT_FATAL_ERRORS);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
@@ -4913,7 +4926,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 		cmd = lrbp->cmd;
 		if (cmd) {
 			ufshcd_cond_add_cmd_trace(hba, index, "complete");
-			UFSHCD_UPDATE_TAG_STATS_COMPLETION(hba, cmd);
+			ufshcd_update_tag_stats_completion(hba, cmd);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
@@ -5443,15 +5456,15 @@ skip_pending_xfer_clear:
 		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 
 		if (hba->saved_err & INT_FATAL_ERRORS || crypto_engine_err)
-			UFSHCD_UPDATE_ERROR_STATS(hba,
+			ufshcd_update_error_stats(hba,
 				UFS_ERR_INT_FATAL_ERRORS);
 
 		if (hba->saved_err & UIC_ERROR)
-			UFSHCD_UPDATE_ERROR_STATS(hba,
+			ufshcd_update_error_stats(hba,
 				UFS_ERR_INT_UIC_ERROR);
 
 		if (err_xfer || err_tm)
-			UFSHCD_UPDATE_ERROR_STATS(hba,
+			ufshcd_update_error_stats(hba,
 				UFS_ERR_CLEAR_PEND_XFER_TM);
 
 		/*
@@ -5905,7 +5918,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	lrbp = &hba->lrb[tag];
 
-	UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_TASK_ABORT);
+	ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
 
 	/*
 	 * Task abort to the device W-LUN is illegal. When this command
@@ -6164,7 +6177,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 	ufshcd_set_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_EH);
+	ufshcd_update_error_stats(hba, UFS_ERR_EH);
 	err = ufshcd_reset_and_restore(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7698,7 +7711,7 @@ set_link_active:
 	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
 		ufshcd_set_link_active(hba);
 	} else if (ufshcd_is_link_off(hba)) {
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_VOPS_SUSPEND);
+		ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
 		if (!ufshcd_host_reset_and_restore(hba))
 			/* Clear UNIT ATTENTION condition on all LUs */
 			ufshcd_send_request_sense_all_lus(hba);
@@ -7718,7 +7731,7 @@ out:
 	hba->pm_op_in_progress = 0;
 
 	if (ret)
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_SUSPEND);
+		ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
 
 	return ret;
 }
@@ -7836,7 +7849,7 @@ out:
 	hba->pm_op_in_progress = 0;
 
 	if (ret)
-		UFSHCD_UPDATE_ERROR_STATS(hba, UFS_ERR_RESUME);
+		ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
 
 	return ret;
 }
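
For readers skimming the hunks above: the change follows the usual macro-to-function conversion pattern. The short standalone sketch below (plain userspace C with hypothetical names such as UPDATE_STAT_MACRO and update_stat, not the driver's code) illustrates why a static function is generally preferable to a function-like do-while(0) macro: the function gets real type checking and evaluates its argument exactly once, whereas the macro re-evaluates it on each use.

/* Minimal illustration (hypothetical, not driver code): macro vs. function. */
#include <stdio.h>

#define UPDATE_STAT_MACRO(stats, idx)		\
	do {					\
		if ((idx) < 8)			\
			(stats)[(idx)]++;	\
	} while (0)

/* Function form: typed parameters, argument evaluated exactly once. */
static void update_stat(unsigned long *stats, int idx)
{
	if (idx < 8)
		stats[idx]++;
}

int main(void)
{
	unsigned long stats[8] = { 0 };
	int i = 0;

	UPDATE_STAT_MACRO(stats, i++);	/* idx expands twice: i ends up at 2 */
	update_stat(stats, i++);	/* evaluated once: i ends up at 3 */
	printf("i = %d, stats[0] = %lu, stats[1] = %lu, stats[2] = %lu\n",
	       i, stats[0], stats[1], stats[2]);
	return 0;
}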