msm: mdss: release mdp_busy flag at isr

Release the mdp_busy flag at the CMD_MDP_DONE isr while doing
hs_clk_lane recovery, to mitigate the possibility of causing a
false mdp_busy timeout when system load is heavy. Also check the
CMD_MDP_DONE isr status before declaring an mdp_busy timeout.

CRs-Fixed: 823949
Change-Id: Ia5fe60c9799944b8867c262661c4cd97cffba1c7
Signed-off-by: Kuogee Hsieh <khsieh@codeaurora.org>
This commit is contained in:
Kuogee Hsieh 2015-04-21 10:02:07 -07:00 committed by David Keitel
parent ea354ea3e4
commit 67500148ea
2 changed files with 86 additions and 31 deletions

View file

@ -153,6 +153,7 @@ enum dsi_pm_type {
#define DSI_CMD_DST_FORMAT_RGB666 7 #define DSI_CMD_DST_FORMAT_RGB666 7
#define DSI_CMD_DST_FORMAT_RGB888 8 #define DSI_CMD_DST_FORMAT_RGB888 8
#define DSI_INTR_DESJEW_MASK BIT(31)
#define DSI_INTR_DYNAMIC_REFRESH_MASK BIT(29) #define DSI_INTR_DYNAMIC_REFRESH_MASK BIT(29)
#define DSI_INTR_DYNAMIC_REFRESH_DONE BIT(28) #define DSI_INTR_DYNAMIC_REFRESH_DONE BIT(28)
#define DSI_INTR_ERROR_MASK BIT(25) #define DSI_INTR_ERROR_MASK BIT(25)
@ -168,6 +169,15 @@ enum dsi_pm_type {
/* Update this if more interrupt masks are added in future chipsets */ /* Update this if more interrupt masks are added in future chipsets */
#define DSI_INTR_TOTAL_MASK 0x2222AA02 #define DSI_INTR_TOTAL_MASK 0x2222AA02
#define DSI_INTR_MASK_ALL \
(DSI_INTR_DESJEW_MASK | \
DSI_INTR_DYNAMIC_REFRESH_MASK | \
DSI_INTR_ERROR_MASK | \
DSI_INTR_BTA_DONE_MASK | \
DSI_INTR_VIDEO_DONE_MASK | \
DSI_INTR_CMD_MDP_DONE_MASK | \
DSI_INTR_CMD_DMA_DONE_MASK)
#define DSI_CMD_TRIGGER_NONE 0x0 /* mdp trigger */ #define DSI_CMD_TRIGGER_NONE 0x0 /* mdp trigger */
#define DSI_CMD_TRIGGER_TE 0x02 #define DSI_CMD_TRIGGER_TE 0x02
#define DSI_CMD_TRIGGER_SW 0x04 #define DSI_CMD_TRIGGER_SW 0x04

View file

@ -512,6 +512,9 @@ static void mdss_dsi_wait_clk_lane_to_stop(struct mdss_dsi_ctrl_pdata *ctrl)
/* clear clk lane tx stop -- bit 20 */ /* clear clk lane tx stop -- bit 20 */
mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 0); mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 0);
} }
static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl);
/* /*
* mdss_dsi_start_hs_clk_lane: * mdss_dsi_start_hs_clk_lane:
* this function is work around solution for 8994 dsi clk lane * this function is work around solution for 8994 dsi clk lane
@ -519,6 +522,10 @@ static void mdss_dsi_wait_clk_lane_to_stop(struct mdss_dsi_ctrl_pdata *ctrl)
*/ */
static void mdss_dsi_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl) static void mdss_dsi_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
{ {
/* make sure clk lane is stopped */
mdss_dsi_stop_hs_clk_lane(ctrl);
mutex_lock(&ctrl->clk_lane_mutex); mutex_lock(&ctrl->clk_lane_mutex);
mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1); mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
if (ctrl->clk_lane_cnt) { if (ctrl->clk_lane_cnt) {
@ -541,21 +548,16 @@ static void mdss_dsi_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
* this function is work around solution for 8994 dsi clk lane * this function is work around solution for 8994 dsi clk lane
* may stuck at HS problem * may stuck at HS problem
*/ */
static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl, static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
u32 term)
{ {
u32 fifo = 0; u32 fifo = 0;
u32 lane = 0; u32 lane = 0;
unsigned long flags;
mutex_lock(&ctrl->clk_lane_mutex); mutex_lock(&ctrl->clk_lane_mutex);
mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1); if (ctrl->clk_lane_cnt == 0) /* stopped already */
if (ctrl->clk_lane_cnt != 1) {
pr_err("%s: ndx=%d wait had been done, cnt=%d\n",
__func__, ctrl->ndx, ctrl->clk_lane_cnt);
goto release; goto release;
}
mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
/* fifo */ /* fifo */
if (readl_poll_timeout(((ctrl->ctrl_base) + 0x000c), if (readl_poll_timeout(((ctrl->ctrl_base) + 0x000c),
fifo, fifo,
@ -574,7 +576,6 @@ static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl,
pr_err("%s: datalane NOT stopped, lane=%x\n", pr_err("%s: datalane NOT stopped, lane=%x\n",
__func__, lane); __func__, lane);
} }
end: end:
/* stop force clk lane hs */ /* stop force clk lane hs */
mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0); mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
@ -582,14 +583,7 @@ end:
mdss_dsi_wait_clk_lane_to_stop(ctrl); mdss_dsi_wait_clk_lane_to_stop(ctrl);
ctrl->clk_lane_cnt = 0; ctrl->clk_lane_cnt = 0;
release: release:
if (term == DSI_MDP_TERM) {
spin_lock_irqsave(&ctrl->mdp_lock, flags);
ctrl->mdp_busy = false;
complete(&ctrl->mdp_comp);
spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
}
pr_debug("%s: ndx=%d, cnt=%d\n", __func__, pr_debug("%s: ndx=%d, cnt=%d\n", __func__,
ctrl->ndx, ctrl->clk_lane_cnt); ctrl->ndx, ctrl->clk_lane_cnt);
@ -623,10 +617,10 @@ static void mdss_dsi_cmd_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
mctrl = mdss_dsi_get_other_ctrl(ctrl); mctrl = mdss_dsi_get_other_ctrl(ctrl);
if (mctrl) if (mctrl)
mdss_dsi_stop_hs_clk_lane(mctrl, DSI_CMD_TERM); mdss_dsi_stop_hs_clk_lane(mctrl);
} }
mdss_dsi_stop_hs_clk_lane(ctrl, DSI_CMD_TERM); mdss_dsi_stop_hs_clk_lane(ctrl);
} }
static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl) static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
@ -1931,10 +1925,55 @@ void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl)
spin_unlock_irqrestore(&ctrl->mdp_lock, flag); spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
} }
/*
 * mdss_dsi_mdp_busy_tout_check() - distinguish a real mdp_busy timeout
 * from a missed CMD_MDP_DONE interrupt.
 *
 * Called after a wait on ctrl->mdp_comp has timed out. Reads the DSI
 * interrupt status register and, if the CMD_MDP_DONE status bit is set
 * (i.e. the transfer actually finished but the isr was never serviced,
 * or the event thread did not wake up), performs the isr's cleanup here:
 * acks the status bit, disables the MDP irq term, clears mdp_busy, and
 * optionally stops the hs clk lane.
 *
 * Return: 0 if the condition was recovered here (not a real timeout),
 *         1 if this is a genuine timeout the caller should report.
 */
static int mdss_dsi_mdp_busy_tout_check(struct mdss_dsi_ctrl_pdata *ctrl)
{
	unsigned long flag;
	u32 isr;
	bool stop_hs_clk = false;
	int tout = 1;	/* assume genuine timeout until proven otherwise */

	/*
	 * two possible scenario:
	 * 1) DSI_INTR_CMD_MDP_DONE set but isr not fired
	 * 2) DSI_INTR_CMD_MDP_DONE set and cleared (isr fired)
	 * but event_thread not wakeup
	 */
	/* clocks must be on to touch the DSI controller registers */
	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
	/* same lock the isr takes; protects mdp_busy and the ack below */
	spin_lock_irqsave(&ctrl->mdp_lock, flag);
	/*
	 * 0x0110: DSI interrupt control/status register.
	 * NOTE(review): presumably DSI_INT_CTRL -- confirm the offset is
	 * correct for all chipsets this driver supports.
	 */
	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);
	if (isr & DSI_INTR_CMD_MDP_DONE) {
		WARN(1, "INTR_CMD_MDP_DONE set but isr not fired\n");
		/*
		 * Write back only the enable bits plus the one status bit
		 * being acked, so no other pending status is clobbered
		 * (status bits appear to be write-1-to-clear).
		 */
		isr &= DSI_INTR_MASK_ALL;
		isr |= DSI_INTR_CMD_MDP_DONE; /* clear this isr only */
		MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);
		/* mirror the isr path: drop the MDP irq reference */
		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
		ctrl->mdp_busy = false;
		if (ctrl->cmd_clk_ln_recovery_en &&
			ctrl->panel_mode == DSI_CMD_MODE) {
			/*
			 * defer stopping the hs clk lane until after the
			 * spinlock is released (it sleeps on a mutex)
			 */
			stop_hs_clk = true;
		}
		tout = 0;	/* recovered */
	}
	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);

	if (stop_hs_clk)
		mdss_dsi_stop_hs_clk_lane(ctrl);

	/* wake every waiter, whether or not we recovered */
	complete_all(&ctrl->mdp_comp);

	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);

	return tout;
}
void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl) void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
{ {
unsigned long flags; unsigned long flags;
int need_wait = 0; int need_wait = 0;
int rc;
pr_debug("%s: start pid=%d\n", pr_debug("%s: start pid=%d\n",
__func__, current->pid); __func__, current->pid);
@ -1949,11 +1988,18 @@ void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
/* wait until DMA finishes the current job */ /* wait until DMA finishes the current job */
pr_debug("%s: pending pid=%d\n", pr_debug("%s: pending pid=%d\n",
__func__, current->pid); __func__, current->pid);
if (!wait_for_completion_timeout(&ctrl->mdp_comp, rc = wait_for_completion_timeout(&ctrl->mdp_comp,
msecs_to_jiffies(DMA_TX_TIMEOUT))) { msecs_to_jiffies(DMA_TX_TIMEOUT));
spin_lock_irqsave(&ctrl->mdp_lock, flags);
if (!ctrl->mdp_busy)
rc = 1;
spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
if (!rc) {
if (mdss_dsi_mdp_busy_tout_check(ctrl)) {
pr_err("%s: timeout error\n", __func__); pr_err("%s: timeout error\n", __func__);
MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy", MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
"dsi1_ctrl", "dsi1_phy", "panic"); "dsi0_phy", "dsi1_ctrl", "dsi1_phy", "panic");
}
} }
} }
pr_debug("%s: done pid=%d\n", __func__, current->pid); pr_debug("%s: done pid=%d\n", __func__, current->pid);
@ -2037,10 +2083,8 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
/* /*
* when partial update enabled, the roi of pinfo * when partial update enabled, the roi of pinfo
* is updated before mdp kickoff. Either width or * is updated before mdp kickoff. Either width or
* height of roi is 0, then it is false kickoff so * height of roi is non zero, then really kickoff
* no mdp_busy flag set needed. * will followed.
* when partial update disabled, mdp_busy flag
* alway set.
*/ */
if (!roi || (roi->w != 0 || roi->h != 0)) { if (!roi || (roi->w != 0 || roi->h != 0)) {
if (ctrl->cmd_clk_ln_recovery_en && if (ctrl->cmd_clk_ln_recovery_en &&
@ -2261,7 +2305,7 @@ static int dsi_event_thread(void *data)
} }
if (todo & DSI_EV_STOP_HS_CLK_LANE) if (todo & DSI_EV_STOP_HS_CLK_LANE)
mdss_dsi_stop_hs_clk_lane(ctrl, arg); mdss_dsi_stop_hs_clk_lane(ctrl);
} }
return 0; return 0;
@ -2469,12 +2513,13 @@ irqreturn_t mdss_dsi_isr(int irq, void *ptr)
mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM); mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
if (ctrl->cmd_clk_ln_recovery_en && if (ctrl->cmd_clk_ln_recovery_en &&
ctrl->panel_mode == DSI_CMD_MODE) { ctrl->panel_mode == DSI_CMD_MODE) {
/* stop force clk lane hs */
mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
dsi_send_events(ctrl, DSI_EV_STOP_HS_CLK_LANE, dsi_send_events(ctrl, DSI_EV_STOP_HS_CLK_LANE,
DSI_MDP_TERM); DSI_MDP_TERM);
} else {
ctrl->mdp_busy = false;
complete(&ctrl->mdp_comp);
} }
ctrl->mdp_busy = false;
complete_all(&ctrl->mdp_comp);
spin_unlock(&ctrl->mdp_lock); spin_unlock(&ctrl->mdp_lock);
} }