drm/msm/sde: remove redundant CRTC event caching

Currently both sde_crtc_atomic_begin() and
sde_crtc_atomic_flush() add the CRTC state event
to the cached sde_crtc->event.

This has a potential NULL pointer issue when a vblank event fires
between sde_crtc_atomic_begin() and sde_crtc_atomic_flush(), because
the upstream DRM vblank API send_vblank_event() does not account for
the case where the VBLANK interrupt has already freed any pending
vblank events.

Remove the caching from sde_crtc_atomic_begin() to avoid this
condition.

Also make sure that a page_flip event was indeed submitted before
signaling the complete_flip() by setting a PENDING_FLIP flag right
after HW flush.

Change-Id: Ib201d2851e57bf22ec1f00814fc2e4dd2f35bfa1
Signed-off-by: Abhinav Kumar <abhinavk@codeaurora.org>
Signed-off-by: Yunyun Cao <yunyunc@codeaurora.org>
This commit is contained in:
Abhinav Kumar 2017-03-24 21:54:23 -07:00 committed by Yunyun Cao
parent e206db34cb
commit 13b76dbaeb
4 changed files with 76 additions and 13 deletions

View file

@ -51,6 +51,9 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
/* indicates pending page flip events */
#define PENDING_FLIP 0x2
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@ -399,13 +402,18 @@ static void sde_crtc_vblank_cb(void *data)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
unsigned pending;
pending = atomic_xchg(&sde_crtc->pending, 0);
/* keep statistics on vblank callback - with auto reset via debugfs */
if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
sde_crtc->vblank_cb_time = ktime_get();
else
sde_crtc->vblank_cb_count++;
_sde_crtc_complete_flip(crtc, NULL);
if (pending & PENDING_FLIP)
_sde_crtc_complete_flip(crtc, NULL);
drm_crtc_handle_vblank(crtc);
DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
SDE_EVT32_IRQ(DRMID(crtc));
@ -519,6 +527,28 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
}
/**
* sde_crtc_request_flip_cb - callback to request page_flip events
* Once the HW flush is complete , userspace must be notified of
* PAGE_FLIP completed event in the next vblank event.
* Using this callback, a hint is set to signal any callers waiting
* for a PAGE_FLIP complete event.
* This is called within the enc_spinlock.
* @data: Pointer to cb private data
*/
static void sde_crtc_request_flip_cb(void *data)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct sde_crtc *sde_crtc;
if (!crtc) {
SDE_ERROR("invalid parameters\n");
return;
}
sde_crtc = to_sde_crtc(crtc);
atomic_or(PENDING_FLIP, &sde_crtc->pending);
}
void sde_crtc_complete_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@ -679,7 +709,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct drm_device *dev;
unsigned long flags;
u32 i;
if (!crtc) {
@ -701,14 +730,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
if (!sde_crtc->num_mixers)
_sde_crtc_setup_mixers(crtc);
if (sde_crtc->event) {
WARN_ON(sde_crtc->event);
} else {
spin_lock_irqsave(&dev->event_lock, flags);
sde_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/* Reset flush mask from previous commit */
for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
@ -763,7 +784,8 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
dev = crtc->dev;
if (sde_crtc->event) {
SDE_DEBUG("already received sde_crtc->event\n");
SDE_ERROR("%s already received sde_crtc->event\n",
sde_crtc->name);
} else {
spin_lock_irqsave(&dev->event_lock, flags);
sde_crtc->event = crtc->state->event;
@ -999,6 +1021,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
if (encoder->crtc != crtc)
continue;
sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
sde_encoder_register_request_flip_callback(encoder, NULL, NULL);
}
memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
@ -1039,6 +1062,8 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
continue;
sde_encoder_register_frame_event_callback(encoder,
sde_crtc_frame_event_cb, (void *)crtc);
sde_encoder_register_request_flip_callback(encoder,
sde_crtc_request_flip_cb, (void *)crtc);
}
for (i = 0; i < sde_crtc->num_mixers; i++) {

View file

@ -89,6 +89,7 @@ struct sde_crtc_frame_event {
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
* @pending : Whether any page-flip events are pending signal
* @spin_lock : spin lock for frame event, transaction status, etc...
*/
struct sde_crtc {
@ -109,7 +110,7 @@ struct sde_crtc {
/* output fence support */
struct sde_fence output_fence;
atomic_t pending;
struct sde_hw_stage_cfg stage_cfg;
struct dentry *debugfs_root;

View file

@ -83,6 +83,8 @@
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @crtc_request_flip_cb: callback handler for requesting page-flip event
* @crtc_request_flip_cb_data: callback handler private data
* @crtc_frame_event: callback event
* @frame_done_timeout: frame done timeout in Hz
* @frame_done_timer: watchdog timer for frame done event
@ -107,8 +109,9 @@ struct sde_encoder_virt {
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
void (*crtc_request_flip_cb)(void *);
void *crtc_request_flip_cb_data;
u32 crtc_frame_event;
atomic_t frame_done_timeout;
struct timer_list frame_done_timer;
};
@ -584,6 +587,24 @@ void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
}
/**
 * sde_encoder_register_request_flip_callback - register the CRTC callback
 *	invoked after HW flush to request a page-flip event.
 * @drm_enc: encoder pointer
 * @request_flip_cb: callback pointer, provide NULL to deregister
 * @request_flip_cb_data: private data handed back to the callback
 */
void sde_encoder_register_request_flip_callback(struct drm_encoder *drm_enc,
		void (*request_flip_cb)(void *),
		void *request_flip_cb_data)
{
	struct sde_encoder_virt *sde_enc;
	unsigned long lock_flags;

	if (!drm_enc) {
		SDE_ERROR("invalid encoder\n");
		return;
	}

	/* only derive the virtual encoder after validating drm_enc; the
	 * original computed container_of() on a possibly-NULL pointer.
	 */
	sde_enc = to_sde_encoder_virt(drm_enc);

	/* update both fields atomically w.r.t. the kickoff path */
	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
	sde_enc->crtc_request_flip_cb = request_flip_cb;
	sde_enc->crtc_request_flip_cb_data = request_flip_cb_data;
	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
}
static void sde_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct sde_encoder_phys *ready_phys, u32 event)
@ -759,6 +780,11 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
pending_flush);
}
/* HW flush has happened, request a flip complete event now */
if (sde_enc->crtc_request_flip_cb)
sde_enc->crtc_request_flip_cb(
sde_enc->crtc_request_flip_cb_data);
_sde_encoder_trigger_start(sde_enc->cur_master);
spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);

View file

@ -71,6 +71,17 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
void (*cb)(void *, u32), void *data);
/**
* sde_encoder_register_request_flip_callback - provide callback to encoder that
* will be called after HW flush is complete to request
* a page flip event from CRTC.
* @encoder: encoder pointer
* @cb: callback pointer, provide NULL to deregister
* @data: user data provided to callback
*/
void sde_encoder_register_request_flip_callback(struct drm_encoder *encoder,
void (*cb)(void *), void *data);
/**
* sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
* path (i.e. ctl flush and start) at next appropriate time.