Revert "drm/msm/sde: add resource manager to enable dual dsi"

This reverts commit e14b3005bd ("drm/msm/sde: add
resource manager to enable dual dsi").

The reverted change was a partial display DRM driver change
that would break the drm/sde merge commit.

Change-Id: I2d4f915aa5d3382ce22aa1b4d6e02183ab2f7c5c
Signed-off-by: Narendra Muppalla <NarendraM@codeaurora.org>
Narendra Muppalla 2017-01-25 11:23:56 -08:00
parent 314869eb56
commit 6aeb68e3f4
9 changed files with 147 additions and 655 deletions

drivers/gpu/drm/msm/Makefile

@@ -43,7 +43,6 @@ msm-y := \
sde/sde_encoder_phys_vid.o \
sde/sde_encoder_phys_cmd.o \
sde/sde_irq.o \
sde/sde_kms_utils.o \
sde/sde_kms.o \
sde/sde_plane.o \
msm_atomic.o \

drivers/gpu/drm/msm/sde/sde_crtc.c

@@ -19,11 +19,40 @@
#include "sde_kms.h"
#include "sde_hw_lm.h"
#include "sde_hw_mdp_ctl.h"
#include "sde_crtc.h"
#define CTL(i) (CTL_0 + (i)) #define CRTC_DUAL_MIXERS 2
#define LM(i) (LM_0 + (i)) #define PENDING_FLIP 2
#define INTF(i) (INTF_0 + (i))
#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages)
struct sde_crtc_mixer {
struct sde_hw_dspp *hw_dspp;
struct sde_hw_mixer *hw_lm;
struct sde_hw_ctl *hw_ctl;
u32 flush_mask;
};
struct sde_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
struct drm_encoder *encoder;
int id;
bool enabled;
spinlock_t lm_lock; /* protect registers */
/* HW Resources reserved for the crtc */
u32 num_ctls;
u32 num_mixers;
struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS];
/*if there is a pending flip, these will be non-null */
struct drm_pending_vblank_event *event;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
static struct sde_kms *get_kms(struct drm_crtc *crtc)
{
@@ -31,91 +60,89 @@ static struct sde_kms *get_kms(struct drm_crtc *crtc)
return to_sde_kms(priv->kms);
}
static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx,
void __iomem *addr,
struct sde_mdss_cfg *m)
{
/*
* This module keeps track of the requested hw resources state,
* if the requested resource is being used it returns NULL,
* otherwise it returns the hw driver struct
*/
return sde_hw_ctl_init(idx, addr, m);
}
static inline struct sde_hw_mixer *sde_crtc_rm_get_mixer(enum sde_lm idx,
void __iomem *addr,
struct sde_mdss_cfg *m)
{
/*
* This module keeps track of the requested hw resources state,
* if the requested resource is being used it returns NULL,
* otherwise it returns the hw driver struct
*/
return sde_hw_lm_init(idx, addr, m);
}
static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc,
struct drm_encoder *encoder)
{
/*
* Assign CRTC resources
* num_ctls;
* num_mixers;
* sde_lm mixer[CRTC_MAX_PIPES];
* sde_ctl ctl[CRTC_MAX_PIPES];
*/
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_kms *sde_kms = get_kms(crtc); struct sde_kms *kms = get_kms(crtc);
struct sde_encoder_hw_resources enc_hw_res; enum sde_lm lm_id[CRTC_DUAL_MIXERS];
const struct sde_hw_res_map *plat_hw_res_map; enum sde_ctl ctl_id[CRTC_DUAL_MIXERS];
enum sde_lm unused_lm_id[CRTC_DUAL_MIXERS] = {0}; int i;
enum sde_lm lm_idx;
int i, count = 0;
if (!sde_kms) { if (!kms) {
DBG("[%s] invalid kms", __func__); DBG("[%s] invalid kms\n", __func__);
return -EINVAL;
}
if (!sde_kms->mmio) if (!kms->mmio)
return -EINVAL;
/* Get unused LMs */ /*
for (i = 0; i < sde_kms->catalog->mixer_count; i++) { * simple check validate against catalog
if (!sde_rm_get_mixer(sde_kms, LM(i))) { */
unused_lm_id[count++] = LM(i); sde_crtc->num_ctls = 1;
if (count == CRTC_DUAL_MIXERS) sde_crtc->num_mixers = 1;
break; ctl_id[0] = CTL_0;
} lm_id[0] = LM_0;
}
/* query encoder resources */ /*
sde_encoder_get_hw_resources(sde_crtc->encoder, &enc_hw_res); * need to also enable MDP core clock and AHB CLK
* before touching HW driver
/* parse encoder hw resources, find CTL paths */ */
for (i = CTL_0; i <= sde_kms->catalog->ctl_count; i++) { DBG("%s Enable clocks\n", __func__);
WARN_ON(sde_crtc->num_ctls > CRTC_DUAL_MIXERS); sde_enable(kms);
if (enc_hw_res.ctls[i]) { for (i = 0; i < sde_crtc->num_ctls; i++) {
struct sde_crtc_mixer *mixer = sde_crtc->mixer[i].hw_ctl = sde_crtc_rm_get_ctl_path(ctl_id[i],
&sde_crtc->mixer[sde_crtc->num_ctls]; kms->mmio, kms->catalog);
mixer->hw_ctl = sde_rm_get_ctl_path(sde_kms, i); if (!sde_crtc->mixer[i].hw_ctl) {
if (IS_ERR_OR_NULL(mixer->hw_ctl)) {
DBG("[%s], Invalid ctl_path", __func__); DBG("[%s], Invalid ctl_path", __func__);
return -EACCES; return -EACCES;
} }
sde_crtc->num_ctls++;
}
}
/* shortcut this process if encoder has no ctl paths */ for (i = 0; i < sde_crtc->num_mixers; i++) {
if (!sde_crtc->num_ctls) sde_crtc->mixer[i].hw_lm = sde_crtc_rm_get_mixer(lm_id[i],
return 0; kms->mmio, kms->catalog);
if (!sde_crtc->mixer[i].hw_lm) {
/* DBG("[%s], Invalid ctl_path", __func__);
* Get default LMs if specified in platform config
* other wise acquire the free LMs
*/
for (i = INTF_0; i <= sde_kms->catalog->intf_count; i++) {
if (enc_hw_res.intfs[i]) {
struct sde_crtc_mixer *mixer =
&sde_crtc->mixer[sde_crtc->num_mixers];
plat_hw_res_map = sde_rm_get_res_map(sde_kms, i);
lm_idx = plat_hw_res_map->lm;
if (!lm_idx)
lm_idx = unused_lm_id[sde_crtc->num_mixers];
DBG("Acquiring LM %d", lm_idx);
mixer->hw_lm = sde_rm_acquire_mixer(sde_kms, lm_idx);
if (IS_ERR_OR_NULL(mixer->hw_lm)) {
DBG("[%s], Invalid mixer", __func__);
return -EACCES;
}
/* interface info */
mixer->intf_idx = i;
mixer->mode = enc_hw_res.intfs[i];
sde_crtc->num_mixers++;
}
} /*
* need to disable MDP core clock and AHB CLK
DBG("control paths %d, num_mixers %d, lm[0] %d, ctl[0] %d ", */
sde_crtc->num_ctls, sde_crtc->num_mixers, sde_disable(kms);
sde_crtc->mixer[0].hw_lm->idx,
sde_crtc->mixer[0].hw_ctl->idx);
if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS)
DBG("lm[1] %d, ctl[1], %d",
sde_crtc->mixer[1].hw_lm->idx,
sde_crtc->mixer[1].hw_ctl->idx);
return 0;
}
@@ -251,7 +278,6 @@ static void blend_setup(struct drm_crtc *crtc)
unsigned long flags;
int i, j, plane_cnt = 0;
DBG("");
spin_lock_irqsave(&sde_crtc->lm_lock, flags);
/* ctl could be reserved already */
@@ -327,104 +353,10 @@ out:
spin_unlock_irqrestore(&sde_crtc->lm_lock, flags);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = sde_crtc->event;
if (event) {
/* if regular vblank case (!file) or if cancel-flip from
* preclose on file that requested flip, then send the
* event:
*/
if (!file || (event->base.file_priv == file)) {
sde_crtc->event = NULL;
DBG("%s: send event: %pK", sde_crtc->name, event);
drm_send_vblank_event(dev, sde_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void sde_crtc_vblank_cb(void *data)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
unsigned pending;
/* unregister callback */
sde_encoder_register_vblank_callback(sde_crtc->encoder, NULL, NULL);
pending = atomic_xchg(&sde_crtc->pending, 0);
if (pending & PENDING_FLIP)
complete_flip(crtc, NULL);
}
static int frame_flushed(struct sde_crtc *sde_crtc)
{
struct vsync_info vsync;
/* encoder get vsync_info */
/* if frame_count does not match frame is flushed */
sde_encoder_get_vsync_info(sde_crtc->encoder, &vsync);
return (vsync.frame_count & sde_crtc->vsync_count);
}
void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
u32 pending;
int i, ret;
/* ref count the vblank event */
ret = drm_crtc_vblank_get(crtc);
if (ret)
return;
/* register callback */
sde_encoder_register_vblank_callback(sde_crtc->encoder,
sde_crtc_vblank_cb,
(void *)crtc);
/* wait */
pending = atomic_read(&sde_crtc->pending);
if (pending & PENDING_FLIP) {
wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
(frame_flushed(sde_crtc) != 0),
msecs_to_jiffies(CRTC_MAX_WAIT_ONE_FRAME));
if (ret <= 0)
dev_warn(dev->dev, "vblank time out, crtc=%d\n",
sde_crtc->id);
}
for (i = 0; i < sde_crtc->num_ctls; i++)
sde_crtc->mixer[i].flush_mask = 0;
/* release */
drm_crtc_vblank_put(crtc);
}
static void request_pending(struct drm_crtc *crtc, u32 pending)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc); DBG("");
struct vsync_info vsync;
/* request vsync info, cache the current frame count */
sde_encoder_get_vsync_info(sde_crtc->encoder, &vsync);
sde_crtc->vsync_count = vsync.frame_count;
atomic_or(pending, &sde_crtc->pending);
}
/**
 * Flush the CTL PATH
 */
@@ -437,12 +369,14 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
DBG("");
for (i = 0; i < sde_crtc->num_ctls; i++) {
/*
* Query flush_mask from encoder
* and append to the ctl_path flush_mask
*/
ctl = sde_crtc->mixer[i].hw_ctl;
ctl->ops.get_bitmask_intf(ctl,
&(sde_crtc->mixer[i].flush_mask),
sde_crtc->mixer[i].intf_idx); INTF_1);
DBG("Flushing CTL_ID %d, flush_mask %x", ctl->idx,
sde_crtc->mixer[i].flush_mask);
ctl->ops.setup_flush(ctl,
sde_crtc->mixer[i].flush_mask);
}
@@ -491,7 +425,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: event: %pK", sde_crtc->name, crtc->state->event); DBG("");
WARN_ON(sde_crtc->event);
@@ -671,6 +605,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
DBG("%s: Successfully initialized crtc", __func__); DBG("%s: Successfully initialized crtc\n", __func__);
return crtc;
}

drivers/gpu/drm/msm/sde/sde_crtc.h

@@ -1,79 +0,0 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SDE_CRTC_H_
#define _SDE_CRTC_H_
#include "drm_crtc.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define CRTC_DUAL_MIXERS 2
#define PENDING_FLIP 2
/* worst case one frame wait time based on 30 FPS : 33.33ms*/
#define CRTC_MAX_WAIT_ONE_FRAME 34
#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages)
/**
* struct sde_crtc_mixer - stores the map for each virtual pipeline in the CRTC
* @hw_dspp : DSPP HW Driver context
* @hw_lm : LM HW Driver context
* @hw_ctl : CTL Path HW driver context
* @intf_idx : Interface idx
* @mode : Interface mode Active/CMD
* @flush_mask : Flush mask value for this commit
*/
struct sde_crtc_mixer {
struct sde_hw_dspp *hw_dspp;
struct sde_hw_mixer *hw_lm;
struct sde_hw_ctl *hw_ctl;
enum sde_intf intf_idx;
enum sde_intf_mode mode;
u32 flush_mask;
};
/**
* struct sde_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
* @encoder : Associated drm encoder object
* @id : Unique crtc identifier
* @lm_lock : LM register access spinlock
* @num_ctls : Number of ctl paths in use
* @num_mixers : Number of mixers in use
* @mixer : List of active mixers
* @event : Pointer to last received drm vblank event
* @pending : Whether or not an update is pending
* @vsync_count : Running count of received vsync events
*/
struct sde_crtc {
struct drm_crtc base;
char name[8];
struct drm_encoder *encoder;
int id;
spinlock_t lm_lock; /* protect registers */
/* HW Resources reserved for the crtc */
u32 num_ctls;
u32 num_mixers;
struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS];
/*if there is a pending flip, these will be non-null */
struct drm_pending_vblank_event *event;
atomic_t pending;
u32 vsync_count;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
#endif /* _SDE_CRTC_H_ */

drivers/gpu/drm/msm/sde/sde_encoder.c

@@ -201,7 +201,6 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
bool splitmode = false;
DBG(""); DBG("");
@@ -212,23 +211,11 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
sde_enc = to_sde_encoder_virt(drm_enc);
/*
* Panel is driven by two interfaces ,each interface drives half of
* the horizontal
*/
if (sde_enc->num_phys_encs == 2)
splitmode = true;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys) {
phys->phys_ops.mode_set(phys, if (phys && phys->phys_ops.mode_set)
mode, phys->phys_ops.mode_set(phys, mode, adjusted_mode);
adjusted_mode,
splitmode);
if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0)
DRM_ERROR("adjusted modes not supported\n");
}
}
}
@@ -236,7 +223,6 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
bool splitmode = false;
DBG(""); DBG("");
@@ -249,19 +235,10 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
bs_set(sde_enc, 1);
if (sde_enc->num_phys_encs == 2)
splitmode = true;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.enable)
/* enable/disable dual interface top config */
if (phys->phys_ops.enable_split_config)
phys->phys_ops.enable_split_config(phys,
splitmode);
phys->phys_ops.enable(phys);
}
}
@@ -403,11 +380,13 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
* h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
* h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
*/
const struct sde_hw_res_map *hw_res_map = NULL;
enum sde_intf intf_idx = INTF_MAX;
enum sde_ctl ctl_idx = CTL_MAX; enum sde_ctl ctl_idx = CTL_0;
u32 controller_id = disp_info->h_tile_instance[i];
if (intf_type == INTF_HDMI)
ctl_idx = CTL_2;
DBG("h_tile_instance %d = %d", i, controller_id); DBG("h_tile_instance %d = %d", i, controller_id);
intf_idx = sde_encoder_get_intf(sde_kms->catalog, intf_idx = sde_encoder_get_intf(sde_kms->catalog,
@@ -417,12 +396,6 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
ret = -EINVAL;
}
hw_res_map = sde_rm_get_res_map(sde_kms, intf_idx);
if (IS_ERR_OR_NULL(hw_res_map))
ret = -EINVAL;
else
ctl_idx = hw_res_map->ctl;
/* Create both VID and CMD Phys Encoders here */
if (!ret)
ret = sde_encoder_virt_add_phys_vid_enc(
@@ -488,25 +461,6 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
}
void sde_encoder_get_vsync_info(struct drm_encoder *drm_enc,
struct vsync_info *vsync)
{
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
struct sde_encoder_phys *phys;
DBG("");
if (!vsync) {
DRM_ERROR("Invalid pointer");
return;
}
/* we get the vsync info from the intf at index 0: master index */
phys = sde_enc->phys_encs[0];
if (phys)
phys->phys_ops.get_vsync_info(phys, vsync);
}
/* encoders init,
 * initialize encoder based on displays
 */

drivers/gpu/drm/msm/sde/sde_encoder_phys.h

@@ -30,8 +30,7 @@ struct sde_encoder_virt_ops {
struct sde_encoder_phys_ops {
void (*mode_set)(struct sde_encoder_phys *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, struct drm_display_mode *adjusted_mode);
bool splitmode);
bool (*mode_fixup)(struct sde_encoder_phys *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -40,10 +39,6 @@ struct sde_encoder_phys_ops {
void (*destroy)(struct sde_encoder_phys *encoder);
void (*get_hw_resources)(struct sde_encoder_phys *encoder,
struct sde_encoder_hw_resources *hw_res);
void (*get_vsync_info)(struct sde_encoder_phys *enc,
struct vsync_info *vsync);
void (*enable_split_config)(struct sde_encoder_phys *enc,
bool enable);
};
struct sde_encoder_phys {

drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c

@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. /*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -8,6 +9,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_drv.h"
@@ -17,7 +19,6 @@
#include "sde_encoder_phys.h"
#include "sde_mdp_formats.h"
#include "sde_hw_mdp_top.h"
#define VBLANK_TIMEOUT msecs_to_jiffies(100)
@@ -231,26 +232,14 @@ static void sde_encoder_phys_vid_flush_intf(struct sde_encoder_phys *phys_enc)
ctl->idx, flush_mask, intf->idx);
}
static void sde_encoder_phys_vid_mode_set(struct sde_encoder_phys *phys_enc, static void sde_encoder_phys_vid_mode_set(
struct sde_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode struct drm_display_mode *adj_mode)
*adjusted_mode,
bool splitmode)
{
mode = adjusted_mode; phys_enc->cached_mode = *adj_mode;
phys_enc->cached_mode = *adjusted_mode; DBG("intf %d, caching mode:", phys_enc->hw_intf->idx);
if (splitmode) { drm_mode_debug_printmodeline(adj_mode);
phys_enc->cached_mode.hdisplay >>= 1;
phys_enc->cached_mode.htotal >>= 1;
phys_enc->cached_mode.hsync_start >>= 1;
phys_enc->cached_mode.hsync_end >>= 1;
}
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
}
static void sde_encoder_phys_vid_setup_timing_engine(
@@ -439,57 +428,8 @@ static void sde_encoder_phys_vid_get_hw_resources(
struct sde_encoder_phys *phys_enc,
struct sde_encoder_hw_resources *hw_res)
{
struct msm_drm_private *priv = phys_enc->parent->dev->dev_private;
struct sde_kms *sde_kms = to_sde_kms(priv->kms);
const struct sde_hw_res_map *hw_res_map;
DBG("Intf %d\n", phys_enc->hw_intf->idx);
hw_res->intfs[phys_enc->hw_intf->idx] = INTF_MODE_VIDEO;
/*
* defaults should not be in use,
* otherwise signal/return failure
*/
hw_res_map = sde_rm_get_res_map(sde_kms, phys_enc->hw_intf->idx);
/* This is video mode panel so PINGPONG will be in by-pass mode
* only assign ctl path.For cmd panel check if pp_split is
* enabled, override default map
*/
hw_res->ctls[hw_res_map->ctl] = true;
}
/**
* video mode will use the intf (get_status)
* cmd mode will use the pingpong (get_vsync_info)
* to get this information
*/
static void sde_encoder_intf_get_vsync_info(struct sde_encoder_phys *phys_enc,
struct vsync_info *vsync)
{
struct intf_status status;
DBG(""); DBG("");
phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &status); hw_res->intfs[phys_enc->hw_intf->idx] = true;
vsync->frame_count = status.frame_count;
vsync->line_count = status.line_count;
DBG(" sde_encoder_intf_get_vsync_info, count %d", vsync->frame_count);
}
static void sde_encoder_intf_split_config(struct sde_encoder_phys *phys_enc,
bool enable)
{
struct msm_drm_private *priv = phys_enc->parent->dev->dev_private;
struct sde_kms *sde_kms = to_sde_kms(priv->kms);
struct sde_hw_mdp *mdp = sde_hw_mdptop_init(MDP_TOP, sde_kms->mmio,
sde_kms->catalog);
struct split_pipe_cfg cfg;
DBG("%p", mdp);
cfg.en = true;
cfg.mode = INTF_MODE_VIDEO;
if (!IS_ERR_OR_NULL(mdp))
mdp->ops.setup_split_pipe(mdp, &cfg);
} }
static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops)
@@ -500,8 +440,6 @@ static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops)
ops->disable = sde_encoder_phys_vid_disable;
ops->destroy = sde_encoder_phys_vid_destroy;
ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
ops->get_vsync_info = sde_encoder_intf_get_vsync_info;
ops->enable_split_config = sde_encoder_intf_split_config;
}
struct sde_encoder_phys *sde_encoder_phys_vid_init(
@@ -534,7 +472,8 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
goto fail;
}
phys_enc->hw_ctl = sde_rm_acquire_ctl_path(sde_kms, ctl_idx); phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio,
sde_kms->catalog);
if (!phys_enc->hw_ctl) {
ret = -ENOMEM;
goto fail;

drivers/gpu/drm/msm/sde/sde_kms.c

@@ -21,21 +21,18 @@ static const char * const iommu_ports[] = {
"mdp_0",
};
static const struct sde_hw_res_map res_table[INTF_MAX] = {
{ SDE_NONE, SDE_NONE, SDE_NONE, SDE_NONE},
{ INTF_0, SDE_NONE, SDE_NONE, SDE_NONE},
{ INTF_1, LM_0, PINGPONG_0, CTL_0},
{ INTF_2, LM_1, PINGPONG_1, CTL_1},
{ INTF_3, SDE_NONE, SDE_NONE, CTL_2},
};
#define DEFAULT_MDP_SRC_CLK 200000000
int sde_disable(struct sde_kms *sde_kms)
{
DBG("");
clk_disable_unprepare(sde_kms->ahb_clk);
clk_disable_unprepare(sde_kms->axi_clk);
clk_disable_unprepare(sde_kms->core_clk);
if (sde_kms->lut_clk)
clk_disable_unprepare(sde_kms->lut_clk);
return 0;
}
@@ -69,7 +66,6 @@ static void sde_complete_commit(struct msm_kms *kms,
static void sde_wait_for_crtc_commit_done(struct msm_kms *kms,
struct drm_crtc *crtc)
{
sde_crtc_wait_for_commit_done(crtc);
}
static int modeset_init(struct sde_kms *sde_kms)
{
@@ -459,7 +455,6 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK);
sde_enable(sde_kms);
sde_kms->hw_res.res_table = res_table;
/*
 * Now we need to read the HW catalog and initialize resources such as
@@ -484,7 +479,9 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth;
dev->mode_config.max_height = 4096;
sde_kms->hw_intr = sde_rm_acquire_intr(sde_kms); sde_enable(sde_kms);
sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
sde_disable(sde_kms);
if (IS_ERR_OR_NULL(sde_kms->hw_intr))
goto fail;

drivers/gpu/drm/msm/sde/sde_kms.h

@@ -17,8 +17,7 @@
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
#include "sde_hw_catalog.h"
#include "sde_hw_mdp_ctl.h" #include "sde_hw_mdss.h"
#include "sde_hw_lm.h"
#include "sde_hw_interrupts.h" #include "sde_hw_interrupts.h"
/* /*
@@ -43,38 +42,6 @@ struct sde_irq {
spinlock_t cb_lock;
};
/**
* struct sde_hw_res_map : Default resource table identifying default
* hw resource map. Primarily used for forcing DSI to use CTL_0/1
* and Pingpong 0/1, if the field is set to SDE_NONE means any HW
* intstance for that tpye is allowed as long as it is unused.
*/
struct sde_hw_res_map {
enum sde_intf intf;
enum sde_lm lm;
enum sde_pingpong pp;
enum sde_ctl ctl;
};
/* struct sde_hw_resource_manager : Resource mananger maintains the current
* platform configuration and manages shared
* hw resources ex:ctl_path hw driver context
* is needed by CRTCs/PLANEs/ENCODERs
* @ctl : table of control path hw driver contexts allocated
* @mixer : list of mixer hw drivers contexts allocated
* @intr : pointer to hw interrupt context
* @res_table : pointer to default hw_res table for this platform
* @feature_map :BIT map for default enabled features ex:specifies if PP_SPLIT
* is enabled/disabled by defalt for this platform
*/
struct sde_hw_resource_manager {
struct sde_hw_ctl *ctl[CTL_MAX];
struct sde_hw_mixer *mixer[LM_MAX];
struct sde_hw_intr *intr;
const struct sde_hw_res_map *res_table;
bool feature_map;
};
struct sde_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -107,7 +74,6 @@ struct sde_kms {
struct sde_hw_intr *hw_intr;
struct sde_irq irq_obj;
struct sde_hw_resource_manager hw_res;
};
struct vsync_info {
@@ -142,36 +108,6 @@ struct sde_plane_state {
int sde_disable(struct sde_kms *sde_kms);
int sde_enable(struct sde_kms *sde_kms);
/**
* HW resource manager functions
* @sde_rm_acquire_ctl_path : Allocates control path
* @sde_rm_get_ctl_path : returns control path driver context for already
* acquired ctl path
* @sde_rm_release_ctl_path : Frees control path driver context
* @sde_rm_acquire_mixer : Allocates mixer hw driver context
* @sde_rm_get_mixer : returns mixer context for already
* acquired mixer
* @sde_rm_release_mixer : Frees mixer hw driver context
* @sde_rm_get_hw_res_map : Returns map for the passed INTF
*/
struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms,
enum sde_ctl idx);
struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms,
enum sde_ctl idx);
void sde_rm_release_ctl_path(struct sde_kms *sde_kms,
enum sde_ctl idx);
struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms,
enum sde_lm idx);
struct sde_hw_mixer *sde_rm_get_mixer(struct sde_kms *sde_kms,
enum sde_lm idx);
void sde_rm_release_mixer(struct sde_kms *sde_kms,
enum sde_lm idx);
struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms);
struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms);
const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms,
enum sde_intf idx);
/**
 * IRQ functions
 */
@@ -264,41 +200,31 @@ void sde_disable_all_irqs(struct sde_kms *sde_kms);
int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
/**
* Plane functions
*/
enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
bool private_plane);
/**
* CRTC functions
*/
uint32_t sde_crtc_vblank(struct drm_crtc *crtc);
void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc);
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *sde_crtc_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_plane *plane, int id);
/**
* Encoder functions and data types
*/
struct sde_encoder_hw_resources {
enum sde_intf_mode intfs[INTF_MAX]; bool intfs[INTF_MAX];
bool pingpongs[PINGPONG_MAX];
bool ctls[CTL_MAX];
bool pingpongsplit;
};
void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
struct sde_encoder_hw_resources *hw_res);
void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
void (*cb)(void *), void *data);
void sde_encoders_init(struct drm_device *dev);
void sde_encoder_get_vsync_info(struct drm_encoder *encoder,
struct vsync_info *vsync);
int sde_irq_domain_init(struct sde_kms *sde_kms);
int sde_irq_domain_fini(struct sde_kms *sde_kms);
#endif /* __sde_kms_H__ */

drivers/gpu/drm/msm/sde/sde_kms_utils.c

@@ -1,173 +0,0 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "sde_kms.h"
#include "sde_hw_lm.h"
#include "sde_hw_mdp_ctl.h"
struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms)
{
struct sde_hw_intr *hw_intr;
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
if (sde_kms->hw_res.intr) {
DRM_ERROR("intr already in use ");
return ERR_PTR(-ENODEV);
}
sde_enable(sde_kms);
hw_intr = sde_hw_intr_init(sde_kms->mmio,
sde_kms->catalog);
sde_disable(sde_kms);
if (!IS_ERR_OR_NULL(hw_intr))
sde_kms->hw_res.intr = hw_intr;
return hw_intr;
}
struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms)
{
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
return sde_kms->hw_res.intr;
}
struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms,
enum sde_ctl idx)
{
struct sde_hw_ctl *hw_ctl;
if (!sde_kms) {
DRM_ERROR("Invalid KMS driver");
return ERR_PTR(-EINVAL);
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) {
DRM_ERROR("Invalid Ctl Path Idx %d", idx);
return ERR_PTR(-EINVAL);
}
if (sde_kms->hw_res.ctl[idx]) {
DRM_ERROR("CTL path %d already in use ", idx);
return ERR_PTR(-ENODEV);
}
sde_enable(sde_kms);
hw_ctl = sde_hw_ctl_init(idx, sde_kms->mmio, sde_kms->catalog);
sde_disable(sde_kms);
if (!IS_ERR_OR_NULL(hw_ctl))
sde_kms->hw_res.ctl[idx] = hw_ctl;
return hw_ctl;
}
struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms,
enum sde_ctl idx)
{
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) {
DRM_ERROR("Invalid Ctl path Idx %d", idx);
return ERR_PTR(-EINVAL);
}
return sde_kms->hw_res.ctl[idx];
}
void sde_rm_release_ctl_path(struct sde_kms *sde_kms, enum sde_ctl idx)
{
if (!sde_kms) {
DRM_ERROR("Invalid pointer\n");
return;
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) {
DRM_ERROR("Invalid Ctl path Idx %d", idx);
return;
}
}
struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms,
enum sde_lm idx)
{
struct sde_hw_mixer *mixer;
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) {
DBG("Invalid mixer id %d", idx);
return ERR_PTR(-EINVAL);
}
if (sde_kms->hw_res.mixer[idx]) {
DRM_ERROR("mixer %d already in use ", idx);
return ERR_PTR(-ENODEV);
}
sde_enable(sde_kms);
mixer = sde_hw_lm_init(idx, sde_kms->mmio, sde_kms->catalog);
sde_disable(sde_kms);
if (!IS_ERR_OR_NULL(mixer))
sde_kms->hw_res.mixer[idx] = mixer;
return mixer;
}
struct sde_hw_mixer *sde_rm_get_mixer(struct sde_kms *sde_kms,
enum sde_lm idx)
{
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) {
DRM_ERROR("Invalid mixer id %d", idx);
return ERR_PTR(-EINVAL);
}
return sde_kms->hw_res.mixer[idx];
}
const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms,
enum sde_intf idx)
{
if (!sde_kms) {
DRM_ERROR("Invalid KMS Driver");
return ERR_PTR(-EINVAL);
}
if ((idx == SDE_NONE) || (idx > sde_kms->catalog->intf_count)) {
DRM_ERROR("Invalid intf id %d", idx);
return ERR_PTR(-EINVAL);
}
DBG(" Platform Resource map for INTF %d -> lm %d, pp %d ctl %d",
sde_kms->hw_res.res_table[idx].intf,
sde_kms->hw_res.res_table[idx].lm,
sde_kms->hw_res.res_table[idx].pp,
sde_kms->hw_res.res_table[idx].ctl);
return &(sde_kms->hw_res.res_table[idx]);
}