Merge "drm/msm/sde: simplify encoder display probe logic"

This commit is contained in:
Linux Build Service Account, 2017-01-13 08:07:46 -08:00, committed by Gerrit - the friendly Code Review server
commit bc2d4c423e
21 changed files with 2735 additions and 357 deletions

View file

@ -40,6 +40,8 @@ msm-y := \
mdp/mdp5/mdp5_smp.o \
sde/sde_crtc.o \
sde/sde_encoder.o \
sde/sde_encoder_phys_vid.o \
sde/sde_encoder_phys_cmd.o \
sde/sde_irq.o \
sde/sde_kms.o \
sde/sde_plane.o \
@ -86,4 +88,5 @@ obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \
sde/sde_hw_mdp_util.o \
sde/sde_hw_sspp.o \
sde/sde_hw_wb.o \
sde/sde_hw_pingpong.o
sde/sde_hw_pingpong.o \
sde/sde_mdp_formats.o

View file

@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
#include <linux/sort.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@ -17,73 +18,433 @@
#include "sde_kms.h"
#include "sde_hw_lm.h"
#include "sde_hw_mdss.h"
#include "sde_hw_mdp_ctl.h"
#define CRTC_DUAL_MIXERS 2
#define PENDING_FLIP 2
#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages)
struct sde_crtc_mixer {
struct sde_hw_dspp *hw_dspp;
struct sde_hw_mixer *hw_lm;
struct sde_hw_ctl *hw_ctl;
u32 flush_mask;
};
struct sde_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
struct drm_encoder *encoder;
int id;
bool enabled;
enum sde_lm mixer;
enum sde_ctl ctl_path;
spinlock_t lm_lock; /* protect registers */
/* HW Resources reserved for the crtc */
u32 num_ctls;
u32 num_mixers;
struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS];
/* if there is a pending flip, this will be non-null */
struct drm_pending_vblank_event *event;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
static struct sde_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
return to_sde_kms(to_mdp_kms(priv->kms));
}
static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx,
void __iomem *addr,
struct sde_mdss_cfg *m)
{
/*
* This module keeps track of the requested hw resources state,
* if the requested resource is being used it returns NULL,
* otherwise it returns the hw driver struct
*/
return sde_hw_ctl_init(idx, addr, m);
}
static inline struct sde_hw_mixer *sde_crtc_rm_get_mixer(enum sde_lm idx,
void __iomem *addr,
struct sde_mdss_cfg *m)
{
/*
* This module keeps track of the requested hw resources state,
* if the requested resource is being used it returns NULL,
* otherwise it returns the hw driver struct
*/
return sde_hw_lm_init(idx, addr, m);
}
static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc,
struct drm_encoder *encoder)
{
/*
* Assign CRTC resources
* num_ctls;
* num_mixers;
* sde_lm mixer[CRTC_MAX_PIPES];
* sde_ctl ctl[CRTC_MAX_PIPES];
*/
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_kms *kms = get_kms(crtc);
enum sde_lm lm_id[CRTC_DUAL_MIXERS];
enum sde_ctl ctl_id[CRTC_DUAL_MIXERS];
int i;
if (!kms) {
DBG("[%s] invalid kms\n", __func__);
return -EINVAL;
}
if (!kms->mmio)
return -EINVAL;
/*
* simple check to validate against catalog
*/
sde_crtc->num_ctls = 1;
sde_crtc->num_mixers = 1;
ctl_id[0] = CTL_0;
lm_id[0] = LM_0;
/*
* need to also enable MDP core clock and AHB CLK
* before touching HW driver
*/
DBG("%s Enable clocks\n", __func__);
sde_enable(kms);
for (i = 0; i < sde_crtc->num_ctls; i++) {
sde_crtc->mixer[i].hw_ctl = sde_crtc_rm_get_ctl_path(ctl_id[i],
kms->mmio, kms->catalog);
if (!sde_crtc->mixer[i].hw_ctl) {
DBG("[%s], Invalid ctl_path", __func__);
return -EACCES;
}
}
for (i = 0; i < sde_crtc->num_mixers; i++) {
sde_crtc->mixer[i].hw_lm = sde_crtc_rm_get_mixer(lm_id[i],
kms->mmio, kms->catalog);
if (!sde_crtc->mixer[i].hw_lm) {
DBG("[%s], Invalid ctl_path", __func__);
return -EACCES;
}
}
/*
* need to disable MDP core clock and AHB CLK
*/
sde_disable(kms);
return 0;
}
static void sde_crtc_destroy(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
DBG("");
drm_crtc_cleanup(crtc);
kfree(sde_crtc);
}
static void sde_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DBG("");
return true;
}
static int sde_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
static void sde_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_mixer *mixer = sde_crtc->mixer;
struct drm_device *dev = crtc->dev;
struct sde_hw_mixer *lm;
unsigned long flags;
struct drm_display_mode *mode;
struct sde_hw_mixer_cfg cfg;
u32 mixer_width;
int i;
int rc;
DBG("");
if (WARN_ON(!crtc->state))
return;
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
sde_crtc->name, mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
/*
* reserve mixer(s) if not already available
* if dual mode, mixer_width = half mode width
* program mode configuration on mixer(s)
*/
if ((sde_crtc->num_ctls == 0) ||
(sde_crtc->num_mixers == 0)) {
rc = sde_crtc_reserve_hw_resources(crtc, sde_crtc->encoder);
if (rc) {
dev_err(dev->dev, " error reserving HW resource for this CRTC\n");
return;
}
}
if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS)
mixer_width = mode->hdisplay >> 1;
else
mixer_width = mode->hdisplay;
spin_lock_irqsave(&sde_crtc->lm_lock, flags);
for (i = 0; i < sde_crtc->num_mixers; i++) {
lm = mixer[i].hw_lm;
cfg.out_width = mixer_width;
cfg.out_height = mode->vdisplay;
cfg.right_mixer = (i == 0) ? false : true;
cfg.flags = 0;
lm->ops.setup_mixer_out(lm, &cfg);
}
spin_unlock_irqrestore(&sde_crtc->lm_lock, flags);
}
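/*
 * Illustrative sketch, not part of this commit: for a hypothetical
 * 3840x2160 mode driven through two layer mixers, the loop above would
 * program each mixer roughly as follows:
 *
 *   cfg.out_width   = 3840 >> 1;     i.e. 1920 pixels per mixer
 *   cfg.out_height  = 2160;
 *   cfg.right_mixer = (i == 1);      the LM at index 1 drives the right half
 *   lm->ops.setup_mixer_out(lm, &cfg);
 */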
static void sde_crtc_get_blend_cfg(struct sde_hw_blend_cfg *cfg,
struct sde_plane_state *pstate)
{
const struct mdp_format *format;
struct drm_plane *plane;
format = to_mdp_format(
msm_framebuffer_format(pstate->base.fb));
plane = pstate->base.plane;
cfg->fg.alpha_sel = ALPHA_FG_CONST;
cfg->bg.alpha_sel = ALPHA_BG_CONST;
cfg->fg.const_alpha = pstate->alpha;
cfg->bg.const_alpha = 0xFF - pstate->alpha;
if (format->alpha_enable && pstate->premultiplied) {
cfg->fg.alpha_sel = ALPHA_FG_CONST;
cfg->bg.alpha_sel = ALPHA_FG_PIXEL;
if (pstate->alpha != 0xff) {
cfg->bg.const_alpha = pstate->alpha;
cfg->bg.inv_alpha_sel = 1;
cfg->bg.mod_alpha = 1;
} else {
cfg->bg.inv_mode_alpha = 1;
}
} else if (format->alpha_enable) {
cfg->fg.alpha_sel = ALPHA_FG_PIXEL;
cfg->bg.alpha_sel = ALPHA_FG_PIXEL;
if (pstate->alpha != 0xff) {
cfg->bg.const_alpha = pstate->alpha;
cfg->fg.mod_alpha = 1;
cfg->bg.inv_alpha_sel = 1;
cfg->bg.mod_alpha = 1;
cfg->bg.inv_mode_alpha = 1;
} else {
cfg->bg.inv_mode_alpha = 1;
}
}
}
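/*
 * Worked example (illustrative only, not part of this commit): for an
 * ARGB plane marked premultiplied with a plane alpha of 0xFF, the helper
 * above keeps the constant foreground alpha and switches the background
 * to the inverted foreground pixel alpha:
 *
 *   cfg->fg.alpha_sel      = ALPHA_FG_CONST;   constant 0xFF foreground
 *   cfg->bg.alpha_sel      = ALPHA_FG_PIXEL;   bg weighted by fg pixel alpha
 *   cfg->bg.inv_mode_alpha = 1;                i.e. bg * (1 - fg pixel alpha)
 */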
static void blend_setup(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_mixer *mixer = sde_crtc->mixer;
struct drm_plane *plane;
struct sde_plane_state *pstate, *pstates[SDE_STAGE_MAX] = {0};
struct sde_hw_stage_cfg stage_cfg;
struct sde_hw_blend_cfg blend;
struct sde_hw_ctl *ctl;
struct sde_hw_mixer *lm;
u32 flush_mask = 0;
unsigned long flags;
int i, j, plane_cnt = 0;
spin_lock_irqsave(&sde_crtc->lm_lock, flags);
/* ctl could be reserved already */
if (!sde_crtc->num_ctls)
goto out;
/* initialize stage cfg */
memset(&stage_cfg, 0, sizeof(stage_cfg));
memset(&blend, 0, sizeof(blend));
/* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_sde_plane_state(plane->state);
pstates[pstate->stage] = pstate;
plane_cnt++;
for (i = 0; i < sde_crtc->num_mixers; i++) {
stage_cfg.stage[pstate->stage][i] =
sde_plane_pipe(plane);
/* Cache the flushmask for this layer;
* source split is always enabled, so this layer will
* be staged on both mixers
*/
ctl = mixer[i].hw_ctl;
ctl->ops.get_bitmask_sspp(ctl, &flush_mask,
sde_plane_pipe(plane));
}
}
/*
* If there is no base layer, enable border color.
* Currently, border color is always black
*/
if ((stage_cfg.stage[SDE_STAGE_BASE][0] == SSPP_NONE) &&
plane_cnt) {
stage_cfg.border_enable = 1;
DBG("Border Color is enabled\n");
}
/* Program hw */
for (i = 0; i < sde_crtc->num_mixers; i++) {
if (!mixer[i].hw_lm)
continue;
if (!mixer[i].hw_ctl)
continue;
ctl = mixer[i].hw_ctl;
lm = mixer[i].hw_lm;
/* stage config */
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
/* stage config flush mask */
mixer[i].flush_mask = flush_mask;
/* get the flush mask for mixer */
ctl->ops.get_bitmask_mixer(ctl, &mixer[i].flush_mask,
mixer[i].hw_lm->idx);
/* blend config */
for (j = SDE_STAGE_0; j < SDE_STAGE_MAX; j++) {
if (!pstates[j])
continue;
sde_crtc_get_blend_cfg(&blend, pstates[j]);
blend.fg.alpha_sel = ALPHA_FG_CONST;
blend.bg.alpha_sel = ALPHA_BG_CONST;
blend.fg.const_alpha = pstate->alpha;
blend.bg.const_alpha = 0xFF - pstate->alpha;
lm->ops.setup_blend_config(lm, j, &blend);
}
}
out:
spin_unlock_irqrestore(&sde_crtc->lm_lock, flags);
}
static void request_pending(struct drm_crtc *crtc, u32 pending)
{
DBG("");
}
/**
* Flush the CTL PATH
*/
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_hw_ctl *ctl;
int i;
DBG("");
for (i = 0; i < sde_crtc->num_ctls; i++) {
/*
* Query flush_mask from encoder
* and append to the ctl_path flush_mask
*/
ctl = sde_crtc->mixer[i].hw_ctl;
ctl->ops.get_bitmask_intf(ctl,
&(sde_crtc->mixer[i].flush_mask),
INTF_1);
ctl->ops.setup_flush(ctl,
sde_crtc->mixer[i].flush_mask);
}
return 0;
}
static void sde_crtc_prepare(struct drm_crtc *crtc)
static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("");
WARN_ON(sde_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
sde_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
* If no CTL has been allocated in sde_crtc_atomic_check(),
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
if (unlikely(!sde_crtc->num_ctls))
return;
blend_setup(crtc);
/*
* PP_DONE irq is only used by command mode for now.
* It is better to request pending before FLUSH and START trigger
* to make sure no pp_done irq is missed.
* This is safe because no pp_done will happen before SW trigger
* in command mode.
*/
}
static void sde_crtc_commit(struct drm_crtc *crtc)
static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
}
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct drm_device *dev = crtc->dev;
unsigned long flags;
static int sde_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return 0;
}
DBG("");
static void sde_crtc_load_lut(struct drm_crtc *crtc)
{
}
WARN_ON(sde_crtc->event);
static int sde_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
return 0;
spin_lock_irqsave(&dev->event_lock, flags);
sde_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
* If no CTL has been allocated in sde_crtc_atomic_check(),
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
if (unlikely(!sde_crtc->num_ctls))
return;
crtc_flush_all(crtc);
request_pending(crtc, PENDING_FLIP);
}
static int sde_crtc_set_property(struct drm_crtc *crtc,
@ -92,21 +453,111 @@ static int sde_crtc_set_property(struct drm_crtc *crtc,
return -EINVAL;
}
static int sde_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
{
return 0;
}
static int sde_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
return 0;
}
static void sde_crtc_disable(struct drm_crtc *crtc)
{
DBG("");
}
static void sde_crtc_enable(struct drm_crtc *crtc)
{
DBG("");
}
struct plane_state {
struct drm_plane *plane;
struct sde_plane_state *state;
};
static int pstate_cmp(const void *a, const void *b)
{
struct plane_state *pa = (struct plane_state *)a;
struct plane_state *pb = (struct plane_state *)b;
return pa->state->zpos - pb->state->zpos;
}
static int sde_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_kms *sde_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[SDE_STAGE_MAX];
int max_stages = CRTC_HW_MIXER_MAXSTAGES(sde_kms->catalog, 0);
int cnt = 0, i;
DBG("%s: check", sde_crtc->name);
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
drm_atomic_crtc_state_for_each_plane(plane, state) {
struct drm_plane_state *pstate;
if (cnt >= (max_stages)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
pstate = state->state->plane_states[drm_plane_index(plane)];
/* plane might not have changed, in which case take
* current state:
*/
if (!pstate)
pstate = plane->state;
pstates[cnt].plane = plane;
pstates[cnt].state = to_sde_plane_state(pstate);
cnt++;
}
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
for (i = 0; i < cnt; i++) {
pstates[i].state->stage = SDE_STAGE_0 + i;
DBG("%s: assign pipe %d on stage=%d", sde_crtc->name,
sde_plane_pipe(pstates[i].plane),
pstates[i].state->stage);
}
return 0;
}
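/*
 * Illustrative sketch, not part of this commit: if three planes are
 * attached with zpos properties 7, 2 and 5, the sort above orders them
 * {2, 5, 7} and the loop assigns mixer stages in that order:
 *
 *   zpos 2 -> SDE_STAGE_0
 *   zpos 5 -> SDE_STAGE_1
 *   zpos 7 -> SDE_STAGE_2
 */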
static const struct drm_crtc_funcs sde_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.set_config = drm_atomic_helper_set_config,
.destroy = sde_crtc_destroy,
.page_flip = sde_crtc_page_flip,
.page_flip = drm_atomic_helper_page_flip,
.set_property = sde_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.cursor_set = sde_crtc_cursor_set,
.cursor_move = sde_crtc_cursor_move,
};
static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
.dpms = sde_crtc_dpms,
.mode_fixup = sde_crtc_mode_fixup,
.mode_set = sde_crtc_mode_set,
.prepare = sde_crtc_prepare,
.commit = sde_crtc_commit,
.mode_set_base = sde_crtc_mode_set_base,
.load_lut = sde_crtc_load_lut,
.mode_set_nofb = sde_crtc_mode_set_nofb,
.disable = sde_crtc_disable,
.enable = sde_crtc_enable,
.atomic_check = sde_crtc_atomic_check,
.atomic_begin = sde_crtc_atomic_begin,
.atomic_flush = sde_crtc_atomic_flush,
};
uint32_t sde_crtc_vblank(struct drm_crtc *crtc)
@ -118,20 +569,20 @@ void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
}
void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
static void sde_crtc_install_properties(struct drm_crtc *crtc,
struct drm_mode_object *obj)
{
}
void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
}
/* initialize crtc */
struct drm_crtc *sde_crtc_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_plane *plane, int id)
{
struct drm_crtc *crtc = NULL;
struct sde_crtc *sde_crtc;
int rc;
sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
if (!sde_crtc)
@ -140,9 +591,21 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
crtc = &sde_crtc->base;
sde_crtc->id = id;
sde_crtc->encoder = encoder;
/* find out if we need one or two lms */
sde_crtc_install_properties(crtc, &crtc->base);
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
plane->crtc = crtc;
rc = sde_crtc_reserve_hw_resources(crtc, encoder);
if (rc) {
dev_err(dev->dev, " error reserving HW resource for this CRTC\n");
return ERR_PTR(-EINVAL);
}
DBG("%s: Successfully initialized crtc\n", __func__);
return crtc;
}

View file

@ -10,86 +10,514 @@
* GNU General Public License for more details.
*/
#include "msm_drv.h"
#include "sde_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
struct sde_encoder {
struct drm_encoder base;
int intf;
#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_intf.h"
#include "sde_hw_mdp_ctl.h"
#include "sde_mdp_formats.h"
#include "sde_encoder_phys.h"
#include "display_manager.h"
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDP_PORT0, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_vectors[] = {
MDP_BUS_VECTOR_ENTRY(0, 0),
MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
#define to_sde_encoder(x) container_of(x, struct sde_encoder, base)
static void sde_encoder_destroy(struct drm_encoder *encoder)
static struct msm_bus_paths mdp_bus_usecases[] = { {
.num_paths = 1,
.vectors =
&mdp_bus_vectors[0],
}, {
.num_paths = 1,
.vectors =
&mdp_bus_vectors[1],
}
};
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
.usecase = mdp_bus_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
.name = "mdss_mdp",
};
static void bs_init(struct sde_encoder_virt *sde_enc)
{
struct sde_encoder *sde_encoder = to_sde_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(sde_encoder);
sde_enc->bus_scaling_client =
msm_bus_scale_register_client(&mdp_bus_scale_table);
DBG("bus scale client: %08x", sde_enc->bus_scaling_client);
}
static void bs_fini(struct sde_encoder_virt *sde_enc)
{
if (sde_enc->bus_scaling_client) {
msm_bus_scale_unregister_client(sde_enc->bus_scaling_client);
sde_enc->bus_scaling_client = 0;
}
}
static void bs_set(struct sde_encoder_virt *sde_enc, int idx)
{
if (sde_enc->bus_scaling_client) {
DBG("set bus scaling: %d", idx);
idx = 1;
msm_bus_scale_client_update_request(sde_enc->bus_scaling_client,
idx);
}
}
#else
static void bs_init(struct sde_encoder_virt *sde_enc)
{
}
static void bs_fini(struct sde_encoder_virt *sde_enc)
{
}
static void bs_set(struct sde_encoder_virt *sde_enc, int idx)
{
}
#endif
void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct sde_encoder_hw_resources *hw_res)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
DBG("");
if (!hw_res || !drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.get_hw_resources)
phys->phys_ops.get_hw_resources(phys, hw_res);
}
}
static void sde_encoder_destroy(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
for (i = 0; i < ARRAY_SIZE(sde_enc->phys_encs); i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.destroy) {
phys->phys_ops.destroy(phys);
--sde_enc->num_phys_encs;
sde_enc->phys_encs[i] = NULL;
}
}
if (sde_enc->num_phys_encs) {
DRM_ERROR("Expected num_phys_encs to be 0 not %d\n",
sde_enc->num_phys_encs);
}
drm_encoder_cleanup(drm_enc);
bs_fini(sde_enc);
kfree(sde_enc);
}
static bool sde_encoder_virt_mode_fixup(struct drm_encoder *drm_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
bool ret = true;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return false;
}
sde_enc = to_sde_encoder_virt(drm_enc);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.mode_fixup) {
ret =
phys->phys_ops.mode_fixup(phys, mode,
adjusted_mode);
if (!ret) {
DBG("Mode unsupported by phys_enc %d", i);
break;
}
if (sde_enc->num_phys_encs > 1) {
DBG("ModeFix only checking 1 phys_enc");
break;
}
}
}
return ret;
}
static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.mode_set)
phys->phys_ops.mode_set(phys, mode, adjusted_mode);
}
}
static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
bs_set(sde_enc, 1);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.enable)
phys->phys_ops.enable(phys);
}
}
static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int i = 0;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys && phys->phys_ops.disable)
phys->phys_ops.disable(phys);
}
bs_set(sde_enc, 0);
}
static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
.mode_fixup = sde_encoder_virt_mode_fixup,
.mode_set = sde_encoder_virt_mode_set,
.disable = sde_encoder_virt_disable,
.enable = sde_encoder_virt_enable,
};
static const struct drm_encoder_funcs sde_encoder_funcs = {
.destroy = sde_encoder_destroy,
};
static void sde_encoder_dpms(struct drm_encoder *encoder, int mode)
static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
enum sde_intf_type type, u32 controller_id)
{
int i = 0;
DBG("");
for (i = 0; i < catalog->intf_count; i++) {
if (catalog->intf[i].type == type
&& catalog->intf[i].controller_id == controller_id) {
return catalog->intf[i].id;
}
}
return INTF_MAX;
}
static bool sde_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc)
{
return true;
struct sde_encoder_virt *sde_enc = NULL;
unsigned long lock_flags;
DBG("");
if (!drm_enc) {
DRM_ERROR("Invalid pointer");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
if (sde_enc->kms_vblank_callback)
sde_enc->kms_vblank_callback(sde_enc->kms_vblank_callback_data);
spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
}
static void sde_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder_virt *sde_enc,
struct sde_kms *sde_kms,
enum sde_intf intf_idx,
enum sde_ctl ctl_idx)
{
int ret = 0;
DBG("");
if (sde_enc->num_phys_encs >= ARRAY_SIZE(sde_enc->phys_encs)) {
DRM_ERROR("Too many video encoders %d, unable to add\n",
sde_enc->num_phys_encs);
ret = -EINVAL;
} else {
struct sde_encoder_virt_ops parent_ops = {
sde_encoder_vblank_callback
};
struct sde_encoder_phys *enc =
sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx,
&sde_enc->base,
parent_ops);
if (IS_ERR(enc))
ret = PTR_ERR(enc);
if (!ret) {
sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
++sde_enc->num_phys_encs;
}
}
return ret;
}
static void sde_encoder_prepare(struct drm_encoder *encoder)
static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
struct sde_kms *sde_kms,
struct display_info *disp_info,
int *drm_enc_mode)
{
int ret = 0;
int i = 0;
enum sde_intf_type intf_type = INTF_NONE;
DBG("");
if (disp_info->intf == DISPLAY_INTF_DSI) {
*drm_enc_mode = DRM_MODE_ENCODER_DSI;
intf_type = INTF_DSI;
} else if (disp_info->intf == DISPLAY_INTF_HDMI) {
*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
intf_type = INTF_HDMI;
} else {
DRM_ERROR("Unsupported display interface type");
return -EINVAL;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
DBG("dsi_info->num_of_h_tiles %d", disp_info->num_of_h_tiles);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
* Left-most tile is at index 0, content is controller id
* h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
* h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
*/
enum sde_intf intf_idx = INTF_MAX;
enum sde_ctl ctl_idx = CTL_0;
u32 controller_id = disp_info->h_tile_instance[i];
if (intf_type == INTF_HDMI)
ctl_idx = CTL_2;
DBG("h_tile_instance %d = %d", i, controller_id);
intf_idx = sde_encoder_get_intf(sde_kms->catalog,
intf_type, controller_id);
if (intf_idx == INTF_MAX) {
DBG("Error: could not get the interface id");
ret = -EINVAL;
}
/* Create both VID and CMD Phys Encoders here */
if (!ret)
ret = sde_encoder_virt_add_phys_vid_enc(
sde_enc, sde_kms, intf_idx, ctl_idx);
}
return ret;
}
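/*
 * Illustrative sketch, not part of this commit: with the catalog changes
 * later in this patch (INTF_1 = DSI controller 0, INTF_2 = DSI controller 1),
 * a dual-DSI panel reporting num_of_h_tiles = 2 and h_tile_instance = {0, 1}
 * would make this loop create two video phys encoders, both on CTL_0:
 *
 *   sde_encoder_get_intf(catalog, INTF_DSI, 0) -> INTF_1 (left tile)
 *   sde_encoder_get_intf(catalog, INTF_DSI, 1) -> INTF_2 (right tile)
 */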
static void sde_encoder_commit(struct drm_encoder *encoder)
static struct drm_encoder *sde_encoder_virt_init(
struct drm_device *dev, struct display_info *disp_info)
{
}
struct msm_drm_private *priv = dev->dev_private;
struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms));
struct drm_encoder *drm_enc = NULL;
struct sde_encoder_virt *sde_enc = NULL;
int drm_enc_mode = DRM_MODE_ENCODER_NONE;
int ret = 0;
static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
.dpms = sde_encoder_dpms,
.mode_fixup = sde_encoder_mode_fixup,
.mode_set = sde_encoder_mode_set,
.prepare = sde_encoder_prepare,
.commit = sde_encoder_commit,
};
DBG("");
/* initialize encoder */
struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf)
{
struct drm_encoder *encoder = NULL;
struct sde_encoder *sde_encoder;
int ret;
sde_encoder = kzalloc(sizeof(*sde_encoder), GFP_KERNEL);
if (!sde_encoder) {
sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
if (!sde_enc) {
ret = -ENOMEM;
goto fail;
}
sde_encoder->intf = intf;
encoder = &sde_encoder->base;
ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
&drm_enc_mode);
if (ret)
goto fail;
drm_encoder_init(dev, encoder, &sde_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &sde_encoder_helper_funcs);
spin_lock_init(&sde_enc->spin_lock);
drm_enc = &sde_enc->base;
drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
bs_init(sde_enc);
return encoder;
DBG("Created encoder");
return drm_enc;
fail:
if (encoder)
sde_encoder_destroy(encoder);
DRM_ERROR("Failed to create encoder\n");
if (drm_enc)
sde_encoder_destroy(drm_enc);
return ERR_PTR(ret);
}
void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
void (*cb)(void *), void *data)
{
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
unsigned long lock_flags;
DBG("");
spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
sde_enc->kms_vblank_callback = cb;
sde_enc->kms_vblank_callback_data = data;
spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
}
/* encoders init:
* initialize encoders based on the displays found
*/
void sde_encoders_init(struct drm_device *dev)
{
struct msm_drm_private *priv = NULL;
struct display_manager *disp_man = NULL;
u32 i = 0;
u32 num_displays = 0;
DBG("");
if (!dev || !dev->dev_private) {
DRM_ERROR("Invalid pointer");
return;
}
priv = dev->dev_private;
priv->num_encoders = 0;
if (!priv->kms || !priv->dm) {
DRM_ERROR("Invalid pointer");
return;
}
disp_man = priv->dm;
num_displays = display_manager_get_count(disp_man);
DBG("num_displays %d", num_displays);
if (num_displays > ARRAY_SIZE(priv->encoders)) {
num_displays = ARRAY_SIZE(priv->encoders);
DRM_ERROR("Too many displays found, capping to %d",
num_displays);
}
for (i = 0; i < num_displays; i++) {
struct display_info info = { 0 };
struct drm_encoder *enc = NULL;
u32 ret = 0;
ret = display_manager_get_info_by_index(disp_man, i, &info);
if (ret) {
DRM_ERROR("Failed to get display info, %d", ret);
return;
}
enc = sde_encoder_virt_init(dev, &info);
if (IS_ERR_OR_NULL(enc)) {
DRM_ERROR("Encoder initialization failed");
return;
}
ret = display_manager_drm_init_by_index(disp_man, i, enc);
if (ret) {
DRM_ERROR("Display drm_init failed, %d", ret);
return;
}
priv->encoders[priv->num_encoders++] = enc;
}
}

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __SDE_ENCODER_PHYS_H__
#define __SDE_ENCODER_PHYS_H__
#include "sde_kms.h"
#include "sde_hw_intf.h"
#include "sde_hw_mdp_ctl.h"
#define MAX_PHYS_ENCODERS_PER_VIRTUAL 4
struct sde_encoder_phys;
struct sde_encoder_virt_ops {
void (*handle_vblank_virt)(struct drm_encoder *);
};
struct sde_encoder_phys_ops {
void (*mode_set)(struct sde_encoder_phys *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
bool (*mode_fixup)(struct sde_encoder_phys *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*enable)(struct sde_encoder_phys *encoder);
void (*disable)(struct sde_encoder_phys *encoder);
void (*destroy)(struct sde_encoder_phys *encoder);
void (*get_hw_resources)(struct sde_encoder_phys *encoder,
struct sde_encoder_hw_resources *hw_res);
};
struct sde_encoder_phys {
struct drm_encoder *parent;
struct sde_encoder_virt_ops parent_ops;
struct sde_encoder_phys_ops phys_ops;
struct sde_hw_intf *hw_intf;
struct sde_hw_ctl *hw_ctl;
struct mdp_kms *mdp_kms;
struct drm_display_mode cached_mode;
bool enabled;
spinlock_t spin_lock;
};
struct sde_encoder_phys_vid {
struct sde_encoder_phys base;
struct mdp_irq vblank_irq;
};
struct sde_encoder_virt {
struct drm_encoder base;
spinlock_t spin_lock;
uint32_t bus_scaling_client;
int num_phys_encs;
struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
void (*kms_vblank_callback)(void *);
void *kms_vblank_callback_data;
};
struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms,
enum sde_intf intf_idx,
enum sde_ctl ctl_idx,
struct drm_encoder *parent,
struct sde_encoder_virt_ops
parent_ops);
#endif /* __SDE_ENCODER_PHYS_H__ */

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_drv.h"
#include "sde_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_intf.h"
#include "sde_mdp_formats.h"
#include "sde_encoder_phys.h"

View file

@ -0,0 +1,408 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_drv.h"
#include "sde_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "sde_encoder_phys.h"
#include "sde_mdp_formats.h"
#define to_sde_encoder_phys_vid(x) \
container_of(x, struct sde_encoder_phys_vid, base)
static void drm_mode_to_intf_timing_params(
const struct sde_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
/*
* https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
* Active Region Front Porch Sync Back Porch
* <-----------------><------------><-----><----------->
* <- [hv]display --->
* <--------- [hv]sync_start ------>
* <----------------- [hv]sync_end ------->
* <---------------------------- [hv]total ------------->
*/
timing->width = mode->hdisplay; /* active width */
timing->height = mode->vdisplay; /* active height */
timing->xres = timing->width;
timing->yres = timing->height;
timing->h_back_porch = mode->htotal - mode->hsync_end;
timing->h_front_porch = mode->hsync_start - mode->hdisplay;
timing->v_back_porch = mode->vtotal - mode->vsync_end;
timing->v_front_porch = mode->vsync_start - mode->vdisplay;
timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
timing->border_clr = 0;
timing->underflow_clr = 0xff;
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
/*
* if (vid_enc->hw->cap->type == INTF_EDP) {
* display_v_start += mode->htotal - mode->hsync_start;
* display_v_end -= mode->hsync_start - mode->hdisplay;
* }
*/
}
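/*
 * Worked example (illustrative only, not part of this commit): for a
 * standard CEA 1080p60 mode (1920 2008 2052 2200 / 1080 1084 1089 1125),
 * the conversion above yields:
 *
 *   h_front_porch     = 2008 - 1920 = 88
 *   hsync_pulse_width = 2052 - 2008 = 44
 *   h_back_porch      = 2200 - 2052 = 148
 *   v_front_porch     = 1084 - 1080 = 4
 *   vsync_pulse_width = 1089 - 1084 = 5
 *   v_back_porch      = 1125 - 1089 = 36
 */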
static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
timing->h_back_porch + timing->h_front_porch +
timing->hsync_pulse_width;
return active + inactive;
}
static inline u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
timing->v_back_porch + timing->v_front_porch +
timing->vsync_pulse_width;
return active + inactive;
}
/*
* programmable_fetch_get_num_lines:
* Number of fetch lines in vertical front porch
* @timing: Pointer to the intf timing information for the requested mode
*
* Returns the number of fetch lines in vertical front porch at which mdp
* can start fetching the next frame.
*
* Number of needed prefetch lines is anything that cannot be absorbed in the
* start of frame time (back porch + vsync pulse width).
*
* Some panels have a very large VFP; however, we only need enough lines
* to cover the chip's worst-case latency.
*/
static u32 programmable_fetch_get_num_lines(
struct sde_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
u32 actual_vfp_lines = 0;
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
DBG("Programmable fetch is not needed due to large vbp+vsw");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
DBG("Less vfp than fetch requires, using entire vfp");
actual_vfp_lines = timing->v_front_porch;
} else {
DBG("Room in vfp for needed prefetch");
actual_vfp_lines = needed_vfp_lines;
}
DBG("v_front_porch %u v_back_porch %u vsync_pulse_width %u",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
DBG("wc_lines %u needed_vfp_lines %u actual_vfp_lines %u",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
return actual_vfp_lines;
}
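/*
 * Worked example (illustrative only, not part of this commit): with the
 * catalog's prog_fetch_lines_worst_case of 21 and a hypothetical panel
 * with v_back_porch = 4, vsync_pulse_width = 2, v_front_porch = 20:
 *
 *   start_of_frame_lines = 4 + 2 = 6   (less than 21, so fetch is needed)
 *   needed_vfp_lines     = 21 - 6 = 15
 *   actual_vfp_lines     = 15          (a vfp of 20 has room for 15 lines)
 *
 * Had the panel only 10 lines of vfp, all 10 would be used and the
 * pr_warn_once() above would fire.
 */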
/*
* programmable_fetch_config: Programs HW to prefetch lines by offsetting
* the start of fetch into the vertical front porch for cases where the
* vsync pulse width and vertical back porch time are insufficient
*
* Gets # of lines to pre-fetch, then calculate VSYNC counter value.
* HW layer requires VSYNC counter of first pixel of tgt VFP line.
*
* @timing: Pointer to the intf timing information for the requested mode
*/
static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
u32 vert_total = 0;
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
vfp_fetch_start_vsync_counter =
(vert_total - vfp_fetch_lines) * horiz_total + 1;
f.enable = 1;
f.fetch_start = vfp_fetch_start_vsync_counter;
}
DBG("vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
}
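/*
 * Illustrative sketch, not part of this commit: continuing the 1080p60
 * example (vert_total = 1125, horiz_total = 2200) with vfp_fetch_lines
 * of 15, the fetch start is programmed in vsync-counter units as:
 *
 *   f.fetch_start = (1125 - 15) * 2200 + 1 = 2442001
 *
 * i.e. the first pixel of the target VFP line, 15 lines before the end
 * of the frame.
 */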
static bool sde_encoder_phys_vid_mode_fixup(
struct sde_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DBG("");
/*
* Modifying mode has consequences when the mode comes back to us
*/
return true;
}
static void sde_encoder_phys_vid_mode_set(
struct sde_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
mode = adjusted_mode;
phys_enc->cached_mode = *adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
}
static void sde_encoder_phys_vid_setup_timing_engine(
struct sde_encoder_phys *phys_enc)
{
struct drm_display_mode *mode = &phys_enc->cached_mode;
struct intf_timing_params p = { 0 };
struct sde_mdp_format_params *sde_fmt_params = NULL;
u32 fmt_fourcc = DRM_FORMAT_RGB888;
u32 fmt_mod = 0;
unsigned long lock_flags;
struct sde_hw_intf_cfg intf_cfg = { 0 };
if (WARN_ON(!phys_enc->hw_intf->ops.setup_timing_gen))
return;
if (WARN_ON(!phys_enc->hw_ctl->ops.setup_intf_cfg))
return;
DBG("enable mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
drm_mode_to_intf_timing_params(phys_enc, mode, &p);
sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod);
intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.wb = SDE_NONE;
spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &p,
sde_fmt_params);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
programmable_fetch_config(phys_enc, &p);
}
static void sde_encoder_phys_vid_wait_for_vblank(
struct sde_encoder_phys_vid *vid_enc)
{
DBG("");
mdp_irq_wait(vid_enc->base.mdp_kms, vid_enc->vblank_irq.irqmask);
}
static void sde_encoder_phys_vid_vblank_irq(struct mdp_irq *irq,
uint32_t irqstatus)
{
struct sde_encoder_phys_vid *vid_enc =
container_of(irq, struct sde_encoder_phys_vid,
vblank_irq);
struct sde_encoder_phys *phys_enc = &vid_enc->base;
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
}
static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
unsigned long lock_flags;
DBG("");
if (WARN_ON(phys_enc->enabled))
return;
if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
sde_encoder_phys_vid_setup_timing_engine(phys_enc);
spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
phys_enc->enabled = true;
mdp_irq_register(phys_enc->mdp_kms, &vid_enc->vblank_irq);
DBG("Registered IRQ for intf %d mask 0x%X", phys_enc->hw_intf->idx,
vid_enc->vblank_irq.irqmask);
}
static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
unsigned long lock_flags;
DBG("");
if (WARN_ON(!phys_enc->enabled))
return;
if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
/*
* Wait for a vsync so we know the ENABLE=0 has latched before
* the (connector) source of the vsync gets disabled;
* otherwise we end up in a funny state if we re-enable
* before the disable latches, with the result that some of
* the setting changes for the new modeset (like the new
* scanout buffer) don't latch properly.
*/
sde_encoder_phys_vid_wait_for_vblank(vid_enc);
mdp_irq_unregister(phys_enc->mdp_kms, &vid_enc->vblank_irq);
phys_enc->enabled = false;
}
static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
DBG("");
kfree(phys_enc->hw_intf);
kfree(vid_enc);
}
static void sde_encoder_phys_vid_get_hw_resources(
struct sde_encoder_phys *phys_enc,
struct sde_encoder_hw_resources *hw_res)
{
DBG("");
hw_res->intfs[phys_enc->hw_intf->idx] = true;
}
static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops)
{
ops->mode_set = sde_encoder_phys_vid_mode_set;
ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
ops->enable = sde_encoder_phys_vid_enable;
ops->disable = sde_encoder_phys_vid_disable;
ops->destroy = sde_encoder_phys_vid_destroy;
ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
}
struct sde_encoder_phys *sde_encoder_phys_vid_init(
struct sde_kms *sde_kms,
enum sde_intf intf_idx,
enum sde_ctl ctl_idx,
struct drm_encoder *parent,
struct sde_encoder_virt_ops parent_ops)
{
struct sde_encoder_phys *phys_enc = NULL;
struct sde_encoder_phys_vid *vid_enc = NULL;
u32 irq_mask = 0x8000000;
int ret = 0;
DBG("");
vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
if (!vid_enc) {
ret = -ENOMEM;
goto fail;
}
phys_enc = &vid_enc->base;
phys_enc->hw_intf =
sde_hw_intf_init(intf_idx, sde_kms->mmio, sde_kms->catalog);
if (!phys_enc->hw_intf) {
ret = -ENOMEM;
goto fail;
}
phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio,
sde_kms->catalog);
if (!phys_enc->hw_ctl) {
ret = -ENOMEM;
goto fail;
}
sde_encoder_phys_vid_init_cbs(&phys_enc->phys_ops);
phys_enc->parent = parent;
phys_enc->parent_ops = parent_ops;
phys_enc->mdp_kms = &sde_kms->base;
vid_enc->vblank_irq.irq = sde_encoder_phys_vid_vblank_irq;
vid_enc->vblank_irq.irqmask = irq_mask;
spin_lock_init(&phys_enc->spin_lock);
DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx);
return phys_enc;
fail:
DRM_ERROR("Failed to create encoder\n");
if (vid_enc)
sde_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
}

View file

@ -371,10 +371,14 @@ struct sde_cdm_cfg {
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @type: Interface type (DSI, DP, HDMI)
* @controller_id: Controller instance id, for hardware with multiple interfaces of the same type
* @prog_fetch_lines_worst_case: Worst-case number of lines needed to prefetch to cover latency
*/
struct sde_intf_cfg {
SDE_HW_BLK_INFO;
u32 type; /* interface type*/
u32 controller_id;
u32 prog_fetch_lines_worst_case;
};
/**

View file

@ -171,13 +171,13 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg)
{.id = SSPP_VIG3, .base = 0x0000b000,
.features = VIG_17X_MASK, .sblk = &layer},
{.id = SSPP_RGB0, .base = 0x00001500,
{.id = SSPP_RGB0, .base = 0x00015000,
.features = RGB_17X_MASK, .sblk = &layer},
{.id = SSPP_RGB1, .base = 0x00001700,
{.id = SSPP_RGB1, .base = 0x00017000,
.features = RGB_17X_MASK, .sblk = &layer},
{.id = SSPP_RGB2, .base = 0x00001900,
{.id = SSPP_RGB2, .base = 0x00019000,
.features = RGB_17X_MASK, .sblk = &layer},
{.id = SSPP_RGB3, .base = 0x00001B00,
{.id = SSPP_RGB3, .base = 0x0001B000,
.features = RGB_17X_MASK, .sblk = &layer},
{.id = SSPP_DMA0, .base = 0x00025000,
@ -244,13 +244,17 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg)
.intf_count = 4,
.intf = {
{.id = INTF_0, .base = 0x0006B000,
.type = INTF_NONE},
.type = INTF_NONE, .controller_id = 0,
.prog_fetch_lines_worst_case = 21},
{.id = INTF_1, .base = 0x0006B800,
.type = INTF_DSI},
.type = INTF_DSI, .controller_id = 0,
.prog_fetch_lines_worst_case = 21},
{.id = INTF_2, .base = 0x0006C000,
.type = INTF_DSI},
.type = INTF_DSI, .controller_id = 1,
.prog_fetch_lines_worst_case = 21},
{.id = INTF_3, .base = 0x0006C800,
.type = INTF_HDMI},
.type = INTF_HDMI, .controller_id = 0,
.prog_fetch_lines_worst_case = 21},
},
.wb_count = 3,
.wb = {

View file

@ -70,7 +70,8 @@ static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
int i;
for (i = 0; i < m->intf_count; i++) {
if (intf == m->intf[i].id) {
if ((intf == m->intf[i].id) &&
(m->intf[i].type != INTF_NONE)) {
b->base_off = addr;
b->blk_off = m->intf[i].base;
b->hwversion = m->hwversion;
@ -158,13 +159,13 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
(hsync_polarity << 0); /* HSYNC Polarity */
if (!fmt->is_yuv)
panel_format = (fmt->bits[0] |
(fmt->bits[1] << 2) |
(fmt->bits[2] << 4) |
panel_format = (fmt->bits[C0_G_Y] |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C2_R_Cr] << 4) |
(0x21 << 8));
else
/* Interface treats all the pixel data in RGB888 format */
panel_format |= (COLOR_8BIT |
panel_format = (COLOR_8BIT |
(COLOR_8BIT << 2) |
(COLOR_8BIT << 4) |
(0x21 << 8));
@ -354,8 +355,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
return ERR_PTR(-ENOMEM);
cfg = _intf_offset(idx, m, addr, &c->hw);
if (!cfg) {
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
pr_err("Error Panic\n");
return ERR_PTR(-EINVAL);
}
@ -371,3 +373,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
*/
return c;
}
void sde_hw_intf_deinit(struct sde_hw_intf *intf)
{
kfree(intf);
}

View file

@ -15,6 +15,7 @@
#include "sde_hw_catalog.h"
#include "sde_hw_mdss.h"
#include "sde_hw_mdp_util.h"
struct sde_hw_intf;
@ -100,4 +101,6 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
void __iomem *addr,
struct sde_mdss_cfg *m);
void sde_hw_intf_deinit(struct sde_hw_intf *intf);
#endif /*_SDE_HW_INTF_H */

View file

@ -58,7 +58,7 @@ static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
return -EINVAL;
if ((stage - SDE_STAGE_0) <= sblk->maxblendstages)
return sblk->blendstage_base[stage];
return sblk->blendstage_base[stage - 1];
else
return -EINVAL;
}
@ -126,7 +126,7 @@ static void sde_hw_lm_setup_blendcfg(struct sde_hw_mixer *ctx,
SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off,
fg->const_alpha);
SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off,
SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off,
bg->const_alpha);
SDE_REG_WRITE(c, LM_OP_MODE, blend_op);
}

View file

@ -15,9 +15,9 @@
#include "sde_hw_mdp_ctl.h"
#define CTL_LAYER(lm) \
(((lm) == 5) ? (0x024) : ((lm) * 0x004))
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
(0x40 + ((lm) * 0x004))
(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
@ -61,15 +61,14 @@ static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
return stages;
}
static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits,
u8 force_start)
static inline void sde_hw_ctl_force_start(struct sde_hw_ctl *ctx)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
SDE_REG_WRITE(c, CTL_FLUSH, flushbits);
if (force_start)
SDE_REG_WRITE(c, CTL_START, 0x1);
static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits)
{
SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, flushbits);
}
static inline int sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
@ -222,7 +221,7 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
struct sde_hw_stage_cfg *cfg)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg, mixercfg_ext;
u32 mixercfg, mixercfg_ext = 0;
int i, j;
u8 stages;
int pipes_per_stage;
@ -237,8 +236,8 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
else
pipes_per_stage = 1;
mixercfg = cfg->border_enable >> 24; /* BORDER_OUT */
;
mixercfg = cfg->border_enable << 24; /* BORDER_OUT */
for (i = 0; i <= stages; i++) {
for (j = 0; j < pipes_per_stage; j++) {
switch (cfg->stage[i][j]) {
@ -298,17 +297,38 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
}
static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
struct sde_hw_intf_cfg *cfg)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 intf_cfg = 0;
intf_cfg |= (cfg->intf & 0xF) << 4;
if (cfg->wb)
intf_cfg |= (cfg->wb & 0x3) + 2;
if (cfg->mode_3d) {
intf_cfg |= BIT(19);
intf_cfg |= (cfg->mode_3d - 1) << 20;
}
SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}
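/*
 * Illustrative sketch, not part of this commit: assuming the enum values
 * introduced in this change (INTF_0 = 1, so INTF_1 = 2), a video path on
 * INTF_1 with no writeback and no 3D mux packs CTL_TOP as:
 *
 *   intf_cfg = (2 & 0xF) << 4 = 0x20
 *
 * so SDE_REG_WRITE(c, CTL_TOP, 0x20) routes this ctl path to INTF_1.
 */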
static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
unsigned long cap)
{
ops->setup_flush = sde_hw_ctl_setup_flush;
ops->setup_start = sde_hw_ctl_force_start;
ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
ops->reset = sde_hw_ctl_reset_control;
ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
};
struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
@ -323,8 +343,9 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
return ERR_PTR(-ENOMEM);
cfg = _ctl_offset(idx, m, addr, &c->hw);
if (cfg) {
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
pr_err("Error Panic\n");
return ERR_PTR(-EINVAL);
}
@ -336,3 +357,8 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
return c;
}
void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
{
kfree(ctx);
}

View file

@ -27,14 +27,46 @@ struct sde_hw_stage_cfg {
u8 border_enable;
};
/**
* struct sde_hw_intf_cfg : Describes how the MDP writes data to the
* output interface
* @intf : Interface id
* @wb: writeback id
* @mode_3d: 3d mux configuration
*/
struct sde_hw_intf_cfg {
enum sde_intf intf;
enum sde_wb wb;
enum sde_3d_blend_mode mode_3d;
};
/**
* struct sde_hw_ctl_ops - Interface to the ctl path hw driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct sde_hw_ctl_ops {
/**
* Kickoff hw operation for SW controlled interfaces;
* DSI cmd mode and WB interface are SW controlled
* @ctx : ctl path ctx pointer
*/
void (*setup_start)(struct sde_hw_ctl *ctx);
/**
* FLUSH the modules for this control path
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
*/
void (*setup_flush)(struct sde_hw_ctl *ctx,
u32 flushbits,
u8 force_start);
u32 flushbits);
/**
* Setup ctl_path interface config
* @ctx : ctl path ctx pointer
* @cfg : interface config structure pointer
*/
void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
struct sde_hw_intf_cfg *cfg);
int (*reset)(struct sde_hw_ctl *c);
@ -87,7 +119,7 @@ struct sde_hw_ctl {
/**
* sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
* should be called before accessing every mixer.
* should be called before accessing every ctl path registers.
* @idx: ctl_path index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
@ -96,4 +128,10 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
void __iomem *addr,
struct sde_mdss_cfg *m);
/**
* sde_hw_ctl_destroy(): Destroys ctl driver context
* should be called to free the context
*/
void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
#endif /*_SDE_HW_MDP_CTL_H */

View file

@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#define SDE_NONE 0
#define SDE_CSC_MATRIX_COEFF_SIZE 9
#define SDE_CSC_CLAMP_SIZE 6
#define SDE_CSC_BIAS_SIZE 3
@ -57,7 +58,7 @@ enum sde_sspp_type {
};
enum sde_lm {
LM_0 = 0,
LM_0 = 1,
LM_1,
LM_2,
LM_3,
@ -79,7 +80,7 @@ enum sde_stage {
SDE_STAGE_MAX
};
enum sde_dspp {
DSPP_0 = 0,
DSPP_0 = 1,
DSPP_1,
DSPP_2,
DSPP_3,
@ -87,7 +88,7 @@ enum sde_dspp {
};
enum sde_ctl {
CTL_0 = 0,
CTL_0 = 1,
CTL_1,
CTL_2,
CTL_3,
@ -96,13 +97,13 @@ enum sde_ctl {
};
enum sde_cdm {
CDM_0 = 0,
CDM_0 = 1,
CDM_1,
CDM_MAX
};
enum sde_pingpong {
PINGPONG_0 = 0,
PINGPONG_0 = 1,
PINGPONG_1,
PINGPONG_2,
PINGPONG_3,
@ -111,7 +112,7 @@ enum sde_pingpong {
};
enum sde_intf {
INTF_0 = 0,
INTF_0 = 1,
INTF_1,
INTF_2,
INTF_3,
@ -208,12 +209,10 @@ enum sde_mdp_fetch_type {
* expected by the HW programming.
*/
enum {
COLOR_4BIT,
COLOR_5BIT,
COLOR_6BIT,
COLOR_8BIT,
COLOR_ALPHA_1BIT = 0,
COLOR_ALPHA_4BIT = 1,
COLOR_1BIT = 0,
COLOR_5BIT = 1,
COLOR_6BIT = 2,
COLOR_8BIT = 3,
};
enum sde_alpha_blend_type {
@ -224,6 +223,26 @@ enum sde_alpha_blend_type {
ALPHA_MAX
};
/**
* enum sde_3d_blend_mode
* Describes how the 3d data is blended
* @BLEND_3D_NONE : 3d blending not enabled
* @BLEND_3D_FRAME_INT : Frame interleaving
* @BLEND_3D_H_ROW_INT : Horizontal row interleaving
* @BLEND_3D_V_ROW_INT : Vertical row interleaving
* @BLEND_3D_COL_INT : Column interleaving
* @BLEND_3D_MAX :
*/
enum sde_3d_blend_mode {
BLEND_3D_NONE = 0,
BLEND_3D_FRAME_INT,
BLEND_3D_H_ROW_INT,
BLEND_3D_V_ROW_INT,
BLEND_3D_COL_INT,
BLEND_3D_MAX
};
struct addr_info {
u32 plane[SDE_MAX_PLANES];
};

View file

@ -182,7 +182,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
u32 opmode = 0;
u32 idx;
if (!_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
return;
opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
@ -210,7 +210,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C0_G_Y] << 0);
(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
if (flags & SDE_SSPP_ROT_90)
src_format |= BIT(11); /* ROT90 */
@ -235,12 +235,9 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
}
/* if this is YUV pixel format, enable CSC */
if (fmt->is_yuv) {
_sspp_setup_opmode(ctx, CSC, 0x0);
} else {
if (fmt->is_yuv)
src_format |= BIT(15);
_sspp_setup_opmode(ctx, CSC, 0x1);
}
_sspp_setup_opmode(ctx, CSC, fmt->is_yuv);
opmode |= MDSS_MDP_OP_PE_OVERRIDE;
@ -260,8 +257,8 @@ static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
struct sde_hw_blk_reg_map *c = &ctx->hw;
u8 color;
u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
const u32 bytemask = 0xffff;
const u8 shortmask = 0xff;
const u32 bytemask = 0xff;
const u32 shortmask = 0xffff;
u32 idx;
if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
@ -283,7 +280,7 @@ static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
((pe_ext->top_ftch[color] & bytemask) << 8)|
(pe_ext->top_rpt[color] & bytemask);
tot_req_pixels[color] = (((cfg->src.height +
tot_req_pixels[color] = (((pe_ext->roi_h[color] +
pe_ext->num_ext_pxls_top[color] +
pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
((pe_ext->roi_w[color] +
@ -323,30 +320,30 @@ static void sde_hw_sspp_setup_scalar(struct sde_hw_pipe *ctx,
scale_config = BIT(0) | BIT(1);
/* RGB/YUV config */
scale_config |= (pe_ext->horz_filter[0] & mask) << 8;
scale_config |= (pe_ext->vert_filter[0] & mask) << 10;
scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_LUMA] & mask) << 8;
scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_LUMA] & mask) << 10;
/* Alpha config */
scale_config |= (pe_ext->horz_filter[3] & mask) << 16;
scale_config |= (pe_ext->vert_filter[3] & mask) << 18;
scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_ALPHA] & mask) << 16;
scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_ALPHA] & mask) << 18;
SDE_REG_WRITE(c, SCALE_CONFIG + idx, scale_config);
SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
pe_ext->init_phase_x[0]);
pe_ext->init_phase_x[SDE_SSPP_COMP_LUMA]);
SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
pe_ext->init_phase_y[0]);
pe_ext->init_phase_y[SDE_SSPP_COMP_LUMA]);
SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
pe_ext->phase_step_x[0]);
pe_ext->phase_step_x[SDE_SSPP_COMP_LUMA]);
SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
pe_ext->phase_step_y[0]);
pe_ext->phase_step_y[SDE_SSPP_COMP_LUMA]);
SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
pe_ext->init_phase_x[1]);
pe_ext->init_phase_x[SDE_SSPP_COMP_CHROMA]);
SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
pe_ext->init_phase_y[1]);
pe_ext->init_phase_y[SDE_SSPP_COMP_CHROMA]);
SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
pe_ext->phase_step_x[1]);
pe_ext->phase_step_x[SDE_SSPP_COMP_CHROMA]);
SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
pe_ext->phase_step_y[0]);
pe_ext->phase_step_y[SDE_SSPP_COMP_CHROMA]);
}
/**
@@ -365,7 +362,7 @@ static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
return;
/* program pixel extension override */
if (!pe_ext)
if (pe_ext)
sde_hw_sspp_setup_pe_config(ctx, cfg, pe_ext);
/* src and dest rect programming */
@@ -388,10 +385,8 @@ static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
if (test_bit(SDE_SSPP_SCALAR_RGB, &ctx->cap->features) ||
test_bit(SDE_SSPP_SCALAR_QSEED2, &ctx->cap->features)) {
/* program decimation */
if (!cfg->horz_decimation)
decimation = (cfg->horz_decimation - 1) << 8;
if (!cfg->vert_decimation)
decimation |= (cfg->vert_decimation - 1);
decimation = ((1 << cfg->horz_decimation) - 1) << 8;
decimation |= ((1 << cfg->vert_decimation) - 1);
sde_hw_sspp_setup_scalar(ctx, pe_ext);
}
@@ -421,7 +416,6 @@ static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
for (i = 0; i < cfg->src.num_planes; i++)
SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4,
cfg->addr.plane[i]);
}
static void sde_hw_sspp_setup_csc_8bit(struct sde_hw_pipe *ctx,
@@ -476,7 +470,6 @@ static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx,
static void sde_hw_sspp_setup_histogram_v1(struct sde_hw_pipe *ctx,
void *cfg)
{
}
static void sde_hw_sspp_setup_memcolor(struct sde_hw_pipe *ctx,
@@ -589,3 +582,8 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
return c;
}
void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
{
kfree(ctx);
}

View file

@@ -15,7 +15,6 @@
#include "sde_hw_catalog.h"
#include "sde_hw_mdss.h"
#include "sde_mdp_formats.h"
#include "sde_hw_mdp_util.h"
struct sde_hw_pipe;
@@ -29,6 +28,15 @@ struct sde_hw_pipe;
#define SDE_SSPP_SOURCE_ROTATED_90 0x8
#define SDE_SSPP_ROT_90 0x10
/**
* Component indices
*/
enum {
SDE_SSPP_COMP_LUMA = 0,
SDE_SSPP_COMP_CHROMA = 1,
SDE_SSPP_COMP_ALPHA = 3
};
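/*
 * Illustrative sketch (not part of this change): these indices replace the
 * bare 0/1/3 constants when filling per-component scaler state, as in the
 * sde_hw_sspp_setup_scalar() hunk above. The filter choices and 4:2:0
 * chroma subsampling below are assumptions for the example.
 */
static void example_fill_filters(struct sde_hw_pixel_ext *pe)
{
	pe->horz_filter[SDE_SSPP_COMP_LUMA] = SDE_MDP_SCALE_FILTER_CA;
	pe->horz_filter[SDE_SSPP_COMP_ALPHA] = SDE_MDP_SCALE_FILTER_BIL;
	pe->phase_step_x[SDE_SSPP_COMP_CHROMA] =
		pe->phase_step_x[SDE_SSPP_COMP_LUMA] / 2; /* 4:2:0 chroma */
}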
enum {
SDE_MDP_FRAME_LINEAR,
SDE_MDP_FRAME_TILE_A4X,
@@ -88,6 +96,7 @@ struct sde_hw_pixel_ext {
int btm_rpt[SDE_MAX_PLANES];
uint32_t roi_w[SDE_MAX_PLANES];
uint32_t roi_h[SDE_MAX_PLANES];
/*
* Filter type to be used for scaling in horizontal and vertical
@@ -262,5 +271,12 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
void __iomem *addr,
struct sde_mdss_cfg *m);
/**
* sde_hw_sspp_destroy(): Destroys SSPP driver context
* should be called during HW pipe cleanup.
* @ctx: Pointer to SSPP driver context returned by sde_hw_sspp_init
*/
void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
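/*
 * Illustrative sketch (not part of this change): the expected lifecycle is
 * an init/destroy pair around use of the pipe. SSPP_VIG0 and the mmio/
 * catalog arguments are assumed to come from the kms context.
 */
static int example_pipe_lifecycle(void __iomem *mmio, struct sde_mdss_cfg *cat)
{
	struct sde_hw_pipe *pipe;

	pipe = sde_hw_sspp_init(SSPP_VIG0, mmio, cat);
	if (IS_ERR(pipe))
		return PTR_ERR(pipe);

	/* ... program source format, rects and pixel extension ... */

	sde_hw_sspp_destroy(pipe);
	return 0;
}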
#endif /*_SDE_HW_SSPP_H */

View file

@@ -12,31 +12,63 @@
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_kms.h"
#include "sde_hw_mdss.h"
#include "sde_hw_intf.h"
static int modeset_init_intf(struct sde_kms *sde_kms, int intf_num)
static const char * const iommu_ports[] = {
"mdp_0",
};
#define DEFAULT_MDP_SRC_CLK 200000000
int sde_disable(struct sde_kms *sde_kms)
{
struct sde_mdss_cfg *catalog = sde_kms->catalog;
u32 intf_type = catalog->intf[intf_num].type;
DBG("");
switch (intf_type) {
case INTF_NONE:
break;
case INTF_DSI:
break;
case INTF_LCDC:
break;
case INTF_HDMI:
break;
case INTF_EDP:
default:
break;
}
clk_disable_unprepare(sde_kms->ahb_clk);
clk_disable_unprepare(sde_kms->axi_clk);
clk_disable_unprepare(sde_kms->core_clk);
if (sde_kms->lut_clk)
clk_disable_unprepare(sde_kms->lut_clk);
return 0;
}
int sde_enable(struct sde_kms *sde_kms)
{
DBG("");
clk_prepare_enable(sde_kms->ahb_clk);
clk_prepare_enable(sde_kms->axi_clk);
clk_prepare_enable(sde_kms->core_clk);
if (sde_kms->lut_clk)
clk_prepare_enable(sde_kms->lut_clk);
return 0;
}
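/*
 * Illustrative sketch (not part of this change): register access is meant
 * to be bracketed by sde_enable()/sde_disable() so the AHB/AXI/core clocks
 * are on, mirroring sde_prepare_commit()/sde_complete_commit() below.
 */
static void example_program_hw(struct sde_kms *sde_kms)
{
	sde_enable(sde_kms);
	/* ... program MDP registers through the hw driver structs ... */
	sde_disable(sde_kms);
}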
static void sde_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms));
sde_enable(sde_kms);
}
static void sde_complete_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms));
sde_disable(sde_kms);
}
static void sde_wait_for_crtc_commit_done(struct msm_kms *kms,
struct drm_crtc *crtc)
{
}
static int modeset_init(struct sde_kms *sde_kms)
{
struct msm_drm_private *priv = sde_kms->dev->dev_private;
@@ -62,8 +94,9 @@ static int modeset_init(struct sde_kms *sde_kms)
|| !num_private_planes)
primary = false;
plane = sde_plane_init(dev, primary);
plane = sde_plane_init(dev, catalog->sspp[i].id, primary);
if (IS_ERR(plane)) {
pr_err("%s: sde_plane_init failed", __func__);
ret = PTR_ERR(plane);
goto fail;
}
@@ -71,7 +104,7 @@ static int modeset_init(struct sde_kms *sde_kms)
if (primary)
primary_planes[primary_planes_idx++] = plane;
if (num_private_planes)
if (primary && num_private_planes)
num_private_planes--;
}
@@ -81,15 +114,21 @@ static int modeset_init(struct sde_kms *sde_kms)
goto fail;
}
/* Create one CRTC per mixer */
for (i = 0; i < catalog->mixer_count; i++) {
/*
* Each mixer receives a private plane. We start
* Enumerate displays supported
*/
sde_encoders_init(dev);
/* Create one CRTC per display */
for (i = 0; i < priv->num_encoders; i++) {
/*
* Each CRTC receives a private plane. We start
* with first RGB, and then DMA and then VIG.
*/
struct drm_crtc *crtc;
crtc = sde_crtc_init(dev, NULL, primary_planes[i], i);
crtc = sde_crtc_init(dev, priv->encoders[i],
primary_planes[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
goto fail;
@@ -97,11 +136,13 @@ static int modeset_init(struct sde_kms *sde_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
for (i = 0; i < catalog->intf_count; i++) {
ret = modeset_init_intf(sde_kms, i);
if (ret)
goto fail;
}
/*
* Iterate through the list of encoders and
* set the possible CRTCs
*/
for (i = 0; i < priv->num_encoders; i++)
priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
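/*
 * For illustration: with priv->num_crtcs == 2, the mask above evaluates to
 * (1 << 2) - 1 == 0x3, i.e. every encoder may be driven by CRTC 0 or 1.
 */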
return 0;
fail:
return ret;
@@ -137,6 +178,9 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq_postinstall = sde_irq_postinstall,
.irq_uninstall = sde_irq_uninstall,
.irq = sde_irq,
.prepare_commit = sde_prepare_commit,
.complete_commit = sde_complete_commit,
.wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done,
.enable_vblank = sde_enable_vblank,
.disable_vblank = sde_disable_vblank,
.get_format = mdp_get_format,
@@ -184,6 +228,7 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev)
ret = PTR_ERR(sde_kms->mmio);
goto fail;
}
pr_err("Mapped Mdp address space @%pK", sde_kms->mmio);
sde_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
if (IS_ERR(sde_kms->vbif)) {
@@ -247,8 +292,27 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev)
get_clk(pdev, &sde_kms->mmagic_clk, "mmagic_clk", false);
get_clk(pdev, &sde_kms->iommu_clk, "iommu_clk", false);
if (sde_kms->mmagic) {
ret = regulator_enable(sde_kms->mmagic);
if (ret) {
dev_err(sde_kms->dev->dev,
"failed to enable mmagic GDSC: %d\n", ret);
goto fail;
}
}
if (sde_kms->mmagic_clk) {
ret = clk_prepare_enable(sde_kms->mmagic_clk);
if (ret) {
dev_err(sde_kms->dev->dev, "failed to enable mmagic_clk\n");
goto undo_gdsc;
}
}
return sde_kms;
undo_gdsc:
if (sde_kms->mmagic)
regulator_disable(sde_kms->mmagic);
fail:
if (kms)
sde_destroy(kms);
@@ -256,6 +320,111 @@ fail:
return ERR_PTR(ret);
}
static int sde_translation_ctrl_pwr(struct sde_kms *sde_kms, bool on)
{
struct device *dev = sde_kms->dev->dev;
int ret;
if (on) {
if (sde_kms->iommu_clk) {
ret = clk_prepare_enable(sde_kms->iommu_clk);
if (ret) {
dev_err(dev, "failed to enable iommu_clk\n");
goto undo_mmagic_clk;
}
}
} else {
if (sde_kms->iommu_clk)
clk_disable_unprepare(sde_kms->iommu_clk);
if (sde_kms->mmagic_clk)
clk_disable_unprepare(sde_kms->mmagic_clk);
if (sde_kms->mmagic)
regulator_disable(sde_kms->mmagic);
}
return 0;
undo_mmagic_clk:
if (sde_kms->mmagic_clk)
clk_disable_unprepare(sde_kms->mmagic_clk);
return ret;
}
int sde_mmu_init(struct sde_kms *sde_kms)
{
struct sde_mdss_cfg *catalog = sde_kms->catalog;
struct sde_hw_intf *intf = NULL;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
int i, ret;
/*
* Make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
sde_enable(sde_kms);
for (i = 0; i < catalog->intf_count; i++) {
intf = sde_hw_intf_init(catalog->intf[i].id,
sde_kms->mmio,
catalog);
if (!IS_ERR_OR_NULL(intf)) {
intf->ops.enable_timing(intf, 0x0);
sde_hw_intf_deinit(intf);
}
}
sde_disable(sde_kms);
msleep(20);
iommu = iommu_domain_alloc(&platform_bus_type);
if (!IS_ERR_OR_NULL(iommu)) {
mmu = msm_smmu_new(sde_kms->dev->dev, MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(sde_kms->dev->dev,
"failed to init iommu: %d\n", ret);
iommu_domain_free(iommu);
goto fail;
}
ret = sde_translation_ctrl_pwr(sde_kms, true);
if (ret) {
dev_err(sde_kms->dev->dev,
"failed to power iommu: %d\n", ret);
mmu->funcs->destroy(mmu);
goto fail;
}
ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(sde_kms->dev->dev,
"failed to attach iommu: %d\n", ret);
mmu->funcs->destroy(mmu);
goto fail;
}
} else {
dev_info(sde_kms->dev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
mmu = NULL;
}
sde_kms->mmu = mmu;
sde_kms->mmu_id = msm_register_mmu(sde_kms->dev, mmu);
if (sde_kms->mmu_id < 0) {
ret = sde_kms->mmu_id;
dev_err(sde_kms->dev->dev,
"failed to register sde iommu: %d\n", ret);
goto fail;
}
return 0;
fail:
return ret;
}
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -282,10 +451,20 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
sde_kms->catalog = catalog;
/* we need to set a default rate before enabling.
* Set a safe rate first, before initializing catalog
* later set a more optimal rate based on bandwidth/clock
* requirements
*/
clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK);
sde_enable(sde_kms);
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
*/
sde_mmu_init(sde_kms);
/*
* modeset_init should create the DRM related objects i.e. CRTCs,
@@ -296,6 +475,14 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
/*
* we can assume the max crtc width is equal to the max supported
* by LM_0
* Also fixing the max height to 4k
*/
dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth;
dev->mode_config.max_height = 4096;
return msm_kms;
fail:

View file

@@ -26,6 +26,7 @@ struct sde_kms {
struct sde_mdss_cfg *catalog;
struct msm_mmu *mmu;
int mmu_id;
/* io/register spaces: */
void __iomem *mmio, *vbif;
@@ -86,18 +87,8 @@ int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
void sde_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void sde_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
int sde_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
void sde_plane_complete_flip(struct drm_plane *plane);
struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane);
struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
bool private_plane);
uint32_t sde_crtc_vblank(struct drm_crtc *crtc);
@@ -108,7 +99,16 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_plane *plane, int id);
struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf);
struct sde_encoder_hw_resources {
bool intfs[INTF_MAX];
bool pingpongs[PINGPONG_MAX];
};
void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
struct sde_encoder_hw_resources *hw_res);
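/*
 * Illustrative sketch (not part of this change): a caller is assumed to
 * zero the struct, let the encoder mark the INTF/PINGPONG blocks it needs,
 * and then walk the flags. The helper name is hypothetical.
 */
static void example_query_encoder(struct drm_encoder *enc)
{
	struct sde_encoder_hw_resources hw_res;
	int i;

	memset(&hw_res, 0, sizeof(hw_res));
	sde_encoder_get_hw_resources(enc, &hw_res);

	for (i = 0; i < INTF_MAX; i++)
		if (hw_res.intfs[i])
			DBG("encoder uses INTF index %d", i);
}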
void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
void (*cb)(void *), void *data);
void sde_encoders_init(struct drm_device *dev);
int sde_irq_domain_init(struct sde_kms *sde_kms);
int sde_irq_domain_fini(struct sde_kms *sde_kms);

View file

@@ -0,0 +1,134 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include "sde_mdp_formats.h"
static struct sde_mdp_format_params sde_mdp_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
true, 4, 0),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
true, 4, 0),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
false, 3, 0),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
false, 3, 0),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
false, 2, 0),
INTERLEAVED_RGB_FMT(BGR565,
0, 5, 6, 5,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
false, 2, 0),
PSEDUO_YUV_FMT(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_MDP_CHROMA_420, 0),
PSEDUO_YUV_FMT(NV21,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
SDE_MDP_CHROMA_420, 0),
PSEDUO_YUV_FMT(NV16,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_MDP_CHROMA_H2V1, 0),
PSEDUO_YUV_FMT(NV61,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
SDE_MDP_CHROMA_H2V1, 0),
INTERLEAVED_YUV_FMT(VYUY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(UYVY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(YUYV,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(YVYU,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
PLANAR_YUV_FMT(YUV420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb, C0_G_Y,
false, SDE_MDP_CHROMA_420, 2,
0),
PLANAR_YUV_FMT(YVU420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr, C0_G_Y,
false, SDE_MDP_CHROMA_420, 2,
0),
};
struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format,
u32 fmt_modifier)
{
u32 i = 0;
struct sde_mdp_format_params *fmt = NULL;
for (i = 0; i < sizeof(sde_mdp_format_map)/sizeof(*sde_mdp_format_map);
i++)
if (format == sde_mdp_format_map[i].format) {
fmt = &sde_mdp_format_map[i];
break;
}
return fmt;
}
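/*
 * Illustrative sketch (not part of this change): the lookup is keyed on the
 * DRM fourcc and returns NULL for formats not in the table. Passing 0 for
 * fmt_modifier matches the caller in sde_plane_mode_set().
 */
static bool example_format_is_yuv(u32 pixel_format)
{
	struct sde_mdp_format_params *fmt;

	fmt = sde_mdp_get_format_params(pixel_format, 0);
	return fmt ? fmt->is_yuv : false;
}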

View file

@@ -58,6 +58,7 @@ alpha, chroma, count, bp, flg) \
.is_yuv = true, \
.flag = flg \
}
#define PSEDUO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg) \
{ \
.format = DRM_FORMAT_ ## fmt, \
@@ -92,122 +93,12 @@ alpha, chroma, count, bp, flg) \
.flag = flg \
}
static struct sde_mdp_format_params sde_mdp_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
true, 4, 0),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
true, 4, 0),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
true, 4, 0),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
false, 3, 0),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
false, 3, 0),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
false, 2, 0),
INTERLEAVED_RGB_FMT(BGR565,
0, 5, 6, 5,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
false, 2, 0),
PSEDUO_YUV_FMT(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_MDP_CHROMA_420, 0),
PSEDUO_YUV_FMT(NV21,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
SDE_MDP_CHROMA_420, 0),
PSEDUO_YUV_FMT(NV16,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_MDP_CHROMA_H2V1, 0),
PSEDUO_YUV_FMT(NV61,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
SDE_MDP_CHROMA_H2V1, 0),
INTERLEAVED_YUV_FMT(VYUY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(UYVY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(YUYV,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
INTERLEAVED_YUV_FMT(YVYU,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
false, SDE_MDP_CHROMA_H2V1, 4, 2,
0),
PLANAR_YUV_FMT(YUV420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb, C0_G_Y,
false, SDE_MDP_CHROMA_420, 2,
0),
PLANAR_YUV_FMT(YVU420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr, C0_G_Y,
false, SDE_MDP_CHROMA_420, 2,
0),
};
/**
* sde_mdp_get_format_params(): Returns sde format structure pointer.
* @format: DRM format
* @fmt_modifier: DRM format modifier
*/
struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format,
u32 fmt_modifier)
{
u32 i = 0;
struct sde_mdp_format_params *fmt = NULL;
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++)
if (format == sde_mdp_format_map[i].format) {
fmt = &sde_mdp_format_map[i];
break;
}
return fmt;
}
u32 fmt_modifier);
#endif /*_SDE_MDP_FORMATS_H */

View file

@@ -11,103 +11,746 @@
*/
#include "sde_kms.h"
#include "sde_hwio.h"
#include "sde_hw_mdp_ctl.h"
#include "sde_mdp_formats.h"
#include "sde_hw_sspp.h"
#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
#define PHASE_STEP_SHIFT 21
#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
#define PHASE_RESIDUAL 15
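/*
 * For illustration: phase steps use PHASE_STEP_SHIFT (21) fractional bits,
 * so PHASE_STEP_UNIT_SCALE represents 1.0 source pixel consumed per
 * destination pixel. Downscaling 1280 -> 640 yields
 * mult_frac(1 << PHASE_STEP_SHIFT, 1280, 640) == 2 << PHASE_STEP_SHIFT,
 * i.e. 2.0 source pixels advanced per output pixel.
 */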
#define SDE_PLANE_FEATURE_SCALER \
(BIT(SDE_SSPP_SCALAR_QSEED2)| \
BIT(SDE_SSPP_SCALAR_QSEED3)| \
BIT(SDE_SSPP_SCALAR_RGB))
#ifndef SDE_PLANE_DEBUG_START
#define SDE_PLANE_DEBUG_START()
#endif
#ifndef SDE_PLANE_DEBUG_END
#define SDE_PLANE_DEBUG_END()
#endif
struct sde_plane {
struct drm_plane base;
const char *name;
int mmu_id;
enum sde_sspp pipe;
uint32_t features; /* capabilities from catalog */
uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
struct sde_hw_pipe *pipe_hw;
struct sde_hw_pipe_cfg pipe_cfg;
struct sde_hw_pixel_ext pixel_ext;
};
#define to_sde_plane(x) container_of(x, struct sde_plane, base)
static int sde_plane_update(struct drm_plane *plane,
static bool sde_plane_enabled(struct drm_plane_state *state)
{
return state->fb && state->crtc;
}
static void sde_plane_set_scanout(struct drm_plane *plane,
struct sde_hw_pipe_cfg *pipe_cfg, struct drm_framebuffer *fb)
{
struct sde_plane *psde = to_sde_plane(plane);
int i;
if (pipe_cfg && fb && psde->pipe_hw->ops.setup_sourceaddress) {
/* stride */
i = min_t(int, ARRAY_SIZE(fb->pitches), SDE_MAX_PLANES);
while (i) {
--i;
pipe_cfg->src.ystride[i] = fb->pitches[i];
}
/* address */
for (i = 0; i < ARRAY_SIZE(pipe_cfg->addr.plane); ++i)
pipe_cfg->addr.plane[i] = msm_framebuffer_iova(fb,
psde->mmu_id, i);
/* hw driver */
psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
}
}
static void sde_plane_scale_helper(struct drm_plane *plane,
uint32_t src, uint32_t dst, uint32_t *phase_steps,
enum sde_hw_filter *filter, struct sde_mdp_format_params *fmt,
uint32_t chroma_subsampling)
{
/* calculate phase steps, leave init phase as zero */
phase_steps[SDE_SSPP_COMP_LUMA] =
mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
phase_steps[SDE_SSPP_COMP_CHROMA] =
phase_steps[SDE_SSPP_COMP_LUMA] / chroma_subsampling;
/* calculate scaler config, if necessary */
if (src != dst) {
filter[SDE_SSPP_COMP_ALPHA] = (src < dst) ?
SDE_MDP_SCALE_FILTER_BIL :
SDE_MDP_SCALE_FILTER_PCMN;
if (fmt->is_yuv)
filter[SDE_SSPP_COMP_LUMA] = SDE_MDP_SCALE_FILTER_CA;
else
filter[SDE_SSPP_COMP_LUMA] =
filter[SDE_SSPP_COMP_ALPHA];
}
}
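/*
 * For illustration (values assumed): an NV12 layer scaled 1920 -> 960
 * horizontally with chroma_subsampling == 2 gives
 * phase_steps[SDE_SSPP_COMP_LUMA] == 2 << PHASE_STEP_SHIFT and
 * phase_steps[SDE_SSPP_COMP_CHROMA] == 1 << PHASE_STEP_SHIFT; since this is
 * a YUV downscale, filter[SDE_SSPP_COMP_ALPHA] == SDE_MDP_SCALE_FILTER_PCMN
 * and filter[SDE_SSPP_COMP_LUMA] == SDE_MDP_SCALE_FILTER_CA.
 */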
/* CIFIX: clean up fmt/subsampling params once we're using fourcc formats */
static void _sde_plane_pixel_ext_helper(struct drm_plane *plane,
uint32_t src, uint32_t dst, uint32_t decimated_src,
uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
int *out_edge2, struct sde_mdp_format_params *fmt,
uint32_t chroma_subsampling, bool post_compare)
{
/* CIFIX: adapted from mdss_mdp_pipe_calc_pixel_extn() */
int64_t edge1, edge2, caf;
uint32_t src_work;
int i, tmp;
if (plane && phase_steps && out_src && out_edge1 && out_edge2 && fmt) {
/* enable CAF for YUV formats */
if (fmt->is_yuv)
caf = PHASE_STEP_UNIT_SCALE;
else
caf = 0;
for (i = 0; i < SDE_MAX_PLANES; i++) {
src_work = decimated_src;
if (i == 1 || i == 2)
src_work /= chroma_subsampling;
if (post_compare)
src = src_work;
if (!(fmt->is_yuv) && (src == dst)) {
/* unity */
edge1 = 0;
edge2 = 0;
} else if (dst >= src) {
/* upscale */
edge1 = (1 << PHASE_RESIDUAL);
edge1 -= caf;
edge2 = (1 << PHASE_RESIDUAL);
edge2 += (dst - 1) * *(phase_steps + i);
edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
edge2 += caf;
edge2 = -(edge2);
} else {
/* downscale */
edge1 = 0;
edge2 = (dst - 1) * *(phase_steps + i);
edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
edge2 += *(phase_steps + i);
edge2 = -(edge2);
}
/* only enable CAF for luma plane */
caf = 0;
/* populate output arrays */
*(out_src + i) = src_work;
/* edge updates taken from __pxl_extn_helper */
/* CIFIX: why are we casting first to uint32_t? */
if (edge1 >= 0) {
tmp = (uint32_t)edge1;
tmp >>= PHASE_STEP_SHIFT;
*(out_edge1 + i) = -tmp;
} else {
tmp = (uint32_t)(-edge1);
*(out_edge1 + i) = (tmp + PHASE_STEP_UNIT_SCALE
- 1) >> PHASE_STEP_SHIFT;
}
if (edge2 >= 0) {
tmp = (uint32_t)edge2;
tmp >>= PHASE_STEP_SHIFT;
*(out_edge2 + i) = -tmp;
} else {
tmp = (uint32_t)(-edge2);
*(out_edge2 + i) = (tmp + PHASE_STEP_UNIT_SCALE
- 1) >> PHASE_STEP_SHIFT;
}
}
}
}
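/*
 * For illustration: in the unity RGB case (src == dst, !is_yuv) both edges
 * are zero, so no pixel extension is programmed. The sign convention used
 * by the caller below is: a non-negative result is programmed as a repeat
 * count (*_rpt), a negative one as an overfetch (*_ftch) value.
 */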
static int sde_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct sde_plane *psde = to_sde_plane(plane);
struct sde_plane_state *pstate;
const struct mdp_format *format;
uint32_t nplanes, pix_format, tmp;
int i;
struct sde_mdp_format_params *fmt;
struct sde_hw_pixel_ext *pe;
int ret = 0;
SDE_PLANE_DEBUG_START();
nplanes = drm_format_num_planes(fb->pixel_format);
pstate = to_sde_plane_state(plane->state);
format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format;
/* src values are in Q16 fixed point, convert to integer */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", psde->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
/* update format configuration */
memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
psde->pipe_cfg.src.format = sde_mdp_get_format_params(pix_format,
0/* CIFIX: fmt_modifier */);
psde->pipe_cfg.src.width = fb->width;
psde->pipe_cfg.src.height = fb->height;
psde->pipe_cfg.src.num_planes = nplanes;
sde_plane_set_scanout(plane, &psde->pipe_cfg, fb);
psde->pipe_cfg.src_rect.x = src_x;
psde->pipe_cfg.src_rect.y = src_y;
psde->pipe_cfg.src_rect.w = src_w;
psde->pipe_cfg.src_rect.h = src_h;
psde->pipe_cfg.dst_rect.x = crtc_x;
psde->pipe_cfg.dst_rect.y = crtc_y;
psde->pipe_cfg.dst_rect.w = crtc_w;
psde->pipe_cfg.dst_rect.h = crtc_h;
psde->pipe_cfg.horz_decimation = 0;
psde->pipe_cfg.vert_decimation = 0;
/* get sde pixel format definition */
fmt = psde->pipe_cfg.src.format;
/* update pixel extensions */
pe = &(psde->pixel_ext);
if (!pe->enable_pxl_ext) {
uint32_t chroma_subsample_h, chroma_subsample_v;
chroma_subsample_h = psde->pipe_cfg.horz_decimation ? 1 :
drm_format_horz_chroma_subsampling(pix_format);
chroma_subsample_v = psde->pipe_cfg.vert_decimation ? 1 :
drm_format_vert_chroma_subsampling(pix_format);
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
/* calculate phase steps */
sde_plane_scale_helper(plane, src_w, crtc_w,
pe->phase_step_x,
pe->horz_filter, fmt, chroma_subsample_h);
sde_plane_scale_helper(plane, src_h, crtc_h,
pe->phase_step_y,
pe->vert_filter, fmt, chroma_subsample_v);
/* calculate left/right/top/bottom pixel extensions */
tmp = DECIMATED_DIMENSION(src_w,
psde->pipe_cfg.horz_decimation);
if (fmt->is_yuv)
tmp &= ~0x1;
_sde_plane_pixel_ext_helper(plane, src_w, crtc_w, tmp,
pe->phase_step_x,
pe->roi_w,
pe->num_ext_pxls_left,
pe->num_ext_pxls_right, fmt,
chroma_subsample_h, 0);
tmp = DECIMATED_DIMENSION(src_h,
psde->pipe_cfg.vert_decimation);
_sde_plane_pixel_ext_helper(plane, src_h, crtc_h, tmp,
pe->phase_step_y,
pe->roi_h,
pe->num_ext_pxls_top,
pe->num_ext_pxls_btm, fmt,
chroma_subsample_v, 1);
/* CIFIX: port "Single pixel rgb scale adjustment"? */
for (i = 0; i < SDE_MAX_PLANES; i++) {
if (pe->num_ext_pxls_left[i] >= 0)
pe->left_rpt[i] =
pe->num_ext_pxls_left[i];
else
pe->left_ftch[i] =
pe->num_ext_pxls_left[i];
if (pe->num_ext_pxls_right[i] >= 0)
pe->right_rpt[i] =
pe->num_ext_pxls_right[i];
else
pe->right_ftch[i] =
pe->num_ext_pxls_right[i];
if (pe->num_ext_pxls_top[i] >= 0)
pe->top_rpt[i] =
pe->num_ext_pxls_top[i];
else
pe->top_ftch[i] =
pe->num_ext_pxls_top[i];
if (pe->num_ext_pxls_btm[i] >= 0)
pe->btm_rpt[i] =
pe->num_ext_pxls_btm[i];
else
pe->btm_ftch[i] =
pe->num_ext_pxls_btm[i];
}
}
if (psde->pipe_hw->ops.setup_sourceformat)
psde->pipe_hw->ops.setup_sourceformat(psde->pipe_hw,
&psde->pipe_cfg, 0 /* CIFIX: flags */);
if (psde->pipe_hw->ops.setup_rects)
psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
&psde->pipe_cfg, &psde->pixel_ext);
/* update csc */
SDE_PLANE_DEBUG_END();
return ret;
}
static int sde_plane_prepare_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct sde_plane *psde = to_sde_plane(plane);
if (!new_state->fb)
return 0;
SDE_PLANE_DEBUG_START();
SDE_PLANE_DEBUG_END();
DBG("%s: prepare: FB[%u]", psde->name, fb->base.id);
return msm_framebuffer_prepare(fb, psde->mmu_id);
}
static void sde_plane_cleanup_fb(struct drm_plane *plane,
const struct drm_plane_state *old_state)
{
struct drm_framebuffer *fb = old_state->fb;
struct sde_plane *psde = to_sde_plane(plane);
if (!fb)
return;
SDE_PLANE_DEBUG_START();
SDE_PLANE_DEBUG_END();
DBG("%s: cleanup: FB[%u]", psde->name, fb->base.id);
msm_framebuffer_cleanup(fb, psde->mmu_id);
}
static int sde_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct sde_plane *psde = to_sde_plane(plane);
struct drm_plane_state *old_state = plane->state;
const struct mdp_format *format;
SDE_PLANE_DEBUG_START();
SDE_PLANE_DEBUG_END();
DBG("%s: check (%d -> %d)", psde->name,
sde_plane_enabled(old_state), sde_plane_enabled(state));
if (sde_plane_enabled(state)) {
/* CIFIX: don't use mdp format? */
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format) &&
(!(psde->features & SDE_PLANE_FEATURE_SCALER) ||
!(psde->features & BIT(SDE_SSPP_CSC)))) {
dev_err(plane->dev->dev,
"Pipe doesn't support YUV\n");
return -EINVAL;
}
if (!(psde->features & SDE_PLANE_FEATURE_SCALER) &&
(((state->src_w >> 16) != state->crtc_w) ||
((state->src_h >> 16) != state->crtc_h))) {
dev_err(plane->dev->dev,
"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
state->src_w >> 16, state->src_h >> 16,
state->crtc_w, state->crtc_h);
return -EINVAL;
}
}
if (sde_plane_enabled(state) && sde_plane_enabled(old_state)) {
/* we cannot change SMP block configuration during scanout: */
bool full_modeset = false;
if (state->fb->pixel_format != old_state->fb->pixel_format) {
DBG("%s: pixel_format change!", psde->name);
full_modeset = true;
}
if (state->src_w != old_state->src_w) {
DBG("%s: src_w change!", psde->name);
full_modeset = true;
}
if (to_sde_plane_state(old_state)->pending) {
DBG("%s: still pending!", psde->name);
full_modeset = true;
}
if (full_modeset) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state->state,
state->crtc);
crtc_state->mode_changed = true;
to_sde_plane_state(state)->mode_changed = true;
}
} else {
to_sde_plane_state(state)->mode_changed = true;
}
return 0;
}
static int sde_plane_disable(struct drm_plane *plane)
static void sde_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
return 0;
struct sde_plane *sde_plane = to_sde_plane(plane);
struct drm_plane_state *state = plane->state;
DBG("%s: update", sde_plane->name);
SDE_PLANE_DEBUG_START();
if (!sde_plane_enabled(state)) {
to_sde_plane_state(state)->pending = true;
} else if (to_sde_plane_state(state)->mode_changed) {
int ret;
to_sde_plane_state(state)->pending = true;
ret = sde_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
} else {
sde_plane_set_scanout(plane, &sde_plane->pipe_cfg, state->fb);
}
SDE_PLANE_DEBUG_END();
}
/* helper to install properties which are common to planes and crtcs */
static void sde_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
struct drm_device *dev = plane->dev;
struct msm_drm_private *dev_priv = dev->dev_private;
struct drm_property *prop;
SDE_PLANE_DEBUG_START();
#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
if (!prop) { \
prop = drm_property_##fnc(dev, 0, #name, \
##__VA_ARGS__); \
if (!prop) { \
dev_warn(dev->dev, \
"Create property %s failed\n", \
#name); \
return; \
} \
dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
} \
drm_object_attach_property(&plane->base, prop, init_val); \
} while (0)
#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
INSTALL_PROPERTY(name, NAME, init_val, \
create_range, min, max)
#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
INSTALL_PROPERTY(name, NAME, init_val, \
create_enum, name##_prop_enum_list, \
ARRAY_SIZE(name##_prop_enum_list))
INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
#undef INSTALL_RANGE_PROPERTY
#undef INSTALL_ENUM_PROPERTY
#undef INSTALL_PROPERTY
SDE_PLANE_DEBUG_END();
}
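/*
 * Illustrative sketch (not part of this change): the single
 * INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1) use above expands to
 * roughly the helper below, creating the "zpos" property once per device
 * and attaching it to each plane with an initial value of 1.
 */
static void example_install_zpos(struct drm_device *dev,
		struct msm_drm_private *dev_priv, struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = dev_priv->plane_property[PLANE_PROP_ZPOS];
	if (!prop) {
		prop = drm_property_create_range(dev, 0, "zpos", 1, 255);
		if (!prop)
			return;
		dev_priv->plane_property[PLANE_PROP_ZPOS] = prop;
	}
	drm_object_attach_property(&plane->base, prop, 1);
}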
static int sde_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct sde_plane_state *pstate;
struct msm_drm_private *dev_priv = dev->dev_private;
int ret = 0;
SDE_PLANE_DEBUG_START();
pstate = to_sde_plane_state(state);
#define SET_PROPERTY(name, NAME, type) do { \
if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
pstate->name = (type)val; \
DBG("Set property %s %d", #name, (type)val); \
goto done; \
} \
} while (0)
SET_PROPERTY(zpos, ZPOS, uint8_t);
dev_err(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
SDE_PLANE_DEBUG_END();
return ret;
#undef SET_PROPERTY
}
static int sde_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
int rc;
SDE_PLANE_DEBUG_START();
rc = sde_plane_atomic_set_property(plane, plane->state, property,
val);
SDE_PLANE_DEBUG_END();
return rc;
}
static int sde_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = plane->dev;
struct sde_plane_state *pstate;
struct msm_drm_private *dev_priv = dev->dev_private;
int ret = 0;
SDE_PLANE_DEBUG_START();
pstate = to_sde_plane_state(state);
#define GET_PROPERTY(name, NAME, type) do { \
if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
*val = pstate->name; \
DBG("Get property %s %lld", #name, *val); \
goto done; \
} \
} while (0)
GET_PROPERTY(zpos, ZPOS, uint8_t);
dev_err(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
SDE_PLANE_DEBUG_END();
return ret;
#undef GET_PROPERTY
}
static void sde_plane_destroy(struct drm_plane *plane)
{
struct sde_plane *sde_plane = to_sde_plane(plane);
struct msm_drm_private *priv = plane->dev->dev_private;
struct sde_plane *psde = to_sde_plane(plane);
if (priv->kms)
sde_plane_disable(plane);
SDE_PLANE_DEBUG_START();
if (psde->pipe_hw)
sde_hw_sspp_destroy(psde->pipe_hw);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(sde_plane);
kfree(psde);
SDE_PLANE_DEBUG_END();
}
/* helper to install properties which are common to planes and crtcs */
void sde_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
static void sde_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
SDE_PLANE_DEBUG_START();
if (state->fb)
drm_framebuffer_unreference(state->fb);
kfree(to_sde_plane_state(state));
SDE_PLANE_DEBUG_END();
}
int sde_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
static struct drm_plane_state *
sde_plane_duplicate_state(struct drm_plane *plane)
{
return -EINVAL;
struct sde_plane_state *pstate;
if (WARN_ON(!plane->state))
return NULL;
SDE_PLANE_DEBUG_START();
pstate = kmemdup(to_sde_plane_state(plane->state),
sizeof(*pstate), GFP_KERNEL);
if (pstate && pstate->base.fb)
drm_framebuffer_reference(pstate->base.fb);
pstate->mode_changed = false;
pstate->pending = false;
SDE_PLANE_DEBUG_END();
return &pstate->base;
}
static void sde_plane_reset(struct drm_plane *plane)
{
struct sde_plane_state *pstate;
SDE_PLANE_DEBUG_START();
if (plane->state && plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
kfree(to_sde_plane_state(plane->state));
pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
memset(pstate, 0, sizeof(struct sde_plane_state));
/* assign default blend parameters */
pstate->alpha = 255;
pstate->premultiplied = 0;
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
pstate->zpos = STAGE_BASE;
else
pstate->zpos = STAGE0 + drm_plane_index(plane);
pstate->base.plane = plane;
plane->state = &pstate->base;
SDE_PLANE_DEBUG_END();
}
static const struct drm_plane_funcs sde_plane_funcs = {
.update_plane = sde_plane_update,
.disable_plane = sde_plane_disable,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sde_plane_destroy,
.set_property = sde_plane_set_property,
.atomic_set_property = sde_plane_atomic_set_property,
.atomic_get_property = sde_plane_atomic_get_property,
.reset = sde_plane_reset,
.atomic_duplicate_state = sde_plane_duplicate_state,
.atomic_destroy_state = sde_plane_destroy_state,
};
void sde_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
}
static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
.prepare_fb = sde_plane_prepare_fb,
.cleanup_fb = sde_plane_cleanup_fb,
.atomic_check = sde_plane_atomic_check,
.atomic_update = sde_plane_atomic_update,
};
int sde_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
{
return 0;
struct sde_plane *sde_plane = to_sde_plane(plane);
return sde_plane->pipe;
}
/* initialize plane */
struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane)
struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
bool private_plane)
{
static const char tmp_name[] = "---";
struct drm_plane *plane = NULL;
struct sde_plane *sde_plane;
struct sde_plane *psde;
struct sde_hw_ctl *sde_ctl;
struct msm_drm_private *priv;
struct sde_kms *kms;
struct sde_mdss_cfg *sde_cat;
int ret;
enum drm_plane_type type;
sde_plane = kzalloc(sizeof(*sde_plane), GFP_KERNEL);
if (!sde_plane) {
priv = dev->dev_private;
kms = to_sde_kms(to_mdp_kms(priv->kms));
psde = kzalloc(sizeof(*psde), GFP_KERNEL);
if (!psde) {
ret = -ENOMEM;
goto fail;
}
plane = &sde_plane->base;
memset(psde, 0, sizeof(*psde));
plane = &psde->base;
psde->pipe = pipe;
psde->name = tmp_name;
if (kms) {
/* mmu id for buffer mapping */
psde->mmu_id = kms->mmu_id;
/* check catalog for features mask */
sde_cat = kms->catalog;
if (sde_cat)
psde->features = sde_cat->sspp[pipe].features;
}
psde->nformats = mdp_get_formats(psde->formats,
ARRAY_SIZE(psde->formats),
!(psde->features & BIT(SDE_SSPP_CSC)) ||
!(psde->features & SDE_PLANE_FEATURE_SCALER));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs,
sde_plane->formats, sde_plane->nformats,
ret = drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs,
psde->formats, psde->nformats,
type);
if (ret)
goto fail;
drm_plane_helper_add(plane, &sde_plane_helper_funcs);
sde_plane_install_properties(plane, &plane->base);
psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, sde_cat);
if (IS_ERR(psde->pipe_hw)) {
ret = PTR_ERR(psde->pipe_hw);
psde->pipe_hw = NULL;
goto fail;
}
/* cache flush mask for later */
sde_ctl = sde_hw_ctl_init(CTL_0, kms->mmio, sde_cat);
if (!IS_ERR(sde_ctl)) {
if (sde_ctl->ops.get_bitmask_sspp)
sde_ctl->ops.get_bitmask_sspp(sde_ctl,
&psde->flush_mask, pipe);
sde_hw_ctl_destroy(sde_ctl);
}
pr_err("%s: Successfully created plane\n", __func__);
return plane;
fail:
pr_err("%s: Plane creation failed\n", __func__);
if (plane)
sde_plane_destroy(plane);