Merge "msm: sde: add early display handoff feature"

commit 81dcb8ec19
Linux Build Service Account, 2017-07-28 16:48:57 -07:00, committed by Gerrit - the friendly Code Review server
13 changed files with 929 additions and 6 deletions

View file

@ -3,6 +3,7 @@ Qualcomm Technologies,Inc. Adreno/Snapdragon display controller
Required properties:
Optional properties:
- contiguous-region: reserved memory for HDMI and DSI buffers.
- qcom,sde-plane-id-map: plane id mapping for virtual plane.
- qcom,sde-plane-id: each virtual plane mapping node.
- reg: reg property.
@ -17,6 +18,8 @@ Optional properties:
Example:
&mdss_mdp {
contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi>;
qcom,sde-plane-id-map {
qcom,sde-plane-id@0 {
reg = <0x0>;

View file

@ -48,6 +48,7 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
sde/sde_splash.o \
sde_dbg_evtlog.o \
sde_io_util.o \
dba_bridge.o \

View file

@ -1253,6 +1253,13 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
uint32_t hpd_ctrl;
int i, ret;
unsigned long flags;
struct drm_connector *connector;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
connector = hdmi->connector;
priv = connector->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
@ -1292,9 +1299,11 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
}
}
if (!sde_kms->splash_info.handoff) {
sde_hdmi_set_mode(hdmi, false);
_sde_hdmi_phy_reset(hdmi);
sde_hdmi_set_mode(hdmi, true);
}
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@ -2863,6 +2872,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
struct msm_drm_private *priv = NULL;
struct hdmi *hdmi;
struct platform_device *pdev;
struct sde_kms *sde_kms;
DBG("");
if (!display || !display->drm_dev || !enc) {
@ -2921,6 +2931,19 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
enc->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
/*
* After initializing the HDMI bridge, check whether early
* display is enabled for HDMI. If so, increase the refcount
* of the HDMI power clocks so that clock_late_init skips
* the clock-disable operation when it finds clk.count == 1.
*/
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->splash_info.handoff) {
sde_hdmi_bridge_power_on(hdmi->bridge);
hdmi->power_on = true;
}
mutex_unlock(&display->display_lock);
return 0;

View file

@ -357,6 +357,13 @@ int sde_hdmi_set_property(struct drm_connector *connector,
int property_index,
uint64_t value,
void *display);
/**
* sde_hdmi_bridge_power_on - wrapper around _sde_hdmi_bridge_power_on.
* @bridge: Handle to the drm bridge.
*
* Return: void.
*/
void sde_hdmi_bridge_power_on(struct drm_bridge *bridge);
/**
* sde_hdmi_get_property() - get the connector properties

View file

@ -841,6 +841,11 @@ static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
void sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
_sde_hdmi_bridge_power_on(bridge);
}
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
.pre_enable = _sde_hdmi_bridge_pre_enable,
.enable = _sde_hdmi_bridge_enable,

View file

@ -46,6 +46,8 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
void (*enable)(struct msm_mmu *mmu);
void (*disable)(struct msm_mmu *mmu);
int (*set_property)(struct msm_mmu *mmu,
enum iommu_attr attr, void *data);
};
struct msm_mmu {

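The set_property hook added above gives callers a path to tweak IOMMU domain attributes through the msm_mmu abstraction. As a usage illustration, here is a minimal sketch assuming a valid msm_mmu pointer (example_enable_early_map and the early_map variable are hypothetical; DOMAIN_ATTR_EARLY_MAP support depends on the SMMU driver):

    #include <linux/iommu.h>
    #include "msm_mmu.h"

    static int example_enable_early_map(struct msm_mmu *mmu)
    {
            int early_map = 1;  /* non-zero: bypass stage 1 translation */
            int ret = -EINVAL;

            /* guard against backends that do not implement the hook */
            if (mmu->funcs && mmu->funcs->set_property)
                    ret = mmu->funcs->set_property(mmu,
                                    DOMAIN_ATTR_EARLY_MAP, &early_map);

            return ret;
    }

The sde_kms.c hunks below use exactly this pattern: first with the attribute set while LK is still scanning out, and later (in sde_splash_clean_up_exit_lk) with it cleared again.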
View file

@ -170,12 +170,36 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
/* callers can use this API to set an attribute of the SMMU */
static int msm_smmu_set_property(struct msm_mmu *mmu,
enum iommu_attr attr, void *data)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
int ret = 0;
if (!client)
return -EINVAL;
domain = client->mmu_mapping->domain;
if (!domain)
return -EINVAL;
ret = iommu_domain_set_attr(domain, attr, data);
if (ret)
DRM_ERROR("set domain attribute failed\n");
return ret;
}
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
.unmap = msm_smmu_unmap,
.destroy = msm_smmu_destroy,
.set_property = msm_smmu_set_property,
};
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {

View file

@ -604,6 +604,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
struct sde_kms *sde_kms;
struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
struct sde_splash_info *sinfo;
int rc;
if (!dev || !dev->dev_private || !encoder) {
@ -757,6 +758,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
SDE_DEBUG("connector %d attach encoder %d\n",
c_conn->base.base.id, encoder->base.id);
sinfo = &sde_kms->splash_info;
if (sinfo && sinfo->handoff)
sde_splash_setup_connector_count(sinfo, connector_type);
priv->connectors[priv->num_connectors++] = &c_conn->base;
return &c_conn->base;

View file

@ -600,14 +600,22 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
struct drm_connector *conn;
struct drm_device *dev;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
int i;
if (!crtc || !crtc->state || !crtc->dev) {
SDE_ERROR("invalid crtc\n");
return;
}
dev = crtc->dev;
priv = dev->dev_private;
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
cstate = to_sde_crtc_state(crtc->state);
SDE_EVT32(DRMID(crtc));
@ -616,6 +624,20 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
for (i = 0; i < cstate->num_connectors; ++i)
sde_connector_complete_commit(cstate->connectors[i]);
if (!sde_kms->splash_info.handoff &&
sde_kms->splash_info.lk_is_exited) {
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(conn, crtc->dev) {
if (conn->state->crtc != crtc)
continue;
sde_splash_clean_up_free_resource(priv->kms,
&priv->phandle,
conn->connector_type);
}
mutex_unlock(&dev->mode_config.mutex);
}
}
/**

View file

@ -343,6 +343,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
if (sde_kms->splash_info.handoff)
sde_splash_clean_up_exit_lk(kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
}
@ -985,8 +988,15 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
sde_hw_catalog_deinit(sde_kms->catalog);
sde_kms->catalog = NULL;
if (sde_kms->splash_info.handoff) {
if (sde_kms->core_client)
sde_splash_destroy(&sde_kms->splash_info,
&priv->phandle, sde_kms->core_client);
}
if (sde_kms->core_client)
sde_power_client_destroy(&priv->phandle,
sde_kms->core_client);
sde_kms->core_client = NULL;
if (sde_kms->vbif[VBIF_NRT])
@ -1098,6 +1108,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
continue;
}
/* Attaching the SMMU makes the IOMMU HW start working
* immediately, but the display HW programmed by LK is still
* accessing memory before the mapping is set up.
* So first set the DOMAIN_ATTR_EARLY_MAP attribute to 1 to
* bypass stage 1 translation in the IOMMU HW.
*/
if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
sde_kms->splash_info.handoff) {
ret = mmu->funcs->set_property(mmu,
DOMAIN_ATTR_EARLY_MAP,
&sde_kms->splash_info.handoff);
if (ret) {
SDE_ERROR("failed to set map att: %d\n", ret);
mmu->funcs->destroy(mmu);
goto fail;
}
}
aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
mmu, "sde");
if (IS_ERR(aspace)) {
@ -1115,6 +1143,19 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
goto fail;
}
/*
* It's safe now to map the physical memory block LK accesses.
*/
if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
sde_kms->splash_info.handoff) {
ret = sde_splash_smmu_map(sde_kms->dev, mmu,
&sde_kms->splash_info);
if (ret) {
SDE_ERROR("map rsv mem failed: %d\n", ret);
msm_gem_address_space_put(aspace);
goto fail;
}
}
}
return 0;
@ -1129,6 +1170,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
struct sde_kms *sde_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
struct sde_splash_info *sinfo;
int i, rc = -EINVAL;
if (!kms) {
@ -1218,6 +1260,33 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
rc = sde_splash_parse_dt(dev);
if (rc) {
SDE_ERROR("parse dt for splash info failed: %d\n", rc);
goto power_error;
}
/*
* Read the DISP_INTF_SEL register to check
* whether early display is enabled in LK.
*/
rc = sde_splash_get_handoff_status(kms);
if (rc) {
SDE_ERROR("get early splash status failed: %d\n", rc);
goto power_error;
}
/*
* When LK has enabled early display, sde_splash_init should be
* called first. It votes for data-bus bandwidth, because the
* display hardware is still accessing the AHB data bus and the
* device would otherwise reboot, and then checks that the
* splash memory is actually reserved.
*/
sinfo = &sde_kms->splash_info;
if (sinfo->handoff)
sde_splash_init(&priv->phandle, kms);
for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
u32 vbif_idx = sde_kms->catalog->vbif[i].id;
@ -1292,7 +1361,10 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
if (!sde_kms->splash_info.handoff)
sde_power_resource_enable(&priv->phandle,
sde_kms->core_client, false);
return 0;
drm_obj_init_err:

View file

@ -34,6 +34,7 @@
#include "sde_power_handle.h"
#include "sde_irq.h"
#include "sde_core_perf.h"
#include "sde_splash.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@ -157,6 +158,9 @@ struct sde_kms {
bool has_danger_ctrl;
void **hdmi_displays;
int hdmi_display_count;
/* splash handoff structure */
struct sde_splash_info splash_info;
};
struct vsync_info {

View file

@ -0,0 +1,631 @@
/*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/of_address.h>
#include <linux/debugfs.h>
#include <linux/memblock.h>
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_kms.h"
#include "sde_hw_mdss.h"
#include "sde_hw_util.h"
#include "sde_hw_intf.h"
#include "sde_hw_catalog.h"
#define MDP_SSPP_TOP0_OFF 0x1000
#define DISP_INTF_SEL 0x004
#define SPLIT_DISPLAY_EN 0x2F4
/* scratch registers */
#define SCRATCH_REGISTER_0 0x014
#define SCRATCH_REGISTER_1 0x018
#define SCRATCH_REGISTER_2 0x01C
#define SDE_LK_RUNNING_VALUE 0xC001CAFE
#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD
#define SDE_LK_EXIT_VALUE 0xDEADBEEF
#define SDE_LK_EXIT_MAX_LOOP 20
/*
* To free memory reserved at bootup we cannot call the __init
* free functions, so we release the pages ourselves with
* free_reserved_page().
*/
static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys,
size_t size)
{
unsigned long pfn_start, pfn_end, pfn_idx;
memblock_free(phys, size);
pfn_start = phys >> PAGE_SHIFT;
pfn_end = (phys + size) >> PAGE_SHIFT;
for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
free_reserved_page(pfn_to_page(pfn_idx));
}
static int _sde_splash_parse_dt_get_lk_pool_node(struct drm_device *dev,
struct sde_splash_info *sinfo)
{
struct device_node *parent, *node;
struct resource r;
int ret = 0;
if (!sinfo)
return -EINVAL;
parent = of_find_node_by_path("/reserved-memory");
if (!parent)
return -EINVAL;
node = of_find_node_by_name(parent, "lk_pool");
if (!node) {
SDE_ERROR("mem reservation for lk_pool is not presented\n");
ret = -EINVAL;
goto parent_node_err;
}
/* read the lk_pool memory region */
if (of_address_to_resource(node, 0, &r)) {
ret = -EINVAL;
goto child_node_err;
}
sinfo->lk_pool_paddr = (dma_addr_t)r.start;
sinfo->lk_pool_size = resource_size(&r);
DRM_INFO("lk_pool: addr:%pK, size:%pK\n",
(void *)sinfo->lk_pool_paddr,
(void *)sinfo->lk_pool_size);
child_node_err:
of_node_put(node);
parent_node_err:
of_node_put(parent);
return ret;
}
static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev,
struct sde_splash_info *sinfo)
{
unsigned long size = 0;
dma_addr_t start;
struct device_node *node;
int ret = 0, i = 0, len = 0;
/* get reserved memory for display module */
if (of_get_property(dev->dev->of_node, "contiguous-region", &len))
sinfo->splash_mem_num = len / sizeof(u32);
else
sinfo->splash_mem_num = 0;
sinfo->splash_mem_paddr =
kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num,
GFP_KERNEL);
if (!sinfo->splash_mem_paddr) {
SDE_ERROR("alloc splash_mem_paddr failed\n");
return -ENOMEM;
}
sinfo->splash_mem_size =
kmalloc(sizeof(size_t) * sinfo->splash_mem_num,
GFP_KERNEL);
if (!sinfo->splash_mem_size) {
SDE_ERROR("alloc splash_mem_size failed\n");
goto error;
}
sinfo->obj = kmalloc(sizeof(struct drm_gem_object *) *
sinfo->splash_mem_num, GFP_KERNEL);
if (!sinfo->obj) {
SDE_ERROR("construct splash gem objects failed\n");
goto error;
}
for (i = 0; i < sinfo->splash_mem_num; i++) {
node = of_parse_phandle(dev->dev->of_node,
"contiguous-region", i);
if (node) {
struct resource r;
ret = of_address_to_resource(node, 0, &r);
if (ret) {
of_node_put(node);
return ret;
}
size = resource_size(&r);
start = (dma_addr_t)r.start;
sinfo->splash_mem_paddr[i] = start;
sinfo->splash_mem_size[i] = size;
DRM_INFO("blk: %d, addr:%pK, size:%pK\n",
i, (void *)sinfo->splash_mem_paddr[i],
(void *)sinfo->splash_mem_size[i]);
of_node_put(node);
}
}
return ret;
error:
kfree(sinfo->splash_mem_paddr);
sinfo->splash_mem_paddr = NULL;
kfree(sinfo->splash_mem_size);
sinfo->splash_mem_size = NULL;
return -ENOMEM;
}
static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
{
return (SDE_LK_RUNNING_VALUE == SDE_REG_READ(&intr->hw,
SCRATCH_REGISTER_1)) ? true : false;
}
/**
* _sde_splash_notify_lk_exit.
*
* Function to monitor LK's status and tell it to exit.
*/
static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
{
int i = 0;
/* first, write the exit signal to the scratch register */
SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
while ((SDE_LK_EXIT_VALUE !=
SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
(++i < SDE_LK_EXIT_MAX_LOOP)) {
DRM_INFO("wait for LK's exit");
msleep(20);
}
if (i == SDE_LK_EXIT_MAX_LOOP)
SDE_ERROR("Loop LK's exit failed\n");
}
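The LK side of this handshake is not part of this change. From the scratch-register values defined above, the bootloader's expected behavior is roughly the following (a hypothetical sketch, not LK source; scratch1 and stop_display_pipeline() are stand-ins for LK's own register handle and teardown):

    /* hypothetical LK-side loop inferred from the kernel-side handshake */
    writel(SDE_LK_RUNNING_VALUE, scratch1);  /* announce: splash active */

    /* keep scanning out until the kernel requests shutdown */
    while (readl(scratch1) != SDE_LK_SHUT_DOWN_VALUE)
            ;

    stop_display_pipeline();                 /* quiesce LK's display HW */
    writel(SDE_LK_EXIT_VALUE, scratch1);     /* ack: kernel may take over */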
static int _sde_splash_gem_new(struct drm_device *dev,
struct sde_splash_info *sinfo)
{
int i, ret;
for (i = 0; i < sinfo->splash_mem_num; i++) {
sinfo->obj[i] = msm_gem_new(dev,
sinfo->splash_mem_size[i], MSM_BO_UNCACHED);
if (IS_ERR(sinfo->obj[i])) {
ret = PTR_ERR(sinfo->obj[i]);
SDE_ERROR("failed to allocate gem, ret=%d\n", ret);
goto error;
}
}
return 0;
error:
sinfo->obj[i] = NULL;
while (--i >= 0) {
msm_gem_free_object(sinfo->obj[i]);
sinfo->obj[i] = NULL;
}
return ret;
}
static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
dma_addr_t paddr;
int npages = obj->size >> PAGE_SHIFT;
int i;
p = drm_malloc_ab(npages, sizeof(struct page *));
if (!p)
return -ENOMEM;
paddr = phys;
for (i = 0; i < npages; i++) {
p[i] = phys_to_page(paddr);
paddr += PAGE_SIZE;
}
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
SDE_ERROR("failed to allocate sgt\n");
return -ENOMEM;
}
msm_obj->pages = p;
return 0;
}
static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj)
{
if (msm_obj->pages) {
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
drm_free_large(msm_obj->pages);
msm_obj->pages = NULL;
}
}
static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
{
kfree(sinfo->splash_mem_paddr);
sinfo->splash_mem_paddr = NULL;
kfree(sinfo->splash_mem_size);
sinfo->splash_mem_size = NULL;
kfree(sinfo->obj);
sinfo->obj = NULL;
}
static int _sde_splash_free_resource(struct msm_mmu *mmu,
struct sde_splash_info *sinfo, enum splash_connector_type conn)
{
struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
if (!msm_obj)
return -EINVAL;
if (mmu->funcs && mmu->funcs->unmap)
mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn],
msm_obj->sgt, NULL);
_sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
sinfo->splash_mem_size[conn]);
_sde_splash_destroy_gem_object(msm_obj);
return 0;
}
__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
int i = 0;
if (!phandle || !kms) {
SDE_ERROR("invalid phandle/kms\n");
return -EINVAL;
}
sde_kms = to_sde_kms(kms);
sinfo = &sde_kms->splash_info;
sinfo->dsi_connector_cnt = 0;
sinfo->hdmi_connector_cnt = 0;
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, true);
for (i = 0; i < sinfo->splash_mem_num; i++) {
if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
SDE_ERROR("failed to reserve memory\n");
/* withdraw the vote when failed. */
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, false);
return -EINVAL;
}
}
return 0;
}
void sde_splash_destroy(struct sde_splash_info *sinfo,
struct sde_power_handle *phandle,
struct sde_power_client *pclient)
{
struct msm_gem_object *msm_obj;
int i = 0;
if (!sinfo || !phandle || !pclient) {
SDE_ERROR("invalid sde_kms/phandle/pclient\n");
return;
}
for (i = 0; i < sinfo->splash_mem_num; i++) {
msm_obj = to_msm_bo(sinfo->obj[i]);
if (msm_obj)
_sde_splash_destroy_gem_object(msm_obj);
}
sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false);
_sde_splash_destroy_splash_node(sinfo);
}
/*
* sde_splash_parse_dt.
* Parse two kinds of reserved-memory nodes: first the memory
* reserved for the display buffers, then the memory node that
* LK's code and stack run from.
*/
int sde_splash_parse_dt(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
if (!priv || !priv->kms) {
SDE_ERROR("Invalid kms\n");
return -EINVAL;
}
sde_kms = to_sde_kms(priv->kms);
sinfo = &sde_kms->splash_info;
if (_sde_splash_parse_dt_get_display_node(dev, sinfo)) {
SDE_ERROR("get display node failed\n");
return -EINVAL;
}
if (_sde_splash_parse_dt_get_lk_pool_node(dev, sinfo)) {
SDE_ERROR("get LK pool node failed\n");
return -EINVAL;
}
return 0;
}
int sde_splash_get_handoff_status(struct msm_kms *kms)
{
uint32_t intf_sel = 0;
uint32_t split_display = 0;
uint32_t num_of_display_on = 0;
uint32_t i = 0;
struct sde_kms *sde_kms = to_sde_kms(kms);
struct sde_rm *rm;
struct sde_hw_blk_reg_map *c;
struct sde_splash_info *sinfo;
struct sde_mdss_cfg *catalog;
sinfo = &sde_kms->splash_info;
if (!sinfo) {
SDE_ERROR("%s(%d): invalid splash info\n",
__func__, __LINE__);
return -EINVAL;
}
rm = &sde_kms->rm;
if (!rm || !rm->hw_mdp) {
SDE_ERROR("invalid rm.\n");
return -EINVAL;
}
c = &rm->hw_mdp->hw;
if (c) {
intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN);
}
catalog = sde_kms->catalog;
if (intf_sel != 0) {
for (i = 0; i < catalog->intf_count; i++)
if ((intf_sel >> i*8) & 0x000000FF)
num_of_display_on++;
/*
* When split display is enabled, the DSI0 and DSI1 interfaces
* are considered a single display, so decrement
* 'num_of_display_on' by 1.
*/
if (split_display)
num_of_display_on--;
}
if (num_of_display_on) {
sinfo->handoff = true;
sinfo->program_scratch_regs = true;
} else {
sinfo->handoff = false;
sinfo->program_scratch_regs = false;
}
sinfo->lk_is_exited = false;
return 0;
}
int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
struct sde_splash_info *sinfo)
{
struct msm_gem_object *msm_obj;
int i = 0, ret = 0;
if (!mmu || !sinfo)
return -EINVAL;
/* first, construct drm_gem_objects for the splash memory */
if (_sde_splash_gem_new(dev, sinfo))
return -ENOMEM;
/* second, construct the sgt tables for the smmu map call */
for (i = 0; i < sinfo->splash_mem_num; i++) {
if (_sde_splash_get_pages(sinfo->obj[i],
sinfo->splash_mem_paddr[i]))
return -ENOMEM;
}
for (i = 0; i < sinfo->splash_mem_num; i++) {
msm_obj = to_msm_bo(sinfo->obj[i]);
if (mmu->funcs && mmu->funcs->map) {
ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i],
msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL);
if (!ret) {
SDE_ERROR("Map blk %d @%pK failed.\n",
i, (void *)sinfo->splash_mem_paddr[i]);
return -ENOMEM;
}
}
}
return ret ? 0 : -ENOMEM;
}
void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type)
{
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
sinfo->hdmi_connector_cnt++;
break;
case DRM_MODE_CONNECTOR_DSI:
sinfo->dsi_connector_cnt++;
break;
default:
SDE_ERROR("invalid connector_type %d\n", connector_type);
}
}
int sde_splash_clean_up_free_resource(struct msm_kms *kms,
struct sde_power_handle *phandle, int connector_type)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
int ret = 0;
static bool hdmi_is_released;
static bool dsi_is_released;
if (!phandle || !kms) {
SDE_ERROR("invalid phandle/kms.\n");
return -EINVAL;
}
sde_kms = to_sde_kms(kms);
sinfo = &sde_kms->splash_info;
if (!sinfo) {
SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
return -EINVAL;
}
/* Once both the HDMI and DSI resources are freed:
* 1. Destroy the splash node objects.
* 2. Drop the refcount taken in the bandwidth voting function.
*/
if (sinfo->hdmi_connector_cnt == 0 &&
sinfo->dsi_connector_cnt == 0) {
DRM_INFO("HDMI and DSI resource handoff is completed\n");
sinfo->lk_is_exited = false;
_sde_splash_destroy_splash_node(sinfo);
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, false);
return 0;
}
mmu = sde_kms->aspace[0]->mmu;
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
if (!hdmi_is_released)
sinfo->hdmi_connector_cnt--;
if ((sinfo->hdmi_connector_cnt == 0) && (!hdmi_is_released)) {
hdmi_is_released = true;
ret = _sde_splash_free_resource(mmu,
sinfo, SPLASH_HDMI);
}
break;
case DRM_MODE_CONNECTOR_DSI:
if (!dsi_is_released)
sinfo->dsi_connector_cnt--;
if ((sinfo->dsi_connector_cnt == 0) && (!dsi_is_released)) {
dsi_is_released = true;
ret = _sde_splash_free_resource(mmu,
sinfo, SPLASH_DSI);
}
break;
default:
ret = -EINVAL;
SDE_ERROR("%s: invalid connector_type %d\n",
__func__, connector_type);
}
return ret;
}
/*
* This function will:
* 1. Notify LK to exit and wait until it has done so.
* 2. Set DOMAIN_ATTR_EARLY_MAP back to 0 to re-enable stage 1
*    translation in the IOMMU.
*/
int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
{
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
struct sde_kms *sde_kms = to_sde_kms(kms);
int ret;
sinfo = &sde_kms->splash_info;
if (!sinfo) {
SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
return -EINVAL;
}
/* Monitor LK's status and tell it to exit. */
if (sinfo->program_scratch_regs) {
if (_sde_splash_lk_check(sde_kms->hw_intr))
_sde_splash_notify_lk_exit(sde_kms->hw_intr);
sinfo->handoff = false;
sinfo->program_scratch_regs = false;
}
if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
/* Do not return an error here, so that the
* "lk_is_exited" flag below is still set.
*/
SDE_ERROR("invalid mmu\n");
WARN_ON(1);
} else {
mmu = sde_kms->aspace[0]->mmu;
/* After LK has exited, set the early-map domain attribute
* back to 0 to re-enable stage 1 translation in the IOMMU driver.
*/
if (mmu->funcs && mmu->funcs->set_property) {
ret = mmu->funcs->set_property(mmu,
DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
if (ret)
SDE_ERROR("set_property failed\n");
}
}
sinfo->lk_is_exited = true;
return 0;
}

View file

@ -0,0 +1,124 @@
/*
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef SDE_SPLASH_H_
#define SDE_SPLASH_H_
#include "msm_kms.h"
#include "msm_mmu.h"
enum splash_connector_type {
SPLASH_DSI = 0,
SPLASH_HDMI,
};
struct sde_splash_info {
/* handoff flag */
bool handoff;
/* flag of display scratch registers */
bool program_scratch_regs;
/* indicates LK has fully exited */
bool lk_is_exited;
/* number of memory nodes used for display buffers */
uint32_t splash_mem_num;
/* physical address of memory node for display buffer */
phys_addr_t *splash_mem_paddr;
/* size of memory node */
size_t *splash_mem_size;
/* constructed gem objects for smmu mapping */
struct drm_gem_object **obj;
/* physical address of lk pool */
phys_addr_t lk_pool_paddr;
/* memory size of lk pool */
size_t lk_pool_size;
/* registered hdmi connector count */
uint32_t hdmi_connector_cnt;
/* registered DSI connector count */
uint32_t dsi_connector_cnt;
};
/* APIs for early splash handoff functions */
/**
* sde_splash_get_handoff_status.
*
* This function reads the DISP_INTF_SEL register to get
* the status of early splash.
*/
int sde_splash_get_handoff_status(struct msm_kms *kms);
/**
* sde_splash_init
*
* This function votes for bandwidth and checks that the splash
* memory is reserved.
*/
int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms);
/**
* sde_splash_setup_connector_count
*
* Count the registered DSI and HDMI connectors respectively.
*/
void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type);
/**
* sde_splash_clean_up_exit_lk.
*
* Tell LK to exit, and clean up the handoff resources.
*/
int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
/**
* sde_splash_clean_up_free_resource.
*
* Free the HDMI or DSI resources according to the
* given connector_type.
*/
int sde_splash_clean_up_free_resource(struct msm_kms *kms,
struct sde_power_handle *phandle, int connector_type);
/**
* sde_splash_parse_dt.
*
* Parse the reserved memory blocks from DT for early splash.
*/
int sde_splash_parse_dt(struct drm_device *dev);
/**
* sde_splash_smmu_map.
*
* Map the physical memory LK accessed through the IOMMU driver.
*/
int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
struct sde_splash_info *sinfo);
/**
* sde_splash_destroy
*
* Destroy the splash resources on the failure path.
*/
void sde_splash_destroy(struct sde_splash_info *sinfo,
struct sde_power_handle *phandle,
struct sde_power_client *pclient);
#endif
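Taken together, the hunks above wire these APIs into the KMS lifecycle roughly as follows (a condensed sketch of the flow already shown in sde_kms.c and sde_crtc.c, not a new code path):

    /* probe time, in sde_kms_hw_init(): latch early-splash state */
    sde_splash_parse_dt(dev);                /* reserved-memory nodes */
    sde_splash_get_handoff_status(kms);      /* reads DISP_INTF_SEL */
    if (sde_kms->splash_info.handoff)
            sde_splash_init(&priv->phandle, kms);  /* bandwidth vote */

    /* first atomic commit, in sde_kms_prepare_commit(): take over */
    if (sde_kms->splash_info.handoff)
            sde_splash_clean_up_exit_lk(kms);      /* scratch-reg handshake */

    /* commit completion, in sde_crtc_complete_commit(): per connector,
     * unmap and return the splash memory to the system
     */
    sde_splash_clean_up_free_resource(priv->kms, &priv->phandle,
                    conn->connector_type);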