msm: mdss: share MDP smmu device mappings with other mdss clients

The rotator and MDP share the same stream ID on the sdm660/sdm630
targets, hence the MDP smmu device is shared with the rotator
device to map/unmap its buffers.

The change also handles concurrent secure use cases, such as
MDP running secure while the rotator runs non-secure, and vice
versa.

Change-Id: I3ff118baed3984d63e9a9fe94289d99523c7b3e9
Signed-off-by: Kalyan Thota <kalyant@codeaurora.org>
Kalyan Thota 2017-01-14 12:21:29 +05:30
parent 741fc8ee90
commit e8b77a585b
12 changed files with 405 additions and 63 deletions
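
As context for the diff that follows: the patch adds a small registration interface (mdss_smmu_ext.h, the last file in this change) through which a client such as the rotator receives the shared MDP smmu devices. A minimal client-side sketch, assuming only the names introduced by this patch (the example_* helpers are hypothetical):

#include <linux/printk.h>
#include <linux/mdss_smmu_ext.h>

/* Hypothetical client callback: invoked once per probed context bank,
 * either synchronously (if all banks have already probed) or deferred
 * from mdss_iommu_notify_users() when the last bank probes.
 */
static void example_smmu_callback(struct mdss_smmu_intf *smmu)
{
	if (!smmu)
		return;

	/* smmu->dev is the shared MDP smmu device for this domain */
	pr_debug("%s smmu dev %p, domain %d\n",
		 smmu->is_secure ? "secure" : "non-secure",
		 smmu->dev, smmu->domain);
}

static int example_register(void)
{
	/* Ask the MDP driver to hand over its smmu mappings */
	return mdss_smmu_request_mappings(example_smmu_callback);
}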

View file

@ -78,8 +78,6 @@ u32 sde_apply_comp_ratio_factor(u32 quota,
#define RES_UHD (3840*2160)
#define RES_WQXGA (2560*1600)
#define XIN_HALT_TIMEOUT_US 0x4000
#define MDSS_MDP_HW_REV_320 0x30020000 /* sdm660 */
#define MDSS_MDP_HW_REV_330 0x30030000 /* sdm630 */
static int sde_mdp_wait_for_xin_halt(u32 xin_id)
{

View file

@ -24,6 +24,9 @@
#include "sde_rotator_smmu.h"
#include "sde_rotator_formats.h"
#define MDSS_MDP_HW_REV_320 0x30020000 /* sdm660 */
#define MDSS_MDP_HW_REV_330 0x30030000 /* sdm630 */
struct sde_mult_factor {
uint32_t numer;
uint32_t denom;
@ -164,7 +167,9 @@ struct sde_rot_data_type {
int iommu_attached;
int iommu_ref_cnt;
int (*iommu_ctrl)(int enable);
int (*secure_session_ctrl)(int enable);
int (*wait_for_transition)(int state, int request);
struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
u32 nrt_vbif_dbg_bus_size;
@ -173,7 +178,7 @@ struct sde_rot_data_type {
void *sde_rot_hw;
int sec_cam_en;
bool callback_request;
struct ion_client *iclient;
};

View file

@ -530,7 +530,7 @@ static int sde_rotator_import_buffer(struct sde_layer_buffer *buffer,
return ret;
}
static int sde_rotator_secure_session_ctrl(bool enable)
static int _sde_rotator_secure_session_ctrl(bool enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
uint32_t sid_info;
@ -603,6 +603,39 @@ static int sde_rotator_secure_session_ctrl(bool enable)
return resp;
}
static int sde_rotator_secure_session_ctrl(bool enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int ret = -EINVAL;
/*
 * wait_for_transition and secure_session_ctrl are filled in by the
 * client callback.
 */
if (mdata->wait_for_transition && mdata->secure_session_ctrl &&
mdata->callback_request) {
ret = mdata->wait_for_transition(mdata->sec_cam_en, enable);
if (ret) {
SDEROT_ERR("failed Secure wait for transition %d\n",
ret);
} else {
if (mdata->sec_cam_en ^ enable) {
mdata->sec_cam_en = enable;
ret = mdata->secure_session_ctrl(enable);
if (ret)
mdata->sec_cam_en = 0;
}
}
} else if (!mdata->callback_request) {
ret = _sde_rotator_secure_session_ctrl(enable);
}
if (ret)
SDEROT_ERR("failed %d sde_rotator_secure_session %d\n",
ret, mdata->callback_request);
return ret;
}
static int sde_rotator_map_and_check_data(struct sde_rot_entry *entry)
{

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -45,6 +45,15 @@ struct sde_smmu_domain {
unsigned long size;
};
int sde_smmu_set_dma_direction(int dir)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
return ((mdata->mdss_version == MDSS_MDP_HW_REV_320) ||
(mdata->mdss_version == MDSS_MDP_HW_REV_330)) ?
DMA_BIDIRECTIONAL : dir;
}
static inline bool sde_smmu_is_valid_domain_type(
struct sde_rot_data_type *mdata, int domain_type)
{
@ -335,8 +344,8 @@ int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
return -EINVAL;
}
rc = msm_dma_map_sg_lazy(sde_smmu->dev, table->sgl, table->nents, dir,
dma_buf);
rc = msm_dma_map_sg_lazy(sde_smmu->dev, table->sgl, table->nents,
sde_smmu_set_dma_direction(dir), dma_buf);
if (rc != table->nents) {
SDEROT_ERR("dma map sg failed\n");
return -ENOMEM;
@ -357,13 +366,46 @@ void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
return;
}
msm_dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir,
dma_buf);
msm_dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents,
sde_smmu_set_dma_direction(dir), dma_buf);
}
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);
static void sde_smmu_callback(struct mdss_smmu_intf *smmu)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
if (!smmu)
return;
/* Copy mmu device info into sde private structure */
mdata->iommu_ctrl = smmu->iommu_ctrl;
mdata->wait_for_transition = smmu->wait_for_transition;
mdata->secure_session_ctrl = smmu->secure_session_ctrl;
if (smmu->is_secure) {
mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_SECURE].dev = smmu->dev;
mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_SECURE].domain =
SDE_IOMMU_DOMAIN_ROT_SECURE;
} else {
mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_UNSECURE].dev = smmu->dev;
mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain =
SDE_IOMMU_DOMAIN_ROT_UNSECURE;
}
SDEROT_INFO("sde_smmu_callback registered domain: %d\n",
smmu->is_secure);
}
int sde_smmu_ctrl(int enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
return ((mdata->iommu_ctrl) ?
mdata->iommu_ctrl(enable) : -EINVAL);
}
static int _sde_smmu_ctrl(int enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int rc = 0;
@ -442,13 +484,24 @@ int sde_smmu_secure_ctrl(int enable)
void sde_smmu_device_create(struct device *dev)
{
struct device_node *parent, *child;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
bool child_rot_sec = false;
bool child_rot_nsec = false;
parent = dev->of_node;
for_each_child_of_node(parent, child) {
if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC))
if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
of_platform_device_create(child, NULL, dev);
else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC))
child_rot_sec = true;
} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
of_platform_device_create(child, NULL, dev);
child_rot_nsec = true;
}
}
if (!child_rot_sec || !child_rot_nsec) {
mdss_smmu_request_mappings(sde_smmu_callback);
mdata->callback_request = true;
}
}
@ -616,6 +669,8 @@ int sde_smmu_probe(struct platform_device *pdev)
sde_smmu_enable_power(sde_smmu, false);
sde_smmu->dev = dev;
mdata->iommu_ctrl = _sde_smmu_ctrl;
SDEROT_INFO(
"iommu v2 domain[%d] mapping and clk register successful!\n",
smmu_domain.domain);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/mdss_smmu_ext.h>
#include "sde_rotator_io_util.h"
@ -28,11 +29,6 @@ enum sde_iommu_domain_type {
int sde_smmu_init(struct device *dev);
static inline int sde_smmu_dma_data_direction(int dir)
{
return dir;
}
int sde_smmu_ctrl(int enable);
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
@ -47,4 +43,5 @@ void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
int sde_smmu_secure_ctrl(int enable);
int sde_smmu_set_dma_direction(int dir);
#endif /* SDE_ROTATOR_SMMU_H */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -730,7 +730,8 @@ static int sde_mdp_put_img(struct sde_mdp_img_data *data, bool rotator,
}
if (!data->skip_detach) {
dma_buf_unmap_attachment(data->srcp_attachment,
data->srcp_table, dir);
data->srcp_table,
sde_smmu_set_dma_direction(dir));
dma_buf_detach(data->srcp_dma_buf,
data->srcp_attachment);
if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
@ -792,7 +793,8 @@ static int sde_mdp_get_img(struct sde_fb_data *img,
SDEROT_DBG("%d attach=%p\n", __LINE__, data->srcp_attachment);
data->srcp_table =
dma_buf_map_attachment(data->srcp_attachment, dir);
dma_buf_map_attachment(data->srcp_attachment,
sde_smmu_set_dma_direction(dir));
if (IS_ERR(data->srcp_table)) {
SDEROT_ERR("%d Failed to map attachment\n", __LINE__);
ret = PTR_ERR(data->srcp_table);
@ -919,7 +921,8 @@ static int sde_mdp_map_buffer(struct sde_mdp_img_data *data, bool rotator,
return ret;
err_unmap:
dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, dir);
dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
sde_smmu_set_dma_direction(dir));
dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
dma_buf_put(data->srcp_dma_buf);

View file

@ -22,6 +22,7 @@
#include <linux/irqreturn.h>
#include <linux/irqdomain.h>
#include <linux/mdss_io_util.h>
#include <linux/mdss_smmu_ext.h>
#include <linux/msm-bus.h>
#include <linux/file.h>
@ -216,14 +217,14 @@ struct reg_bus_client {
};
struct mdss_smmu_client {
struct device *dev;
struct mdss_smmu_intf base;
struct dma_iommu_mapping *mmu_mapping;
struct dss_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
bool handoff_pending;
void __iomem *mmu_base;
int domain;
struct list_head _client;
};
struct mdss_mdp_qseed3_lut_tbl {
@ -531,6 +532,8 @@ struct mdss_data_type {
struct mdss_mdp_destination_scaler *ds;
u32 sec_disp_en;
u32 sec_cam_en;
u32 sec_session_cnt;
wait_queue_head_t secure_waitq;
};
extern struct mdss_data_type *mdss_res;
@ -573,6 +576,7 @@ struct mdss_util_intf {
int (*iommu_ctrl)(int enable);
void (*iommu_lock)(void);
void (*iommu_unlock)(void);
int (*secure_session_ctrl)(int enable);
void (*bus_bandwidth_ctrl)(int enable);
int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota);
int (*panel_intf_status)(u32 disp_num, u32 intf_type);

View file

@ -98,6 +98,7 @@ static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
static DEFINE_MUTEX(mdp_clk_lock);
static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
static DEFINE_MUTEX(mdp_fs_idle_pc_lock);
static DEFINE_MUTEX(mdp_sec_ref_cnt_lock);
static struct mdss_panel_intf pan_types[] = {
{"dsi", MDSS_PANEL_INTF_DSI},
@ -2145,6 +2146,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
mdss_set_quirk(mdata, MDSS_QUIRK_MMSS_GDSC_COLLAPSE);
mdss_set_quirk(mdata, MDSS_QUIRK_MDP_CLK_SET_RATE);
mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
mdata->has_wb_ubwc = true;
set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map);
@ -2851,6 +2853,7 @@ static int mdss_mdp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mdata->reg_bus_clist);
atomic_set(&mdata->sd_client_count, 0);
atomic_set(&mdata->active_intf_cnt, 0);
init_waitqueue_head(&mdata->secure_waitq);
mdss_res->mdss_util = mdss_get_util_intf();
if (mdss_res->mdss_util == NULL) {
@ -5156,6 +5159,27 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
int ret = 0;
uint32_t sid_info;
struct scm_desc desc;
bool changed = false;
mutex_lock(&mdp_sec_ref_cnt_lock);
if (enable) {
if (mdata->sec_session_cnt == 0)
changed = true;
mdata->sec_session_cnt++;
} else {
if (mdata->sec_session_cnt != 0) {
mdata->sec_session_cnt--;
if (mdata->sec_session_cnt == 0)
changed = true;
} else {
pr_warn("%s: ref_count is not balanced\n",
__func__);
}
}
if (!changed)
goto end;
if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map)) {
/*
@ -5179,7 +5203,8 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
desc.args[3] = VMID_CP_CAMERA_PREVIEW;
mdata->sec_cam_en = 1;
} else {
return 0;
ret = 0;
goto end;
}
/* detach smmu contexts */
@ -5187,7 +5212,8 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
if (ret) {
pr_err("Error while detaching smmu contexts ret = %d\n",
ret);
return -EINVAL;
ret = -EINVAL;
goto end;
}
/* let the driver think smmu is still attached */
@ -5199,7 +5225,8 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
if (ret) {
pr_err("Error scm_call MEM_PROTECT_SD_CTRL(%u) ret=%dm resp=%x\n",
enable, ret, resp);
return -EINVAL;
ret = -EINVAL;
goto end;
}
resp = desc.ret[0];
@ -5232,7 +5259,8 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
if (ret) {
pr_err("Error while attaching smmu contexts ret = %d\n",
ret);
return -EINVAL;
ret = -EINVAL;
goto end;
}
}
MDSS_XLOG(enable);
@ -5255,10 +5283,11 @@ int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
enable, ret, resp);
}
if (ret)
return ret;
return resp;
end:
mutex_unlock(&mdp_sec_ref_cnt_lock);
return ret;
}
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
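
The hunks above make mdss_mdp_secure_session_ctrl() reference counted, so the SCM secure transition fires only on the first enable and the last disable when MDP and the rotator request the session concurrently. A standalone sketch of the pattern, under the simplifying assumption that only the count needs the lock (the patch itself holds mdp_sec_ref_cnt_lock across the whole transition); all example_* names are hypothetical:

#include <linux/mutex.h>
#include <linux/printk.h>

static DEFINE_MUTEX(example_sec_lock);
static u32 example_sec_cnt;

/* Returns true only when the caller must perform the actual secure
 * transition (count moves 0 -> 1 on enable, or 1 -> 0 on disable).
 */
static bool example_sec_refcount(bool enable)
{
	bool changed = false;

	mutex_lock(&example_sec_lock);
	if (enable) {
		if (example_sec_cnt++ == 0)
			changed = true;
	} else if (example_sec_cnt) {
		if (--example_sec_cnt == 0)
			changed = true;
	} else {
		pr_warn("%s: ref count not balanced\n", __func__);
	}
	mutex_unlock(&example_sec_lock);

	return changed;
}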

View file

@ -1314,10 +1314,13 @@ static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
static inline void mdss_update_sd_client(struct mdss_data_type *mdata,
bool status)
{
if (status)
if (status) {
atomic_inc(&mdata->sd_client_count);
else
} else {
atomic_add_unless(&mdss_res->sd_client_count, -1, 0);
if (!atomic_read(&mdss_res->sd_client_count))
wake_up_all(&mdata->secure_waitq);
}
}
static inline void mdss_update_sc_client(struct mdss_data_type *mdata,

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -28,6 +28,8 @@
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/qcom_iommu.h>
#include <linux/mdss_smmu_ext.h>
#include <asm/dma-iommu.h>
#include "soc/qcom/secure_buffer.h"
@ -40,6 +42,18 @@
static DEFINE_MUTEX(mdp_iommu_lock);
static struct mdss_smmu_private smmu_private;
struct msm_smmu_notifier_data {
struct list_head _user;
msm_smmu_handler_t callback;
};
struct mdss_smmu_private *mdss_smmu_get_private(void)
{
return &smmu_private;
}
void mdss_iommu_lock(void)
{
mutex_lock(&mdp_iommu_lock);
@ -50,6 +64,111 @@ void mdss_iommu_unlock(void)
mutex_unlock(&mdp_iommu_lock);
}
static int mdss_smmu_secure_wait(int state, int request)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int rc = 0;
/*
 * Case 1: MDP in secure display while the rotator is non-secure.
 */
if (!state && !request && mdss_get_sd_client_cnt()) {
rc = wait_event_timeout(mdata->secure_waitq,
(mdss_get_sd_client_cnt() == 0),
KOFF_TIMEOUT);
if (rc <= 0) {
pr_err("timed out waiting for Secure transtion: %d\n",
mdss_get_sd_client_cnt());
rc = -EINVAL;
}
}
return rc;
}
static int mdss_smmu_secure_session_ctrl(int enable)
{
int rc = 0;
/*
 * Currently the client requests only enable/disable.
 * TODO: secure camera is hardcoded; this needs to be extended.
 */
rc = mdss_mdp_secure_session_ctrl(enable,
MDP_SECURE_CAMERA_OVERLAY_SESSION);
if (rc)
pr_err("%s: mdss_mdp_secure_session_ctrl failed : %d\n",
__func__, rc);
return rc;
}
static inline bool all_devices_probed(struct mdss_smmu_private *prv)
{
struct device_node *child;
struct mdss_smmu_client *tmp;
int d_cnt = 0;
int p_cnt = 0;
if (!prv->pdev)
return false;
for_each_child_of_node(prv->pdev, child) {
if (is_mdss_smmu_compatible_device(child->name))
d_cnt++;
}
list_for_each_entry(tmp, &prv->smmu_device_list, _client) {
p_cnt++;
}
return d_cnt && (d_cnt == p_cnt);
}
void mdss_iommu_notify_users(struct mdss_smmu_private *prv)
{
struct msm_smmu_notifier_data *notify;
struct mdss_smmu_client *client;
/* Initiate callbacks for all the users who registered before probe */
if (all_devices_probed(prv)) {
list_for_each_entry(notify, &prv->user_list, _user) {
list_for_each_entry(client,
&prv->smmu_device_list, _client)
notify->callback(&client->base);
}
}
}
int mdss_smmu_request_mappings(msm_smmu_handler_t callback)
{
struct mdss_smmu_client *client;
struct msm_smmu_notifier_data *ndata;
struct mdss_smmu_private *prv = mdss_smmu_get_private();
int ret = 0;
mutex_lock(&prv->smmu_reg_lock);
if (!all_devices_probed(prv)) {
ndata = kzalloc(sizeof(struct msm_smmu_notifier_data),
GFP_KERNEL);
if (!ndata) {
ret = -ENOMEM;
goto done;
}
ndata->callback = callback;
list_add(&ndata->_user, &prv->user_list);
goto done;
}
/* Probe already done; mappings are available */
list_for_each_entry(client, &prv->smmu_device_list, _client) {
callback(&client->base);
}
done:
mutex_unlock(&prv->smmu_reg_lock);
return ret;
}
static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
struct dss_module_power *mp)
{
@ -182,7 +301,7 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
continue;
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->dev) {
if (mdss_smmu && mdss_smmu->base.dev) {
if (!mdss_smmu->handoff_pending) {
rc = mdss_smmu_enable_power(mdss_smmu, true);
if (rc) {
@ -196,8 +315,9 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
if (!mdss_smmu->domain_attached &&
mdss_smmu_is_valid_domain_condition(mdata,
i, true)) {
rc = arm_iommu_attach_device(mdss_smmu->dev,
mdss_smmu->mmu_mapping);
rc = arm_iommu_attach_device(
mdss_smmu->base.dev,
mdss_smmu->mmu_mapping);
if (rc) {
pr_err("iommu attach device failed for domain[%d] with err:%d\n",
i, rc);
@ -219,8 +339,8 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
err:
for (i--; i >= 0; i--) {
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->dev) {
arm_iommu_detach_device(mdss_smmu->dev);
if (mdss_smmu && mdss_smmu->base.dev) {
arm_iommu_detach_device(mdss_smmu->base.dev);
mdss_smmu_enable_power(mdss_smmu, false);
mdss_smmu->domain_attached = false;
}
@ -246,7 +366,7 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
continue;
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->dev) {
if (mdss_smmu && mdss_smmu->base.dev) {
if (!mdss_smmu->handoff_pending &&
mdss_smmu->domain_attached &&
mdss_smmu_is_valid_domain_condition(mdata,
@ -257,7 +377,7 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
* leave the smmu clocks on and only detach the
* smmu contexts
*/
arm_iommu_detach_device(mdss_smmu->dev);
arm_iommu_detach_device(mdss_smmu->base.dev);
mdss_smmu->domain_attached = false;
pr_debug("iommu v2 domain[%i] detached\n", i);
} else {
@ -289,7 +409,7 @@ static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
return NULL;
}
return dma_buf_attach(dma_buf, mdss_smmu->dev);
return dma_buf_attach(dma_buf, mdss_smmu->base.dev);
}
/*
@ -310,8 +430,8 @@ static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
return -EINVAL;
}
ATRACE_BEGIN("map_buffer");
rc = msm_dma_map_sg_lazy(mdss_smmu->dev, table->sgl, table->nents, dir,
dma_buf);
rc = msm_dma_map_sg_lazy(mdss_smmu->base.dev, table->sgl, table->nents,
dir, dma_buf);
if (rc != table->nents) {
pr_err("dma map sg failed\n");
return -ENOMEM;
@ -332,7 +452,7 @@ static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
}
ATRACE_BEGIN("unmap_buffer");
msm_dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir,
msm_dma_unmap_sg(mdss_smmu->base.dev, table->sgl, table->nents, dir,
dma_buf);
ATRACE_END("unmap_buffer");
}
@ -354,7 +474,7 @@ static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
return -EINVAL;
}
cpu_addr = dma_alloc_coherent(mdss_smmu->dev, size, iova, gfp);
cpu_addr = dma_alloc_coherent(mdss_smmu->base.dev, size, iova, gfp);
if (!cpu_addr) {
pr_err("dma alloc coherent failed!\n");
return -ENOMEM;
@ -373,7 +493,7 @@ static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
return;
}
dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
dma_free_coherent(mdss_smmu->base.dev, size, cpu_addr, iova);
}
/*
@ -440,7 +560,7 @@ static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
return -EINVAL;
}
*dma_addr = dma_map_single(mdss_smmu->dev, cpu_addr, size, dir);
*dma_addr = dma_map_single(mdss_smmu->base.dev, cpu_addr, size, dir);
if (IS_ERR_VALUE(*dma_addr)) {
pr_err("dma map single failed\n");
return -ENOMEM;
@ -458,7 +578,7 @@ static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
}
if (is_mdss_iommu_attached())
dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
dma_unmap_single(mdss_smmu->base.dev, dma_addr, size, dir);
}
int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev,
@ -497,7 +617,7 @@ static void mdss_smmu_deinit_v2(struct mdss_data_type *mdata)
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->dev)
if (mdss_smmu && mdss_smmu->base.dev)
arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
}
}
@ -537,19 +657,27 @@ static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
void mdss_smmu_device_create(struct device *dev)
{
struct device_node *parent, *child;
struct mdss_smmu_private *prv = mdss_smmu_get_private();
parent = dev->of_node;
for_each_child_of_node(parent, child) {
if (is_mdss_smmu_compatible_device(child->name))
of_platform_device_create(child, NULL, dev);
}
prv->pdev = parent;
}
int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
{
mdss_smmu_device_create(dev);
mdss_smmu_ops_init(mdata);
mdata->mdss_util->iommu_lock = mdss_iommu_lock;
mdata->mdss_util->iommu_unlock = mdss_iommu_unlock;
mdata->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
mdata->mdss_util->secure_session_ctrl =
mdss_smmu_secure_session_ctrl;
mdss_smmu_device_create(dev);
mdss_smmu_ops_init(mdata);
return 0;
}
@ -584,6 +712,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
{
struct device *dev;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct mdss_smmu_private *prv = mdss_smmu_get_private();
struct mdss_smmu_client *mdss_smmu;
int rc = 0;
struct mdss_smmu_domain smmu_domain;
@ -625,8 +754,9 @@ int mdss_smmu_probe(struct platform_device *pdev)
}
mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
mdss_smmu->domain = smmu_domain.domain;
mdss_smmu->base.domain = smmu_domain.domain;
mp = &mdss_smmu->mp;
mdss_smmu->base.is_secure = false;
memset(mp, 0, sizeof(struct dss_module_power));
if (of_find_property(pdev->dev.of_node,
@ -692,6 +822,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
pr_err("couldn't set secure pixel vmid\n");
goto release_mapping;
}
mdss_smmu->base.is_secure = true;
}
if (!mdata->handoff_pending)
@ -699,7 +830,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
else
mdss_smmu->handoff_pending = true;
mdss_smmu->dev = dev;
mdss_smmu->base.dev = dev;
address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
if (address) {
@ -713,6 +844,15 @@ int mdss_smmu_probe(struct platform_device *pdev)
pr_debug("unable to map context bank base\n");
}
mdss_smmu->base.iommu_ctrl = mdata->mdss_util->iommu_ctrl;
mdss_smmu->base.secure_session_ctrl =
mdata->mdss_util->secure_session_ctrl;
mdss_smmu->base.wait_for_transition = mdss_smmu_secure_wait;
list_add(&mdss_smmu->_client, &prv->smmu_device_list);
mdss_iommu_notify_users(prv);
pr_info("iommu v2 domain[%d] mapping and clk register successful!\n",
smmu_domain.domain);
return 0;
@ -736,8 +876,8 @@ int mdss_smmu_remove(struct platform_device *pdev)
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->dev &&
(mdss_smmu->dev == &pdev->dev))
if (mdss_smmu && mdss_smmu->base.dev &&
(mdss_smmu->base.dev == &pdev->dev))
arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
}
return 0;
@ -755,7 +895,18 @@ static struct platform_driver mdss_smmu_driver = {
static int mdss_smmu_register_driver(void)
{
return platform_driver_register(&mdss_smmu_driver);
struct mdss_smmu_private *prv = mdss_smmu_get_private();
int ret;
INIT_LIST_HEAD(&prv->smmu_device_list);
INIT_LIST_HEAD(&prv->user_list);
mutex_init(&prv->smmu_reg_lock);
ret = platform_driver_register(&mdss_smmu_driver);
if (ret)
pr_err("mdss_smmu_register_driver() failed!\n");
return ret;
}
static int __init mdss_smmu_driver_init(void)
@ -771,6 +922,15 @@ module_init(mdss_smmu_driver_init);
static void __exit mdss_smmu_driver_cleanup(void)
{
struct mdss_smmu_private *prv = mdss_smmu_get_private();
struct msm_smmu_notifier_data *node;
struct list_head *pos, *q;
list_for_each_safe(pos, q, &prv->user_list) {
node = list_entry(pos, struct msm_smmu_notifier_data, _user);
list_del(&node->_user);
kfree(node);
}
platform_driver_unregister(&mdss_smmu_driver);
}
module_exit(mdss_smmu_driver_cleanup);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -39,6 +39,13 @@ struct mdss_smmu_domain {
unsigned long size;
};
struct mdss_smmu_private {
struct device_node *pdev;
struct list_head smmu_device_list;
struct list_head user_list;
struct mutex smmu_reg_lock;
};
void mdss_smmu_register(struct device *dev);
int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
@ -135,13 +142,12 @@ static inline int mdss_smmu_get_domain_type(u64 flags, bool rotator)
if (flags & MDP_SECURE_OVERLAY_SESSION) {
type = (rotator &&
mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_SECURE].dev) ?
MDSS_IOMMU_DOMAIN_ROT_SECURE : MDSS_IOMMU_DOMAIN_SECURE;
mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_SECURE].base.dev) ?
MDSS_IOMMU_DOMAIN_ROT_SECURE : MDSS_IOMMU_DOMAIN_SECURE;
} else {
type = (rotator &&
mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_UNSECURE].dev) ?
MDSS_IOMMU_DOMAIN_ROT_UNSECURE :
MDSS_IOMMU_DOMAIN_UNSECURE;
mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_UNSECURE].base.dev) ?
MDSS_IOMMU_DOMAIN_ROT_UNSECURE : MDSS_IOMMU_DOMAIN_UNSECURE;
}
return type;
}

View file

@ -0,0 +1,49 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef MDSS_SMMU_EXT_H
#define MDSS_SMMU_EXT_H
/**
* struct mdss_smmu_intf: interface exposed to clients of the smmu driver.
* @dev: smmu device for attach/detach.
* @domain: domain for the context bank.
* @is_secure: true if the context bank serves a secure domain.
* @iommu_ctrl: function to enable/disable iommu attach.
* @secure_session_ctrl: function to enable/disable a secure session.
* @wait_for_transition: function to wait until a secure transition completes.
*/
struct mdss_smmu_intf {
struct device *dev;
int domain;
bool is_secure;
int (*iommu_ctrl)(int);
int (*secure_session_ctrl)(int);
int (*wait_for_transition)(int state, int request);
};
typedef void (*msm_smmu_handler_t) (struct mdss_smmu_intf *smmu);
/**
* mdss_smmu_request_mappings: function to request smmu mappings.
* A client driver requests the smmu dev via this API. The dev is
* returned in the same call context if probe has already finished;
* otherwise it is returned through the callback once probe completes.
* @callback: callback function invoked to hand over the smmu dev.
*/
int mdss_smmu_request_mappings(msm_smmu_handler_t callback);
#endif /* MDSS_SMMU_EXT_H */
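
A sketch of how a client is expected to drive these callbacks, modeled on sde_rotator_secure_session_ctrl() earlier in this change (the client_* names are illustrative and error handling is trimmed):

#include <linux/mdss_smmu_ext.h>

/* Toggle a secure session through the shared interface; cur_state
 * mirrors the client's cached sec_cam_en flag in this patch.
 */
static int client_secure_ctrl(struct mdss_smmu_intf *smmu,
			      int cur_state, int enable)
{
	int ret;

	/* Wait for any in-flight MDP secure transition to finish */
	ret = smmu->wait_for_transition(cur_state, enable);
	if (ret)
		return ret;

	/* Only call down when the requested state actually changes */
	if (cur_state ^ enable)
		ret = smmu->secure_session_ctrl(enable);

	return ret;
}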