msm: mdss: use DMA_BIDIRECTIONAL for 8937

Simultaneous maps of the same buffer with different read/write
permissions are possible due to the asynchronous nature of the
MDSS driver.

A client can stage the same buffer on both MDP and ROTATOR and
call the map API for each concurrently. Due to this concurrency,
the buffer may be mapped by MDP first (as a read-only source) and
ref-counted; the later ROTATOR call then reuses that mapping and
never updates it with the write permission required for a
destination buffer. This leads to a permission fault. Fix the
issue by requesting the map with the DMA_BIDIRECTIONAL flag.
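
As an illustration of the race, here is a minimal standalone C
sketch; the map cache below is hypothetical and only models the
ref-count reuse, it is not the actual ion/SMMU code (enum values
are local to the sketch):

/*
 * Hypothetical sketch: a per-buffer mapping is created once with
 * the direction of the first caller and only ref-counted after
 * that, so a later caller that needs write access never gets it.
 */
#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

struct mapping {
	int refcount;
	enum dma_dir dir;	/* direction of the *first* map call */
};

/* First map wins: later calls only bump the refcount. */
static enum dma_dir map_buffer(struct mapping *m, enum dma_dir dir)
{
	if (m->refcount++ == 0)
		m->dir = dir;
	return m->dir;		/* effective permissions of the mapping */
}

int main(void)
{
	struct mapping buf = { 0 };

	/* MDP maps the buffer first, read-only for the device. */
	map_buffer(&buf, DMA_TO_DEVICE);

	/*
	 * ROTATOR maps the same buffer as its destination; it needs
	 * device-write access, but the existing read-only mapping is
	 * reused -> permission fault when the rotator writes.
	 * Mapping DMA_BIDIRECTIONAL up front avoids this.
	 */
	if (map_buffer(&buf, DMA_FROM_DEVICE) != DMA_FROM_DEVICE)
		printf("rotator write would fault\n");
	return 0;
}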

Change-Id: Ieb819820b19d163fee541dd571c5a58dc78be7d3
Signed-off-by: Kalyan Thota <kalyant@codeaurora.org>
[cip@codeaurora.org: Resolved merge conflict]
Signed-off-by: Clarence Ip <cip@codeaurora.org>
commit 87345f4cde
parent dfe5ee688c
Author:    Kalyan Thota
Date:      2015-11-19 20:41:24 +05:30
Committer: David Keitel

5 changed files with 26 additions and 9 deletions


@@ -155,6 +155,7 @@ enum mdss_hw_quirk {
 	MDSS_QUIRK_DOWNSCALE_HANG,
 	MDSS_QUIRK_DSC_RIGHT_ONLY_PU,
 	MDSS_QUIRK_DSC_2SLICE_PU_THRPUT,
+	MDSS_QUIRK_DMA_BI_DIR,
 	MDSS_QUIRK_MAX,
 };


@@ -1289,6 +1289,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
 		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
 		mdss_mdp_init_default_prefill_factors(mdata);
 		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
 		break;
 	default:
 		mdata->max_target_zorder = 4; /* excluding base layer */


@@ -88,7 +88,7 @@ static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
 	}
 	sinfo->size = buf_size;
-	dma_buf_begin_cpu_access(sinfo->dma_buf, 0, size, DMA_FROM_DEVICE);
+	dma_buf_begin_cpu_access(sinfo->dma_buf, 0, size, DMA_BIDIRECTIONAL);
 	sinfo->splash_buffer = dma_buf_kmap(sinfo->dma_buf, 0);
 	if (IS_ERR(sinfo->splash_buffer)) {
 		pr_err("ion kernel memory mapping failed\n");
@@ -132,7 +132,8 @@ static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
 	if (!mdata || !mdata->iclient || !sinfo->dma_buf)
 		return;
-	dma_buf_end_cpu_access(sinfo->dma_buf, 0, sinfo->size, DMA_FROM_DEVICE);
+	dma_buf_end_cpu_access(sinfo->dma_buf, 0, sinfo->size,
+			DMA_BIDIRECTIONAL);
 	dma_buf_kunmap(sinfo->dma_buf, 0, sinfo->splash_buffer);
 	mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE, 0,


@@ -1047,7 +1047,8 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
 	}
 	if (!data->skip_detach) {
 		dma_buf_unmap_attachment(data->srcp_attachment,
-				data->srcp_table, dir);
+				data->srcp_table,
+				mdss_smmu_dma_data_direction(dir));
 		dma_buf_detach(data->srcp_dma_buf,
 				data->srcp_attachment);
 		dma_buf_put(data->srcp_dma_buf);
@@ -1121,7 +1122,8 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
 	}
 	data->srcp_table =
-		dma_buf_map_attachment(data->srcp_attachment, dir);
+		dma_buf_map_attachment(data->srcp_attachment,
+				mdss_smmu_dma_data_direction(dir));
 	if (IS_ERR(data->srcp_table)) {
 		ret = PTR_ERR(data->srcp_table);
 		goto err_detach;
@@ -1252,7 +1254,8 @@ static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
 	return ret;
 err_unmap:
-	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, dir);
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+		mdss_smmu_dma_data_direction(dir));
 	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
 	dma_buf_put(data->srcp_dma_buf);
 	return ret;


@@ -41,6 +41,14 @@ struct mdss_smmu_domain {
 void mdss_smmu_register(struct device *dev);
 int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
 
+static inline int mdss_smmu_dma_data_direction(int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return (mdss_has_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR)) ?
+		DMA_BIDIRECTIONAL : dir;
+}
+
 static inline bool is_mdss_smmu_compatible_device(const char *str)
 {
 	/* check the prefix */
@@ -177,7 +185,8 @@ static inline int mdss_smmu_map_dma_buf(struct dma_buf *dma_buf,
 		return -ENOSYS;
 	return mdata->smmu_ops.smmu_map_dma_buf(dma_buf, table,
-			domain, iova, size, dir);
+			domain, iova, size,
+			mdss_smmu_dma_data_direction(dir));
 }
 
 static inline void mdss_smmu_unmap_dma_buf(struct sg_table *table, int domain,
@@ -185,7 +194,8 @@ static inline void mdss_smmu_unmap_dma_buf(struct sg_table *table, int domain,
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
 	if (mdata->smmu_ops.smmu_unmap_dma_buf)
-		mdata->smmu_ops.smmu_unmap_dma_buf(table, domain, dir, dma_buf);
+		mdata->smmu_ops.smmu_unmap_dma_buf(table, domain,
+				mdss_smmu_dma_data_direction(dir), dma_buf);
 }
 
 static inline int mdss_smmu_dma_alloc_coherent(struct device *dev, size_t size,
@@ -246,7 +256,8 @@ static inline int mdss_smmu_dsi_map_buffer(phys_addr_t phys,
 		return -ENOSYS;
 	return mdata->smmu_ops.smmu_dsi_map_buffer(phys, domain, size,
-			dma_addr, cpu_addr, dir);
+			dma_addr, cpu_addr,
+			mdss_smmu_dma_data_direction(dir));
 }
 
 static inline void mdss_smmu_dsi_unmap_buffer(dma_addr_t dma_addr, int domain,
@@ -255,7 +266,7 @@ static inline void mdss_smmu_dsi_unmap_buffer(dma_addr_t dma_addr, int domain,
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
 	if (mdata->smmu_ops.smmu_dsi_unmap_buffer)
 		mdata->smmu_ops.smmu_dsi_unmap_buffer(dma_addr, domain,
-			size, dir);
+			size, mdss_smmu_dma_data_direction(dir));
 }
 
 static inline void mdss_smmu_deinit(struct mdss_data_type *mdata)
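
With the quirk set (the 8937 path above), every direction passed
through these wrappers collapses to DMA_BIDIRECTIONAL; on other
targets the caller's direction is preserved. A standalone sketch
of that behavior, with the quirk state modelled as a plain flag
and enum values local to the sketch:

#include <stdbool.h>
#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

/* Models mdss_has_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR). */
static bool quirk_dma_bi_dir;

/* Mirrors mdss_smmu_dma_data_direction() above. */
static enum dma_dir dma_data_direction(enum dma_dir dir)
{
	return quirk_dma_bi_dir ? DMA_BIDIRECTIONAL : dir;
}

int main(void)
{
	quirk_dma_bi_dir = false;	/* other targets: dir preserved */
	printf("%d\n", dma_data_direction(DMA_TO_DEVICE));	/* 0 */

	quirk_dma_bi_dir = true;	/* 8937: always bidirectional */
	printf("%d\n", dma_data_direction(DMA_TO_DEVICE));	/* 2 */
	printf("%d\n", dma_data_direction(DMA_FROM_DEVICE));	/* 2 */
	return 0;
}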