msm: mdss: Support iommu version v2 on MDP3

Ferrum uses iommu version v2, whereas the MDP3
driver is based on iommu version v0. This change
adds support for iommu version v2 to the MDP3
driver.

Change-Id: I754b46a5faf2e22e863bb6d077cc66f4a8e28ece
Signed-off-by: Sandeep Panda <spanda@codeaurora.org>
Authored by Sandeep Panda on 2014-08-11 18:22:37 +05:30; committed by David Keitel
parent c4501255d6
commit 05d6044951
4 changed files with 85 additions and 458 deletions
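
At a high level, the move to iommu v2 replaces the per-client PPP/DMA attach calls with a single ref-counted attach over all MDP contexts, so only the first enable and the last disable touch the hardware. Below is a minimal, self-contained sketch of that ref-counting pattern; NUM_CTX, attach_ctx() and detach_ctx() are illustrative stand-ins for MDP3_IOMMU_CTX_MAX, mdp3_iommu_attach() and mdp3_iommu_dettach(), not real kernel API:

#include <errno.h>

#define NUM_CTX 2

static int ref_cnt;

/* stand-ins for the driver's per-context attach/detach */
static int attach_ctx(int i) { (void)i; return 0; }
static void detach_ctx(int i) { (void)i; }

int iommu_enable(void)
{
	int i, rc = 0;

	if (ref_cnt == 0) {                      /* 0 -> 1: attach every context */
		for (i = 0; i < NUM_CTX; i++) {
			rc = attach_ctx(i);
			if (rc) {
				/* unwind the contexts already attached */
				for (i--; i >= 0; i--)
					detach_ctx(i);
				return rc;
			}
		}
	}
	ref_cnt++;
	return 0;
}

int iommu_disable(void)
{
	int i;

	if (ref_cnt == 0)
		return -EINVAL;                  /* unbalanced disable */
	if (--ref_cnt == 0)                      /* 1 -> 0: detach every context */
		for (i = 0; i < NUM_CTX; i++)
			detach_ctx(i);
	return 0;
}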

mdp3.c

@@ -15,7 +15,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -130,9 +129,9 @@ struct mdp3_bus_handle_map mdp3_bus_handle[MDP3_BUS_HANDLE_MAX] = {
};
struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
[MDP3_PPP_IOMMU_DOMAIN] = {
.domain_type = MDP3_PPP_IOMMU_DOMAIN,
.client_name = "mdp_ppp",
[MDP3_IOMMU_DOMAIN_UNSECURE] = {
.domain_type = MDP3_IOMMU_DOMAIN_UNSECURE,
.client_name = "mdp_ns",
.partitions = {
{
.start = SZ_128K,
@@ -141,13 +140,13 @@ struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
},
.npartitions = 1,
},
[MDP3_DMA_IOMMU_DOMAIN] = {
.domain_type = MDP3_DMA_IOMMU_DOMAIN,
.client_name = "mdp_dma",
[MDP3_IOMMU_DOMAIN_SECURE] = {
.domain_type = MDP3_IOMMU_DOMAIN_SECURE,
.client_name = "mdp_secure",
.partitions = {
{
.start = SZ_128K,
.size = SZ_1G - SZ_128K,
.start = SZ_1G,
.size = SZ_1G,
},
},
.npartitions = 1,
@@ -155,30 +154,16 @@ struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
};
struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
[MDP3_IOMMU_CTX_PPP_0] = {
.ctx_type = MDP3_IOMMU_CTX_PPP_0,
.domain = &mdp3_iommu_domains[MDP3_PPP_IOMMU_DOMAIN],
.ctx_name = "mdpe_0",
[MDP3_IOMMU_CTX_MDP_0] = {
.ctx_type = MDP3_IOMMU_CTX_MDP_0,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_UNSECURE],
.ctx_name = "mdp_0",
.attached = 0,
},
[MDP3_IOMMU_CTX_PPP_1] = {
.ctx_type = MDP3_IOMMU_CTX_PPP_1,
.domain = &mdp3_iommu_domains[MDP3_PPP_IOMMU_DOMAIN],
.ctx_name = "mdpe_1",
.attached = 0,
},
[MDP3_IOMMU_CTX_DMA_0] = {
.ctx_type = MDP3_IOMMU_CTX_DMA_0,
.domain = &mdp3_iommu_domains[MDP3_DMA_IOMMU_DOMAIN],
.ctx_name = "mdps_0",
.attached = 0,
},
[MDP3_IOMMU_CTX_DMA_1] = {
.ctx_type = MDP3_IOMMU_CTX_DMA_1,
.domain = &mdp3_iommu_domains[MDP3_DMA_IOMMU_DOMAIN],
.ctx_name = "mdps_1",
[MDP3_IOMMU_CTX_MDP_1] = {
.ctx_type = MDP3_IOMMU_CTX_MDP_1,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_SECURE],
.ctx_name = "mdp_1",
.attached = 0,
},
};
@@ -654,13 +639,13 @@ void mdp3_bus_bw_iommu_enable(int enable, int client)
if (enable && ref_cnt == 1) {
if (mdp3_res->allow_iommu_update)
mdp3_iommu_enable(client);
mdp3_iommu_enable();
ab = bus_handle->restore_ab;
ib = bus_handle->restore_ib;
mdp3_bus_scale_set_quota(client, ab, ib);
} else if (!enable && ref_cnt == 0) {
mdp3_bus_scale_set_quota(client, 0, 0);
mdp3_iommu_disable(client);
mdp3_iommu_disable();
} else if (ref_cnt < 0) {
pr_err("Ref count < 0, bus client=%d, ref_cnt=%d",
client_idx, ref_cnt);
@@ -967,7 +952,7 @@ static int mdp3_res_init(void)
static void mdp3_res_deinit(void)
{
mdp3_bus_scale_unregister();
mdp3_iommu_dettach(MDP3_IOMMU_CTX_DMA_0);
mdp3_iommu_dettach(MDP3_IOMMU_CTX_MDP_0);
mdp3_iommu_deinit();
if (!IS_ERR_OR_NULL(mdp3_res->ion_client))
@@ -1243,338 +1228,17 @@ void mdp3_enable_regulator(int enable)
mdp3_batfet_ctrl(enable);
}
static void mdp3_iommu_heap_unmap_iommu(struct mdp3_iommu_meta *meta)
{
unsigned int domain_num;
unsigned int partition_num = 0;
struct iommu_domain *domain;
domain_num = (mdp3_res->domains + MDP3_PPP_IOMMU_DOMAIN)->domain_idx;
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
pr_err("Could not get domain %d. Corruption?\n", domain_num);
return;
}
iommu_unmap_range(domain, meta->iova_addr, meta->mapped_size);
msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
meta->mapped_size);
return;
}
static void mdp3_iommu_meta_destroy(struct kref *kref)
{
struct mdp3_iommu_meta *meta =
container_of(kref, struct mdp3_iommu_meta, ref);
rb_erase(&meta->node, &mdp3_res->iommu_root);
mdp3_iommu_heap_unmap_iommu(meta);
dma_buf_put(meta->dbuf);
kfree(meta);
}
static void mdp3_iommu_meta_put(struct mdp3_iommu_meta *meta)
{
/* Need to lock here to prevent race against map/unmap */
mutex_lock(&mdp3_res->iommu_lock);
kref_put(&meta->ref, mdp3_iommu_meta_destroy);
mutex_unlock(&mdp3_res->iommu_lock);
}
static struct mdp3_iommu_meta *mdp3_iommu_meta_lookup(struct sg_table *table)
{
struct rb_root *root = &mdp3_res->iommu_root;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct mdp3_iommu_meta *entry = NULL;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct mdp3_iommu_meta, node);
if (table < entry->table)
p = &(*p)->rb_left;
else if (table > entry->table)
p = &(*p)->rb_right;
else
return entry;
}
return NULL;
}
void mdp3_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
{
struct mdp3_iommu_meta *meta;
struct sg_table *table;
table = ion_sg_table(client, handle);
mutex_lock(&mdp3_res->iommu_lock);
meta = mdp3_iommu_meta_lookup(table);
if (!meta) {
WARN(1, "%s: buffer was never mapped for %p\n", __func__,
handle);
mutex_unlock(&mdp3_res->iommu_lock);
goto out;
}
mutex_unlock(&mdp3_res->iommu_lock);
mdp3_iommu_meta_put(meta);
out:
return;
}
static void mdp3_iommu_meta_add(struct mdp3_iommu_meta *meta)
{
struct rb_root *root = &mdp3_res->iommu_root;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct mdp3_iommu_meta *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct mdp3_iommu_meta, node);
if (meta->table < entry->table) {
p = &(*p)->rb_left;
} else if (meta->table > entry->table) {
p = &(*p)->rb_right;
} else {
pr_err("%s: handle %p already exists\n", __func__,
entry->handle);
BUG();
}
}
rb_link_node(&meta->node, parent, p);
rb_insert_color(&meta->node, root);
}
static int mdp3_iommu_map_iommu(struct mdp3_iommu_meta *meta,
unsigned long align, unsigned long iova_length,
unsigned int padding, unsigned long flags)
{
struct iommu_domain *domain;
int ret = 0;
unsigned long size;
unsigned long unmap_size;
struct sg_table *table;
int prot = IOMMU_WRITE | IOMMU_READ;
unsigned int domain_num = (mdp3_res->domains +
MDP3_PPP_IOMMU_DOMAIN)->domain_idx;
unsigned int partition_num = 0;
size = meta->size;
table = meta->table;
/* Use the biggest alignment to allow bigger IOMMU mappings.
* Use the first entry since the first entry will always be the
* biggest entry. To take advantage of bigger mapping sizes both the
* VA and PA addresses have to be aligned to the biggest size.
*/
if (sg_dma_len(table->sgl) > align)
align = sg_dma_len(table->sgl);
ret = msm_allocate_iova_address(domain_num, partition_num,
meta->mapped_size, align,
(unsigned long *)&meta->iova_addr);
if (ret)
goto out;
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
ret = -ENOMEM;
goto out1;
}
/* Add padding before the buffer */
if (padding) {
unsigned long phys_addr = sg_phys(table->sgl);
ret = msm_iommu_map_extra(domain, meta->iova_addr, phys_addr,
padding, SZ_4K, prot);
if (ret)
goto out1;
}
/* Mapping actual buffer */
ret = iommu_map_range(domain, meta->iova_addr + padding,
table->sgl, size, prot);
if (ret) {
pr_err("%s: could not map %pa in domain %p\n",
__func__, &meta->iova_addr, domain);
unmap_size = padding;
goto out2;
}
/* Adding padding to end of buffer */
if (padding) {
unsigned long phys_addr = sg_phys(table->sgl);
unsigned long extra_iova_addr = meta->iova_addr +
padding + size;
ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
padding, SZ_4K, prot);
if (ret) {
unmap_size = padding + size;
goto out2;
}
}
return ret;
out2:
iommu_unmap_range(domain, meta->iova_addr, unmap_size);
out1:
msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
iova_length);
out:
return ret;
}
static struct mdp3_iommu_meta *mdp3_iommu_meta_create(struct ion_client *client,
struct ion_handle *handle, struct sg_table *table, unsigned long size,
unsigned long align, unsigned long iova_length, unsigned int padding,
unsigned long flags, dma_addr_t *iova)
{
struct mdp3_iommu_meta *meta;
int ret;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return ERR_PTR(-ENOMEM);
meta->handle = handle;
meta->table = table;
meta->size = size;
meta->mapped_size = iova_length;
meta->dbuf = ion_share_dma_buf(client, handle);
kref_init(&meta->ref);
ret = mdp3_iommu_map_iommu(meta,
align, iova_length, padding, flags);
if (ret < 0) {
pr_err("%s: Unable to map buffer\n", __func__);
goto out;
}
*iova = meta->iova_addr;
mdp3_iommu_meta_add(meta);
return meta;
out:
kfree(meta);
return ERR_PTR(ret);
}
/*
* PPP hw reads in tiles of 16 which might be outside the mapped region,
* so we need to map buffers ourselves to add extra padding
*/
int mdp3_self_map_iommu(struct ion_client *client, struct ion_handle *handle,
unsigned long align, unsigned long padding, dma_addr_t *iova,
unsigned long *buffer_size, unsigned long flags,
unsigned long iommu_flags)
{
struct mdp3_iommu_meta *iommu_meta = NULL;
struct sg_table *table;
struct scatterlist *sg;
unsigned long size = 0, iova_length = 0;
int ret = 0;
int i;
table = ion_sg_table(client, handle);
if (IS_ERR_OR_NULL(table))
return PTR_ERR(table);
for_each_sg(table->sgl, sg, table->nents, i)
size += sg_dma_len(sg);
padding = PAGE_ALIGN(padding);
/* Adding 16 lines padding before and after buffer */
iova_length = size + 2 * padding;
if (size & ~PAGE_MASK) {
pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
size, PAGE_SIZE);
ret = -EINVAL;
goto out;
}
if (iova_length & ~PAGE_MASK) {
pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
iova_length, PAGE_SIZE);
ret = -EINVAL;
goto out;
}
mutex_lock(&mdp3_res->iommu_lock);
iommu_meta = mdp3_iommu_meta_lookup(table);
if (!iommu_meta) {
iommu_meta = mdp3_iommu_meta_create(client, handle, table, size,
align, iova_length, padding, flags, iova);
if (!IS_ERR_OR_NULL(iommu_meta)) {
iommu_meta->flags = iommu_flags;
ret = 0;
} else {
ret = PTR_ERR(iommu_meta);
mutex_unlock(&mdp3_res->iommu_lock);
pr_err("%s: meta_create failed err=%d", __func__, ret);
return ret;
}
} else {
if (iommu_meta->flags != iommu_flags) {
pr_err("%s: handle %p is already mapped with diff flag\n",
__func__, handle);
ret = -EINVAL;
goto out_unlock;
} else if (iommu_meta->mapped_size != iova_length) {
pr_err("%s: handle %p is already mapped with diff len\n",
__func__, handle);
ret = -EINVAL;
goto out_unlock;
} else {
kref_get(&iommu_meta->ref);
*iova = iommu_meta->iova_addr;
}
}
BUG_ON(iommu_meta->size != size);
mutex_unlock(&mdp3_res->iommu_lock);
*iova = *iova + padding;
*buffer_size = size;
return ret;
out_unlock:
mutex_unlock(&mdp3_res->iommu_lock);
out:
mdp3_iommu_meta_put(iommu_meta);
return ret;
}
int mdp3_put_img(struct mdp3_img_data *data, int client)
int mdp3_put_img(struct mdp3_img_data *data)
{
struct ion_client *iclient = mdp3_res->ion_client;
int dom;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
pr_info("mdp3_put_img fb mem buf=0x%pa\n", &data->addr);
fdput(data->srcp_f);
memset(&data->srcp_f, 0, sizeof(struct fd));
} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
if (client == MDP3_CLIENT_DMA_P) {
dom = (mdp3_res->domains +
MDP3_DMA_IOMMU_DOMAIN)->domain_idx;
ion_unmap_iommu(iclient, data->srcp_ihdl, dom, 0);
} else {
mdp3_unmap_iommu(iclient, data->srcp_ihdl);
}
ion_unmap_iommu(iclient, data->srcp_ihdl, dom, 0);
ion_free(iclient, data->srcp_ihdl);
data->srcp_ihdl = NULL;
} else {
@@ -1583,8 +1247,7 @@ int mdp3_put_img(struct mdp3_img_data *data, int client)
return 0;
}
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
int client)
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data)
{
struct fd f;
int ret = -EINVAL;
@@ -1592,7 +1255,7 @@ int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
unsigned long *len;
dma_addr_t *start;
struct ion_client *iclient = mdp3_res->ion_client;
int dom;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
start = &data->addr;
len = (unsigned long *) &data->len;
@@ -1633,15 +1296,8 @@ int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
data->srcp_ihdl = NULL;
return ret;
}
if (client == MDP3_CLIENT_DMA_P) {
dom = (mdp3_res->domains +
MDP3_DMA_IOMMU_DOMAIN)->domain_idx;
ret = ion_map_iommu(iclient, data->srcp_ihdl, dom,
0, SZ_4K, 0, start, len, 0, 0);
} else {
ret = mdp3_self_map_iommu(iclient, data->srcp_ihdl,
SZ_4K, data->padding, start, len, 0, 0);
}
ret = ion_map_iommu(iclient, data->srcp_ihdl, dom,
0, SZ_4K, 0, start, len, 0, 0);
if (IS_ERR_VALUE(ret)) {
ion_free(iclient, data->srcp_ihdl);
pr_err("failed to map ion handle (%d)\n", ret);
@@ -1656,53 +1312,59 @@ done:
pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%x\n", img->memory_id,
data->srcp_ihdl, &data->addr, data->len);
} else {
mdp3_put_img(data, client);
mdp3_put_img(data);
return -EINVAL;
}
return ret;
}
int mdp3_iommu_enable(int client)
int mdp3_iommu_enable()
{
int rc;
int i, rc = 0;
if (client == MDP3_CLIENT_DMA_P) {
rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_DMA_0);
if (mdp3_res->iommu_ref_cnt == 0) {
for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
rc = mdp3_iommu_attach(i);
if (rc) {
WARN(1, "IOMMU attach failed for ctx: %d\n", i);
for (i--; i >= 0; i--)
mdp3_iommu_dettach(i);
return rc;
}
}
}
if (!rc)
mdp3_res->iommu_ref_cnt++;
return rc;
}
int mdp3_iommu_disable()
{
int i, rc = 0;
if (mdp3_res->iommu_ref_cnt) {
mdp3_res->iommu_ref_cnt--;
if (mdp3_res->iommu_ref_cnt == 0) {
for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++)
rc = mdp3_iommu_dettach(i);
}
} else {
rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_PPP_0);
rc |= mdp3_iommu_attach(MDP3_IOMMU_CTX_PPP_1);
pr_err("iommu ref count unbalanced\n");
}
return rc;
}
int mdp3_iommu_disable(int client)
{
int rc;
if (client == MDP3_CLIENT_DMA_P) {
rc = mdp3_iommu_dettach(MDP3_IOMMU_CTX_DMA_0);
} else {
rc = mdp3_iommu_dettach(MDP3_IOMMU_CTX_PPP_0);
rc |= mdp3_iommu_dettach(MDP3_IOMMU_CTX_PPP_1);
}
return rc;
}
int mdp3_iommu_is_attached(int client)
int mdp3_iommu_is_attached()
{
struct mdp3_iommu_ctx_map *context_map;
int context = MDP3_IOMMU_CTX_DMA_0;
if (!mdp3_res->iommu_contexts)
return 0;
if (client == MDP3_CLIENT_PPP)
context = MDP3_IOMMU_CTX_PPP_0;
context_map = mdp3_res->iommu_contexts + context;
context_map = mdp3_res->iommu_contexts + MDP3_IOMMU_CTX_MDP_0;
return context_map->attached;
}
@@ -1775,7 +1437,7 @@ static int mdp3_alloc(struct msm_fb_data_type *mfd)
return -ENOMEM;
}
dom = mdp3_res->domains[MDP3_DMA_IOMMU_DOMAIN].domain_idx;
dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
ret = msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
&mfd->iova);
@@ -1804,7 +1466,7 @@ void mdp3_free(struct msm_fb_data_type *mfd)
}
size = mfd->fbi->fix.smem_len;
dom = mdp3_res->domains[MDP3_DMA_IOMMU_DOMAIN].domain_idx;
dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
msm_iommu_unmap_contig_buffer(mfd->iova, dom, 0, size);
mfd->fbi->screen_base = NULL;
@@ -1885,7 +1547,7 @@ static int mdp3_fb_mem_get_iommu_domain(void)
{
if (!mdp3_res)
return -ENODEV;
return mdp3_res->domains[MDP3_DMA_IOMMU_DOMAIN].domain_idx;
return mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
}
int mdp3_get_cont_spash_en(void)
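
Net effect in mdp3.c: both DMA and PPP buffers now map through ion_map_iommu() into the unsecure domain, and mdp3_get_img()/mdp3_put_img() drop their client argument. A hedged caller-side sketch of the new buffer lifecycle (the calls and fields are the ones visible in this diff; the surrounding fragment is illustrative, not driver code):

struct mdp3_img_data data;
int rc;

rc = mdp3_get_img(&img, &data);  /* resolve the ion handle and map it */
if (rc)
	return rc;
/* ... program DMA/PPP with data.addr and data.len ... */
mdp3_put_img(&data);             /* unmap and release the ion handle */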

mdp3.h

@@ -44,16 +44,14 @@ enum {
};
enum {
MDP3_DMA_IOMMU_DOMAIN,
MDP3_PPP_IOMMU_DOMAIN,
MDP3_IOMMU_DOMAIN_UNSECURE,
MDP3_IOMMU_DOMAIN_SECURE,
MDP3_IOMMU_DOMAIN_MAX,
};
enum {
MDP3_IOMMU_CTX_PPP_0,
MDP3_IOMMU_CTX_PPP_1,
MDP3_IOMMU_CTX_DMA_0,
MDP3_IOMMU_CTX_DMA_1,
MDP3_IOMMU_CTX_MDP_0,
MDP3_IOMMU_CTX_MDP_1,
MDP3_IOMMU_CTX_MAX
};
@@ -62,12 +60,6 @@ enum {
MDP3_CLIENT_PPP,
};
enum {
DI_PARTITION_NUM = 0,
DI_DOMAIN_NUM = 1,
DI_MAX,
};
struct mdp3_bus_handle_map {
struct msm_bus_vectors *bus_vector;
struct msm_bus_paths *usecases;
@@ -96,19 +88,6 @@ struct mdp3_iommu_ctx_map {
int attached;
};
struct mdp3_iommu_meta {
struct rb_node node;
struct ion_handle *handle;
struct rb_root iommu_maps;
struct kref ref;
struct sg_table *table;
struct dma_buf *dbuf;
int mapped_size;
unsigned long size;
dma_addr_t iova_addr;
unsigned long flags;
};
#define MDP3_MAX_INTR 28
struct mdp3_intr_cb {
@@ -136,10 +115,10 @@ struct mdp3_hw_resource {
struct ion_client *ion_client;
struct mdp3_iommu_domain_map *domains;
struct mdp3_iommu_ctx_map *iommu_contexts;
unsigned int iommu_ref_cnt;
bool allow_iommu_update;
struct ion_handle *ion_handle;
struct mutex iommu_lock;
struct rb_root iommu_root;
struct mdp3_dma dma[MDP3_DMA_MAX];
struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
@@ -168,7 +147,6 @@ struct mdp3_hw_resource {
struct mdp3_img_data {
dma_addr_t addr;
u32 len;
u32 padding;
u32 flags;
int p_need;
struct file *srcp_file;
@@ -189,12 +167,11 @@ int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client);
int mdp3_clk_enable(int enable, int dsi_clk);
int mdp3_res_update(int enable, int dsi_clk, int client);
int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
int mdp3_put_img(struct mdp3_img_data *data, int client);
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
int client);
int mdp3_iommu_enable(int client);
int mdp3_iommu_disable(int client);
int mdp3_iommu_is_attached(int client);
int mdp3_put_img(struct mdp3_img_data *data);
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data);
int mdp3_iommu_enable(void);
int mdp3_iommu_disable(void);
int mdp3_iommu_is_attached(void);
void mdp3_free(struct msm_fb_data_type *mfd);
int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd);
void mdp3_release_splash_memory(struct msm_fb_data_type *mfd);
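
For reference, the partition table in mdp3.c above gives these two domains the following virtual address ranges (SZ_128K = 0x20000, SZ_1G = 0x40000000):

/* MDP3_IOMMU_DOMAIN_UNSECURE: VA [0x00020000, 0x40000000), SZ_1G - SZ_128K bytes
 * MDP3_IOMMU_DOMAIN_SECURE:   VA [0x40000000, 0x80000000), SZ_1G bytes
 */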

mdp3_ctrl.c

@@ -52,7 +52,7 @@ static void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
while (count-- && (bufq->pop_idx >= 0)) {
struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx];
bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
mdp3_put_img(data, MDP3_CLIENT_DMA_P);
mdp3_put_img(data);
}
bufq->count = 0;
bufq->push_idx = 0;
@@ -746,7 +746,7 @@ static int mdp3_ctrl_reset_cmd(struct msm_fb_data_type *mfd)
goto reset_error;
}
rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
rc = mdp3_iommu_enable();
if (rc) {
pr_err("fail to attach dma iommu\n");
goto reset_error;
@@ -818,7 +818,7 @@ static int mdp3_ctrl_reset(struct msm_fb_data_type *mfd)
goto reset_error;
}
rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
rc = mdp3_iommu_enable();
if (rc) {
pr_err("fail to attach dma iommu\n");
goto reset_error;
@@ -946,7 +946,7 @@ static int mdp3_overlay_queue_buffer(struct msm_fb_data_type *mfd,
struct mdp3_img_data data;
struct mdp3_dma *dma = mdp3_session->dma;
rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
rc = mdp3_get_img(img, &data);
if (rc) {
pr_err("fail to get overlay buffer\n");
return rc;
@@ -954,14 +954,14 @@
if (data.len < dma->source_config.stride * dma->source_config.height) {
pr_err("buf length is smaller than required by dma configuration\n");
mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
mdp3_put_img(&data);
return -EINVAL;
}
rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
if (rc) {
pr_err("fail to queue the overlay buffer, buffer drop\n");
mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
mdp3_put_img(&data);
return rc;
}
return 0;
@@ -1065,7 +1065,7 @@ static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
mdp3_release_splash_memory(mfd);
data = mdp3_bufq_pop(&mdp3_session->bufq_out);
if (data)
mdp3_put_img(data, MDP3_CLIENT_DMA_P);
mdp3_put_img(data);
}
if (mdp3_session->first_commit) {
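
The call sites above all follow one lifetime rule: every successful mdp3_get_img() is balanced by exactly one mdp3_put_img() on every exit path. Condensed from the queue-buffer hunk (an illustrative fragment, not a new function in the driver):

rc = mdp3_get_img(img, &data);       /* map the buffer */
if (rc)
	return rc;                   /* nothing mapped yet */
if (data.len < dma->source_config.stride * dma->source_config.height) {
	mdp3_put_img(&data);         /* undersized: unmap before bailing */
	return -EINVAL;
}
rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
if (rc)
	mdp3_put_img(&data);         /* queueing failed: drop the mapping */
return rc;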

mdp3_ppp.c

@@ -123,22 +123,12 @@ int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
struct mdp3_img_data *data)
{
struct msmfb_data fb_data;
uint32_t stride;
int bpp = ppp_bpp(img->format);
if (bpp <= 0) {
pr_err("%s incorrect format %d\n", __func__, img->format);
return -EINVAL;
}
fb_data.flags = img->priv;
fb_data.memory_id = img->memory_id;
fb_data.offset = 0;
stride = img->width * bpp;
data->padding = 16 * stride;
return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
return mdp3_get_img(&fb_data, data);
}
/* Check format */
@@ -1073,10 +1063,8 @@ static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
&req->src_data[i],
&req->dst_data[i]);
}
mdp3_put_img(&req->src_data[i],
MDP3_CLIENT_PPP);
mdp3_put_img(&req->dst_data[i],
MDP3_CLIENT_PPP);
mdp3_put_img(&req->src_data[i]);
mdp3_put_img(&req->dst_data[i]);
}
}
/* Signal to release fence */
@@ -1151,7 +1139,7 @@ int mdp3_ppp_parse_req(void __user *p,
rc = mdp3_ppp_get_img(&req->req_list[i].dst,
&req->req_list[i], &req->dst_data[i]);
if (rc < 0 || req->dst_data[i].len == 0) {
mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
mdp3_put_img(&req->src_data[i]);
pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
goto parse_err_1;
}
@@ -1194,8 +1182,8 @@ parse_err_2:
put_unused_fd(req->cur_rel_fen_fd);
parse_err_1:
for (i--; i >= 0; i--) {
mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
mdp3_put_img(&req->src_data[i]);
mdp3_put_img(&req->dst_data[i]);
}
mdp3_ppp_deinit_buf_sync(req);
mutex_unlock(&ppp_stat->req_mutex);
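
With the self-map path gone, mdp3_ppp_get_img() no longer computes the 16-line padding the deleted PPP code required. For context, the removed math was stride = width * bpp and padding = 16 * stride; a worked example under those now-deleted rules:

/* Deleted PPP padding math, worked example:
 * 640-pixel-wide RGB565 source: bpp = 2, stride = 640 * 2 = 1280 bytes,
 * padding = 16 * stride = 20480 bytes on each side of the buffer
 * (already page-aligned: 20480 = 5 * 4096),
 * iova_length = size + 2 * padding.
 */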