msm: mdss: Remove smmu v1 related code

smmu v1 is not supported in the 3.18 kernel. Remove the smmu v1
specific code from the display driver.

Change-Id: I9deb060b6d3539acb9ee4b3fa05f491e0fc1db43
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
[cip@codeaurora.org: Resolved merge conflict]
Signed-off-by: Clarence Ip <cip@codeaurora.org>
Clarence Ip 2016-02-23 19:25:53 -05:00 committed by David Keitel
parent 3475ec6865
commit 7f66a9d175
12 changed files with 29 additions and 434 deletions
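
For orientation, display code reaches the SMMU through the ops table on struct mdss_data_type, so this patch only changes how that table is populated; callers are untouched. Below is a minimal, illustrative caller-side sketch. The wrapper name and the -ENODEV fallback are hypothetical; only the smmu_ops field names come from the diff that follows, and the types are assumed to come from mdss.h:

/* Hypothetical wrapper, for illustration only; not part of this patch. */
static inline int example_mdss_smmu_attach(struct mdss_data_type *mdata)
{
	/* After this patch the ops table always holds the *_v2 handlers. */
	if (!mdata->smmu_ops.smmu_attach)
		return -ENODEV;

	return mdata->smmu_ops.smmu_attach(mdata);
}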

View file

@@ -21,8 +21,10 @@
#include <linux/workqueue.h>
#include <linux/irqreturn.h>
#include <linux/mdss_io_util.h>
#include <linux/msm_iommu_domains.h>
#include <linux/msm-bus.h>
#include <linux/file.h>
#include <linux/dma-direction.h>
#include "mdss_panel.h"
@@ -58,15 +60,6 @@ enum mdss_bus_vote_type {
VOTE_INDEX_80_MHZ,
};
struct mdss_iommu_map_type {
char *client_name;
char *ctx_name;
struct device *ctx;
struct msm_iova_partition partitions[1];
int npartitions;
int domain_idx;
};
struct mdss_hw_settings {
char __iomem *reg;
u32 val;

View file

@@ -131,8 +131,7 @@ void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id);
int mdss_create_xlog_debug(struct mdss_debug_data *mdd);
void mdss_xlog(const char *name, int line, int flag, ...);
void mdss_xlog_tout_handler_default(const char *name, ...);
int mdss_xlog_tout_handler_iommu(struct iommu_domain *domain,
struct device *dev, unsigned long iova, int flags, void *token);
#else
static inline int mdss_debugfs_init(struct mdss_data_type *mdata) { return 0; }
static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
@@ -160,8 +159,6 @@ static inline void mdss_xlog_dump(void) { }
static inline void mdss_xlog(const char *name, int line, int flag...) { }
static inline void mdss_dsi_debug_check_te(struct mdss_panel_data *pdata) { }
static inline void mdss_xlog_tout_handler_default(const char *name, ...) { }
static inline int mdss_xlog_tout_handler_iommu(struct iommu_domain *domain,
struct device *dev, unsigned long iova, int flags, void *token) { }
#endif
static inline int mdss_debug_register_io(const char *name,

View file

@@ -402,20 +402,6 @@ void mdss_xlog_tout_handler_default(const char *name, ...)
mdss_xlog_dump_array(blk_arr, blk_len, dead, name);
}
int mdss_xlog_tout_handler_iommu(struct iommu_domain *domain,
struct device *dev, unsigned long iova, int flags, void *token)
{
if (!mdss_xlog_is_enabled(MDSS_XLOG_IOMMU))
return 0;
mdss_dump_reg_by_blk("mdp");
mdss_dump_reg_by_blk("vbif");
mdss_xlog_dump_all();
panic("mdp iommu");
return 0;
}
static int mdss_xlog_dump_open(struct inode *inode, struct file *file)
{
/* non-seekable */

View file

@@ -21,8 +21,6 @@
#include <linux/iopoll.h>
#include <linux/kthread.h>
#include <linux/msm_iommu_domains.h>
#include "mdss_dsi_cmd.h"
#include "mdss_dsi.h"
#include "mdss_smmu.h"

View file

@@ -21,8 +21,6 @@
#include <linux/iopoll.h>
#include <linux/kthread.h>
#include <linux/msm_iommu_domains.h>
#include "mdss.h"
#include "mdss_dsi.h"
#include "mdss_panel.h"

View file

@@ -46,12 +46,8 @@
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/dma-buf.h>
#include <linux/msm_iommu_domains.h>
#include <sync.h>
#include <sw_sync.h>
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
#include "mdss_fb.h"
#include "mdss_mdp_splash_logo.h"

View file

@@ -41,8 +41,6 @@
#include <linux/uaccess.h>
#include <linux/clk/msm-clk.h>
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <soc/qcom/scm.h>

View file

@@ -27,7 +27,6 @@
#include <linux/sort.h>
#include <sw_sync.h>
#include <linux/msm_iommu_domains.h>
#include <soc/qcom/event_timer.h>
#include <linux/msm-bus.h>
#include "mdss.h"

View file

@@ -16,13 +16,11 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/msm_ion.h>
#include <linux/qcom_iommu.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/major.h>
#include <media/msm_media_info.h>
#include <linux/msm_iommu_domains.h>
#include <linux/dma-buf.h>
#include "mdss_fb.h"

View file

@@ -20,8 +20,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/msm-clk.h>
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
@@ -33,31 +31,6 @@
#include "mdss_mdp.h"
#include "mdss_smmu.h"
struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
[MDSS_IOMMU_DOMAIN_UNSECURE] = {
.client_name = "mdp_ns",
.ctx_name = "mdp_0",
.partitions = {
{
.start = SZ_128K,
.size = SZ_1G - SZ_128K,
},
},
.npartitions = 1,
},
[MDSS_IOMMU_DOMAIN_SECURE] = {
.client_name = "mdp_secure",
.ctx_name = "mdp_1",
.partitions = {
{
.start = SZ_1G,
.size = SZ_1G,
},
},
.npartitions = 1,
},
};
static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
struct dss_module_power *mp)
{
@@ -126,48 +99,6 @@ static int mdss_smmu_clk_register(struct platform_device *pdev,
return 0;
}
/*
* mdss_smmu_attach_v1()
*
* Attaches to the SMMU domain. Attaching should be done every time before using
* the SMMU resources.
*/
static int mdss_smmu_attach_v1(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
struct mdss_iommu_map_type *iomap;
int i, rc = 0;
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
iomap = mdata->iommu_map + i;
domain = msm_get_iommu_domain(iomap->domain_idx);
if (!domain) {
WARN(1, "could not attach iommu client %s to ctx %s\n",
iomap->client_name, iomap->ctx_name);
continue;
}
rc = iommu_attach_device(domain, iomap->ctx);
if (rc) {
WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
for (i--; i >= 0; i--) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
iomap = mdata->iommu_map + i;
iommu_detach_device(domain, iomap->ctx);
}
goto end;
}
}
end:
return rc;
}
static int mdss_smmu_enable_power(struct dss_module_power *mp, bool enable)
{
int rc = 0;
@@ -270,35 +201,6 @@ err:
return rc;
}
/*
* mdss_smmu_detach_v1()
*
* Detaches from the smmu domain. Should be done immediately after the SMMU
* resource usage, in order to save power.
*/
static int mdss_smmu_detach_v1(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
struct mdss_iommu_map_type *iomap;
int i;
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
iomap = mdata->iommu_map + i;
domain = msm_get_iommu_domain(iomap->domain_idx);
if (!domain) {
pr_err("unable to get iommu domain(%d)\n",
iomap->domain_idx);
continue;
}
iommu_detach_device(domain, iomap->ctx);
}
return 0;
}
/*
* mdss_smmu_detach_v2()
*
@@ -324,29 +226,11 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
return 0;
}
static int mdss_smmu_get_domain_id_v1(u32 type)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
return mdata->iommu_map[type].domain_idx;
}
static int mdss_smmu_get_domain_id_v2(u32 type)
{
return type;
}
/*
* mdss_smmu_dma_buf_attach_v1()
*
* Helps in attaching the dma buffer to the device. This api returns reference
* to an attachment structure, which is then used for scatterlist operations.
*/
static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v1(
struct dma_buf *dma_buf, struct device *dev, int domain)
{
return dma_buf_attach(dma_buf, dev);
}
/*
* mdss_smmu_dma_buf_attach_v2()
*
@@ -365,20 +249,6 @@ static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
return dma_buf_attach(dma_buf, mdss_smmu->dev);
}
/*
* mdss_smmu_map_dma_buf_v1()
*
* Maps existing buffer into the SMMU domain and sets the virtual
* address in @iova
*/
static int mdss_smmu_map_dma_buf_v1(struct dma_buf *dma_buf,
struct sg_table *table, int domain, dma_addr_t *iova,
unsigned long *size, int dir)
{
return msm_map_dma_buf(dma_buf, table, mdss_smmu_get_domain_id(domain),
0, SZ_4K, 0, iova, size, 0, 0);
}
/*
* mdss_smmu_map_dma_buf_v2()
*
@@ -407,12 +277,6 @@ static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
return 0;
}
static void mdss_smmu_unmap_dma_buf_v1(struct sg_table *table, int domain,
int dir)
{
msm_unmap_dma_buf(table, mdss_smmu_get_domain_id(domain), 0);
}
static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
int dir)
{
@@ -425,38 +289,6 @@ static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir);
}
/*
* mdss_smmu_dma_alloc_coherent_v1()
*
* This routine allocates a region of @size bytes of consistent memory. It also
* returns a dma_handle which can be used as the physical address.
* dma_alloc_coherent returns a pointer to the allocated region (@cpu_addr) in
* the processor's virtual address space. This subroutine also takes care of the
* mapping of the buffer to the SMMU domain which sets the (@iova) the virtual
* address
*/
static int mdss_smmu_dma_alloc_coherent_v1(struct device *dev, size_t size,
dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
gfp_t gfp, int domain)
{
int ret = 0;
cpu_addr = dma_alloc_coherent(dev, size, phys, gfp);
if (!cpu_addr) {
pr_err("dma alloc coherent failed!\n");
return -ENOMEM;
}
ret = msm_iommu_map_contig_buffer(*phys,
mdss_smmu_get_domain_id(domain), 0,
size, SZ_4K, 0, iova);
if (IS_ERR_VALUE(ret)) {
pr_err("map contig buffer failed rc:%d\n", ret);
dma_free_coherent(dev, size, cpu_addr, *phys);
}
return ret;
}
/*
* mdss_smmu_dma_alloc_coherent_v2()
*
@@ -484,14 +316,6 @@ static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
return 0;
}
static void mdss_smmu_dma_free_coherent_v1(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
{
msm_iommu_unmap_contig_buffer(phys, mdss_smmu_get_domain_id(domain),
0, size);
dma_free_coherent(dev, size, cpu_addr, phys);
}
static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
{
@@ -504,25 +328,6 @@ static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
}
/*
* mdss_smmu_map_v1()
*
* Maps the address to the SMMU domain. Both the virtual address and the
* physical one, as well as the size of the mapping should be aligned (at least)
* to the size of the smallest page supported by the hardware.
*/
static int mdss_smmu_map_v1(int domain, phys_addr_t iova, phys_addr_t phys,
int gfp_order, int prot)
{
struct iommu_domain *iommu_domain = msm_get_iommu_domain(
mdss_smmu_get_domain_id(domain));
if (!iommu_domain) {
pr_err("mdss iommu domain get failed in smmu map\n");
return -EINVAL;
}
return iommu_map(iommu_domain, iova, phys, gfp_order, prot);
}
/*
* mdss_smmu_map_v2()
*
@@ -542,17 +347,6 @@ static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
iova, phys, gfp_order, prot);
}
static void mdss_smmu_unmap_v1(int domain, unsigned long iova, int gfp_order)
{
struct iommu_domain *iommu_domain = msm_get_iommu_domain(
mdss_smmu_get_domain_id(domain));
if (!iommu_domain) {
pr_err("mdss iommu domain get failed in smmu unmap\n");
return;
}
iommu_unmap(iommu_domain, iova, gfp_order);
}
static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
{
struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
@@ -564,17 +358,6 @@ static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
}
/*
* mdss_smmu_dsi_alloc_buf_v1()
*
* Allocates the buffer and mapping is handled later
*/
static char *mdss_smmu_dsi_alloc_buf_v1(struct device *dev, int size,
dma_addr_t *dmap, gfp_t gfp)
{
return dma_alloc_writecombine(dev, size, dmap, GFP_KERNEL);
}
/*
* mdss_smmu_dsi_alloc_buf_v2()
*
@@ -586,24 +369,6 @@ static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
return kzalloc(size, GFP_KERNEL);
}
/*
* mdss_smmu_dsi_map_buffer_v1()
*
* Maps the buffer allocated with mdss_smmu_dsi_alloc_buf_v1 to the SMMU domain
*/
static int mdss_smmu_dsi_map_buffer_v1(phys_addr_t phys, unsigned int domain,
unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
int dir)
{
msm_iommu_map_contig_buffer(phys, mdss_smmu_get_domain_id(domain), 0,
size, SZ_4K, 0, dma_addr);
if (IS_ERR_VALUE(*dma_addr)) {
pr_err("dma map contig buffer failed\n");
return -ENOMEM;
}
return 0;
}
/*
* mdss_smmu_dsi_map_buffer_v2()
*
@@ -629,14 +394,6 @@ static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
return 0;
}
static void mdss_smmu_dsi_unmap_buffer_v1(dma_addr_t dma_addr, int domain,
unsigned long size, int dir)
{
if (is_mdss_iommu_attached())
msm_iommu_unmap_contig_buffer(dma_addr,
mdss_smmu_get_domain_id(domain), 0, size);
}
static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
unsigned long size, int dir)
{
@@ -650,33 +407,7 @@ static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
}
static void mdss_smmu_deinit_v1(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
struct mdss_iommu_map_type *iomap;
int i;
if (!mdata->iommu_map) {
pr_warn("iommu not initialized\n");
return;
}
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
iomap = &mdss_iommu_map[i];
domain = msm_get_iommu_domain(iomap->domain_idx);
if (!domain) {
pr_err("unable to get iommu domain(%d)\n",
iomap->domain_idx);
return;
}
iomap->domain_idx = msm_unregister_domain(domain);
}
mdata->iommu_map = NULL;
}
static void mdss_smmu_deinit_v2(struct mdss_data_type *mata)
{
@@ -690,80 +421,27 @@ static void mdss_smmu_deinit_v2(struct mdss_data_type *mata)
}
}
static void mdss_smmu_ops_init(struct mdss_data_type *mdata, int smmu_version)
static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
{
switch (smmu_version) {
case MDSS_SMMU_V1:
mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v1;
mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v1;
mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v1;
mdata->smmu_ops.smmu_dma_buf_attach =
mdss_smmu_dma_buf_attach_v1;
mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v1;
mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v1;
mdata->smmu_ops.smmu_dma_alloc_coherent =
mdss_smmu_dma_alloc_coherent_v1;
mdata->smmu_ops.smmu_dma_free_coherent =
mdss_smmu_dma_free_coherent_v1;
mdata->smmu_ops.smmu_map = mdss_smmu_map_v1;
mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v1;
mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v1;
mdata->smmu_ops.smmu_dsi_map_buffer =
mdss_smmu_dsi_map_buffer_v1;
mdata->smmu_ops.smmu_dsi_unmap_buffer =
mdss_smmu_dsi_unmap_buffer_v1;
mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v1;
break;
case MDSS_SMMU_V2:
mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
mdata->smmu_ops.smmu_dma_buf_attach =
mdss_smmu_dma_buf_attach_v2;
mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
mdata->smmu_ops.smmu_dma_alloc_coherent =
mdss_smmu_dma_alloc_coherent_v2;
mdata->smmu_ops.smmu_dma_free_coherent =
mdss_smmu_dma_free_coherent_v2;
mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
mdata->smmu_ops.smmu_dsi_map_buffer =
mdss_smmu_dsi_map_buffer_v2;
mdata->smmu_ops.smmu_dsi_unmap_buffer =
mdss_smmu_dsi_unmap_buffer_v2;
mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
break;
default:
pr_err("smmu ops init failed - invalid smmu version:%d",
smmu_version);
}
}
/*
* mdss_smmu_find_version()
* @dev: mdss_mdp device
*
* It parses through the child devices of mdss_mdp device which is passed
* to this function and finds for smmu v2 related devices. If it exists it is
* termed as MDSS_SMMU_V2 else MDSS_SMMU_V1.
*/
static int mdss_smmu_find_version(struct device *dev)
{
struct device_node *parent, *child;
int version = MDSS_SMMU_V1;
parent = dev->of_node;
for_each_child_of_node(parent, child) {
if (is_mdss_smmu_compatible_device(child->name)) {
version = MDSS_SMMU_V2;
break;
}
}
return version;
mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
mdata->smmu_ops.smmu_dma_buf_attach =
mdss_smmu_dma_buf_attach_v2;
mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
mdata->smmu_ops.smmu_dma_alloc_coherent =
mdss_smmu_dma_alloc_coherent_v2;
mdata->smmu_ops.smmu_dma_free_coherent =
mdss_smmu_dma_free_coherent_v2;
mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
mdata->smmu_ops.smmu_dsi_map_buffer =
mdss_smmu_dsi_map_buffer_v2;
mdata->smmu_ops.smmu_dsi_unmap_buffer =
mdss_smmu_dsi_unmap_buffer_v2;
mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
}
/*
@@ -787,56 +465,8 @@ void mdss_smmu_device_create(struct device *dev)
int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
{
struct msm_iova_layout layout;
struct iommu_domain *domain;
struct mdss_iommu_map_type *iomap;
int i, smmu_version;
smmu_version = mdss_smmu_find_version(dev);
if (smmu_version == MDSS_SMMU_V2) {
mdss_smmu_device_create(dev);
goto end;
}
if (mdata->iommu_map) {
pr_warn("iommu already initialized\n");
return 0;
}
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
iomap = &mdss_iommu_map[i];
layout.client_name = iomap->client_name;
layout.partitions = iomap->partitions;
layout.npartitions = iomap->npartitions;
layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
iomap->domain_idx = msm_register_domain(&layout);
if (IS_ERR_VALUE(iomap->domain_idx))
return -EINVAL;
domain = msm_get_iommu_domain(iomap->domain_idx);
if (!domain) {
pr_err("unable to get iommu domain(%d)\n",
iomap->domain_idx);
return -EINVAL;
}
iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
if (!iomap->ctx) {
pr_warn("unable to get iommu ctx(%s)\n",
iomap->ctx_name);
return -EINVAL;
}
}
mdata->iommu_map = mdss_iommu_map;
end:
mdss_smmu_ops_init(mdata, smmu_version);
mdss_smmu_device_create(dev);
mdss_smmu_ops_init(mdata);
return 0;
}
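
Taken together with the kept lines above, mdss_smmu_init() after this patch reduces to roughly the following. This is a sketch reconstructed from the diff, not the verbatim file contents; the unconditional device create plus ops init replaces the removed v1 domain register/attach path:

int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
{
	/* smmu v2 only: create the smmu client devices and wire up the v2 ops */
	mdss_smmu_device_create(dev);
	mdss_smmu_ops_init(mdata);

	return 0;
}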

View file

@@ -17,7 +17,6 @@
#include <linux/msm_ion.h>
#include <linux/msm_mdp.h>
#include <linux/mdss_io_util.h>
#include <linux/msm_iommu_domains.h>
#include "mdss.h"
#include "mdss_mdp.h"

View file

@@ -259,6 +259,7 @@ header-y += major.h
header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += mdio.h
header-y += mdss_rotator.h
header-y += media.h
header-y += media-bus-format.h
header-y += mei.h
@@ -297,6 +298,8 @@ header-y += msm_audio_alac.h
header-y += msm_audio_ape.h
header-y += msm_ion.h
header-y += msm_kgsl.h
header-y += msm_mdp.h
header-y += msm_mdp_ext.h
header-y += msm_rmnet.h
header-y += mtio.h
header-y += nbd.h