drm/exynos: fixed duplicated page allocation bug.
This patch fixes buf->pages being allocated twice when a physically contiguous memory region is allocated, and removes the code made unnecessary by that change.

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
parent dcf9af8228
commit 61db75d83c
1 changed file with 9 additions and 25 deletions
@@ -34,7 +34,7 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr, end_addr;
+	dma_addr_t start_addr;
 	unsigned int npages, page_size, i = 0;
 	struct scatterlist *sgl;
 	int ret = 0;
@@ -84,19 +84,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		goto err1;
 	}
 
-	start_addr = buf->dma_addr;
-	end_addr = buf->dma_addr + buf->size;
-
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err2;
-	}
-
-	start_addr = buf->dma_addr;
-	end_addr = buf->dma_addr + buf->size;
-
 	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
 	if (!buf->pages) {
 		DRM_ERROR("failed to allocate pages.\n");
@@ -105,20 +92,17 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	}
 
 	sgl = buf->sgt->sgl;
+	start_addr = buf->dma_addr;
 
 	while (i < npages) {
 		buf->pages[i] = phys_to_page(start_addr);
 		sg_set_page(sgl, buf->pages[i], page_size, 0);
 		sg_dma_address(sgl) = start_addr;
 		start_addr += page_size;
-		if (end_addr - start_addr < page_size)
-			break;
 		sgl = sg_next(sgl);
 		i++;
 	}
 
-	buf->pages[i] = phys_to_page(start_addr);
-
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->kvaddr,
 			(unsigned long)buf->dma_addr,
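The bug the message describes is visible in the second hunk: buf->pages was kzalloc'd and then, a few lines later, kzalloc'd again unconditionally, leaking the first array along with a redundant start_addr/end_addr setup. The fix keeps a single allocation and, in the third hunk, a single loop that walks the contiguous region one page at a time, so the end_addr bookkeeping and the trailing phys_to_page() call are no longer needed. Below is a minimal userspace C sketch of the fixed pattern, written only as an illustration; the names (fake_buf, PAGE_SZ, fill_pages, the unsigned long page entries) are hypothetical stand-ins for the driver's buf->pages, kzalloc() and phys_to_page(), not the exynos API.

/*
 * Illustrative sketch only: allocate the page array exactly once, then
 * fill it with one loop over the physically contiguous region.
 * All identifiers here are made up for the example.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL

struct fake_buf {
	unsigned long dma_addr;	/* start address of the contiguous region */
	unsigned long size;	/* region size, a multiple of PAGE_SZ */
	unsigned long *pages;	/* one entry per page; stands in for struct page * */
};

static int fill_pages(struct fake_buf *buf)
{
	unsigned long npages = buf->size / PAGE_SZ;
	unsigned long addr = buf->dma_addr;
	unsigned long i;

	/* Single allocation; the bug was repeating this and leaking the first array. */
	buf->pages = calloc(npages, sizeof(*buf->pages));
	if (!buf->pages)
		return -1;

	/* Single loop over npages; no end_addr tracking or early break required. */
	for (i = 0; i < npages; i++) {
		buf->pages[i] = addr;
		addr += PAGE_SZ;
	}

	return 0;
}

int main(void)
{
	struct fake_buf buf = { .dma_addr = 0x20000000UL, .size = 4 * PAGE_SZ };

	if (fill_pages(&buf))
		return 1;

	printf("page[0] = 0x%lx, page[3] = 0x%lx\n", buf.pages[0], buf.pages[3]);
	free(buf.pages);
	return 0;
}

In the patched driver the same loop additionally fills one scatterlist entry per page via sg_set_page() and sg_dma_address(), which is why start_addr is reinitialized from buf->dma_addr right before the while loop in the new code.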