gpu: ion: Refactor common mapping functions out of system heap
The system heap contained several general purpose functions to map buffers
to the kernel and userspace. This patch refactors those into ion_heap.c so
they can be used by other heaps.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cd69488c7b
commit 8898227ed5

3 changed files with 91 additions and 78 deletions
ion_heap.c
@@ -15,9 +15,84 @@
  */

 #include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
 #include "ion.h"
 #include "ion_priv.h"

+void *ion_heap_map_kernel(struct ion_heap *heap,
+			  struct ion_buffer *buffer)
+{
+	struct scatterlist *sg;
+	int i, j;
+	void *vaddr;
+	pgprot_t pgprot;
+	struct sg_table *table = buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+
+	if (!pages)
+		return 0;
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+		BUG_ON(i >= npages);
+		for (j = 0; j < npages_this_entry; j++) {
+			*(tmp++) = page++;
+		}
+	}
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	vfree(pages);
+
+	return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+			   struct ion_buffer *buffer)
+{
+	vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma)
+{
+	struct sg_table *table = buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long remainder = vma->vm_end - addr;
+		unsigned long len = sg_dma_len(sg);
+
+		if (offset >= sg_dma_len(sg)) {
+			offset -= sg_dma_len(sg);
+			continue;
+		} else if (offset) {
+			page += offset / PAGE_SIZE;
+			len = sg_dma_len(sg) - offset;
+			offset = 0;
+		}
+		len = min(len, remainder);
+		remap_pfn_range(vma, addr, page_to_pfn(page), len,
+				vma->vm_page_prot);
+		addr += len;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+	return 0;
+}
+
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_heap *heap = NULL;
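The offset handling in ion_heap_map_user() is the subtlest part of the hunk
above: a nonzero vma->vm_pgoff lets a client map from the middle of a buffer,
so sg entries that lie entirely before that point are skipped and the first
partially covered entry is entered mid-way. Below is a standalone userspace
simulation of that arithmetic; the sg entry lengths and page offset are made
up for illustration, but the loop body mirrors the kernel code above.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long sg_len[] = { 8 * 1024, 16 * 1024, 4 * 1024 };
	unsigned long vm_start = 0x10000000UL;
	unsigned long vm_end = vm_start + 16 * 1024;	/* 16K window */
	unsigned long addr = vm_start;
	unsigned long offset = 3 * PAGE_SIZE;		/* vma->vm_pgoff = 3 */
	unsigned long i;

	for (i = 0; i < sizeof(sg_len) / sizeof(sg_len[0]); i++) {
		unsigned long remainder = vm_end - addr;
		unsigned long len = sg_len[i];
		unsigned long page_skip = 0;

		if (offset >= sg_len[i]) {
			/* entry lies entirely before the requested offset */
			offset -= sg_len[i];
			continue;
		} else if (offset) {
			/* the mapping window starts inside this entry */
			page_skip = offset / PAGE_SIZE;
			len = sg_len[i] - offset;
			offset = 0;
		}
		len = len < remainder ? len : remainder;
		printf("entry %lu: skip %lu page(s), map %lu bytes at 0x%lx\n",
		       i, page_skip, len, addr);
		addr += len;
		if (addr >= vm_end)
			break;
	}
	return 0;
}

With these inputs the 8K first entry is skipped outright, the 16K second
entry is entered one page in and contributes 12K, and the final 4K entry
fills the rest of the window.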
ion_priv.h
@@ -177,6 +177,16 @@ void ion_device_destroy(struct ion_device *dev);
  */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+			struct vm_area_struct *);
+
+
 /**
  * functions for creating and destroying the built in ion heaps.
  * architectures can add their own custom architecture specific
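These declarations are what make the commit's stated goal concrete: any heap
whose buffers carry a valid sg_table can delegate all three mapping ops. A
hedged sketch of a hypothetical heap doing so follows; the my_heap_* names
are invented, and the callback signatures are assumptions based on the
ion_heap_ops of this era rather than part of this patch.

#include "ion.h"
#include "ion_priv.h"

/* heap-specific callbacks, assumed to be defined elsewhere */
extern int my_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags);
extern void my_heap_free(struct ion_buffer *buffer);
extern struct sg_table *my_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer);
extern void my_heap_unmap_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer);

/*
 * Only allocation and DMA mapping remain heap-specific; the CPU-side
 * mappings are delegated to the new common helpers. This is exactly the
 * substitution the system heap itself makes below.
 */
static struct ion_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
	.free = my_heap_free,
	.map_dma = my_heap_map_dma,
	.unmap_dma = my_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};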
ion_system_heap.c
@@ -224,7 +224,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	LIST_HEAD(pages);
 	int i;
@@ -247,86 +247,14 @@ void ion_system_heap_unmap_dma(struct ion_heap *heap,
 	return;
 }

-void *ion_system_heap_map_kernel(struct ion_heap *heap,
-				 struct ion_buffer *buffer)
-{
-	struct scatterlist *sg;
-	int i, j;
-	void *vaddr;
-	pgprot_t pgprot;
-	struct sg_table *table = buffer->priv_virt;
-	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	struct page **pages = vmalloc(sizeof(struct page *) * npages);
-	struct page **tmp = pages;
-
-	if (!pages)
-		return 0;
-
-	if (buffer->flags & ION_FLAG_CACHED)
-		pgprot = PAGE_KERNEL;
-	else
-		pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
-		struct page *page = sg_page(sg);
-		BUG_ON(i >= npages);
-		for (j = 0; j < npages_this_entry; j++) {
-			*(tmp++) = page++;
-		}
-	}
-	vaddr = vmap(pages, npages, VM_MAP, pgprot);
-	vfree(pages);
-
-	return vaddr;
-}
-
-void ion_system_heap_unmap_kernel(struct ion_heap *heap,
-				  struct ion_buffer *buffer)
-{
-	vunmap(buffer->vaddr);
-}
-
-int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			     struct vm_area_struct *vma)
-{
-	struct sg_table *table = buffer->priv_virt;
-	unsigned long addr = vma->vm_start;
-	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		struct page *page = sg_page(sg);
-		unsigned long remainder = vma->vm_end - addr;
-		unsigned long len = sg_dma_len(sg);
-
-		if (offset >= sg_dma_len(sg)) {
-			offset -= sg_dma_len(sg);
-			continue;
-		} else if (offset) {
-			page += offset / PAGE_SIZE;
-			len = sg_dma_len(sg) - offset;
-			offset = 0;
-		}
-		len = min(len, remainder);
-		remap_pfn_range(vma, addr, page_to_pfn(page), len,
-				vma->vm_page_prot);
-		addr += len;
-		if (addr >= vma->vm_end)
-			return 0;
-	}
-	return 0;
-}
-
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
 	.map_dma = ion_system_heap_map_dma,
 	.unmap_dma = ion_system_heap_unmap_dma,
-	.map_kernel = ion_system_heap_map_kernel,
-	.unmap_kernel = ion_system_heap_unmap_kernel,
-	.map_user = ion_system_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
 };

 static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
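Two details in the hunks above are worth spelling out. First,
ion_system_heap_free() now reads the table from the core-visible
buffer->sg_table instead of the heap-private buffer->priv_virt, since shared
helpers cannot know about heap-private storage. Second,
ion_heap_unmap_kernel() frees buffer->vaddr, which implies the ion core
caches the map_kernel result in that field. A hedged sketch of that assumed
core-side flow (inferred from this patch; the actual ion.c code is not part
of this diff):

/*
 * Assumed core-side flow, for illustration only: the core invokes the
 * heap's map_kernel op and stashes the result in buffer->vaddr, which is
 * what lets ion_heap_unmap_kernel() simply vunmap(buffer->vaddr).
 */
static void *example_kernel_mapping(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	void *vaddr;

	vaddr = heap->ops->map_kernel(heap, buffer);
	if (vaddr)
		buffer->vaddr = vaddr;
	return vaddr;
}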
@@ -468,8 +396,8 @@ static struct ion_heap_ops kmalloc_ops = {
 	.phys = ion_system_contig_heap_phys,
 	.map_dma = ion_system_contig_heap_map_dma,
 	.unmap_dma = ion_system_contig_heap_unmap_dma,
-	.map_kernel = ion_system_heap_map_kernel,
-	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_system_contig_heap_map_user,
 };
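Note that the contiguous heap adopts only the kernel-mapping helpers and
keeps its own map_user, since physically contiguous memory can be remapped
without walking a scatter-gather list. From a client's point of view, all of
these map_user ops are ultimately reached through mmap() on a buffer's file
descriptor. A hedged userspace sketch follows; how the fd is obtained (e.g.
via ion's share ioctl) is outside the scope of this patch, and the page
protection comes from vma->vm_page_prot, which the core is assumed to have
adjusted for uncached buffers before the heap op runs.

#include <stdio.h>
#include <sys/mman.h>

/*
 * Map `len` bytes of an already-obtained ion buffer fd into this process.
 * A nonzero page-aligned `offset` exercises the vm_pgoff skipping logic
 * in ion_heap_map_user() above.
 */
static void *map_ion_buffer(int buf_fd, size_t len, off_t offset)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       buf_fd, offset);
	if (p == MAP_FAILED) {
		perror("mmap ion buffer");
		return NULL;
	}
	return p;
}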