ion: fix sparse warnings
Fix sparse warnings in ion.

Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit f63958d80c
parent 5c6a470557
7 changed files with 37 additions and 38 deletions
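The hunks below fall into four groups of sparse fixes: file-local functions and ops tables gain the static qualifier, pointer returns and pointer-array initializers use NULL instead of 0, the system-heap GFP flag masks are declared as gfp_t rather than plain unsigned int, and the __GFP_HIGHMEM test is reduced to 0/1 with !! before being stored in a bool.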
ion.c

@@ -669,7 +669,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
 	struct ion_client *client = s->private;
 	struct rb_node *n;
 	size_t sizes[ION_NUM_HEAP_IDS] = {0};
-	const char *names[ION_NUM_HEAP_IDS] = {0};
+	const char *names[ION_NUM_HEAP_IDS] = {NULL};
 	int i;

 	mutex_lock(&client->lock);
@@ -887,7 +887,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 	mutex_unlock(&buffer->lock);
 }

-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ion_buffer *buffer = vma->vm_private_data;
 	unsigned long pfn;
@@ -939,7 +939,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
 	mutex_unlock(&buffer->lock);
 }

-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
 	.open = ion_vm_open,
 	.close = ion_vm_close,
 	.fault = ion_vm_fault,
@@ -1030,7 +1030,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
 	mutex_unlock(&buffer->lock);
 }

-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
 	.map_dma_buf = ion_map_dma_buf,
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
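The bulk of this patch is the first group of fixes. A minimal sketch of the pattern follows, with made-up names rather than anything from the ion sources: a symbol defined in one .c file and declared nowhere else still has external linkage, and sparse reports that it was not declared and asks whether it should be static.

/* sketch.c: illustrative only, not taken from the ion driver */

struct foo_ops {
	int (*handle)(void *data);
};

/*
 * "static" gives these file-local definitions internal linkage.  Without
 * it, sparse warns that the symbol has no prior declaration, which is the
 * class of warning fixed above for ion_vm_fault, ion_vma_ops and
 * dma_buf_ops.
 */
static int foo_handle(void *data)
{
	return data ? 0 : -1;
}

static struct foo_ops foo_ops = {
	.handle = foo_handle,
};

int foo_run(void *data)
{
	return foo_ops.handle(data);
}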
ion_carveout_heap.c

@@ -85,8 +85,8 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
 }

-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-					   struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+						  struct ion_buffer *buffer)
 {
 	struct sg_table *table;
 	int ret;
@@ -104,8 +104,8 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 	return table;
 }

-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-				 struct ion_buffer *buffer)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+					struct ion_buffer *buffer)
 {
 	sg_free_table(buffer->sg_table);
 }
ion_chunk_heap.c

@@ -115,14 +115,14 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-					struct ion_buffer *buffer)
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+					       struct ion_buffer *buffer)
 {
 	return buffer->priv_virt;
 }

-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-			      struct ion_buffer *buffer)
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+				     struct ion_buffer *buffer)
 {
 	return;
 }
ion_cma_heap.c

@@ -44,8 +44,8 @@ struct ion_cma_buffer_info {
  * This function could be replaced by dma_common_get_sgtable
  * as soon as it will avalaible.
  */
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
-			void *cpu_addr, dma_addr_t handle, size_t size)
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size)
 {
 	struct page *page = virt_to_page(cpu_addr);
 	int ret;
@@ -137,16 +137,16 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
 	return 0;
 }

-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-				      struct ion_buffer *buffer)
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
 {
 	struct ion_cma_buffer_info *info = buffer->priv_virt;

 	return info->table;
 }

-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-			    struct ion_buffer *buffer)
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
 {
 	return;
 }
@@ -162,7 +162,8 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
 				buffer->size);
 }

-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
 {
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 	/* kernel memory mapping has been done at allocation time */
ion_heap.c

@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
 	struct page **tmp = pages;

 	if (!pages)
-		return 0;
+		return NULL;

 	if (buffer->flags & ION_FLAG_CACHED)
 		pgprot = PAGE_KERNEL;
@@ -193,7 +193,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 	return total_drained;
 }

-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
 {
 	struct ion_heap *heap = data;

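The return NULL change above addresses sparse's complaint about a plain integer used as a NULL pointer: ion_heap_map_kernel() returns void *, so returning the integer 0 compiles but draws the warning. A minimal sketch of the idiom, with invented names rather than driver code:

#include <stddef.h>

/*
 * Returning 0 from a pointer-valued function works, but sparse flags it;
 * returning NULL states the intent and silences the warning.
 */
void *lookup_slot(void **table, int idx)
{
	if (idx < 0)
		return NULL;	/* was the equivalent of "return 0;" */
	return table[idx];
}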
ion_page_pool.c

@@ -134,7 +134,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	int i;
 	bool high;

-	high = gfp_mask & __GFP_HIGHMEM;
+	high = !!(gfp_mask & __GFP_HIGHMEM);

 	if (nr_to_scan == 0)
 		return ion_page_pool_total(pool, high);
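The !! above reduces the masked value to an explicit 0 or 1 before it is stored in the bool. A small sketch of the idiom; the flag value below is a placeholder, not the real __GFP_HIGHMEM definition:

#include <stdbool.h>

#define EXAMPLE_HIGHMEM	0x02u	/* placeholder bit, not the kernel's value */

/*
 * mask & EXAMPLE_HIGHMEM evaluates to either 0 or the bit itself (0x02);
 * "!!" collapses that to 0 or 1, so the assignment to bool involves no
 * implicit narrowing for sparse to flag.
 */
bool wants_highmem(unsigned int mask)
{
	return !!(mask & EXAMPLE_HIGHMEM);
}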
ion_system_heap.c

@@ -26,11 +26,9 @@
 #include "ion.h"
 #include "ion_priv.h"

-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-					    __GFP_NOWARN | __GFP_NORETRY) &
-					    ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-					   __GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+				     __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
 static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
 static int order_to_index(unsigned int order)
@@ -76,12 +74,12 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 			gfp_flags = high_order_gfp_flags;
 		page = alloc_pages(gfp_flags, order);
 		if (!page)
-			return 0;
+			return NULL;
 		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
 	}
 	if (!page)
-		return 0;
+		return NULL;

 	return page;
 }
@@ -187,7 +185,7 @@ err:
 	return -ENOMEM;
 }

-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
 	struct ion_system_heap *sys_heap = container_of(heap,
@@ -211,14 +209,14 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-					 struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+						struct ion_buffer *buffer)
 {
 	return buffer->priv_virt;
 }

-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-			       struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
 {
 	return;
 }
@@ -403,7 +401,7 @@ out:
 	return ret;
 }

-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
 	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
@@ -427,14 +425,14 @@ static int ion_system_contig_heap_phys(struct ion_heap *heap,
 	return 0;
 }

-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-						struct ion_buffer *buffer)
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+							struct ion_buffer *buffer)
 {
 	return buffer->priv_virt;
 }

-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-				      struct ion_buffer *buffer)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
 {
 }

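Declaring the flag masks as gfp_t matters because gfp_t is defined as a __bitwise type in the kernel headers, so under sparse it is a restricted type and GFP_* values stored in a plain unsigned int read as a mix of restricted and plain types. A rough self-contained sketch of how such a restricted type behaves; the typedef and flag values are stand-ins, not the kernel's definitions:

#ifdef __CHECKER__			/* defined when sparse does the checking */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_gfp_t;	/* stand-in for gfp_t */

#define MY_GFP_ZERO	((__force my_gfp_t)0x01u)
#define MY_GFP_NOWARN	((__force my_gfp_t)0x02u)

/* OK: the variable has the restricted type, matching the flag constants. */
my_gfp_t default_flags = MY_GFP_ZERO | MY_GFP_NOWARN;

/*
 * Under sparse, the old style drew a warning because the destination type
 * was plain, roughly:
 *
 *	unsigned int plain_flags = MY_GFP_ZERO | MY_GFP_NOWARN;
 */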