ion: system_secure_heap: Increase secure pool size by amount requested

Allocating an amount from the secure pool, and then freeing that amount
back to the secure pool will not necessarily increase the pool size. Fix
this by allocating from the nonsecure pool, and then freeing to the secure
pool.

Change-Id: I1da12d5c8f9e1f0330cb2c4ff77cd73521df46d9
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
This commit is contained in:
Patrick Daly 2016-05-16 19:25:31 -07:00
parent 008f057bba
commit 8b39dc7879

View file

@ -124,6 +124,43 @@ static int ion_system_secure_heap_allocate(struct ion_heap *heap,
return ret; return ret;
} }
/*
 * process_one_prefetch - grow the secure pool by one prefetch request.
 *
 * Allocates info->size bytes of ordinary (non-secure) memory from
 * @sys_heap, hands the pages over to the secure VMID from @info, and then
 * frees the buffer with the secure flags set so the pages land in the
 * secure pool rather than back in the non-secure one.  This is the fix
 * described in the commit message: alloc non-secure, free secure.
 *
 * On any failure the buffer keeps flags == 0, so the fall-through free at
 * "out:" returns the pages to the non-secure pool they came from.
 *
 * NOTE(review): ownership of @info stays with the caller (the work
 * function kfree()s it) — this function must not free it.
 */
static void process_one_prefetch(struct ion_heap *sys_heap,
struct prefetch_info *info)
{
/* Stack-local staging buffer: only .heap and .flags are consumed by the
 * sys_heap ops used below — presumably sufficient for this heap's
 * allocate/map_dma/free; TODO confirm no other fields are read. */
struct ion_buffer buffer;
struct sg_table *sg_table;
int ret;
/* flags = 0 => allocate from (and, on error, free back to) the
 * non-secure pool. */
buffer.heap = sys_heap;
buffer.flags = 0;
ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
PAGE_SIZE, buffer.flags);
if (ret) {
pr_debug("%s: Failed to prefetch 0x%zx, ret = %d\n",
__func__, info->size, ret);
return;
}
/* Need the sg_table to pass the pages to the hypervisor assign call. */
sg_table = sys_heap->ops->map_dma(sys_heap, &buffer);
if (IS_ERR_OR_NULL(sg_table))
goto out;
/* Reassign the pages to the secure VMID; only after this succeeds do we
 * retag the buffer below so the free targets the secure pool. */
ret = ion_system_secure_heap_assign_sg(sg_table,
get_secure_vmid(info->vmid));
if (ret)
goto unmap;
/* Now free it to the secure heap */
buffer.heap = sys_heap;
buffer.flags = info->vmid;
/* Error paths fall through here with flags still 0, so the pages go
 * back to the non-secure pool instead. */
unmap:
sys_heap->ops->unmap_dma(sys_heap, &buffer);
out:
sys_heap->ops->free(&buffer);
}
static void ion_system_secure_heap_prefetch_work(struct work_struct *work) static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
{ {
struct ion_system_secure_heap *secure_heap = container_of(work, struct ion_system_secure_heap *secure_heap = container_of(work,
@ -131,42 +168,20 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
prefetch_work); prefetch_work);
struct ion_heap *sys_heap = secure_heap->sys_heap; struct ion_heap *sys_heap = secure_heap->sys_heap;
struct prefetch_info *info, *tmp; struct prefetch_info *info, *tmp;
unsigned long flags, size; unsigned long flags;
struct ion_buffer *buffer;
int ret;
int vmid_flags;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return;
spin_lock_irqsave(&secure_heap->work_lock, flags); spin_lock_irqsave(&secure_heap->work_lock, flags);
list_for_each_entry_safe(info, tmp, list_for_each_entry_safe(info, tmp,
&secure_heap->prefetch_list, list) { &secure_heap->prefetch_list, list) {
list_del(&info->list); list_del(&info->list);
spin_unlock_irqrestore(&secure_heap->work_lock, flags); spin_unlock_irqrestore(&secure_heap->work_lock, flags);
size = info->size;
vmid_flags = info->vmid; process_one_prefetch(sys_heap, info);
kfree(info); kfree(info);
/* buffer->heap used by free() */
buffer->heap = &secure_heap->heap;
buffer->flags = vmid_flags;
ret = sys_heap->ops->allocate(sys_heap, buffer, size,
PAGE_SIZE, 0);
if (ret) {
pr_debug("%s: Failed to get %zx allocation for %s, ret = %d\n",
__func__, info->size, secure_heap->heap.name,
ret);
spin_lock_irqsave(&secure_heap->work_lock, flags);
continue;
}
ion_system_secure_heap_free(buffer);
spin_lock_irqsave(&secure_heap->work_lock, flags); spin_lock_irqsave(&secure_heap->work_lock, flags);
} }
spin_unlock_irqrestore(&secure_heap->work_lock, flags); spin_unlock_irqrestore(&secure_heap->work_lock, flags);
kfree(buffer);
} }
static int alloc_prefetch_info( static int alloc_prefetch_info(