ion_page_pool: Remove prealloc tracking

A separate heap will be used for the preallocation feature.
Also remove the associated ion_buffer flag, ION_FLAG_POOL_PREFETCH.

Change-Id: I8df74307d4c3461198de0a58f6b8ec7c8bef1f12
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Author: Patrick Daly
Date:   2016-04-14 18:17:08 -07:00
commit 66e96d0834, parent e94b446eac
5 changed files with 15 additions and 63 deletions
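
The net effect on the pool API is easiest to see in one place. Below is a sketch (not part of the diff) of the ion_page_pool entry points as they stand after this change, collected from the ion_priv.h hunk further down; parameter names are added here for readability. The two bool prefetch parameters and the separate ion_page_pool_prefetch() entry point are gone.

	void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool);
	void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool);
	void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
	void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page);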


@@ -54,8 +54,7 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 	__free_pages(page, pool->order);
 }
 
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page,
-			     bool prefetch)
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 {
 	mutex_lock(&pool->mutex);
 	if (PageHighMem(page)) {
@@ -65,15 +64,11 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page,
 		list_add_tail(&page->lru, &pool->low_items);
 		pool->low_count++;
 	}
-	if (!prefetch)
-		pool->nr_unreserved++;
 	mutex_unlock(&pool->mutex);
 	return 0;
 }
 
-static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high,
-					 bool prefetch)
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 {
 	struct page *page;
@@ -87,13 +82,6 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high,
 		pool->low_count--;
 	}
 
-	if (prefetch) {
-		BUG_ON(!pool->nr_unreserved);
-		pool->nr_unreserved--;
-	}
-	pool->nr_unreserved = min_t(int, pool->high_count + pool->low_count,
-				    pool->nr_unreserved);
-
 	list_del(&page->lru);
 	return page;
 }
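
For readers outside the kernel tree, the remaining bookkeeping is simple enough to model in user space. The sketch below is illustrative only: all names are hypothetical, and a pthread mutex plus singly linked lists stand in for the kernel's struct mutex and page LRU lists. The point it demonstrates is the one this commit makes: add and remove now maintain just the two counted lists, with no reservation counter to keep consistent.

	/* Minimal user-space model of the simplified pool bookkeeping. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct item { struct item *next; };

	struct pool_model {
		int high_count;
		int low_count;
		struct item *high_items;
		struct item *low_items;
		pthread_mutex_t mutex;
	};

	/* Mirrors ion_page_pool_add(): push onto the matching list and bump
	 * its count. Returns 0 on success, like the kernel version (whose
	 * nonzero return is handled by ion_page_pool_free() further down). */
	static int pool_add(struct pool_model *p, struct item *it, bool high)
	{
		pthread_mutex_lock(&p->mutex);
		if (high) {
			it->next = p->high_items;
			p->high_items = it;
			p->high_count++;
		} else {
			it->next = p->low_items;
			p->low_items = it;
			p->low_count++;
		}
		pthread_mutex_unlock(&p->mutex);
		return 0;
	}

	/* Mirrors ion_page_pool_remove(): pop from the requested list. As in
	 * the kernel version, the caller must hold the mutex and must have
	 * checked the relevant count first. */
	static struct item *pool_remove(struct pool_model *p, bool high)
	{
		struct item *it;

		if (high) {
			it = p->high_items;
			p->high_items = it->next;
			p->high_count--;
		} else {
			it = p->low_items;
			p->low_items = it->next;
			p->low_count--;
		}
		return it;
	}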
@@ -108,9 +96,9 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
 	if (mutex_trylock(&pool->mutex)) {
 		if (pool->high_count)
-			page = ion_page_pool_remove(pool, true, false);
+			page = ion_page_pool_remove(pool, true);
 		else if (pool->low_count)
-			page = ion_page_pool_remove(pool, false, false);
+			page = ion_page_pool_remove(pool, false);
 		mutex_unlock(&pool->mutex);
 	}
 
 	if (!page) {
@@ -120,27 +108,6 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
 	return page;
 }
 
-void *ion_page_pool_prefetch(struct ion_page_pool *pool, bool *from_pool)
-{
-	struct page *page = NULL;
-
-	BUG_ON(!pool);
-	*from_pool = true;
-
-	if (mutex_trylock(&pool->mutex)) {
-		if (pool->high_count && pool->nr_unreserved > 0)
-			page = ion_page_pool_remove(pool, true, true);
-		else if (pool->low_count && pool->nr_unreserved > 0)
-			page = ion_page_pool_remove(pool, false, true);
-		mutex_unlock(&pool->mutex);
-	}
-
-	if (!page) {
-		page = ion_page_pool_alloc_pages(pool);
-		*from_pool = false;
-	}
-
-	return page;
-}
-
 /*
  * Tries to allocate from only the specified Pool and returns NULL otherwise
  */
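
The deleted ion_page_pool_prefetch() was a near-copy of ion_page_pool_alloc() with extra nr_unreserved checks, which is why it can simply go once prefetching moves to its own heap. Both shared the same opportunistic pattern: take from the pool only if the mutex is uncontended, otherwise fall back to a fresh allocation. Continuing the user-space model above (with malloc() standing in for ion_page_pool_alloc_pages()):

	/* Mirrors ion_page_pool_alloc(): opportunistic trylock on the pool,
	 * with a fallback to the underlying allocator on contention or an
	 * empty pool. *from_pool tells the caller which path was taken. */
	static struct item *pool_alloc(struct pool_model *p, bool *from_pool)
	{
		struct item *it = NULL;

		*from_pool = true;
		if (pthread_mutex_trylock(&p->mutex) == 0) {
			if (p->high_count)
				it = pool_remove(p, true);
			else if (p->low_count)
				it = pool_remove(p, false);
			pthread_mutex_unlock(&p->mutex);
		}
		if (!it) {
			/* stand-in for ion_page_pool_alloc_pages() */
			it = malloc(sizeof(*it));
			*from_pool = false;
		}
		return it;
	}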
@@ -152,24 +119,22 @@ void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
 	if (mutex_trylock(&pool->mutex)) {
 		if (pool->high_count)
-			page = ion_page_pool_remove(pool, true, false);
+			page = ion_page_pool_remove(pool, true);
 		else if (pool->low_count)
-			page = ion_page_pool_remove(pool, false, false);
+			page = ion_page_pool_remove(pool, false);
 		mutex_unlock(&pool->mutex);
 	}
 
 	return page;
 }
 
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page,
-			bool prefetch)
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
 {
 	int ret;
 
 	BUG_ON(pool->order != compound_order(page));
 
-	ret = ion_page_pool_add(pool, page, prefetch);
+	/* FIXME? For a secure page, not hyp unassigned in this err path */
+	ret = ion_page_pool_add(pool, page);
 	if (ret)
 		ion_page_pool_free_pages(pool, page);
 }
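
The free path is the mirror image: try to return the page to the pool, and only genuinely release it if that fails. In the user-space model:

	/* Mirrors ion_page_pool_free() above: recycle into the pool when
	 * possible, release otherwise. free() stands in for
	 * ion_page_pool_free_pages(). */
	static void pool_free(struct pool_model *p, struct item *it, bool high)
	{
		if (pool_add(p, it, high) != 0)
			free(it);
	}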
@@ -208,9 +173,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 		mutex_lock(&pool->mutex);
 		if (pool->low_count) {
-			page = ion_page_pool_remove(pool, false, false);
+			page = ion_page_pool_remove(pool, false);
 		} else if (high && pool->high_count) {
-			page = ion_page_pool_remove(pool, true, false);
+			page = ion_page_pool_remove(pool, true);
 		} else {
 			mutex_unlock(&pool->mutex);
 			break;
@@ -233,7 +198,6 @@ struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
 	pool->dev = dev;
 	pool->high_count = 0;
 	pool->low_count = 0;
-	pool->nr_unreserved = 0;
 	INIT_LIST_HEAD(&pool->low_items);
 	INIT_LIST_HEAD(&pool->high_items);
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
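
Initialization shrinks accordingly. In the model, setting up a pool is now just zeroing the two counts and lists and initializing the mutex:

	/* Mirrors the simplified ion_page_pool_create() init above:
	 * no reservation counter left to set up. */
	static void pool_init(struct pool_model *p)
	{
		p->high_count = 0;
		p->low_count = 0;
		p->high_items = NULL;
		p->low_items = NULL;
		pthread_mutex_init(&p->mutex, NULL);
	}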


@@ -415,8 +415,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
  * struct ion_page_pool - pagepool struct
  * @high_count:		number of highmem items in the pool
  * @low_count:		number of lowmem items in the pool
- * @nr_unreserved:	number of items in the pool which have not been reserved
- *			by a prefetch allocation
  * @high_items:		list of highmem items
  * @low_items:		list of lowmem items
  * @mutex:		lock protecting this struct and especially the count
@@ -433,7 +431,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 struct ion_page_pool {
 	int high_count;
 	int low_count;
-	int nr_unreserved;
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
@@ -448,10 +445,9 @@ struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
 void ion_page_pool_destroy(struct ion_page_pool *);
 void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
 void *ion_page_pool_alloc_pool_only(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *, bool prefetch);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
 void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
 int ion_page_pool_total(struct ion_page_pool *pool, bool high);
-void *ion_page_pool_prefetch(struct ion_page_pool *pool, bool *from_pool);
 
 #ifdef CONFIG_ION_POOL_CACHE_POLICY
 static inline void ion_page_pool_alloc_set_cache_policy


@@ -78,7 +78,6 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      bool *from_pool)
 {
 	bool cached = ion_buffer_cached(buffer);
-	bool prefetch = buffer->flags & ION_FLAG_POOL_PREFETCH;
 	struct page *page;
 	struct ion_page_pool *pool;
 	int vmid = get_secure_vmid(buffer->flags);
@@ -92,10 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 		else
 			pool = heap->cached_pools[order_to_index(order)];
 
-		if (prefetch)
-			page = ion_page_pool_prefetch(pool, from_pool);
-		else
-			page = ion_page_pool_alloc(pool, from_pool);
+		page = ion_page_pool_alloc(pool, from_pool);
 	} else {
 		gfp_t gfp_mask = low_order_gfp_flags;
 		if (order)
@@ -119,7 +115,6 @@ static void free_buffer_page(struct ion_system_heap *heap,
 			     unsigned int order)
 {
 	bool cached = ion_buffer_cached(buffer);
-	bool prefetch = buffer->flags & ION_FLAG_POOL_PREFETCH;
 	int vmid = get_secure_vmid(buffer->flags);
 
 	if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
@@ -134,7 +129,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
 		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 			ion_page_pool_free_immediate(pool, page);
 		else
-			ion_page_pool_free(pool, page, prefetch);
+			ion_page_pool_free(pool, page);
 	} else {
 		__free_pages(page, order);
 	}
@@ -452,7 +447,7 @@ out1:
 	/* Restore pages to secure pool */
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
 		list_del(&page->lru);
-		ion_page_pool_free(pool, page, false);
+		ion_page_pool_free(pool, page);
 	}
 	return 0;
 out2:


@@ -151,8 +151,7 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
 
 		/* buffer->heap used by free() */
 		buffer->heap = &secure_heap->heap;
-		buffer->flags = ION_FLAG_POOL_PREFETCH;
-		buffer->flags |= vmid_flags;
+		buffer->flags = vmid_flags;
 		ret = sys_heap->ops->allocate(sys_heap, buffer, size,
 					      PAGE_SIZE, 0);
 		if (ret) {


@@ -110,8 +110,6 @@ enum cp_mem_usage {
  */
 #define ION_FLAG_POOL_FORCE_ALLOC (1 << 16)
 
-#define ION_FLAG_POOL_PREFETCH (1 << 27)
-
 /**
  * Deprecated! Please use the corresponding ION_FLAG_*
  */