Merge "mm: zbud: fix the locking scenarios with zcache"

This commit is contained in:
Linux Build Service Account 2016-08-29 00:49:23 -07:00 committed by Gerrit - the friendly Code Review server
commit 105d1fddef

View file

@@ -357,6 +357,7 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
struct zbud_header *zhdr = NULL; struct zbud_header *zhdr = NULL;
enum buddy bud; enum buddy bud;
struct page *page; struct page *page;
unsigned long flags;
int found = 0; int found = 0;
if (!size || (gfp & __GFP_HIGHMEM)) if (!size || (gfp & __GFP_HIGHMEM))
@@ -364,7 +365,7 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
return -ENOSPC; return -ENOSPC;
chunks = size_to_chunks(size); chunks = size_to_chunks(size);
spin_lock_bh(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
/* First, try to find an unbuddied zbud page. */ /* First, try to find an unbuddied zbud page. */
zhdr = NULL; zhdr = NULL;
@@ -383,11 +384,11 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
} }
/* Couldn't find unbuddied zbud page, create new one */ /* Couldn't find unbuddied zbud page, create new one */
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
page = alloc_page(gfp); page = alloc_page(gfp);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
spin_lock_bh(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
pool->pages_nr++; pool->pages_nr++;
zhdr = init_zbud_page(page); zhdr = init_zbud_page(page);
bud = FIRST; bud = FIRST;
@@ -415,7 +416,7 @@ found:
*handle = encode_handle(zhdr, bud); *handle = encode_handle(zhdr, bud);
if ((gfp & __GFP_ZERO) && found) if ((gfp & __GFP_ZERO) && found)
memset((void *)*handle, 0, size); memset((void *)*handle, 0, size);
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
return 0; return 0;
} }
@@ -434,8 +435,9 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
{ {
struct zbud_header *zhdr; struct zbud_header *zhdr;
int freechunks; int freechunks;
unsigned long flags;
spin_lock_bh(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
zhdr = handle_to_zbud_header(handle); zhdr = handle_to_zbud_header(handle);
/* If first buddy, handle will be page aligned */ /* If first buddy, handle will be page aligned */
@@ -446,7 +448,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
if (zhdr->under_reclaim) { if (zhdr->under_reclaim) {
/* zbud page is under reclaim, reclaim will free */ /* zbud page is under reclaim, reclaim will free */
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
return; return;
} }
@@ -464,7 +466,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
} }
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
} }
#define list_tail_entry(ptr, type, member) \ #define list_tail_entry(ptr, type, member) \
@@ -509,12 +511,13 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{ {
int i, ret, freechunks; int i, ret, freechunks;
struct zbud_header *zhdr; struct zbud_header *zhdr;
unsigned long flags;
unsigned long first_handle = 0, last_handle = 0; unsigned long first_handle = 0, last_handle = 0;
spin_lock_bh(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
retries == 0) { retries == 0) {
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
return -EINVAL; return -EINVAL;
} }
for (i = 0; i < retries; i++) { for (i = 0; i < retries; i++) {
@@ -533,7 +536,7 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
first_handle = encode_handle(zhdr, FIRST); first_handle = encode_handle(zhdr, FIRST);
if (zhdr->last_chunks) if (zhdr->last_chunks)
last_handle = encode_handle(zhdr, LAST); last_handle = encode_handle(zhdr, LAST);
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
/* Issue the eviction callback(s) */ /* Issue the eviction callback(s) */
if (first_handle) { if (first_handle) {
@@ -547,7 +550,7 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
goto next; goto next;
} }
next: next:
spin_lock_bh(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
zhdr->under_reclaim = false; zhdr->under_reclaim = false;
if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
/* /*
@@ -556,7 +559,7 @@ next:
*/ */
free_zbud_page(zhdr); free_zbud_page(zhdr);
pool->pages_nr--; pool->pages_nr--;
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
return 0; return 0;
} else if (zhdr->first_chunks == 0 || } else if (zhdr->first_chunks == 0 ||
zhdr->last_chunks == 0) { zhdr->last_chunks == 0) {
@@ -571,7 +574,7 @@ next:
/* add to beginning of LRU */ /* add to beginning of LRU */
list_add(&zhdr->lru, &pool->lru); list_add(&zhdr->lru, &pool->lru);
} }
spin_unlock_bh(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
return -EAGAIN; return -EAGAIN;
} }