BACKPORT: zsmalloc: require GFP in zs_malloc()
Pass GFP flags to zs_malloc() instead of using a fixed mask supplied to zs_create_pool(), so we can be more flexible, but, more importantly, we need this to switch zram to per-cpu compression streams -- zram will try to allocate handle with preemption disabled in a fast path and switch to a slow path (using different gfp mask) if the fast one has failed. Apart from that, this also aligns the zs_malloc() interface with zspool/zbud. [sergey.senozhatsky@gmail.com: pass GFP flags to zs_malloc() instead of using a fixed mask] Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Acked-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> (cherry picked from commit d0d8da2dc49dfdfe1d788eaf4d55eb5d4964d926) Signed-off-by: Peter Kalauskas <peskal@google.com> Bug: 112488418 Change-Id: I31276c9351be21a4ed588681b332e98142b76526
This commit is contained in:
parent
6982182465
commit
bece429b72
3 changed files with 17 additions and 15 deletions
|
@ -514,7 +514,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
|
||||||
goto out_error;
|
goto out_error;
|
||||||
}
|
}
|
||||||
|
|
||||||
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
|
meta->mem_pool = zs_create_pool(pool_name);
|
||||||
if (!meta->mem_pool) {
|
if (!meta->mem_pool) {
|
||||||
pr_err("Error creating memory pool\n");
|
pr_err("Error creating memory pool\n");
|
||||||
goto out_error;
|
goto out_error;
|
||||||
|
@ -717,7 +717,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
||||||
src = uncmem;
|
src = uncmem;
|
||||||
}
|
}
|
||||||
|
|
||||||
handle = zs_malloc(meta->mem_pool, clen);
|
handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM);
|
||||||
if (!handle) {
|
if (!handle) {
|
||||||
pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
|
pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
|
||||||
index, clen);
|
index, clen);
|
||||||
|
|
|
@ -41,10 +41,10 @@ struct zs_pool_stats {
|
||||||
|
|
||||||
struct zs_pool;
|
struct zs_pool;
|
||||||
|
|
||||||
struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
|
struct zs_pool *zs_create_pool(const char *name);
|
||||||
void zs_destroy_pool(struct zs_pool *pool);
|
void zs_destroy_pool(struct zs_pool *pool);
|
||||||
|
|
||||||
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
|
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
|
||||||
void zs_free(struct zs_pool *pool, unsigned long obj);
|
void zs_free(struct zs_pool *pool, unsigned long obj);
|
||||||
|
|
||||||
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
|
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
|
||||||
|
|
|
@ -247,7 +247,6 @@ struct zs_pool {
|
||||||
struct size_class **size_class;
|
struct size_class **size_class;
|
||||||
struct kmem_cache *handle_cachep;
|
struct kmem_cache *handle_cachep;
|
||||||
|
|
||||||
gfp_t flags; /* allocation flags used when growing pool */
|
|
||||||
atomic_long_t pages_allocated;
|
atomic_long_t pages_allocated;
|
||||||
|
|
||||||
struct zs_pool_stats stats;
|
struct zs_pool_stats stats;
|
||||||
|
@ -296,10 +295,10 @@ static void destroy_handle_cache(struct zs_pool *pool)
|
||||||
kmem_cache_destroy(pool->handle_cachep);
|
kmem_cache_destroy(pool->handle_cachep);
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long alloc_handle(struct zs_pool *pool)
|
static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp)
|
||||||
{
|
{
|
||||||
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
|
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
|
||||||
pool->flags & ~__GFP_HIGHMEM);
|
gfp & ~__GFP_HIGHMEM);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void free_handle(struct zs_pool *pool, unsigned long handle)
|
static void free_handle(struct zs_pool *pool, unsigned long handle)
|
||||||
|
@ -325,7 +324,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
|
||||||
const struct zpool_ops *zpool_ops,
|
const struct zpool_ops *zpool_ops,
|
||||||
struct zpool *zpool)
|
struct zpool *zpool)
|
||||||
{
|
{
|
||||||
return zs_create_pool(name, gfp);
|
/*
|
||||||
|
* Ignore global gfp flags: zs_malloc() may be invoked from
|
||||||
|
* different contexts and its caller must provide a valid
|
||||||
|
* gfp mask.
|
||||||
|
*/
|
||||||
|
return zs_create_pool(name);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void zs_zpool_destroy(void *pool)
|
static void zs_zpool_destroy(void *pool)
|
||||||
|
@ -336,7 +340,7 @@ static void zs_zpool_destroy(void *pool)
|
||||||
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
|
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
|
||||||
unsigned long *handle)
|
unsigned long *handle)
|
||||||
{
|
{
|
||||||
*handle = zs_malloc(pool, size);
|
*handle = zs_malloc(pool, size, gfp);
|
||||||
return *handle ? 0 : -1;
|
return *handle ? 0 : -1;
|
||||||
}
|
}
|
||||||
static void zs_zpool_free(void *pool, unsigned long handle)
|
static void zs_zpool_free(void *pool, unsigned long handle)
|
||||||
|
@ -1388,7 +1392,7 @@ static unsigned long obj_malloc(struct page *first_page,
|
||||||
* otherwise 0.
|
* otherwise 0.
|
||||||
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
|
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
|
||||||
*/
|
*/
|
||||||
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
|
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
|
||||||
{
|
{
|
||||||
unsigned long handle, obj;
|
unsigned long handle, obj;
|
||||||
struct size_class *class;
|
struct size_class *class;
|
||||||
|
@ -1397,7 +1401,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
|
||||||
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
|
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
handle = alloc_handle(pool);
|
handle = alloc_handle(pool, gfp);
|
||||||
if (!handle)
|
if (!handle)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -1410,7 +1414,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
|
||||||
|
|
||||||
if (!first_page) {
|
if (!first_page) {
|
||||||
spin_unlock(&class->lock);
|
spin_unlock(&class->lock);
|
||||||
first_page = alloc_zspage(class, pool->flags);
|
first_page = alloc_zspage(class, gfp);
|
||||||
if (unlikely(!first_page)) {
|
if (unlikely(!first_page)) {
|
||||||
free_handle(pool, handle);
|
free_handle(pool, handle);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1884,7 +1888,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
|
||||||
* On success, a pointer to the newly created pool is returned,
|
* On success, a pointer to the newly created pool is returned,
|
||||||
* otherwise NULL.
|
* otherwise NULL.
|
||||||
*/
|
*/
|
||||||
struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
|
struct zs_pool *zs_create_pool(const char *name)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
struct zs_pool *pool;
|
struct zs_pool *pool;
|
||||||
|
@ -1954,8 +1958,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
|
||||||
prev_class = class;
|
prev_class = class;
|
||||||
}
|
}
|
||||||
|
|
||||||
pool->flags = flags;
|
|
||||||
|
|
||||||
if (zs_pool_stat_create(name, pool))
|
if (zs_pool_stat_create(name, pool))
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue