Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
 "There's the new kmalloc_array() API, minor fixes and performance
  improvements, but quite honestly, nothing terribly exciting."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: SLAB Out-of-memory diagnostics
  slab: introduce kmalloc_array()
  slub: per cpu partial statistics change
  slub: include include for prefetch
  slub: Do not hold slub_lock when calling sysfs_slab_add()
  slub: prefetch next freelist pointer in slab_alloc()
  slab, cleanup: remove unneeded return
commit 0c9aac0826

4 changed files with 91 additions and 14 deletions
@@ -190,7 +190,7 @@ size_t ksize(const void *);
 #endif
 
 /**
- * kcalloc - allocate memory for an array. The memory is set to zero.
+ * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
  * @size: element size.
  * @flags: the type of memory to allocate.
@@ -240,11 +240,22 @@ size_t ksize(const void *);
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
  */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
 	if (size != 0 && n > ULONG_MAX / size)
 		return NULL;
-	return __kmalloc(n * size, flags | __GFP_ZERO);
+	return __kmalloc(n * size, flags);
+}
+
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
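For callers, the point of kmalloc_array() is that an open-coded kmalloc(n * size, ...) can silently wrap around when n comes from outside, while the new helper returns NULL instead. A minimal sketch of how a caller might use it (struct foo and nr_entries are made-up names, not part of this commit):

#include <linux/types.h>
#include <linux/slab.h>

struct foo {
	u64 key;
	u64 val;
};

/* Hypothetical helper: allocate a table of nr_entries elements. */
static struct foo *alloc_foo_table(size_t nr_entries)
{
	/*
	 * kmalloc_array() checks n * size for overflow and returns NULL
	 * rather than handing back a buffer that is too small.
	 */
	return kmalloc_array(nr_entries, sizeof(struct foo), GFP_KERNEL);
}

kcalloc() keeps its old behaviour; it is now simply kmalloc_array() with __GFP_ZERO added to the flags.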
@@ -22,7 +22,7 @@ enum stat_item {
 	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
-	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
+	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
 	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
@@ -38,7 +38,9 @@ enum stat_item {
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
 	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
 	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
-	CPU_PARTIAL_FREE,	/* USed cpu partial on free */
+	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
+	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
+	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
56  mm/slab.c
@@ -1731,6 +1731,52 @@ static int __init cpucache_init(void)
 }
 __initcall(cpucache_init);
 
+static noinline void
+slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+{
+	struct kmem_list3 *l3;
+	struct slab *slabp;
+	unsigned long flags;
+	int node;
+
+	printk(KERN_WARNING
+		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nodeid, gfpflags);
+	printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
+		cachep->name, cachep->buffer_size, cachep->gfporder);
+
+	for_each_online_node(node) {
+		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
+		unsigned long active_slabs = 0, num_slabs = 0;
+
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		spin_lock_irqsave(&l3->list_lock, flags);
+		list_for_each_entry(slabp, &l3->slabs_full, list) {
+			active_objs += cachep->num;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_partial, list) {
+			active_objs += slabp->inuse;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_free, list)
+			num_slabs++;
+
+		free_objects += l3->free_objects;
+		spin_unlock_irqrestore(&l3->list_lock, flags);
+
+		num_slabs += active_slabs;
+		num_objs = num_slabs * cachep->num;
+		printk(KERN_WARNING
+			" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
+			node, active_slabs, num_slabs, active_objs, num_objs,
+			free_objects);
+	}
+}
+
 /*
  * Interface to system's page allocator. No need to hold the cache-lock.
  *
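In the per-node summary printed above, "slabs: a/b" is the number of slabs with at least one allocated object versus all slabs on the node, "objs: a/b" the corresponding object counts (full slabs contribute cachep->num objects each, partial slabs contribute slabp->inuse), and "free" is the node's free-object count.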
@@ -1757,8 +1803,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
-	if (!page)
+	if (!page) {
+		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
+			slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
+	}
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
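The diagnostic is gated on __GFP_NOWARN, so callers that expect failure and handle it themselves can keep the log quiet; printk_ratelimit() already keeps repeated failures from flooding it. A hedged sketch of such a caller (the function and cache names are invented for illustration):

/*
 * Hypothetical caller: try the large object first, fall back quietly.
 * __GFP_NOWARN suppresses the new slab_out_of_memory() report.
 */
static void *alloc_big_or_small(struct kmem_cache *big_cache,
				struct kmem_cache *small_cache)
{
	void *obj;

	obj = kmem_cache_alloc(big_cache, GFP_KERNEL | __GFP_NOWARN);
	if (obj)
		return obj;

	return kmem_cache_alloc(small_cache, GFP_KERNEL);
}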
@@ -3696,13 +3745,12 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 
 	if (likely(ac->avail < ac->limit)) {
 		STATS_INC_FREEHIT(cachep);
-		ac->entry[ac->avail++] = objp;
-		return;
 	} else {
 		STATS_INC_FREEMISS(cachep);
 		cache_flusharray(cachep, ac);
-		ac->entry[ac->avail++] = objp;
 	}
+
+	ac->entry[ac->avail++] = objp;
 }
 
 /**
26  mm/slub.c
@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+	prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
 	void *p;
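prefetch_freepointer() simply touches the cache line holding an object's freelist link (object + s->offset); the allocation fast path further down uses it so that link is already warm when the next allocation chases it.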
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
 	} else {
 		page->freelist = t;
 		available = put_cpu_partial(s, page, 0);
+		stat(s, CPU_PARTIAL_NODE);
 	}
 	if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
 		break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				local_irq_restore(flags);
 				pobjects = 0;
 				pages = 0;
+				stat(s, CPU_PARTIAL_DRAIN);
 			}
 		}
 
@@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
 
@@ -2319,6 +2326,8 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
+		void *next_object = get_freepointer_safe(s, object);
+
 		/*
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
@@ -2334,11 +2343,12 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer_safe(s, object), next_tid(tid)))) {
+				next_object, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
+		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
 
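The pattern in the two hunks above: read the next free pointer once into next_object, use it in the cmpxchg, then prefetch the freelist link inside next_object so that, when next_object is handed out by the following allocation, its free pointer is already in cache. The same start-the-load-early idea in plain C, outside the kernel (the names and the __builtin_prefetch call are purely illustrative, not from this commit):

#include <stddef.h>

struct node {
	struct node *next;
	long payload;
};

static long walk_sum(const struct node *head)
{
	long sum = 0;
	const struct node *n;

	for (n = head; n; n = n->next) {
		/* Request the next node's cache line before it is needed. */
		if (n->next)
			__builtin_prefetch(n->next);
		sum += n->payload;	/* this work overlaps the prefetch */
	}
	return sum;
}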
@@ -2475,9 +2485,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * If we just froze the page then put it onto the
 		 * per cpu partial list.
 		 */
-		if (new.frozen && !was_frozen)
+		if (new.frozen && !was_frozen) {
 			put_cpu_partial(s, page, 1);
+			stat(s, CPU_PARTIAL_FREE);
+		}
 		/*
 		 * The list lock was not taken therefore no list
 		 * activity can be necessary.
@@ -3939,13 +3950,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
 				goto err;
 			}
-			up_write(&slub_lock);
 			return s;
 		}
 		kfree(n);
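Reading of this hunk: slub_lock is now dropped as soon as the new cache is on slab_caches, so sysfs_slab_add() runs without it; the failure path re-takes the lock only long enough to unlink the half-created cache before freeing it. This is the "slub: Do not hold slub_lock when calling sysfs_slab_add()" change from the shortlog above.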
@@ -5069,6 +5081,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5134,6 +5148,8 @@ static struct attribute *slab_attrs[] = {
 	&cmpxchg_double_cpu_fail_attr.attr,
 	&cpu_partial_alloc_attr.attr,
 	&cpu_partial_free_attr.attr,
+	&cpu_partial_node_attr.attr,
+	&cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,
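With the two STAT_ATTR() lines and the matching slab_attrs[] entries above, the new counters appear alongside the existing ones as per-cache sysfs files; on kernels built with SLUB statistics enabled they should show up as /sys/kernel/slab/<cache>/cpu_partial_node and .../cpu_partial_drain.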