android_kernel_oneplus_msm8998/include/linux/shrinker.h
Vinayak Menon d8c98bb6ae mm: separate out the invocation of lowmemorykiller shrinker
The commit '6b4f77 (mm: vmscan: invoke slab shrinkers from shrink_zone())'
fixed the invocation of shrinkers but resulted in the lowmemorykiller
shrinker being called more often. Reduce the number of lowmemorykiller
shrinker invocations by separating the lowmemorykiller shrinker out of
shrink_slab. With this change the lowmemorykiller is invoked only once for
all zones reclaimed in the direct reclaim path, and once for each zone in
the kswapd path. As a consequence, the eligible pages passed to
shrink_slab_lmk are now the reclaimable pages of all zones. Reducing the
number of lowmemorykiller invocations reduces the unnecessary time spent in
the lowmemorykiller and thus contention or failures on the
lowmemorykiller's scan_mutex.

Change-Id: Iaabb9e441711f1dc804980b5853b64b3f214698d
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
2017-04-18 10:29:44 +05:30
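
To make the split concrete, here is a small standalone C model of the
dispatch described above, assuming the change gates the shrinker walk on
the SHRINKER_LMK flag defined in this header. The list layout, names, and
printouts are illustrative only, not the actual vmscan code.

#include <stdio.h>

#define SHRINKER_LMK (1 << 2)		/* mirrors the flag in this header */

struct shrinker_model {
	const char *name;		/* illustrative label */
	unsigned long flags;
	struct shrinker_model *next;	/* stand-in for the kernel's list */
};

static struct shrinker_model lmk  = { "lowmemorykiller", SHRINKER_LMK, NULL };
static struct shrinker_model slab = { "slab-cache", 0, &lmk };
static struct shrinker_model *shrinker_list = &slab;

/* Regular walk: shrink_slab now skips SHRINKER_LMK entries. */
static void model_shrink_slab(void)
{
	struct shrinker_model *s;

	for (s = shrinker_list; s; s = s->next)
		if (!(s->flags & SHRINKER_LMK))
			printf("shrink_slab: scan %s\n", s->name);
}

/* LMK-only walk: shrink_slab_lmk visits just the SHRINKER_LMK entries. */
static void model_shrink_slab_lmk(void)
{
	struct shrinker_model *s;

	for (s = shrinker_list; s; s = s->next)
		if (s->flags & SHRINKER_LMK)
			printf("shrink_slab_lmk: scan %s\n", s->name);
}

int main(void)
{
	model_shrink_slab();		/* runs per zone being reclaimed */
	model_shrink_slab_lmk();	/* once for the whole direct-reclaim pass */
	return 0;
}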


#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};
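
/*
 * Illustrative sketch (not from this tree): the reclaim core fills in a
 * shrink_control before invoking the callbacks, along these lines. The
 * gfp_mask, node_id and batch_size variables below are hypothetical.
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = gfp_mask,
 *		.nid = node_id,
 *		.memcg = NULL,
 *	};
 *
 *	sc.nr_to_scan = batch_size;
 *	freed = shrinker->scan_objects(shrinker, &sc);
 */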
#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, or if the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during
 * the count callback; the shrinker relies on aggregating the scan counts
 * that could not be executed due to potential deadlocks, so that they are
 * run at a later call when the deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the
 * cache and attempt to free items from it. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be
 * made due to potential deadlocks. If SHRINK_STOP is returned, no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness.
 * (An illustrative example implementation follows this header.)
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* These are for internal use */
	struct list_head list;
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)
#define SHRINKER_LMK		(1 << 2)

extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
#endif
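
For reference, a minimal sketch of a client of this interface, written as a
kernel-module-style example. The demo_* names, the cache counter and the
mutex are hypothetical stand-ins for a real cache; only the two callbacks,
DEFAULT_SEEKS, SHRINK_STOP and register_shrinker()/unregister_shrinker()
come from this header.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(demo_lock);			/* hypothetical cache lock */
static unsigned long demo_cache_count = 128;	/* hypothetical object count */

/* Report how many objects could be freed; 0 if empty or unknown. */
static unsigned long demo_count_objects(struct shrinker *s,
					struct shrink_control *sc)
{
	return demo_cache_count;
}

/* Free up to sc->nr_to_scan objects, or bail out with SHRINK_STOP. */
static unsigned long demo_scan_objects(struct shrinker *s,
				       struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!mutex_trylock(&demo_lock))
		return SHRINK_STOP;	/* cannot make progress safely */

	while (demo_cache_count && freed < sc->nr_to_scan) {
		demo_cache_count--;	/* stand-in for freeing one object */
		freed++;
	}

	mutex_unlock(&demo_lock);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that returning SHRINK_STOP from scan_objects (rather than 0) is what
tells the current reclaim context to stop calling this shrinker, matching
the comment above; an ordinarily empty cache is signalled by count_objects
returning 0.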