shrinker: convert superblock shrinkers to new API
Convert the superblock shrinker to use the new count/scan API, and propagate
the API changes through to the filesystem callouts. The filesystem callouts
already use a count/scan API, so it's just a matter of changing the counters
to longs to match the VM API.

This requires the dentry and inode shrinker callouts to be converted to the
count/scan API. This is mainly a mechanical change.

[glommer@openvz.org: use mult_frac for fractional proportions, build fixes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 24f7c6b981
commit 0a234c6dcb

8 changed files with 69 additions and 47 deletions
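
For background, the count/scan API referred to above splits the old single
->shrink() callback into two: count_objects() gives a cheap estimate of how
many objects are reclaimable, and scan_objects() does the actual reclaim and
reports how many it freed. The sketch below is illustrative only and not part
of the patch; the my_cache_* names are hypothetical, while struct shrinker,
struct shrink_control, SHRINK_STOP and DEFAULT_SEEKS are the real interface
the patch adopts.

#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/shrinker.h>

/* Hypothetical cache state, for illustration only. */
static atomic_long_t my_cache_nr_objects = ATOMIC_LONG_INIT(0);
static bool my_cache_evict_one(void);	/* frees one object, false when empty */

/* count_objects: a cheap estimate; no reclaim work, no blocking. */
static unsigned long my_cache_count_objects(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	return atomic_long_read(&my_cache_nr_objects);
}

/* scan_objects: do the reclaim, return the number actually freed. */
static unsigned long my_cache_scan_objects(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))	/* must not recurse into the FS */
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan && my_cache_evict_one())
		freed++;
	atomic_long_sub(freed, &my_cache_nr_objects);
	return freed;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_cache_count_objects,
	.scan_objects	= my_cache_scan_objects,
	.seeks		= DEFAULT_SEEKS,
	.batch		= 1024,
};
/* registered at init time with register_shrinker(&my_shrinker) */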
fs/dcache.c
@@ -913,11 +913,12 @@ static void shrink_dentry_list(struct list_head *list)
  * This function may fail to free any resources if all the dentries are in
  * use.
  */
-void prune_dcache_sb(struct super_block *sb, int count)
+long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
 	struct dentry *dentry;
 	LIST_HEAD(referenced);
 	LIST_HEAD(tmp);
+	long freed = 0;
 
 relock:
 	spin_lock(&sb->s_dentry_lru_lock);
@@ -942,7 +943,8 @@ relock:
 			this_cpu_dec(nr_dentry_unused);
 			sb->s_nr_dentry_unused--;
 			spin_unlock(&dentry->d_lock);
-			if (!--count)
+			freed++;
+			if (!--nr_to_scan)
 				break;
 		}
 		cond_resched_lock(&sb->s_dentry_lru_lock);
@@ -952,6 +954,7 @@ relock:
 	spin_unlock(&sb->s_dentry_lru_lock);
 
 	shrink_dentry_list(&tmp);
+	return freed;
 }
 
 /*
fs/inode.c
@@ -706,10 +706,11 @@ static int can_unuse(struct inode *inode)
  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
  * with this flag set because they are the inodes that are out of order.
  */
-void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
 	LIST_HEAD(freeable);
-	int nr_scanned;
+	long nr_scanned;
+	long freed = 0;
 	unsigned long reap = 0;
 
 	spin_lock(&sb->s_inode_lru_lock);
@@ -779,6 +780,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
 		list_move(&inode->i_lru, &freeable);
 		sb->s_nr_inodes_unused--;
 		this_cpu_dec(nr_unused);
+		freed++;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -789,6 +791,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
 		current->reclaim_state->reclaimed_slab += reap;
 
 	dispose_list(&freeable);
+	return freed;
 }
 
 static void __wait_on_freeing_inode(struct inode *inode);
fs/internal.h
@@ -114,6 +114,7 @@ extern int open_check_o_direct(struct file *f);
  * inode.c
  */
 extern spinlock_t inode_sb_list_lock;
+extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan);
 extern void inode_add_lru(struct inode *inode);
 
 /*
@@ -130,6 +131,7 @@ extern int invalidate_inodes(struct super_block *, bool);
  */
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
 extern int d_set_mounted(struct dentry *dentry);
+extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan);
 
 /*
  * read_write.c
fs/super.c
@@ -53,11 +53,15 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
  * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
  * take a passive reference to the superblock to avoid this from occurring.
  */
-static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long super_cache_scan(struct shrinker *shrink,
+				      struct shrink_control *sc)
 {
 	struct super_block *sb;
-	int	fs_objects = 0;
-	int	total_objects;
+	long	fs_objects = 0;
+	long	total_objects;
+	long	freed = 0;
+	long	dentries;
+	long	inodes;
 
 	sb = container_of(shrink, struct super_block, s_shrink);
 
@@ -65,11 +69,11 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
 	 * Deadlock avoidance. We may hold various FS locks, and we don't want
 	 * to recurse into the FS that called us in clear_inode() and friends..
 	 */
-	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
-		return -1;
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
 
 	if (!grab_super_passive(sb))
-		return -1;
+		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
 		fs_objects = sb->s_op->nr_cached_objects(sb);
@@ -77,33 +81,46 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
 	total_objects = sb->s_nr_dentry_unused +
 			sb->s_nr_inodes_unused + fs_objects + 1;
 
-	if (sc->nr_to_scan) {
-		int	dentries;
-		int	inodes;
-
-		/* proportion the scan between the caches */
-		dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
-							total_objects);
-		inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
-							total_objects);
-		if (fs_objects)
-			fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
-							total_objects);
-		/*
-		 * prune the dcache first as the icache is pinned by it, then
-		 * prune the icache, followed by the filesystem specific caches
-		 */
-		prune_dcache_sb(sb, dentries);
-		prune_icache_sb(sb, inodes);
+	/* proportion the scan between the caches */
+	dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
+								total_objects);
+	inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
+								total_objects);
 
-		if (fs_objects && sb->s_op->free_cached_objects) {
-			sb->s_op->free_cached_objects(sb, fs_objects);
-			fs_objects = sb->s_op->nr_cached_objects(sb);
-		}
-		total_objects = sb->s_nr_dentry_unused +
-				sb->s_nr_inodes_unused + fs_objects;
+	/*
+	 * prune the dcache first as the icache is pinned by it, then
+	 * prune the icache, followed by the filesystem specific caches
+	 */
+	freed = prune_dcache_sb(sb, dentries);
+	freed += prune_icache_sb(sb, inodes);
+
+	if (fs_objects) {
+		fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
+								total_objects);
+		freed += sb->s_op->free_cached_objects(sb, fs_objects);
 	}
 
+	drop_super(sb);
+	return freed;
+}
+
+static unsigned long super_cache_count(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
+	struct super_block *sb;
+	long	total_objects = 0;
+
+	sb = container_of(shrink, struct super_block, s_shrink);
+
+	if (!grab_super_passive(sb))
+		return 0;
+
+	if (sb->s_op && sb->s_op->nr_cached_objects)
+		total_objects = sb->s_op->nr_cached_objects(sb);
+
+	total_objects += sb->s_nr_dentry_unused;
+	total_objects += sb->s_nr_inodes_unused;
+
 	total_objects = vfs_pressure_ratio(total_objects);
 	drop_super(sb);
 	return total_objects;
@@ -211,7 +228,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	s->cleancache_poolid = -1;
 
 	s->s_shrink.seeks = DEFAULT_SEEKS;
-	s->s_shrink.shrink = prune_super;
+	s->s_shrink.scan_objects = super_cache_scan;
+	s->s_shrink.count_objects = super_cache_count;
 	s->s_shrink.batch = 1024;
 }
 out:
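A note on the proportioning in super_cache_scan() above: mult_frac(x, numer,
denom) computes x * numer / denom without letting the intermediate product
overflow, which matters now that the object counts are longs. A worked
example with invented numbers:

/*
 * Suppose sc->nr_to_scan = 128, with 600 unused dentries, 300 unused
 * inodes and 100 fs-private objects, so total_objects = 1001 (the +1
 * guards against division by zero). Then:
 *
 *	dentries = mult_frac(128, 600, 1001);	-> 76
 *	inodes   = mult_frac(128, 300, 1001);	-> 38
 *	fs       = mult_frac(128, 100, 1001);	-> 12
 *
 * Each cache is asked to scan in proportion to its share of the
 * total, and the three requests together never exceed nr_to_scan.
 */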
fs/xfs/xfs_icache.c
@@ -1167,7 +1167,7 @@ xfs_reclaim_inodes(
  * them to be cleaned, which we hope will not be very long due to the
  * background walker having already kicked the IO off on those dirty inodes.
  */
-void
+long
 xfs_reclaim_inodes_nr(
 	struct xfs_mount	*mp,
 	int			nr_to_scan)
@@ -1176,7 +1176,7 @@ xfs_reclaim_inodes_nr(
 	xfs_reclaim_work_queue(mp);
 	xfs_ail_push_all(mp->m_ail);
 
-	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
+	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
 }
 
 /*
fs/xfs/xfs_icache.h
@@ -46,7 +46,7 @@ void xfs_reclaim_worker(struct work_struct *work);
 
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
-void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
+long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
 
 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
 
fs/xfs/xfs_super.c
@@ -1535,19 +1535,19 @@ xfs_fs_mount(
 	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
 }
 
-static int
+static long
 xfs_fs_nr_cached_objects(
 	struct super_block	*sb)
 {
 	return xfs_reclaim_inodes_count(XFS_M(sb));
 }
 
-static void
+static long
 xfs_fs_free_cached_objects(
 	struct super_block	*sb,
-	int			nr_to_scan)
+	long			nr_to_scan)
 {
-	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
+	return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
 }
 
 static const struct super_operations xfs_super_operations = {
include/linux/fs.h
@@ -1335,10 +1335,6 @@ struct super_block {
 	struct workqueue_struct *s_dio_done_wq;
 };
 
-/* superblock cache pruning functions */
-extern void prune_icache_sb(struct super_block *sb, int nr_to_scan);
-extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan);
-
 extern struct timespec current_fs_time(struct super_block *sb);
 
 /*
@@ -1631,8 +1627,8 @@ struct super_operations {
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 #endif
 	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
-	int (*nr_cached_objects)(struct super_block *);
-	void (*free_cached_objects)(struct super_block *, int);
+	long (*nr_cached_objects)(struct super_block *);
+	long (*free_cached_objects)(struct super_block *, long);
 };
 
 /*
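Finally, the super_operations change above implies a new contract for any
filesystem hooking into the superblock shrinker: nr_cached_objects() reports
how many fs-private objects are reclaimable, and free_cached_objects()
returns how many it actually freed so the shrinker can add them to its total.
A minimal sketch with hypothetical myfs_* helpers (only the hook signatures
come from the patch):

static long myfs_nr_cached_objects(struct super_block *sb)
{
	/* cheap count of reclaimable fs-private objects (hypothetical) */
	return myfs_count_reclaimable(sb->s_fs_info);
}

static long myfs_free_cached_objects(struct super_block *sb, long nr_to_scan)
{
	/* reclaim up to nr_to_scan objects, report how many went (hypothetical) */
	return myfs_reclaim(sb->s_fs_info, nr_to_scan);
}

static const struct super_operations myfs_super_operations = {
	.nr_cached_objects	= myfs_nr_cached_objects,
	.free_cached_objects	= myfs_free_cached_objects,
};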