From 33b30b124fde308d6192a55df6f176f1c36a79ce Mon Sep 17 00:00:00 2001
From: Sahitya Tummala
Date: Mon, 5 Jun 2017 16:10:31 +0530
Subject: [PATCH] fs/mbcache: fix mb_cache_lru_list corruption

With the recent commit d07d314e7d1d ("fs/mbcache: fix use after free
issue in mb_cache_shrink_scan()"), the ce entry is deleted from the
mbcache list after ce->e_refcnt has been incremented, under the global
spinlock mb_cache_spinlock. If __mb_cache_entry_release() is waiting on
mb_cache_spinlock at the same time to add the same ce to
mb_cache_lru_list, and it acquires the lock only after
mb_cache_entry_get() has deleted the entry, it corrupts the list,
because that element is freed immediately after mb_cache_entry_get().
The corruption then shows up the next time the list is traversed to
delete or add another ce.

Fix this by synchronizing the two contexts with mb_cache_spinlock and
evaluating the conditions (ce->e_refcnt) in __mb_cache_entry_release()
under the global lock before adding ce to mb_cache_lru_list.

Change-Id: I3e20fb4fa163755126e30be7aeca747d74215ed2
Signed-off-by: Sahitya Tummala
---
 fs/mbcache.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index ab1da987d1ae..de509271d031 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -222,8 +222,19 @@ __mb_cache_entry_release(struct mb_cache_entry *ce)
 		 * then reacquire the lock in the proper order.
 		 */
 		spin_lock(&mb_cache_spinlock);
-		if (list_empty(&ce->e_lru_list))
-			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
+		/*
+		 * Evaluate the conditions under global lock mb_cache_spinlock,
+		 * to check if mb_cache_entry_get() is running now
+		 * and has already deleted the entry from mb_cache_lru_list
+		 * and incremented ce->e_refcnt to prevent further additions
+		 * to mb_cache_lru_list.
+		 */
+		if (!(ce->e_used || ce->e_queued ||
+		      atomic_read(&ce->e_refcnt))) {
+			if (list_empty(&ce->e_lru_list))
+				list_add_tail(&ce->e_lru_list,
+					      &mb_cache_lru_list);
+		}
 		spin_unlock(&mb_cache_spinlock);
 	}
 	__spin_unlock_mb_cache_entry(ce);
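
Note (not part of the patch): the standalone C sketch below is a minimal
illustration of the synchronization pattern the fix relies on. The
"release" path re-checks the entry's state under the same global lock
that the "get" path holds while it takes the entry off the LRU list, so
an entry that is about to be freed can never be re-added. All names here
(toy_entry, toy_get, toy_release, lru_lock) are hypothetical stand-ins,
not mbcache or kernel APIs, and main() walks through the sequence
single-threaded rather than reproducing the actual race.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_entry {
            struct toy_entry *lru_prev, *lru_next; /* doubly linked LRU node */
            atomic_int refcnt;                     /* plays the role of ce->e_refcnt */
            int used;                              /* plays the role of ce->e_used */
    };

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ mb_cache_spinlock */
    static struct toy_entry lru_head = { &lru_head, &lru_head };  /* ~ mb_cache_lru_list */

    static bool on_lru(const struct toy_entry *e)
    {
            return e->lru_next != NULL;
    }

    static void lru_del(struct toy_entry *e)
    {
            e->lru_prev->lru_next = e->lru_next;
            e->lru_next->lru_prev = e->lru_prev;
            e->lru_next = e->lru_prev = NULL;      /* mark the entry as off the list */
    }

    static void lru_add_tail(struct toy_entry *e)
    {
            e->lru_prev = lru_head.lru_prev;
            e->lru_next = &lru_head;
            lru_head.lru_prev->lru_next = e;
            lru_head.lru_prev = e;
    }

    /* "get" side: take a reference and pull the entry off the LRU, all under the lock. */
    static void toy_get(struct toy_entry *e)
    {
            pthread_mutex_lock(&lru_lock);
            atomic_fetch_add(&e->refcnt, 1);
            if (on_lru(e))
                    lru_del(e);
            pthread_mutex_unlock(&lru_lock);
    }

    /*
     * "release" side: re-check the entry's state under the same lock and only
     * put it back on the LRU if nobody else holds it -- the check the patch adds.
     */
    static void toy_release(struct toy_entry *e)
    {
            pthread_mutex_lock(&lru_lock);
            if (!(e->used || atomic_load(&e->refcnt))) {
                    if (!on_lru(e))
                            lru_add_tail(e);
            }
            pthread_mutex_unlock(&lru_lock);
    }

    int main(void)
    {
            struct toy_entry e = { .refcnt = 0, .used = 0 };

            pthread_mutex_lock(&lru_lock);
            lru_add_tail(&e);
            pthread_mutex_unlock(&lru_lock);

            toy_get(&e);      /* entry is now referenced and off the LRU */
            toy_release(&e);  /* refcnt is still held, so it must NOT be re-added */
            printf("on LRU after release: %s\n", on_lru(&e) ? "yes" : "no"); /* "no" */
            return 0;
    }

As in the patch, the point is that the removal in the get path and the
conditional re-add in the release path both run under the same lock, so
the refcnt observed in the release path cannot go stale between the
check and the list insertion.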