Merge "page-flags: define PG_locked behavior on compound pages"
commit 9ac236b00c
13 changed files with 169 additions and 120 deletions
@@ -3406,13 +3406,13 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	 * should have access to this page, we're safe to simply set
 	 * PG_locked without checking it first.
 	 */
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	rc = add_to_page_cache_locked(page, mapping,
 				      page->index, gfp);
 
 	/* give up if we can't stick it in the cache */
 	if (rc) {
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 		return rc;
 	}
@@ -3433,9 +3433,9 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 		if (*bytes + PAGE_CACHE_SIZE > rsize)
 			break;
 
-		__set_page_locked(page);
+		__SetPageLocked(page);
 		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
-			__clear_page_locked(page);
+			__ClearPageLocked(page);
 			break;
 		}
 		list_move_tail(&page->lru, tmplist);
@@ -56,4 +56,10 @@ void dump_mm(const struct mm_struct *mm);
 #define VIRTUAL_BUG_ON(cond) do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_VM_PGFLAGS
+#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page)
+#else
+#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond)
+#endif
+
 #endif
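The new VM_BUG_ON_PGFLAGS() is a runtime assertion only when CONFIG_DEBUG_VM_PGFLAGS is enabled; otherwise the condition merely has to compile and no code is emitted. A rough standalone userspace sketch of that on/off behavior (DEBUG_PGFLAGS, PGFLAGS_BUG_ON and struct my_page are made-up names for illustration, not kernel APIs):

/*
 * Standalone userspace sketch (not kernel code): models how a
 * VM_BUG_ON_PGFLAGS-style macro behaves with and without the debug option.
 */
#include <stdio.h>
#include <stdlib.h>

struct my_page { unsigned long flags; };

#ifdef DEBUG_PGFLAGS
/* debug build: evaluate the condition and abort on violation */
#define PGFLAGS_BUG_ON(cond, page) do {					\
	if (cond) {							\
		fprintf(stderr, "pgflags bug: page=%p\n", (void *)(page)); \
		abort();						\
	}								\
} while (0)
#else
/* non-debug build: the condition only has to compile, no code is emitted */
#define PGFLAGS_BUG_ON(cond, page) do { (void)sizeof(cond); } while (0)
#endif

int main(void)
{
	struct my_page p = { .flags = 0 };

	/* with -DDEBUG_PGFLAGS this aborts if the "illegal" condition holds */
	PGFLAGS_BUG_ON(p.flags & 1, &p);
	puts("no pgflags violation detected");
	return 0;
}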
@@ -139,49 +139,101 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
+struct page;	/* forward declaration */
+
+static inline struct page *compound_head(struct page *page)
+{
+	unsigned long head = READ_ONCE(page->compound_head);
+
+	if (unlikely(head & 1))
+		return (struct page *) (head - 1);
+	return page;
+}
+
+static inline int PageTail(struct page *page)
+{
+	return READ_ONCE(page->compound_head) & 1;
+}
+
+static inline int PageCompound(struct page *page)
+{
+	return test_bit(PG_head, &page->flags) || PageTail(page);
+}
+
+/*
+ * Page flags policies wrt compound pages
+ *
+ * PF_ANY:
+ *     the page flag is relevant for small, head and tail pages.
+ *
+ * PF_HEAD:
+ *     for compound page all operations related to the page flag applied to
+ *     head page.
+ *
+ * PF_NO_TAIL:
+ *     modifications of the page flag must be done on small or head pages,
+ *     checks can be done on tail pages too.
+ *
+ * PF_NO_COMPOUND:
+ *     the page flag is not relevant for compound pages.
+ */
+#define PF_ANY(page, enforce)	page
+#define PF_HEAD(page, enforce)	compound_head(page)
+#define PF_NO_TAIL(page, enforce) ({					\
+		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
+		compound_head(page);})
+#define PF_NO_COMPOUND(page, enforce) ({				\
+		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
+		page;})
+
 /*
  * Macros to create function definitions for page flags
  */
-#define TESTPAGEFLAG(uname, lname)					\
-static inline int Page##uname(const struct page *page)			\
-			{ return test_bit(PG_##lname, &page->flags); }
+#define TESTPAGEFLAG(uname, lname, policy)				\
+static inline int Page##uname(struct page *page)			\
+	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
 
-#define SETPAGEFLAG(uname, lname)					\
+#define SETPAGEFLAG(uname, lname, policy)				\
 static inline void SetPage##uname(struct page *page)			\
-			{ set_bit(PG_##lname, &page->flags); }
+	{ set_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define CLEARPAGEFLAG(uname, lname)					\
+#define CLEARPAGEFLAG(uname, lname, policy)				\
 static inline void ClearPage##uname(struct page *page)			\
-			{ clear_bit(PG_##lname, &page->flags); }
+	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define __SETPAGEFLAG(uname, lname)					\
+#define __SETPAGEFLAG(uname, lname, policy)				\
 static inline void __SetPage##uname(struct page *page)			\
-			{ __set_bit(PG_##lname, &page->flags); }
+	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define __CLEARPAGEFLAG(uname, lname)					\
+#define __CLEARPAGEFLAG(uname, lname, policy)				\
 static inline void __ClearPage##uname(struct page *page)		\
-			{ __clear_bit(PG_##lname, &page->flags); }
+	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define TESTSETFLAG(uname, lname)					\
+#define TESTSETFLAG(uname, lname, policy)				\
 static inline int TestSetPage##uname(struct page *page)		\
-		{ return test_and_set_bit(PG_##lname, &page->flags); }
+	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define TESTCLEARFLAG(uname, lname)					\
+#define TESTCLEARFLAG(uname, lname, policy)				\
 static inline int TestClearPage##uname(struct page *page)		\
-		{ return test_and_clear_bit(PG_##lname, &page->flags); }
+	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define __TESTCLEARFLAG(uname, lname)					\
+#define __TESTCLEARFLAG(uname, lname, policy)				\
 static inline int __TestClearPage##uname(struct page *page)		\
-	{ return __test_and_clear_bit(PG_##lname, &page->flags); }
+	{ return __test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
-#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
-	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
+#define PAGEFLAG(uname, lname, policy)					\
+	TESTPAGEFLAG(uname, lname, policy)				\
+	SETPAGEFLAG(uname, lname, policy)				\
+	CLEARPAGEFLAG(uname, lname, policy)
 
-#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
-	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
+#define __PAGEFLAG(uname, lname, policy)				\
+	TESTPAGEFLAG(uname, lname, policy)				\
+	__SETPAGEFLAG(uname, lname, policy)				\
+	__CLEARPAGEFLAG(uname, lname, policy)
 
-#define TESTSCFLAG(uname, lname)					\
-	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+#define TESTSCFLAG(uname, lname, policy)				\
+	TESTSETFLAG(uname, lname, policy)				\
+	TESTCLEARFLAG(uname, lname, policy)
 
 #define TESTPAGEFLAG_FALSE(uname)					\
 static inline int Page##uname(const struct page *page) { return 0; }
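To make the policy table above concrete, here is a small standalone userspace model of how a policy decides which page an operation actually touches. pf_head()/pf_no_compound() and struct my_page are illustrative stand-ins, not the kernel's macros or types:

#include <assert.h>
#include <stdio.h>

struct my_page {
	unsigned long flags;		/* bit 0 stands in for a page flag */
	unsigned long compound_head;	/* (head | 1) on tail pages, else 0 */
	int is_head;			/* stands in for PG_head */
};

static struct my_page *my_compound_head(struct my_page *page)
{
	unsigned long head = page->compound_head;
	return (head & 1) ? (struct my_page *)(head - 1) : page;
}

static int my_page_compound(struct my_page *page)
{
	return page->is_head || (page->compound_head & 1);
}

/* PF_HEAD-like policy: accept any page, operate on its head */
static struct my_page *pf_head(struct my_page *page, int enforce)
{
	(void)enforce;
	return my_compound_head(page);
}

/* PF_NO_COMPOUND-like policy: the flag makes no sense on compound pages */
static struct my_page *pf_no_compound(struct my_page *page, int enforce)
{
	assert(!(enforce && my_page_compound(page)));
	return page;
}

int main(void)
{
	struct my_page head = { 0, 0, 1 };
	struct my_page tail = { 0, (unsigned long)&head | 1, 0 };
	struct my_page small = { 0, 0, 0 };

	pf_head(&tail, 1)->flags |= 1;		/* lands on the head page */
	printf("head flag after set via tail: %lu\n", head.flags & 1);

	pf_no_compound(&small, 1)->flags |= 1;	/* fine on an order-0 page */
	printf("small page flag: %lu\n", small.flags & 1);
	/* pf_no_compound(&tail, 1) would trip the assertion */
	return 0;
}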
@@ -210,28 +262,28 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; }
 #define TESTSCFLAG_FALSE(uname)						\
 	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
 
-struct page;	/* forward declaration */
-
-TESTPAGEFLAG(Locked, locked)
-PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
-PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
-	__SETPAGEFLAG(Referenced, referenced)
-PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
-PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
-PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
-	TESTCLEARFLAG(Active, active)
-__PAGEFLAG(Slab, slab)
-PAGEFLAG(Checked, checked)		/* Used by some filesystems */
-PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
-PAGEFLAG(SavePinned, savepinned);			/* Xen */
-PAGEFLAG(Foreign, foreign);				/* Xen */
-PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
-PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
-	__SETPAGEFLAG(SwapBacked, swapbacked)
+__PAGEFLAG(Locked, locked, PF_NO_TAIL)
+PAGEFLAG(Error, error, PF_ANY) TESTCLEARFLAG(Error, error, PF_ANY)
+PAGEFLAG(Referenced, referenced, PF_ANY) TESTCLEARFLAG(Referenced, referenced, PF_ANY)
+	__SETPAGEFLAG(Referenced, referenced, PF_ANY)
+PAGEFLAG(Dirty, dirty, PF_ANY) TESTSCFLAG(Dirty, dirty, PF_ANY)
+	__CLEARPAGEFLAG(Dirty, dirty, PF_ANY)
+PAGEFLAG(LRU, lru, PF_ANY) __CLEARPAGEFLAG(LRU, lru, PF_ANY)
+PAGEFLAG(Active, active, PF_ANY) __CLEARPAGEFLAG(Active, active, PF_ANY)
+	TESTCLEARFLAG(Active, active, PF_ANY)
+__PAGEFLAG(Slab, slab, PF_ANY)
+PAGEFLAG(Checked, checked, PF_ANY)	/* Used by some filesystems */
+PAGEFLAG(Pinned, pinned, PF_ANY) TESTSCFLAG(Pinned, pinned, PF_ANY)	/* Xen */
+PAGEFLAG(SavePinned, savepinned, PF_ANY);			/* Xen */
+PAGEFLAG(Foreign, foreign, PF_ANY);				/* Xen */
+PAGEFLAG(Reserved, reserved, PF_ANY) __CLEARPAGEFLAG(Reserved, reserved, PF_ANY)
+PAGEFLAG(SwapBacked, swapbacked, PF_ANY)
+	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_ANY)
+	__SETPAGEFLAG(SwapBacked, swapbacked, PF_ANY)
 
-__PAGEFLAG(SlobFree, slob_free)
+__PAGEFLAG(SlobFree, slob_free, PF_ANY)
 #ifdef CONFIG_ZCACHE
-PAGEFLAG(WasActive, was_active)
+PAGEFLAG(WasActive, was_active, PF_ANY)
 #else
 PAGEFLAG_FALSE(WasActive)
 #endif
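PG_locked is the flag that actually changes policy here: __PAGEFLAG(Locked, locked, PF_NO_TAIL) generates PageLocked()/__SetPageLocked()/__ClearPageLocked() that resolve to the head page, with modifications disallowed on tail pages while tests remain legal anywhere. A simplified userspace model of what those generated helpers do (my_* names are illustrative, and the real helpers use the atomic and non-atomic bitops shown earlier, not plain stores):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum { MY_PG_locked = 0 };

struct my_page {
	unsigned long flags;
	unsigned long compound_head;	/* (head | 1) for tail pages */
};

static struct my_page *head_of(struct my_page *page)
{
	unsigned long h = page->compound_head;
	return (h & 1) ? (struct my_page *)(h - 1) : page;
}

static bool my_PageLocked(struct my_page *page)
{
	/* PF_NO_TAIL with enforce=0: tests may be called on tail pages */
	return head_of(page)->flags & (1UL << MY_PG_locked);
}

static void my__SetPageLocked(struct my_page *page)
{
	/* PF_NO_TAIL with enforce=1: modifications must not start from a tail */
	assert(!(page->compound_head & 1));
	head_of(page)->flags |= 1UL << MY_PG_locked;
}

int main(void)
{
	struct my_page head = { 0, 0 };
	struct my_page tail = { 0, (unsigned long)&head | 1 };

	my__SetPageLocked(&head);
	printf("PageLocked(tail) sees the head's bit: %d\n", my_PageLocked(&tail));
	return 0;
}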
@@ -241,21 +293,22 @@ PAGEFLAG_FALSE(WasActive)
  * for its own purposes.
  * - PG_private and PG_private_2 cause releasepage() and co to be invoked
  */
-PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
-	__CLEARPAGEFLAG(Private, private)
-PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
-PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
+	__CLEARPAGEFLAG(Private, private, PF_ANY)
+PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
+PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
 
 /*
  * Only test-and-set exist for PG_writeback. The unconditional operators are
  * risky: they bypass page accounting.
  */
-TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
-PAGEFLAG(MappedToDisk, mappedtodisk)
+TESTPAGEFLAG(Writeback, writeback, PF_ANY) TESTSCFLAG(Writeback, writeback, PF_ANY)
+PAGEFLAG(MappedToDisk, mappedtodisk, PF_ANY)
 
 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
-PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
+PAGEFLAG(Reclaim, reclaim, PF_ANY) TESTCLEARFLAG(Reclaim, reclaim, PF_ANY)
+PAGEFLAG(Readahead, reclaim, PF_ANY) TESTCLEARFLAG(Readahead, reclaim, PF_ANY)
 
 #ifdef CONFIG_HIGHMEM
 /*
@@ -268,31 +321,32 @@ PAGEFLAG_FALSE(HighMem)
 #endif
 
 #ifdef CONFIG_SWAP
-PAGEFLAG(SwapCache, swapcache)
+PAGEFLAG(SwapCache, swapcache, PF_ANY)
 #else
 PAGEFLAG_FALSE(SwapCache)
 #endif
 
-PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
-	TESTCLEARFLAG(Unevictable, unevictable)
+PAGEFLAG(Unevictable, unevictable, PF_ANY)
+	__CLEARPAGEFLAG(Unevictable, unevictable, PF_ANY)
+	TESTCLEARFLAG(Unevictable, unevictable, PF_ANY)
 
 #ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
-	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
+PAGEFLAG(Mlocked, mlocked, PF_ANY) __CLEARPAGEFLAG(Mlocked, mlocked, PF_ANY)
+	TESTSCFLAG(Mlocked, mlocked, PF_ANY) __TESTCLEARFLAG(Mlocked, mlocked, PF_ANY)
 #else
 PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
 	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
 #endif
 
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached)
+PAGEFLAG(Uncached, uncached, PF_ANY)
 #else
 PAGEFLAG_FALSE(Uncached)
 #endif
 
 #ifdef CONFIG_MEMORY_FAILURE
-PAGEFLAG(HWPoison, hwpoison)
-TESTSCFLAG(HWPoison, hwpoison)
+PAGEFLAG(HWPoison, hwpoison, PF_ANY)
+TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
 #define __PG_HWPOISON (1UL << PG_hwpoison)
 #else
 PAGEFLAG_FALSE(HWPoison)
@@ -300,10 +354,10 @@ PAGEFLAG_FALSE(HWPoison)
 #endif
 
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
-TESTPAGEFLAG(Young, young)
-SETPAGEFLAG(Young, young)
-TESTCLEARFLAG(Young, young)
-PAGEFLAG(Idle, idle)
+TESTPAGEFLAG(Young, young, PF_ANY)
+SETPAGEFLAG(Young, young, PF_ANY)
+TESTCLEARFLAG(Young, young, PF_ANY)
+PAGEFLAG(Idle, idle, PF_ANY)
 #endif
 
 /*
@@ -397,7 +451,7 @@ static inline void SetPageUptodate(struct page *page)
 	set_bit(PG_uptodate, &(page)->flags);
 }
 
-CLEARPAGEFLAG(Uptodate, uptodate)
+CLEARPAGEFLAG(Uptodate, uptodate, PF_ANY)
 
 int test_clear_page_writeback(struct page *page);
 int __test_set_page_writeback(struct page *page, bool keep_write);
@@ -417,12 +471,7 @@ static inline void set_page_writeback_keepwrite(struct page *page)
 	test_set_page_writeback_keepwrite(page);
 }
 
-__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
-
-static inline int PageTail(struct page *page)
-{
-	return READ_ONCE(page->compound_head) & 1;
-}
+__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 
 static inline void set_compound_head(struct page *page, struct page *head)
 {
@@ -434,20 +483,6 @@ static inline void clear_compound_head(struct page *page)
 	WRITE_ONCE(page->compound_head, 0);
 }
 
-static inline struct page *compound_head(struct page *page)
-{
-	unsigned long head = READ_ONCE(page->compound_head);
-
-	if (unlikely(head & 1))
-		return (struct page *) (head - 1);
-	return page;
-}
-
-static inline int PageCompound(struct page *page)
-{
-	return PageHead(page) || PageTail(page);
-
-}
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void ClearPageCompound(struct page *page)
 {
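The redirection works because a tail page's compound_head field stores the head page's address with bit 0 set (set_compound_head() writes head | 1, clear_compound_head() writes 0), so PageTail() is a single bit test and compound_head() a subtract-and-cast. A standalone userspace sketch of that encoding, with illustrative my_* names standing in for the kernel helpers:

#include <stdio.h>

struct my_page {
	unsigned long compound_head;	/* 0, or (unsigned long)head | 1 */
};

static void my_set_compound_head(struct my_page *tail, struct my_page *head)
{
	tail->compound_head = (unsigned long)head | 1;
}

static void my_clear_compound_head(struct my_page *tail)
{
	tail->compound_head = 0;
}

static int my_PageTail(const struct my_page *page)
{
	return page->compound_head & 1;
}

static struct my_page *my_compound_head(struct my_page *page)
{
	unsigned long head = page->compound_head;

	return (head & 1) ? (struct my_page *)(head - 1) : page;
}

int main(void)
{
	struct my_page head = { 0 }, tail = { 0 };

	my_set_compound_head(&tail, &head);
	printf("tail? %d, head resolves back: %d\n",
	       my_PageTail(&tail), my_compound_head(&tail) == &head);

	my_clear_compound_head(&tail);
	printf("after clear, tail? %d\n", my_PageTail(&tail));
	return 0;
}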
@@ -573,7 +608,7 @@ static inline void __ClearPageBalloon(struct page *page)
 	atomic_set(&page->_mapcount, -1);
 }
 
-__PAGEFLAG(Isolated, isolated);
+__PAGEFLAG(Isolated, isolated, PF_ANY);
 
 /*
  * If network-based swap is enabled, sl*b must keep track of whether pages
@@ -652,6 +687,10 @@ static inline int page_has_private(struct page *page)
 	return !!(page->flags & PAGE_FLAGS_PRIVATE);
 }
 
+#undef PF_ANY
+#undef PF_HEAD
+#undef PF_NO_TAIL
+#undef PF_NO_COMPOUND
 #endif /* !__GENERATING_BOUNDS_H */
 
 #endif	/* PAGE_FLAGS_H */
@@ -441,18 +441,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
 
-static inline void __set_page_locked(struct page *page)
-{
-	__set_bit(PG_locked, &page->flags);
-}
-
-static inline void __clear_page_locked(struct page *page)
-{
-	__clear_bit(PG_locked, &page->flags);
-}
-
 static inline int trylock_page(struct page *page)
 {
+	page = compound_head(page);
 	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
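With the open-coded __set_page_locked()/__clear_page_locked() helpers gone, callers use the generated __SetPageLocked()/__ClearPageLocked(), and trylock_page() now always takes the lock bit in the head page. A hedged userspace sketch of the consequence, using C11 atomics in place of test_and_set_bit_lock() and illustrative my_* names: locking through a tail and locking through its head contend on the same bit.

#include <stdatomic.h>
#include <stdio.h>

struct my_page {
	atomic_ulong flags;		/* bit 0 stands in for PG_locked */
	unsigned long compound_head;	/* (head | 1) on tail pages */
};

static struct my_page *my_compound_head(struct my_page *page)
{
	unsigned long h = page->compound_head;
	return (h & 1) ? (struct my_page *)(h - 1) : page;
}

static int my_trylock_page(struct my_page *page)
{
	page = my_compound_head(page);
	/* nonzero on success, i.e. the bit was previously clear */
	return !(atomic_fetch_or_explicit(&page->flags, 1UL,
					  memory_order_acquire) & 1UL);
}

int main(void)
{
	struct my_page head, tail;

	atomic_init(&head.flags, 0);
	head.compound_head = 0;
	atomic_init(&tail.flags, 0);
	tail.compound_head = (unsigned long)&head | 1;

	printf("lock via tail: %d\n", my_trylock_page(&tail));	/* succeeds */
	printf("lock via head: %d\n", my_trylock_page(&head));	/* fails */
	return 0;
}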
@@ -505,9 +496,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page,
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
-	if (PageLocked(page))
-		return wait_on_page_bit_killable(page, PG_locked);
-	return 0;
+	if (!PageLocked(page))
+		return 0;
+	return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
 extern wait_queue_head_t *page_waitqueue(struct page *page);
@@ -526,7 +517,7 @@ static inline void wake_up_page(struct page *page, int bit)
 static inline void wait_on_page_locked(struct page *page)
 {
 	if (PageLocked(page))
-		wait_on_page_bit(page, PG_locked);
+		wait_on_page_bit(compound_head(page), PG_locked);
 }
 
 /*
@@ -672,17 +663,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __set_page_locked() against it.
+ * the page is new, so we can just run __SetPageLocked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 	return error;
 }
 
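The add_to_page_cache() pattern survives unchanged apart from the spelling: a freshly allocated page is invisible to other contexts, so the lock bit can be set and, on failure, cleared with the non-atomic variants. A small userspace sketch of that error-handling shape (my_* names and the simulated -ENOMEM are illustrative, not kernel APIs):

#include <errno.h>
#include <stdio.h>

struct my_page { unsigned long flags; };	/* bit 0 ~ PG_locked */

/* stand-in for add_to_page_cache_locked(); fails when told to */
static int my_add_to_cache(struct my_page *page, int simulate_enomem)
{
	(void)page;
	return simulate_enomem ? -ENOMEM : 0;
}

static int my_add_to_page_cache(struct my_page *page, int simulate_enomem)
{
	int error;

	page->flags |= 1UL;		/* non-atomic set: page not yet published */
	error = my_add_to_cache(page, simulate_enomem);
	if (error)
		page->flags &= ~1UL;	/* non-atomic clear on failure */
	return error;
}

int main(void)
{
	struct my_page ok = { 0 }, fail = { 0 };

	printf("success path: rc=%d locked=%lu\n",
	       my_add_to_page_cache(&ok, 0), ok.flags & 1UL);
	printf("failure path: rc=%d locked=%lu\n",
	       my_add_to_page_cache(&fail, 1), fail.flags & 1UL);
	return 0;
}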
@@ -588,6 +588,14 @@ config DEBUG_VM_RB
 
 	  If unsure, say N.
 
+config DEBUG_VM_PGFLAGS
+	bool "Debug page-flags operations"
+	depends on DEBUG_VM
+	help
+	  Enables extra validation on page flags operations.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
mm/filemap.c
@@ -689,11 +689,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	void *shadow = NULL;
 	int ret;
 
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	ret = __add_to_page_cache_locked(page, mapping, offset,
 					 gfp_mask, &shadow);
 	if (unlikely(ret))
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 	else {
 		/*
 		 * The page might have been evicted from cache only
@@ -816,6 +816,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
+	page = compound_head(page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	clear_bit_unlock(PG_locked, &page->flags);
 	smp_mb__after_atomic();
@@ -883,18 +884,20 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_page(struct page *page)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	struct page *page_head = compound_head(page);
+	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
+	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	struct page *page_head = compound_head(page);
+	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
 					bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
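Sleeping lockers have to key on the head page as well: __lock_page() builds its wait descriptor from page_head->flags and waits on page_head's waitqueue, so an unlock_page() on the head (which also resolves tails to the head) wakes them. A minimal two-thread userspace model of that pairing, with a spin-wait standing in for the kernel's waitqueue and illustrative my_* names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct my_page {
	atomic_ulong flags;		/* bit 0 ~ PG_locked */
	unsigned long compound_head;	/* (head | 1) on tail pages */
};

static struct my_page *my_compound_head(struct my_page *p)
{
	unsigned long h = p->compound_head;
	return (h & 1) ? (struct my_page *)(h - 1) : p;
}

static void my_lock_page(struct my_page *p)
{
	p = my_compound_head(p);
	while (atomic_fetch_or_explicit(&p->flags, 1UL,
					memory_order_acquire) & 1UL)
		;	/* spin; the kernel sleeps on a waitqueue instead */
}

static void my_unlock_page(struct my_page *p)
{
	p = my_compound_head(p);
	atomic_fetch_and_explicit(&p->flags, ~1UL, memory_order_release);
}

static struct my_page head, tail;

static void *locker(void *arg)
{
	(void)arg;
	my_lock_page(&tail);	/* blocks until the head page is unlocked */
	puts("tail locker got the (head) lock");
	my_unlock_page(&tail);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_init(&head.flags, 0);
	atomic_init(&tail.flags, 0);
	tail.compound_head = (unsigned long)&head | 1;

	my_lock_page(&head);
	pthread_create(&t, NULL, locker, NULL);
	my_unlock_page(&head);	/* lets the tail locker proceed */
	pthread_join(&t, NULL);
	return 0;
}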
mm/ksm.c
@@ -1989,7 +1989,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
-		__set_page_locked(new_page);
+		__SetPageLocked(new_page);
 	}
 
 	return new_page;
@@ -1173,7 +1173,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	/*
 	 * We ignore non-LRU pages for good reasons.
 	 * - PG_locked is only well defined for LRU pages and a few others
-	 * - to avoid races with __set_page_locked()
+	 * - to avoid races with __SetPageLocked()
 	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
@@ -1936,7 +1936,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	flush_tlb_range(vma, mmun_start, mmun_end);
 
 	/* Prepare a page as a migration target */
-	__set_page_locked(new_page);
+	__SetPageLocked(new_page);
 	SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
@@ -1003,7 +1003,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	copy_highpage(newpage, oldpage);
 	flush_dcache_page(newpage);
 
-	__set_page_locked(newpage);
+	__SetPageLocked(newpage);
 	SetPageUptodate(newpage);
 	SetPageSwapBacked(newpage);
 	set_page_private(newpage, swap_index);
@@ -1195,7 +1195,7 @@ repeat:
 	}
 
 	__SetPageSwapBacked(page);
-	__set_page_locked(page);
+	__SetPageLocked(page);
 	if (sgp == SGP_WRITE)
 		__SetPageReferenced(page);
 
@@ -333,11 +333,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
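slub's slab_lock()/slab_unlock() keep using PG_locked of the slab page itself as a bit spinlock, with no compound_head() redirection, which is why the new assertions insist the page is never a tail. A rough userspace sketch of a bit spinlock built on C11 atomics (illustrative, not the kernel's bit_spin_lock()):

#include <stdatomic.h>
#include <stdio.h>

#define MY_PG_LOCKED 0UL

static void my_bit_spin_lock(unsigned long bit, atomic_ulong *word)
{
	unsigned long mask = 1UL << bit;

	/* spin until we are the one who flipped the bit from 0 to 1 */
	while (atomic_fetch_or_explicit(word, mask, memory_order_acquire) & mask)
		;
}

static void my_bit_spin_unlock(unsigned long bit, atomic_ulong *word)
{
	atomic_fetch_and_explicit(word, ~(1UL << bit), memory_order_release);
}

int main(void)
{
	atomic_ulong slab_flags;

	atomic_init(&slab_flags, 0);
	my_bit_spin_lock(MY_PG_LOCKED, &slab_flags);
	printf("flags while locked: %lu\n", atomic_load(&slab_flags));
	my_bit_spin_unlock(MY_PG_LOCKED, &slab_flags);
	printf("flags after unlock: %lu\n", atomic_load(&slab_flags));
	return 0;
}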
@@ -355,7 +355,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		}
 
 		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
-		__set_page_locked(new_page);
+		__SetPageLocked(new_page);
 		SetPageSwapBacked(new_page);
 		err = __add_to_swap_cache(new_page, entry);
 		if (likely(!err)) {
@@ -369,7 +369,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		}
 		radix_tree_preload_end();
 		ClearPageSwapBacked(new_page);
-		__clear_page_locked(new_page);
+		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
 		 * clear SWAP_HAS_CACHE flag.
@@ -1247,7 +1247,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * we obviously don't have to worry about waking up a process
 		 * waiting on the page lock, because there are no references.
 		 */
-		__clear_page_locked(page);
+		__ClearPageLocked(page);
 free_it:
 		nr_reclaimed++;
 