mm: remove write/force parameters from __get_user_pages_unlocked()

commit d4944b0ecec0af882483fe44b66729316e575208 upstream.

This removes the redundant 'write' and 'force' parameters from
__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
callers as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.
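
For illustration, a call site that previously requested write access via the
'write' parameter now passes the gup flag directly (a sketch of the calling
convention only, using placeholder variable names; the real call sites are
updated in the diff below):

	/* before: write/force passed as separate int parameters */
	ret = __get_user_pages_unlocked(tsk, mm, start, 1, 1, 0, &page, 0);

	/* after: the intent is carried explicitly in gup_flags */
	ret = __get_user_pages_unlocked(tsk, mm, start, 1, &page, FOLL_WRITE);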

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 4.4:
 - Defer changes in process_vm_rw_single_vec() and async_pf_execute() since
   they use get_user_pages_unlocked() here
 - Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 4 files changed, 29 insertions(+), 18 deletions(-)

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1207,8 +1207,7 @@ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
                     int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                unsigned long start, unsigned long nr_pages,
-                               int write, int force, struct page **pages,
-                               unsigned int gup_flags);
+                               struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                              unsigned long start, unsigned long nr_pages,
                              int write, int force, struct page **pages);

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -764,17 +764,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                                unsigned long start, unsigned long nr_pages,
-                                               int write, int force, struct page **pages,
-                                               unsigned int gup_flags)
+                                               struct page **pages, unsigned int gup_flags)
 {
         long ret;
         int locked = 1;
 
-        if (write)
-                gup_flags |= FOLL_WRITE;
-        if (force)
-                gup_flags |= FOLL_FORCE;
-
         down_read(&mm->mmap_sem);
         ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
                                       &locked, false, gup_flags);
@@ -805,8 +799,15 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                              unsigned long start, unsigned long nr_pages,
                              int write, int force, struct page **pages)
 {
-        return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-                                         force, pages, FOLL_TOUCH);
+        unsigned int flags = FOLL_TOUCH;
+
+        if (write)
+                flags |= FOLL_WRITE;
+        if (force)
+                flags |= FOLL_FORCE;
+
+        return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+                                         pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);

--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -211,8 +211,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                unsigned long start, unsigned long nr_pages,
-                               int write, int force, struct page **pages,
-                               unsigned int gup_flags)
+                               struct page **pages, unsigned int gup_flags)
 {
         long ret;
         down_read(&mm->mmap_sem);
@@ -227,8 +226,15 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                              unsigned long start, unsigned long nr_pages,
                              int write, int force, struct page **pages)
 {
-        return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-                                         force, pages, 0);
+        unsigned int flags = 0;
+
+        if (write)
+                flags |= FOLL_WRITE;
+        if (force)
+                flags |= FOLL_FORCE;
+
+        return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+                                         pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);

--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1352,10 +1352,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                 npages = get_user_page_nowait(current, current->mm,
                                               addr, write_fault, page);
                 up_read(&current->mm->mmap_sem);
-        } else
+        } else {
+                unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+                if (write_fault)
+                        flags |= FOLL_WRITE;
+
                 npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-                                                   write_fault, 0, page,
-                                                   FOLL_TOUCH|FOLL_HWPOISON);
+                                                   page, flags);
+        }
         if (npages != 1)
                 return npages;