UPSTREAM: binder: fix proc->files use-after-free

proc->files cleanup is initiated by binder_vma_close. Therefore
a reference on the binder_proc is not enough to prevent the
files_struct from being released while the binder_proc still has
a reference. This can lead to an attempt to dereference the
stale pointer obtained from proc->files prior to proc->files
cleanup. This has been seen once in task_get_unused_fd_flags()
when __alloc_fd() is called with a stale "files".

The fix is to protect proc->files with a mutex to prevent cleanup
while in use.
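
As an aside for readers less familiar with the pattern, below is a minimal
user-space sketch of the same idea (illustrative names only; a pthread mutex
stands in for the kernel mutex API, and struct proc_ctx / ctx_use_files /
ctx_take_files are hypothetical, not from binder.c): every reader re-checks
the pointer while holding the lock, and the teardown path clears it under the
same lock, so no path can dereference a stale value.

  #include <errno.h>
  #include <pthread.h>
  #include <stddef.h>

  struct files;                        /* stand-in for struct files_struct */

  struct proc_ctx {                    /* hypothetical analogue of binder_proc */
  	struct files *files;         /* protected by files_lock */
  	pthread_mutex_t files_lock;  /* guards every access to ->files */
  };

  /* reader: take the lock, re-check the pointer, fail with -ESRCH if gone */
  static int ctx_use_files(struct proc_ctx *proc)
  {
  	int ret = 0;

  	pthread_mutex_lock(&proc->files_lock);
  	if (proc->files == NULL)
  		ret = -ESRCH;        /* teardown already ran */
  	/* ... otherwise operate on proc->files while still holding the lock ... */
  	pthread_mutex_unlock(&proc->files_lock);
  	return ret;
  }

  /* teardown: claim the pointer and clear it under the same lock */
  static struct files *ctx_take_files(struct proc_ctx *proc)
  {
  	struct files *files;

  	pthread_mutex_lock(&proc->files_lock);
  	files = proc->files;
  	proc->files = NULL;          /* later readers see NULL, not a stale pointer */
  	pthread_mutex_unlock(&proc->files_lock);
  	return files;                /* caller drops the reference outside the lock */
  }

The diff below applies this discipline to proc->files.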

Signed-off-by: Todd Kjos <tkjos@google.com>
Cc: stable <stable@vger.kernel.org> # 4.14
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 7f3dc0088b98533f17128058fac73cd8b2752ef1)

Change-Id: I40982bb0b4615bda5459538c20eb2a913964042c
Todd Kjos authored 2017-11-27 09:32:33 -08:00, committed by Martijn Coenen
parent 96523f2450
commit c88a3ec1ee


@@ -502,7 +502,8 @@ struct binder_priority {
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
  * @files                 files_struct for process
- *                        (invariant after initialized)
+ *                        (protected by @files_lock)
+ * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -548,6 +549,7 @@ struct binder_proc {
 	int pid;
 	struct task_struct *tsk;
 	struct files_struct *files;
+	struct mutex files_lock;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
@@ -944,20 +946,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
-	struct files_struct *files = proc->files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
-	if (files == NULL)
-		return -ESRCH;
-
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		ret = -ESRCH;
+		goto err;
+	}
+	if (!lock_task_sighand(proc->tsk, &irqs)) {
+		ret = -EMFILE;
+		goto err;
+	}
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+	mutex_unlock(&proc->files_lock);
+	return ret;
 }
 
 /*
@@ -966,8 +974,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
+	mutex_lock(&proc->files_lock);
 	if (proc->files)
 		__fd_install(proc->files, fd, file);
+	mutex_unlock(&proc->files_lock);
 }
 
 /*
@@ -977,9 +987,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
 	int retval;
 
-	if (proc->files == NULL)
-		return -ESRCH;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		retval = -ESRCH;
+		goto err;
+	}
 	retval = __close_fd(proc->files, fd);
 	/* can't restart close syscall because file table entry was cleared */
 	if (unlikely(retval == -ERESTARTSYS ||
@@ -987,7 +999,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 		     retval == -ERESTARTNOHAND ||
 		     retval == -ERESTART_RESTARTBLOCK))
 		retval = -EINTR;
-
+err:
+	mutex_unlock(&proc->files_lock);
 	return retval;
 }
@@ -4895,7 +4908,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 	if (ret)
 		return ret;
+	mutex_lock(&proc->files_lock);
 	proc->files = get_files_struct(current);
+	mutex_unlock(&proc->files_lock);
 	return 0;
 
 err_bad_arg:
@@ -4919,6 +4934,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	if (binder_supported_policy(current->policy)) {
 		proc->default_priority.sched_policy = current->policy;
@@ -5178,9 +5194,11 @@ static void binder_deferred_func(struct work_struct *work)
 
 		files = NULL;
 		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			mutex_lock(&proc->files_lock);
 			files = proc->files;
 			if (files)
 				proc->files = NULL;
+			mutex_unlock(&proc->files_lock);
 		}
 
 		if (defer & BINDER_DEFERRED_FLUSH)