binder: fix possible UAF when freeing buffer
There is a race between the binder driver cleaning up a
completed transaction via binder_free_transaction() and a
user calling binder_ioctl(BC_FREE_BUFFER) to release a
buffer. It doesn't matter which runs first, but they must be
protected against running concurrently, which can otherwise
result in a UAF.

Bug: 133758011
Change-Id: Ie1426ff3d00218d050d61ff77b333ddf8818b7c9
Signed-off-by: Todd Kjos <tkjos@google.com>
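To make the race concrete, here is a minimal userspace sketch, not the driver
code: the struct and function names (fake_buffer, fake_transaction,
free_transaction, free_buffer, inner_lock) are invented for illustration. It
shows the pattern the fix applies: both paths that unlink a transaction from
its buffer must take the same lock, otherwise one side can dereference memory
the other has just freed.

    /*
     * Illustrative userspace analogue of the race this commit fixes.
     * A transaction and a buffer point at each other; two paths tear
     * the link down and must serialize on the same lock.
     */
    #include <pthread.h>
    #include <stdlib.h>

    struct fake_transaction;

    struct fake_buffer {
            struct fake_transaction *transaction;
    };

    struct fake_transaction {
            struct fake_buffer *buffer;
    };

    static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analogous to binder_free_transaction(): unlink under the lock. */
    static void free_transaction(struct fake_transaction *t)
    {
            pthread_mutex_lock(&inner_lock);
            if (t->buffer)
                    t->buffer->transaction = NULL;
            pthread_mutex_unlock(&inner_lock);
            free(t);
    }

    /* Analogous to the BC_FREE_BUFFER path: same lock, opposite direction. */
    static void free_buffer(struct fake_buffer *buffer)
    {
            pthread_mutex_lock(&inner_lock);
            if (buffer->transaction) {
                    /* Without the lock this would be the UAF. */
                    buffer->transaction->buffer = NULL;
                    buffer->transaction = NULL;
            }
            pthread_mutex_unlock(&inner_lock);
            free(buffer);
    }

    int main(void)
    {
            struct fake_buffer *b = calloc(1, sizeof(*b));
            struct fake_transaction *t = calloc(1, sizeof(*t));

            t->buffer = b;
            b->transaction = t;

            /* In the driver these two calls can race on different CPUs. */
            free_transaction(t);
            free_buffer(b);
            return 0;
    }

In the driver the role of inner_lock is played by the target proc's
@inner_lock, as the diff below shows.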
parent 78a688fa83
commit 0e1b964ab4
1 changed file with 32 additions and 10 deletions
@@ -528,7 +528,8 @@ struct binder_priority {
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
- *                        (protected by @inner_lock)
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
  * @default_priority:     default scheduler priority
  *                        (invariant after initialized)
  * @debugfs_entry:        debugfs node
@@ -562,7 +563,7 @@ struct binder_proc {
 	int max_threads;
 	int requested_threads;
 	int requested_threads_started;
-	int tmp_ref;
+	atomic_t tmp_ref;
 	struct binder_priority default_priority;
 	struct dentry *debugfs_entry;
 	struct binder_alloc alloc;
@@ -2053,9 +2054,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
 static void binder_proc_dec_tmpref(struct binder_proc *proc)
 {
 	binder_inner_proc_lock(proc);
-	proc->tmp_ref--;
+	atomic_dec(&proc->tmp_ref);
 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
-			!proc->tmp_ref) {
+			!atomic_read(&proc->tmp_ref)) {
 		binder_inner_proc_unlock(proc);
 		binder_free_proc(proc);
 		return;
@@ -2117,8 +2118,26 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
-	if (t->buffer)
-		t->buffer->transaction = NULL;
+	struct binder_proc *target_proc;
+
+	spin_lock(&t->lock);
+	target_proc = t->to_proc;
+	if (target_proc) {
+		atomic_inc(&target_proc->tmp_ref);
+		spin_unlock(&t->lock);
+
+		binder_inner_proc_lock(target_proc);
+		if (t->buffer)
+			t->buffer->transaction = NULL;
+		binder_inner_proc_unlock(target_proc);
+		binder_proc_dec_tmpref(target_proc);
+	} else {
+		/*
+		 * If the transaction has no target_proc, then
+		 * t->buffer->transaction has already been cleared.
+		 */
+		spin_unlock(&t->lock);
+	}
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2871,7 +2890,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
 		target_node = node;
 		binder_inc_node_nilocked(node, 1, 0, NULL);
 		binder_inc_node_tmpref_ilocked(node);
-		node->proc->tmp_ref++;
+		atomic_inc(&node->proc->tmp_ref);
 		*procp = node->proc;
 	} else
 		*error = BR_DEAD_REPLY;
@@ -2967,7 +2986,7 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_dead_binder;
 		}
 		target_proc = target_thread->proc;
-		target_proc->tmp_ref++;
+		atomic_inc(&target_proc->tmp_ref);
 		binder_inner_proc_unlock(target_thread->proc);
 	} else {
 		if (tr->target.handle) {
@@ -3700,10 +3719,12 @@ static int binder_thread_write(struct binder_proc *proc,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
 
+			binder_inner_proc_lock(proc);
 			if (buffer->transaction) {
 				buffer->transaction->buffer = NULL;
 				buffer->transaction = NULL;
 			}
+			binder_inner_proc_unlock(proc);
 			if (buffer->async_transaction && buffer->target_node) {
 				struct binder_node *buf_node;
 				struct binder_work *w;
@@ -4565,7 +4586,7 @@ static int binder_thread_release(struct binder_proc *proc,
 	 * The corresponding dec is when we actually
 	 * free the thread in binder_free_thread()
 	 */
-	proc->tmp_ref++;
+	atomic_inc(&proc->tmp_ref);
 	/*
 	 * take a ref on this thread to ensure it
 	 * survives while we are releasing it
@@ -5005,6 +5026,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 		return -ENOMEM;
 	spin_lock_init(&proc->inner_lock);
 	spin_lock_init(&proc->outer_lock);
+	atomic_set(&proc->tmp_ref, 0);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	mutex_init(&proc->files_lock);
@@ -5184,7 +5206,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 	 * Make sure proc stays alive after we
 	 * remove all the threads
 	 */
-	proc->tmp_ref++;
+	atomic_inc(&proc->tmp_ref);
 
 	proc->is_dead = true;
 	threads = 0;
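The reason tmp_ref moves from int to atomic_t is visible in
binder_free_transaction() above: the temporary reference is taken while
holding only t->lock, before @inner_lock can be acquired. A minimal userspace
analogue of that pattern follows, with C11 atomics standing in for the
kernel's atomic_t; the names (fake_proc, proc_inc_tmpref, proc_dec_tmpref)
are invented for illustration.

    /*
     * Illustrative sketch of a temporary reference count that must be
     * bumped without holding the lock that otherwise protects the proc.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct fake_proc {
            atomic_int tmp_ref;   /* like proc->tmp_ref after this change */
            bool is_dead;
    };

    /* Take a temporary reference; safe without holding the proc lock. */
    static void proc_inc_tmpref(struct fake_proc *proc)
    {
            atomic_fetch_add(&proc->tmp_ref, 1);
    }

    /* Drop the reference; free the proc once it is dead and unreferenced. */
    static void proc_dec_tmpref(struct fake_proc *proc)
    {
            if (atomic_fetch_sub(&proc->tmp_ref, 1) == 1 && proc->is_dead)
                    free(proc);
    }

Unlike this sketch, the driver still performs the decrement-and-test under
@inner_lock; atomicity only relaxes the constraint on where the increment may
happen.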