Revert "android: binder: move global binder state into context struct."
This reverts commit d6bbb32767.
Signed-off-by: Todd Kjos <tkjos@google.com>
Change-Id: Ib507d62803f2beba7178c3f6f3f78bd1095b25b8
parent d368c6faa1
commit ec49bb00cd
1 changed file with 133 additions and 259 deletions
@@ -18,7 +18,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <asm/cacheflush.h>
-#include <linux/atomic.h>
 #include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
@@ -47,11 +46,19 @@
 #include <uapi/linux/android/binder.h>
 #include "binder_trace.h"
 
+static DEFINE_MUTEX(binder_main_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
 static HLIST_HEAD(binder_devices);
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
 
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
-atomic_t binder_last_id;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
 
 #define BINDER_DEBUG_ENTRY(name) \
 static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -166,24 +173,20 @@ enum binder_stat_types {
 struct binder_stats {
 	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
 	int bc[_IOC_NR(BC_REPLY_SG) + 1];
+	int obj_created[BINDER_STAT_COUNT];
+	int obj_deleted[BINDER_STAT_COUNT];
 };
 
-/* These are still global, since it's not always easy to get the context */
-struct binder_obj_stats {
-	atomic_t obj_created[BINDER_STAT_COUNT];
-	atomic_t obj_deleted[BINDER_STAT_COUNT];
-};
-
-static struct binder_obj_stats binder_obj_stats;
+static struct binder_stats binder_stats;
 
 static inline void binder_stats_deleted(enum binder_stat_types type)
 {
-	atomic_inc(&binder_obj_stats.obj_deleted[type]);
+	binder_stats.obj_deleted[type]++;
 }
 
 static inline void binder_stats_created(enum binder_stat_types type)
 {
-	atomic_inc(&binder_obj_stats.obj_created[type]);
+	binder_stats.obj_created[type]++;
 }
 
 struct binder_transaction_log_entry {
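A note on the two counting schemes in the hunk above: the context-struct version kept the object counters in atomics so they could be bumped without holding the driver lock, while the restored code uses plain ints and relies on binder_main_lock for consistency. A minimal userspace sketch of the atomic variant, using C11 stdatomic rather than the kernel's atomic_t API (enum values here are stand-ins for binder_stat_types):

#include <stdatomic.h>
#include <stdio.h>

enum { STAT_PROC, STAT_NODE, STAT_COUNT };

static atomic_int obj_created[STAT_COUNT];
static atomic_int obj_deleted[STAT_COUNT];

static void stats_created(int type)
{
	/* safe from any thread, no lock required */
	atomic_fetch_add(&obj_created[type], 1);
}

static void stats_deleted(int type)
{
	atomic_fetch_add(&obj_deleted[type], 1);
}

int main(void)
{
	stats_created(STAT_NODE);
	stats_created(STAT_NODE);
	stats_deleted(STAT_NODE);
	/* "active" objects = created - deleted, as in binder's stats dump */
	printf("node: active %d total %d\n",
	       atomic_load(&obj_created[STAT_NODE]) -
	       atomic_load(&obj_deleted[STAT_NODE]),
	       atomic_load(&obj_created[STAT_NODE]));
	return 0;
}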
@@ -204,6 +207,8 @@ struct binder_transaction_log {
 	int full;
 	struct binder_transaction_log_entry entry[32];
 };
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
 
 static struct binder_transaction_log_entry *binder_transaction_log_add(
 	struct binder_transaction_log *log)
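The 32-entry transaction log restored here is a fixed-size ring: next advances on every add and wraps to zero, and full records that a wrap happened so readers know to start at next rather than index 0. A self-contained sketch of the add path under those assumptions (the entry fields are illustrative placeholders, not the driver's full layout, and the caller overwrites the returned slot):

#define LOG_SIZE 32

struct log_entry {
	int debug_id;
	int from_proc;
};

struct txn_log {
	int next;
	int full;
	struct log_entry entry[LOG_SIZE];
};

/* hand out the next slot, wrapping and flagging "full" once
 * the buffer has cycled at least once */
static struct log_entry *txn_log_add(struct txn_log *log)
{
	struct log_entry *e = &log->entry[log->next];

	log->next++;
	if (log->next == LOG_SIZE) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}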
@@ -224,21 +229,6 @@ struct binder_context {
 	struct binder_node *binder_context_mgr_node;
 	kuid_t binder_context_mgr_uid;
 	const char *name;
-
-	struct mutex binder_main_lock;
-	struct mutex binder_deferred_lock;
-	struct mutex binder_mmap_lock;
-
-	struct hlist_head binder_procs;
-	struct hlist_head binder_dead_nodes;
-	struct hlist_head binder_deferred_list;
-
-	struct work_struct deferred_work;
-	struct workqueue_struct *binder_deferred_workqueue;
-	struct binder_transaction_log transaction_log;
-	struct binder_transaction_log transaction_log_failed;
-
-	struct binder_stats binder_stats;
 };
 
 struct binder_device {
@@ -461,18 +451,17 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 	return retval;
 }
 
-static inline void binder_lock(struct binder_context *context, const char *tag)
+static inline void binder_lock(const char *tag)
 {
 	trace_binder_lock(tag);
-	mutex_lock(&context->binder_main_lock);
+	mutex_lock(&binder_main_lock);
 	trace_binder_locked(tag);
 }
 
-static inline void binder_unlock(struct binder_context *context,
-				 const char *tag)
+static inline void binder_unlock(const char *tag)
 {
 	trace_binder_unlock(tag);
-	mutex_unlock(&context->binder_main_lock);
+	mutex_unlock(&binder_main_lock);
 }
 
 static void binder_set_nice(long nice)
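The revert returns to one driver-wide mutex; the wrappers exist so lock traffic shows up as trace events tagged with the caller (callers pass __func__). A userspace analogue with pthreads, with printf standing in for the trace_binder_* tracepoints:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;

/* tag is typically __func__, which makes contention attributable */
static inline void binder_lock(const char *tag)
{
	printf("%s: waiting for lock\n", tag);   /* trace_binder_lock() */
	pthread_mutex_lock(&main_lock);
	printf("%s: got lock\n", tag);           /* trace_binder_locked() */
}

static inline void binder_unlock(const char *tag)
{
	printf("%s: releasing lock\n", tag);     /* trace_binder_unlock() */
	pthread_mutex_unlock(&main_lock);
}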
@@ -957,7 +946,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 	binder_stats_created(BINDER_STAT_NODE);
 	rb_link_node(&node->rb_node, parent, p);
 	rb_insert_color(&node->rb_node, &proc->nodes);
-	node->debug_id = atomic_inc_return(&binder_last_id);
+	node->debug_id = ++binder_last_id;
 	node->proc = proc;
 	node->ptr = ptr;
 	node->cookie = cookie;
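The restored ++binder_last_id is not an atomic operation; it is only safe because every caller runs under binder_main_lock. The patch being reverted had made the counter an atomic precisely so debug IDs could be allocated without that lock. Both idioms in C11 terms (a sketch, not the kernel's atomic_t API):

#include <stdatomic.h>

static atomic_int last_id;     /* context-struct version: lock-free counter */
static int last_id_plain;      /* reverted version: plain int */

int next_id_atomic(void)
{
	/* equivalent to the kernel's atomic_inc_return(): safe without a lock */
	return atomic_fetch_add(&last_id, 1) + 1;
}

int next_id_locked(void)
{
	/* only correct while the caller holds the one big driver lock */
	return ++last_id_plain;
}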
@@ -1099,7 +1088,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 	if (new_ref == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_REF);
-	new_ref->debug_id = atomic_inc_return(&binder_last_id);
+	new_ref->debug_id = ++binder_last_id;
 	new_ref->proc = proc;
 	new_ref->node = node;
 	rb_link_node(&new_ref->rb_node_node, parent, p);
@@ -1859,7 +1848,7 @@ static void binder_transaction(struct binder_proc *proc,
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 
-	e = binder_transaction_log_add(&context->transaction_log);
+	e = binder_transaction_log_add(&binder_transaction_log);
 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
 	e->from_proc = proc->pid;
 	e->from_thread = thread->pid;
@@ -1981,7 +1970,7 @@ static void binder_transaction(struct binder_proc *proc,
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
 
-	t->debug_id = atomic_inc_return(&binder_last_id);
+	t->debug_id = ++binder_last_id;
 	e->debug_id = t->debug_id;
 
 	if (reply)
@@ -2245,8 +2234,7 @@ err_no_context_mgr_node:
 	{
 		struct binder_transaction_log_entry *fe;
 
-		fe = binder_transaction_log_add(
-				&context->transaction_log_failed);
+		fe = binder_transaction_log_add(&binder_transaction_log_failed);
 		*fe = *e;
 	}
 
@@ -2274,8 +2262,8 @@ static int binder_thread_write(struct binder_proc *proc,
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		trace_binder_command(cmd);
-		if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) {
-			context->binder_stats.bc[_IOC_NR(cmd)]++;
+		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+			binder_stats.bc[_IOC_NR(cmd)]++;
 			proc->stats.bc[_IOC_NR(cmd)]++;
 			thread->stats.bc[_IOC_NR(cmd)]++;
 		}
@@ -2640,8 +2628,8 @@ static void binder_stat_br(struct binder_proc *proc,
 			   struct binder_thread *thread, uint32_t cmd)
 {
 	trace_binder_return(cmd);
-	if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) {
-		proc->context->binder_stats.br[_IOC_NR(cmd)]++;
+	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+		binder_stats.br[_IOC_NR(cmd)]++;
 		proc->stats.br[_IOC_NR(cmd)]++;
 		thread->stats.br[_IOC_NR(cmd)]++;
 	}
@@ -2705,7 +2693,7 @@ retry:
 	if (wait_for_proc_work)
 		proc->ready_threads++;
 
-	binder_unlock(proc->context, __func__);
+	binder_unlock(__func__);
 
 	trace_binder_wait_for_work(wait_for_proc_work,
 				   !!thread->transaction_stack,
@@ -2732,7 +2720,7 @@ retry:
 			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
 	}
 
-	binder_lock(proc->context, __func__);
+	binder_lock(__func__);
 
 	if (wait_for_proc_work)
 		proc->ready_threads--;
@@ -3119,14 +3107,14 @@ static unsigned int binder_poll(struct file *filp,
 	struct binder_thread *thread = NULL;
 	int wait_for_proc_work;
 
-	binder_lock(proc->context, __func__);
+	binder_lock(__func__);
 
 	thread = binder_get_thread(proc);
 
 	wait_for_proc_work = thread->transaction_stack == NULL &&
 		list_empty(&thread->todo) && thread->return_error == BR_OK;
 
-	binder_unlock(proc->context, __func__);
+	binder_unlock(__func__);
 
 	if (wait_for_proc_work) {
 		if (binder_has_proc_work(proc, thread))
@@ -3253,7 +3241,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
 	struct binder_proc *proc = filp->private_data;
-	struct binder_context *context = proc->context;
 	struct binder_thread *thread;
 	unsigned int size = _IOC_SIZE(cmd);
 	void __user *ubuf = (void __user *)arg;
@@ -3267,7 +3254,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (ret)
 		goto err_unlocked;
 
-	binder_lock(context, __func__);
+	binder_lock(__func__);
 	thread = binder_get_thread(proc);
 	if (thread == NULL) {
 		ret = -ENOMEM;
@@ -3319,7 +3306,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 err:
 	if (thread)
 		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
-	binder_unlock(context, __func__);
+	binder_unlock(__func__);
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	if (ret && ret != -ERESTARTSYS)
 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3391,7 +3378,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
 
-	mutex_lock(&proc->context->binder_mmap_lock);
+	mutex_lock(&binder_mmap_lock);
 	if (proc->buffer) {
 		ret = -EBUSY;
 		failure_string = "already mapped";
@@ -3406,7 +3393,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 	proc->buffer = area->addr;
 	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
-	mutex_unlock(&proc->context->binder_mmap_lock);
+	mutex_unlock(&binder_mmap_lock);
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
@@ -3451,12 +3438,12 @@ err_alloc_small_buf_failed:
 	kfree(proc->pages);
 	proc->pages = NULL;
 err_alloc_pages_failed:
-	mutex_lock(&proc->context->binder_mmap_lock);
+	mutex_lock(&binder_mmap_lock);
 	vfree(proc->buffer);
 	proc->buffer = NULL;
 err_get_vm_area_failed:
 err_already_mapped:
-	mutex_unlock(&proc->context->binder_mmap_lock);
+	mutex_unlock(&binder_mmap_lock);
 err_bad_arg:
 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3483,15 +3470,15 @@ static int binder_open(struct inode *nodp, struct file *filp)
 					    miscdev);
 	proc->context = &binder_dev->context;
 
-	binder_lock(proc->context, __func__);
+	binder_lock(__func__);
 
 	binder_stats_created(BINDER_STAT_PROC);
-	hlist_add_head(&proc->proc_node, &proc->context->binder_procs);
+	hlist_add_head(&proc->proc_node, &binder_procs);
 	proc->pid = current->group_leader->pid;
 	INIT_LIST_HEAD(&proc->delivered_death);
 	filp->private_data = proc;
 
-	binder_unlock(proc->context, __func__);
+	binder_unlock(__func__);
 
 	if (binder_debugfs_dir_entry_proc) {
 		char strbuf[11];
@@ -3556,7 +3543,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
 static int binder_node_release(struct binder_node *node, int refs)
 {
 	struct binder_ref *ref;
-	struct binder_context *context = node->proc->context;
 	int death = 0;
 
 	list_del_init(&node->work.entry);
@@ -3572,7 +3558,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 	node->proc = NULL;
 	node->local_strong_refs = 0;
 	node->local_weak_refs = 0;
-	hlist_add_head(&node->dead_node, &context->binder_dead_nodes);
+	hlist_add_head(&node->dead_node, &binder_dead_nodes);
 
 	hlist_for_each_entry(ref, &node->refs, node_entry) {
 		refs++;
@@ -3637,8 +3623,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 		node = rb_entry(n, struct binder_node, rb_node);
 		nodes++;
 		rb_erase(&node->rb_node, &proc->nodes);
-		incoming_refs = binder_node_release(node,
-						    incoming_refs);
+		incoming_refs = binder_node_release(node, incoming_refs);
 	}
 
 	outgoing_refs = 0;
@@ -3710,16 +3695,14 @@ static void binder_deferred_func(struct work_struct *work)
 {
 	struct binder_proc *proc;
 	struct files_struct *files;
-	struct binder_context *context =
-		container_of(work, struct binder_context, deferred_work);
 
 	int defer;
 
 	do {
-		binder_lock(context, __func__);
-		mutex_lock(&context->binder_deferred_lock);
-		if (!hlist_empty(&context->binder_deferred_list)) {
-			proc = hlist_entry(context->binder_deferred_list.first,
+		binder_lock(__func__);
+		mutex_lock(&binder_deferred_lock);
+		if (!hlist_empty(&binder_deferred_list)) {
+			proc = hlist_entry(binder_deferred_list.first,
 					struct binder_proc, deferred_work_node);
 			hlist_del_init(&proc->deferred_work_node);
 			defer = proc->deferred_work;
@@ -3728,7 +3711,7 @@ static void binder_deferred_func(struct work_struct *work)
 			proc = NULL;
 			defer = 0;
 		}
-		mutex_unlock(&context->binder_deferred_lock);
+		mutex_unlock(&binder_deferred_lock);
 
 		files = NULL;
 		if (defer & BINDER_DEFERRED_PUT_FILES) {
@@ -3743,24 +3726,24 @@ static void binder_deferred_func(struct work_struct *work)
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
 
-		binder_unlock(context, __func__);
+		binder_unlock(__func__);
 		if (files)
 			put_files_struct(files);
 	} while (proc);
 }
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
 
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 {
-	mutex_lock(&proc->context->binder_deferred_lock);
+	mutex_lock(&binder_deferred_lock);
 	proc->deferred_work |= defer;
 	if (hlist_unhashed(&proc->deferred_work_node)) {
 		hlist_add_head(&proc->deferred_work_node,
-			       &proc->context->binder_deferred_list);
-		queue_work(proc->context->binder_deferred_workqueue,
-			   &proc->context->deferred_work);
+			       &binder_deferred_list);
+		queue_work(binder_deferred_workqueue, &binder_deferred_work);
 	}
-	mutex_unlock(&proc->context->binder_deferred_lock);
+	mutex_unlock(&binder_deferred_lock);
 }
 
 static void print_binder_transaction(struct seq_file *m, const char *prefix,
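binder_defer_work() restored above is the usual deferred-work coalescing pattern: pending kinds of work are OR'd into the proc under binder_deferred_lock, and the proc goes onto the deferred list at most once (the hlist_unhashed() test) before the workqueue is kicked. A simplified pthread sketch of that shape, with a singly linked list in place of hlists and kick_worker() as an assumed helper standing in for queue_work():

#include <pthread.h>

struct proc_work {
	int deferred_work;        /* OR'd bitmask of pending work kinds */
	int queued;               /* stands in for !hlist_unhashed() */
	struct proc_work *next;   /* pending list linkage */
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct proc_work *deferred_list;

void kick_worker(void);          /* assumed: wakes the draining thread */

void defer_work(struct proc_work *proc, int defer)
{
	pthread_mutex_lock(&deferred_lock);
	proc->deferred_work |= defer;    /* coalesce repeated requests */
	if (!proc->queued) {             /* enqueue each proc only once */
		proc->queued = 1;
		proc->next = deferred_list;
		deferred_list = proc;
		kick_worker();
	}
	pthread_mutex_unlock(&deferred_lock);
}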
@@ -3991,20 +3974,8 @@ static const char * const binder_objstat_strings[] = {
 	"transaction_complete"
 };
 
-static void add_binder_stats(struct binder_stats *from, struct binder_stats *to)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(to->bc); i++)
-		to->bc[i] += from->bc[i];
-
-	for (i = 0; i < ARRAY_SIZE(to->br); i++)
-		to->br[i] += from->br[i];
-}
-
 static void print_binder_stats(struct seq_file *m, const char *prefix,
-			       struct binder_stats *stats,
-			       struct binder_obj_stats *obj_stats)
+			       struct binder_stats *stats)
 {
 	int i;
 
@@ -4024,21 +3995,16 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
 			   binder_return_strings[i], stats->br[i]);
 	}
 
-	if (!obj_stats)
-		return;
-
-	BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
 		     ARRAY_SIZE(binder_objstat_strings));
-	BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
-		     ARRAY_SIZE(obj_stats->obj_deleted));
-	for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) {
-		int obj_created = atomic_read(&obj_stats->obj_created[i]);
-		int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]);
-
-		if (obj_created || obj_deleted)
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+		     ARRAY_SIZE(stats->obj_deleted));
+	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+		if (stats->obj_created[i] || stats->obj_deleted[i])
 			seq_printf(m, "%s%s: active %d total %d\n", prefix,
-				   binder_objstat_strings[i],
-				   obj_created - obj_deleted, obj_created);
+				   binder_objstat_strings[i],
+				   stats->obj_created[i] - stats->obj_deleted[i],
+				   stats->obj_created[i]);
 	}
 }
@@ -4093,131 +4059,85 @@ static void print_binder_proc_stats(struct seq_file *m,
 	}
 	seq_printf(m, "  pending transactions: %d\n", count);
 
-	print_binder_stats(m, "  ", &proc->stats, NULL);
+	print_binder_stats(m, "  ", &proc->stats);
 }
 
 
 static int binder_state_show(struct seq_file *m, void *unused)
 {
-	struct binder_device *device;
-	struct binder_context *context;
 	struct binder_proc *proc;
 	struct binder_node *node;
 	int do_lock = !binder_debug_no_lock;
-	bool wrote_dead_nodes_header = false;
+
+	if (do_lock)
+		binder_lock(__func__);
 
 	seq_puts(m, "binder state:\n");
 
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
-		if (!wrote_dead_nodes_header &&
-		    !hlist_empty(&context->binder_dead_nodes)) {
-			seq_puts(m, "dead nodes:\n");
-			wrote_dead_nodes_header = true;
-		}
-		hlist_for_each_entry(node, &context->binder_dead_nodes,
-				     dead_node)
-			print_binder_node(m, node);
+	if (!hlist_empty(&binder_dead_nodes))
+		seq_puts(m, "dead nodes:\n");
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+		print_binder_node(m, node);
 
-		if (do_lock)
-			binder_unlock(context, __func__);
-	}
-
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
-
-		hlist_for_each_entry(proc, &context->binder_procs, proc_node)
-			print_binder_proc(m, proc, 1);
-		if (do_lock)
-			binder_unlock(context, __func__);
-	}
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 1);
+	if (do_lock)
+		binder_unlock(__func__);
 	return 0;
 }
 
 static int binder_stats_show(struct seq_file *m, void *unused)
 {
-	struct binder_device *device;
-	struct binder_context *context;
 	struct binder_proc *proc;
-	struct binder_stats total_binder_stats;
 	int do_lock = !binder_debug_no_lock;
 
-	memset(&total_binder_stats, 0, sizeof(struct binder_stats));
-
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
-
-		add_binder_stats(&context->binder_stats, &total_binder_stats);
-
-		if (do_lock)
-			binder_unlock(context, __func__);
-	}
+	if (do_lock)
+		binder_lock(__func__);
 
 	seq_puts(m, "binder stats:\n");
-	print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats);
+	print_binder_stats(m, "", &binder_stats);
 
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
-
-		hlist_for_each_entry(proc, &context->binder_procs, proc_node)
-			print_binder_proc_stats(m, proc);
-		if (do_lock)
-			binder_unlock(context, __func__);
-	}
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc_stats(m, proc);
+	if (do_lock)
+		binder_unlock(__func__);
 	return 0;
 }
 
 static int binder_transactions_show(struct seq_file *m, void *unused)
 {
-	struct binder_device *device;
-	struct binder_context *context;
 	struct binder_proc *proc;
 	int do_lock = !binder_debug_no_lock;
 
-	seq_puts(m, "binder transactions:\n");
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
+	if (do_lock)
+		binder_lock(__func__);
 
-		hlist_for_each_entry(proc, &context->binder_procs, proc_node)
-			print_binder_proc(m, proc, 0);
-		if (do_lock)
-			binder_unlock(context, __func__);
-	}
+	seq_puts(m, "binder transactions:\n");
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 0);
+	if (do_lock)
+		binder_unlock(__func__);
 	return 0;
 }
 
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
-	struct binder_device *device;
-	struct binder_context *context;
 	struct binder_proc *itr;
 	int pid = (unsigned long)m->private;
 	int do_lock = !binder_debug_no_lock;
 
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		if (do_lock)
-			binder_lock(context, __func__);
+	if (do_lock)
+		binder_lock(__func__);
 
-		hlist_for_each_entry(itr, &context->binder_procs, proc_node) {
-			if (itr->pid == pid) {
-				seq_puts(m, "binder proc state:\n");
-				print_binder_proc(m, itr, 1);
-			}
-		}
-		if (do_lock)
-			binder_unlock(context, __func__);
+	hlist_for_each_entry(itr, &binder_procs, proc_node) {
+		if (itr->pid == pid) {
+			seq_puts(m, "binder proc state:\n");
+			print_binder_proc(m, itr, 1);
+		}
 	}
+	if (do_lock)
+		binder_unlock(__func__);
 	return 0;
 }
@@ -4232,10 +4152,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
 		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
 }
 
-static int print_binder_transaction_log(struct seq_file *m,
-					struct binder_transaction_log *log)
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
 {
+	struct binder_transaction_log *log = m->private;
 	int i;
 
 	if (log->full) {
 		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
 			print_binder_transaction_log_entry(m, &log->entry[i]);
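With the revert, a single show function serves both log files: the log to dump travels as debugfs private data and comes back via m->private. That is the standard single_open() plumbing the BINDER_DEBUG_ENTRY macro near the top of the file expands to; a sketch of the expanded open function, assuming the usual 4.x-era seq_file conventions:

static int binder_transaction_log_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the data pointer handed to debugfs_create_file() */
	return single_open(file, binder_transaction_log_show, inode->i_private);
}

This is why the binder_init() hunk further down passes &binder_transaction_log and &binder_transaction_log_failed to debugfs_create_file() instead of NULL.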
@@ -4245,31 +4166,6 @@ static int print_binder_transaction_log(struct seq_file *m,
 	return 0;
 }
 
-static int binder_transaction_log_show(struct seq_file *m, void *unused)
-{
-	struct binder_device *device;
-	struct binder_context *context;
-
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		print_binder_transaction_log(m, &context->transaction_log);
-	}
-	return 0;
-}
-
-static int binder_failed_transaction_log_show(struct seq_file *m, void *unused)
-{
-	struct binder_device *device;
-	struct binder_context *context;
-
-	hlist_for_each_entry(device, &binder_devices, hlist) {
-		context = &device->context;
-		print_binder_transaction_log(m,
-					     &context->transaction_log_failed);
-	}
-	return 0;
-}
-
 static const struct file_operations binder_fops = {
 	.owner = THIS_MODULE,
 	.poll = binder_poll,
@@ -4285,20 +4181,11 @@ BINDER_DEBUG_ENTRY(state);
 BINDER_DEBUG_ENTRY(stats);
 BINDER_DEBUG_ENTRY(transactions);
 BINDER_DEBUG_ENTRY(transaction_log);
-BINDER_DEBUG_ENTRY(failed_transaction_log);
 
-static void __init free_binder_device(struct binder_device *device)
-{
-	if (device->context.binder_deferred_workqueue)
-		destroy_workqueue(device->context.binder_deferred_workqueue);
-	kfree(device);
-}
-
 static int __init init_binder_device(const char *name)
 {
 	int ret;
 	struct binder_device *binder_device;
-	struct binder_context *context;
 
 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
 	if (!binder_device)
@@ -4308,65 +4195,31 @@ static int __init init_binder_device(const char *name)
 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
 	binder_device->miscdev.name = name;
 
-	context = &binder_device->context;
-	context->binder_context_mgr_uid = INVALID_UID;
-	context->name = name;
-
-	mutex_init(&context->binder_main_lock);
-	mutex_init(&context->binder_deferred_lock);
-	mutex_init(&context->binder_mmap_lock);
-
-	context->binder_deferred_workqueue =
-		create_singlethread_workqueue(name);
-
-	if (!context->binder_deferred_workqueue) {
-		ret = -ENOMEM;
-		goto err_create_singlethread_workqueue_failed;
-	}
-
-	INIT_HLIST_HEAD(&context->binder_procs);
-	INIT_HLIST_HEAD(&context->binder_dead_nodes);
-	INIT_HLIST_HEAD(&context->binder_deferred_list);
-	INIT_WORK(&context->deferred_work, binder_deferred_func);
+	binder_device->context.binder_context_mgr_uid = INVALID_UID;
+	binder_device->context.name = name;
 
 	ret = misc_register(&binder_device->miscdev);
 	if (ret < 0) {
-		goto err_misc_register_failed;
+		kfree(binder_device);
+		return ret;
 	}
 
 	hlist_add_head(&binder_device->hlist, &binder_devices);
 
 	return ret;
-
-err_create_singlethread_workqueue_failed:
-err_misc_register_failed:
-	free_binder_device(binder_device);
-
-	return ret;
 }
 
 static int __init binder_init(void)
 {
-	int ret = 0;
+	int ret;
 	char *device_name, *device_names;
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
-	/*
-	 * Copy the module_parameter string, because we don't want to
-	 * tokenize it in-place.
-	 */
-	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
-	if (!device_names)
+	binder_deferred_workqueue = create_singlethread_workqueue("binder");
+	if (!binder_deferred_workqueue)
 		return -ENOMEM;
 
-	strcpy(device_names, binder_devices_param);
-
-	while ((device_name = strsep(&device_names, ","))) {
-		ret = init_binder_device(device_name);
-		if (ret)
-			goto err_init_binder_device_failed;
-	}
-
 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
 	if (binder_debugfs_dir_entry_root)
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4391,13 +4244,30 @@ static int __init binder_init(void)
 		debugfs_create_file("transaction_log",
 				    S_IRUGO,
 				    binder_debugfs_dir_entry_root,
-				    NULL,
+				    &binder_transaction_log,
 				    &binder_transaction_log_fops);
 		debugfs_create_file("failed_transaction_log",
 				    S_IRUGO,
 				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_failed_transaction_log_fops);
+				    &binder_transaction_log_failed,
+				    &binder_transaction_log_fops);
 	}
 
+	/*
+	 * Copy the module_parameter string, because we don't want to
+	 * tokenize it in-place.
+	 */
+	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+	if (!device_names) {
+		ret = -ENOMEM;
+		goto err_alloc_device_names_failed;
+	}
+	strcpy(device_names, binder_devices_param);
+
+	while ((device_name = strsep(&device_names, ","))) {
+		ret = init_binder_device(device_name);
+		if (ret)
+			goto err_init_binder_device_failed;
+	}
+
 	return ret;
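binder_devices_param (the binder_devices module parameter) is a comma-separated list of device names; the code copies it before parsing because strsep() writes NUL bytes into the buffer it tokenizes. A standalone illustration of that parse loop (the parameter value shown is just an example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *binder_devices_param = "binder,hwbinder,vndbinder";
	char *device_names, *device_name, *orig;

	/* copy first: strsep() modifies the string it walks */
	orig = device_names = strdup(binder_devices_param);
	if (!device_names)
		return 1;

	while ((device_name = strsep(&device_names, ",")))
		printf("would create /dev/%s\n", device_name);

	free(orig);
	return 0;
}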
@@ -4406,8 +4276,12 @@ err_init_binder_device_failed:
 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
 		misc_deregister(&device->miscdev);
 		hlist_del(&device->hlist);
-		free_binder_device(device);
+		kfree(device);
 	}
+err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
 
+	destroy_workqueue(binder_deferred_workqueue);
+
 	return ret;
 }