perf: protect group_leader from races that cause ctx double-free
When moving a group_leader perf event from a software context to a hardware context, there is a race in checking and updating that context. The existing locking solution does not work: it tries to grab a lock inside the group_leader's context object, which can only be reached through a pointer that should itself be protected from these races.

To avoid that problem, and to keep the solution simple, use a lock per group_leader to protect all checks on the group_leader's context. The new lock is taken and released only while no context locks are held.

Bug: 30955111
Bug: 31095224
Change-Id: If37124c100ca6f4aa962559fba3bd5dbbec8e052
Git-repo: https://android.googlesource.com/kernel/msm
Git-commit: 5b87e00be9ca28ea32cab49b92c0386e4a91f730
Signed-off-by: Dennis Cagle <d-cagle@codeaurora.org>
Parent: 368fecd7df
Commit: 7ee4109d7e
2 changed files with 21 additions and 0 deletions
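To make the failure mode concrete, here is a minimal userspace model of the race the commit message describes. It is an analogy only: struct ctx, struct event, leader_lock, and try_move_group are hypothetical stand-ins, not kernel code. Two threads both inspect the leader's context and both try to retire it; the per-leader mutex, taken while no context lock is held, ensures the old context is released exactly once.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the context and event objects. */
struct ctx {
        pthread_mutex_t mutex;       /* analogue of the context's own mutex */
        int             refcount;
};

struct event {
        pthread_mutex_t leader_lock; /* analogue of group_leader_mutex */
        struct ctx     *ctx;         /* the pointer the race corrupts  */
};

static struct event leader;
static struct ctx  *hw_ctx;          /* destination "hardware" context */
static int          releases;        /* times the old context was freed */

/* Check whether the leader still sits on the old context and, if so, move
 * it. Without leader_lock, two threads can both see the old context, both
 * re-point the leader, and both drop the final reference (a double free). */
static void *try_move_group(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&leader.leader_lock); /* taken with no ctx lock held */

        struct ctx *old = leader.ctx;
        if (old != hw_ctx) {                     /* "do we need to move the group?" */
                pthread_mutex_lock(&old->mutex);
                leader.ctx = hw_ctx;
                pthread_mutex_unlock(&old->mutex);
                if (--old->refcount == 0) {
                        free(old);               /* released exactly once */
                        releases++;
                }
        }

        pthread_mutex_unlock(&leader.leader_lock);
        return NULL;
}

int main(void)
{
        struct ctx *sw_ctx = calloc(1, sizeof(*sw_ctx));
        hw_ctx = calloc(1, sizeof(*hw_ctx));
        sw_ctx->refcount = 1;
        pthread_mutex_init(&sw_ctx->mutex, NULL);
        pthread_mutex_init(&hw_ctx->mutex, NULL);
        pthread_mutex_init(&leader.leader_lock, NULL);
        leader.ctx = sw_ctx;

        pthread_t a, b;
        pthread_create(&a, NULL, try_move_group, NULL);
        pthread_create(&b, NULL, try_move_group, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("old context released %d time(s)\n", releases); /* always 1 */
        free(hw_ctx);
        return 0;
}

Only one thread wins the "does the context need to move" check because the check and the swap happen under the same outer lock; the loser simply observes the new context and does nothing.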
include/linux/perf_event.h

@@ -476,6 +476,12 @@ struct perf_event {
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
+
+	/*
+	 * Protect the pmu, attributes and context of a group leader.
+	 * Note: does not protect the pointer to the group_leader.
+	 */
+	struct mutex			group_leader_mutex;
 	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
kernel/events/core.c

@@ -8066,6 +8066,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!group_leader)
 		group_leader = event;
 
+	mutex_init(&event->group_leader_mutex);
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
@@ -8505,6 +8506,16 @@ SYSCALL_DEFINE5(perf_event_open,
 			group_leader = NULL;
 	}
 
+	/*
+	 * Take the group_leader's group_leader_mutex before observing
+	 * anything in the group leader that leads to changes in ctx,
+	 * many of which may be changing on another thread.
+	 * In particular, we want to take this lock before deciding
+	 * whether we need to move_group.
+	 */
+	if (group_leader)
+		mutex_lock(&group_leader->group_leader_mutex);
+
 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
 		task = find_lively_task_by_vpid(pid);
 		if (IS_ERR(task)) {
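For context, the move_group decision guarded above is reached when userspace builds a mixed event group: a software group leader that later gains a hardware sibling, which forces the group onto the hardware context. The sketch below is illustrative only; it relies on the documented perf_event_open(2) interface, and the particular event choices (task-clock leader, CPU-cycles sibling) are assumptions, not taken from this patch.

/* Open a software group leader, then add a hardware event to its group.
 * Moving such a group from the software context to the hardware context
 * is the path the new group_leader_mutex serializes. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        /* No glibc wrapper exists; call the raw syscall. */
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;

        /* Software event as the group leader. */
        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;
        int leader_fd = perf_event_open(&attr, 0 /* this task */, -1, -1, 0);
        if (leader_fd < 0) {
                perror("perf_event_open (leader)");
                return 1;
        }

        /* Hardware event joining the software leader's group. */
        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        int member_fd = perf_event_open(&attr, 0, -1, leader_fd, 0);
        if (member_fd < 0)
                perror("perf_event_open (member)");
        else
                close(member_fd);

        close(leader_fd);
        return 0;
}

Several threads issuing the second call concurrently against the same leader_fd is the kind of pattern the races in the bug reports involve.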
@@ -8766,6 +8777,8 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (move_group)
 		mutex_unlock(&gctx->mutex);
 	mutex_unlock(&ctx->mutex);
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 
 	if (task) {
 		mutex_unlock(&task->signal->cred_guard_mutex);
@@ -8815,6 +8828,8 @@ err_task:
 	if (task)
 		put_task_struct(task);
 err_group_fd:
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	fdput(group);
 err_fd:
 	put_unused_fd(event_fd);
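The last two hunks pair the early mutex_lock() with an unlock on both the success path and the error unwind at err_group_fd, so the lock is dropped on every exit with no context lock held. A small self-contained sketch of that acquire/release pairing, using hypothetical names and the goto-based unwinding style the kernel code above follows, is:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical outer lock and a setup step that can fail. */
static pthread_mutex_t leader_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup_context(int should_fail)
{
        return should_fail ? -1 : 0;
}

/* Take the outer lock first; release it on every exit path, mirroring the
 * unlocks added on the success path and at err_group_fd. */
static int open_event(int should_fail)
{
        int err;

        pthread_mutex_lock(&leader_lock);

        err = setup_context(should_fail);
        if (err)
                goto err_unlock;            /* error unwind still drops the lock */

        /* ... install the event ... */

        pthread_mutex_unlock(&leader_lock); /* success path */
        return 0;

err_unlock:
        pthread_mutex_unlock(&leader_lock);
        return err;
}

int main(void)
{
        printf("success path: %d\n", open_event(0));
        printf("error path:   %d\n", open_event(1));
        return 0;
}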