mm: process_reclaim: do not iterate over stale task structs
swap_fn() iterates through the threads of the selected tasks after rcu_read_unlock(), which is wrong. We can't simply extend the rcu_read_lock() either, since holding it that long would result in severe performance issues. So it is better to avoid iterating over the threads altogether: just lock the group leader and use it from then on.

Change-Id: I36269b1b6619315f33f6f3b49ec73571a66796f2
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
parent 1ad1a93af4
commit 9ff0477006

1 changed file with 12 additions and 19 deletions
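For reference, a minimal sketch of test_task_flag() as it looks after this change, reconstructed from the first hunk below: the whole for_each_thread() walk now sits inside an RCU read-side critical section, so no thread struct is touched after rcu_read_unlock(). This is a sketch of the post-patch shape, not an independent implementation.

/* Post-patch test_task_flag(), reconstructed from the hunk below. */
static int test_task_flag(struct task_struct *p, int flag)
{
	struct task_struct *t = p;

	rcu_read_lock();
	for_each_thread(p, t) {
		task_lock(t);
		if (test_tsk_thread_flag(t, flag)) {
			task_unlock(t);
			rcu_read_unlock();
			return 1;
		}
		task_unlock(t);
	}
	rcu_read_unlock();

	return 0;
}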
@@ -88,14 +88,17 @@ static int test_task_flag(struct task_struct *p, int flag)
 {
 	struct task_struct *t = p;
 
-	do {
+	rcu_read_lock();
+	for_each_thread(p, t) {
 		task_lock(t);
 		if (test_tsk_thread_flag(t, flag)) {
 			task_unlock(t);
+			rcu_read_unlock();
 			return 1;
 		}
 		task_unlock(t);
-	} while_each_thread(p, t);
+	}
+	rcu_read_unlock();
 
 	return 0;
 }
@@ -125,10 +128,6 @@ static void swap_fn(struct work_struct *work)
 		if (tsk->flags & PF_KTHREAD)
 			continue;
 
-		/* if task no longer has any memory ignore it */
-		if (test_task_flag(tsk, TIF_MM_RELEASED))
-			continue;
-
 		if (test_task_flag(tsk, TIF_MEMDIE))
 			continue;
 
@@ -165,20 +164,20 @@ static void swap_fn(struct work_struct *work)
 		}
 	}
 
-	for (i = 0; i < si; i++) {
-		get_task_struct(selected[i].p);
+	for (i = 0; i < si; i++)
 		total_sz += selected[i].tasksize;
-	}
-
-	rcu_read_unlock();
 
 	/* Skip reclaim if total size is too less */
 	if (total_sz < SWAP_CLUSTER_MAX) {
-		for (i = 0; i < si; i++)
-			put_task_struct(selected[i].p);
+		rcu_read_unlock();
 		return;
 	}
 
+	for (i = 0; i < si; i++)
+		get_task_struct(selected[i].p);
+
+	rcu_read_unlock();
+
 	while (si--) {
 		nr_to_reclaim =
 			(selected[si].tasksize * per_swap_size) / total_sz;
@@ -186,12 +185,6 @@ static void swap_fn(struct work_struct *work)
 		if (!nr_to_reclaim)
 			nr_to_reclaim = 1;
 
-		if ((test_task_flag(selected[si].p, TIF_MM_RELEASED))
-			|| (test_task_flag(selected[si].p, TIF_MEMDIE))) {
-			put_task_struct(selected[si].p);
-			continue;
-		}
-
 		rp = reclaim_task_anon(selected[si].p, nr_to_reclaim);
 
 		trace_process_reclaim(selected[si].tasksize,
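For orientation, here is the resulting ordering in swap_fn(), pieced together from the hunks above: the size check still happens under rcu_read_lock(), the selected group leaders are pinned with get_task_struct() before the lock is dropped, and the per-task TIF_MM_RELEASED/TIF_MEMDIE recheck disappears from the reclaim loop. This is only a sketch; declarations and the rest of the loop body are elided, and identifiers follow the diff.

	/* Still inside the rcu_read_lock() taken for task selection. */
	for (i = 0; i < si; i++)
		total_sz += selected[i].tasksize;

	/* Skip reclaim if total size is too less */
	if (total_sz < SWAP_CLUSTER_MAX) {
		rcu_read_unlock();
		return;
	}

	/* Pin the selected group leaders before leaving the RCU section. */
	for (i = 0; i < si; i++)
		get_task_struct(selected[i].p);

	rcu_read_unlock();

	/* Reclaim proportionally from each pinned group leader. */
	while (si--) {
		nr_to_reclaim =
			(selected[si].tasksize * per_swap_size) / total_sz;
		/* ... */
		rp = reclaim_task_anon(selected[si].p, nr_to_reclaim);
		/* ... */
	}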