sched/core: Prevent (user) space tasks from affining to isolated cpus
We don't want user space tasks to run on isolated cpus. If the affinity mask that the user space task is trying to set only includes online cpus that are isolated, return an error.

Also ensure that tasks do not get stuck on isolated cores. We are not properly updating the mask that we check against the current CPU, so we might end up thinking we can run on the current CPU. Fix this.

Change-Id: I078d01e63860d1fc60fc96eb0c739c0f680ae983
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
parent 368fecd7df
commit 584d38f189

1 changed file with 24 additions and 15 deletions
kernel/sched/core.c

@@ -1248,15 +1248,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 
 	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
-	cpumask_and(&allowed_mask, &allowed_mask, cpu_active_mask);
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	dest_cpu = cpumask_any(&allowed_mask);
 	if (dest_cpu >= nr_cpu_ids) {
-		dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+		cpumask_and(&allowed_mask, cpu_active_mask, new_mask);
+		dest_cpu = cpumask_any(&allowed_mask);
 		if (dest_cpu >= nr_cpu_ids) {
 			ret = -EINVAL;
 			goto out;
 		}
+		cpumask_copy(&allowed_mask, new_mask);
 	}
 
 	do_set_cpus_allowed(p, new_mask);
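The new selection order in __set_cpus_allowed_ptr() is easier to follow outside the kernel. The sketch below is not kernel code: it mirrors the same prefer-non-isolated / fall-back-to-active logic with plain bitmasks, and the example masks (which CPUs are active, isolated, and requested) are made-up values for illustration.

/*
 * Illustrative sketch only -- NOT kernel code. It mimics the new
 * dest_cpu selection order from __set_cpus_allowed_ptr() with plain
 * bitmasks. The mask values in main() are assumed examples.
 */
#include <stdio.h>

#define NR_CPUS 8

/* Return the lowest set CPU bit, or NR_CPUS if the mask is empty. */
static int mask_any(unsigned int mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

int main(void)
{
	unsigned int active_mask   = 0xff; /* CPUs 0-7 active (assumed)   */
	unsigned int isolated_mask = 0x0c; /* CPUs 2-3 isolated (assumed) */
	unsigned int new_mask      = 0x0c; /* task requests CPUs 2-3      */
	unsigned int allowed_mask;
	int dest_cpu;

	/* Prefer a requested CPU that is not isolated. */
	allowed_mask = new_mask & ~isolated_mask;
	dest_cpu = mask_any(allowed_mask);
	if (dest_cpu >= NR_CPUS) {
		/*
		 * Every requested CPU is isolated: fall back to any active
		 * requested CPU, then remember the full request so a later
		 * "already on an allowed CPU?" check sees an updated mask.
		 */
		allowed_mask = active_mask & new_mask;
		dest_cpu = mask_any(allowed_mask);
		if (dest_cpu >= NR_CPUS) {
			printf("no usable CPU -> -EINVAL\n");
			return 1;
		}
		allowed_mask = new_mask;
	}
	printf("dest_cpu=%d allowed_mask=0x%x\n", dest_cpu, allowed_mask);
	return 0;
}

With the masks above, every requested CPU is isolated, so the fallback path runs and allowed_mask ends up reflecting the full request instead of staying stale; that updated mask is what the later "can we stay on the current CPU" check in the function relies on, per the commit message.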
@@ -4635,6 +4636,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
+	int dest_cpu;
+	cpumask_t allowed_mask;
 
 	rcu_read_lock();
 
@@ -4696,20 +4699,26 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
-
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
+	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	if (dest_cpu < nr_cpu_ids) {
+		retval = __set_cpus_allowed_ptr(p, new_mask, true);
+		if (!retval) {
+			cpuset_cpus_allowed(p, cpus_allowed);
+			if (!cpumask_subset(new_mask, cpus_allowed)) {
+				/*
+				 * We must have raced with a concurrent cpuset
+				 * update. Just reset the cpus_allowed to the
+				 * cpuset's cpus_allowed
+				 */
+				cpumask_copy(new_mask, cpus_allowed);
+				goto again;
+			}
 		}
+	} else {
+		retval = -EINVAL;
 	}
 
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
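From user space, the effect of the sched_setaffinity() change can be demonstrated with a minimal program. This is a sketch, not part of the patch; it assumes CPU 3 is isolated on the target system, which is entirely configuration-dependent.

/*
 * User-space sketch: after this patch, requesting an affinity mask that
 * only contains isolated CPUs is expected to fail with EINVAL. Which
 * CPUs are isolated is system-specific; CPU 3 here is an assumption.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(3, &set);	/* assume CPU 3 is isolated on this target */

	if (sched_setaffinity(0, sizeof(set), &set) == -1)
		printf("sched_setaffinity failed: %s\n", strerror(errno));
	else
		printf("affinity set; CPU 3 is apparently not isolated\n");

	return 0;
}

On a kernel with this patch, the call is expected to fail with EINVAL when the requested mask contains only isolated CPUs; with a mask that includes at least one non-isolated active CPU, or on kernels without CPU isolation, it simply succeeds.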