cpuset: Make cpusets restore on hotplug

This deliberately changes the behavior of the per-cpuset
cpus file so that it is not affected by hotplug. When a cpu is
offlined, it will be removed from the cpuset's cpus file. When a
cpu is onlined, if the cpuset originally requested that the cpu be
part of the cpuset, the cpu will be restored to the cpuset. The
cpus files still have to be hierarchical, but the ranges no longer
have to be a subset of the currently online cpus, only of the
physically present cpus.
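
In other words, each cpuset now carries two masks: cpus_requested
(whatever the user last wrote) and cpus_allowed (the requested mask
intersected with the currently active cpus). The standalone sketch
below illustrates that relationship with plain bitmasks and made-up
values; the kernel itself uses cpumask_var_t, not unsigned int:

    #include <stdio.h>

    int main(void)
    {
            unsigned int requested = 0x0f; /* user asked for cpus 0-3 */
            unsigned int active    = 0x0d; /* cpu 1 is currently offline */

            /* what tasks may actually run on right now */
            unsigned int allowed = requested & active;
            printf("allowed = 0x%x\n", allowed); /* 0xd: cpus 0, 2, 3 */

            /* cpu 1 comes back online: the request is simply re-applied */
            active |= 0x02;
            allowed = requested & active;
            printf("allowed = 0x%x\n", allowed); /* 0xf: cpus 0-3 restored */
            return 0;
    }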

Change-Id: I3efbae24a1f6384be1e603fb56f0d3baef61d924
[ohaugan@codeaurora.org: Port to 4.4]
Git-commit: f180bcac788464a0baf3d79d76dd86d6972ea413
Git-repo: https://android.googlesource.com/kernel/common/msm.git
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
commit 1943b682a3
parent e97b6a0e02
Author: Riley Andrews
Committed-by: Olav Haugan
Date: 2015-06-12 14:36:28 -07:00

@@ -98,6 +98,7 @@ struct cpuset {
         /* user-configured CPUs and Memory Nodes allow to tasks */
         cpumask_var_t cpus_allowed;
+        cpumask_var_t cpus_requested; /* CPUS requested, but not used because of hotplug */
         nodemask_t mems_allowed;
 
         /* effective CPUs and Memory Nodes allow to tasks */
@@ -386,7 +387,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-        return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+        return cpumask_subset(p->cpus_requested, q->cpus_requested) &&
                 nodes_subset(p->mems_allowed, q->mems_allowed) &&
                 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                 is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -486,7 +487,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
         cpuset_for_each_child(c, css, par) {
                 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                     c != cur &&
-                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+                    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
                         goto out;
                 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                     c != cur &&
@@ -945,17 +946,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
         if (!*buf) {
                 cpumask_clear(trialcs->cpus_allowed);
         } else {
-                retval = cpulist_parse(buf, trialcs->cpus_allowed);
+                retval = cpulist_parse(buf, trialcs->cpus_requested);
                 if (retval < 0)
                         return retval;
 
-                if (!cpumask_subset(trialcs->cpus_allowed,
-                                    top_cpuset.cpus_allowed))
+                if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
                         return -EINVAL;
+
+                cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
         }
 
         /* Nothing to do if the cpus didn't change */
-        if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+        if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
                 return 0;
 
         retval = validate_change(cs, trialcs);
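
The hunk above is where the write-side semantics change: the user's
list is parsed into cpus_requested, validated against cpu_present_mask
instead of the hotplug-trimmed top_cpuset mask, and only its currently
active subset becomes cpus_allowed. A standalone sketch of the old
check versus the new one, with hypothetical masks:

    #include <stdio.h>

    int main(void)
    {
            unsigned int present = 0xff; /* 8 cpus physically present */
            unsigned int active  = 0x3f; /* cpus 6-7 hotplugged out */
            unsigned int request = 0xc0; /* user writes "6-7" */

            /* old check: the request had to fit the online cpus */
            printf("old: %s\n", (request & ~active)  ? "-EINVAL" : "accepted");
            /* new check: any physically present cpu may be requested */
            printf("new: %s\n", (request & ~present) ? "-EINVAL" : "accepted");
            return 0;
    }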
@@ -964,6 +966,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
         spin_lock_irq(&callback_lock);
         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+        cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
         spin_unlock_irq(&callback_lock);
 
         /* use trialcs->cpus_allowed as a temp variable */
@@ -1754,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
         switch (type) {
         case FILE_CPULIST:
-                seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+                seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
                 break;
         case FILE_MEMLIST:
                 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1942,12 +1945,15 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
         if (!cs)
                 return ERR_PTR(-ENOMEM);
         if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
-                goto free_cs;
+                goto error_allowed;
         if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-                goto free_cpus;
+                goto error_effective;
+        if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+                goto error_requested;
 
         set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
         cpumask_clear(cs->cpus_allowed);
+        cpumask_clear(cs->cpus_requested);
         nodes_clear(cs->mems_allowed);
         cpumask_clear(cs->effective_cpus);
         nodes_clear(cs->effective_mems);
@@ -1956,9 +1962,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 
         return &cs->css;
 
-free_cpus:
+error_requested:
+        free_cpumask_var(cs->effective_cpus);
+error_effective:
         free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
         kfree(cs);
         return ERR_PTR(-ENOMEM);
 }
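
The renamed labels above preserve the usual kernel unwind ladder: each
failed allocation jumps to the label that frees exactly what was
allocated before it. A generic sketch of the idiom, using a
hypothetical struct foo rather than struct cpuset:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct foo {
            cpumask_var_t a, b, c;
    };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            if (!alloc_cpumask_var(&f->a, GFP_KERNEL))
                    goto free_f;    /* nothing else allocated yet */
            if (!alloc_cpumask_var(&f->b, GFP_KERNEL))
                    goto free_a;    /* undo a, then f */
            if (!alloc_cpumask_var(&f->c, GFP_KERNEL))
                    goto free_b;    /* undo b, then a, then f */
            return f;

    free_b:
            free_cpumask_var(f->b);
    free_a:
            free_cpumask_var(f->a);
    free_f:
            kfree(f);
            return NULL;
    }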
@@ -2019,6 +2027,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
         cs->mems_allowed = parent->mems_allowed;
         cs->effective_mems = parent->mems_allowed;
         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+        cpumask_copy(cs->cpus_requested, parent->cpus_requested);
         cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
         spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2053,6 +2062,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 
         free_cpumask_var(cs->effective_cpus);
         free_cpumask_var(cs->cpus_allowed);
+        free_cpumask_var(cs->cpus_requested);
         kfree(cs);
 }
@@ -2120,8 +2130,11 @@ int __init cpuset_init(void)
                 BUG();
         if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
                 BUG();
+        if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+                BUG();
 
         cpumask_setall(top_cpuset.cpus_allowed);
+        cpumask_setall(top_cpuset.cpus_requested);
         nodes_setall(top_cpuset.mems_allowed);
         cpumask_setall(top_cpuset.effective_cpus);
         nodes_setall(top_cpuset.effective_mems);
@@ -2255,7 +2268,8 @@ retry:
                 goto retry;
         }
 
-        cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+        cpumask_and(&new_cpus, cs->cpus_requested,
+                    parent_cs(cs)->effective_cpus);
         nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
         cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
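
This last hunk is what makes the restore happen: on every hotplug
event the effective mask is re-derived from cpus_requested (clipped by
the parent's effective cpus) rather than from the already-trimmed
cpus_allowed, so an onlined cpu flows back into every cpuset that
asked for it. A final standalone sketch with hypothetical masks:

    #include <stdio.h>

    int main(void)
    {
            /* parent's effective cpus after cpu 1 comes back online */
            unsigned int parent_effective = 0x0f;
            /* this cpuset requested cpus 1 and 3, even while cpu 1 was off */
            unsigned int requested = 0x0a;

            /* mirrors cpumask_and(&new_cpus, cs->cpus_requested,
             *                     parent_cs(cs)->effective_cpus) */
            unsigned int new_cpus = requested & parent_effective;
            printf("new effective cpus = 0x%x\n", new_cpus); /* 0xa */
            return 0;
    }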