Merge "cpuset: Make cpusets restore on hotplug"

commit 05966eacca
Author: Linux Build Service Account
Date:   2016-08-26 14:48:45 -07:00
Committed by: Gerrit - the friendly Code Review server

kernel/cpuset.c

@@ -98,6 +98,7 @@ struct cpuset {
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;	/* CPUS requested, but not used because of hotplug */
 	nodemask_t mems_allowed;

 	/* effective CPUs and Memory Nodes allow to tasks */
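
Reviewer note: this hunk introduces the split between what the user configured (cpus_requested) and what is currently usable (cpus_allowed). A minimal userspace model of the invariant the rest of the patch maintains; the struct and helper names below are illustrative stand-ins, not kernel API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model, not kernel code: one bit per CPU. cpus_requested
 * holds what the user wrote; cpus_allowed is the requested set masked
 * by the CPUs that are currently online. */
struct cpuset_model {
	uint64_t cpus_requested;
	uint64_t cpus_allowed;
};

/* Re-derive the usable mask after a hotplug event. */
static void recompute_allowed(struct cpuset_model *cs, uint64_t active)
{
	cs->cpus_allowed = cs->cpus_requested & active;
}

int main(void)
{
	struct cpuset_model cs = { .cpus_requested = 0x0F }; /* CPUs 0-3 */

	recompute_allowed(&cs, 0x03);    /* CPUs 2-3 go offline */
	assert(cs.cpus_allowed == 0x03); /* usable set shrinks...  */

	recompute_allowed(&cs, 0xFF);    /* ...they come back online */
	assert(cs.cpus_allowed == 0x0F); /* ...and the request is restored */

	printf("requested=%#llx allowed=%#llx\n",
	       (unsigned long long)cs.cpus_requested,
	       (unsigned long long)cs.cpus_allowed);
	return 0;
}

The remaining hunks are bookkeeping to keep the two masks in sync everywhere cpus_allowed used to be the single source of truth.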
@@ -386,7 +387,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -486,7 +487,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
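
Both hierarchy checks above now compare the requested masks rather than the hotplug-trimmed ones; otherwise a configuration that was valid when written could fail re-validation merely because a CPU went offline in the meantime. A small standalone illustration (plain 64-bit masks stand in for cpumask_t):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: why validation must compare requested masks.
 * Parent and child both requested CPUs 0-3; CPUs 2-3 then went
 * offline, shrinking the parent's allowed mask to CPUs 0-1. */
int main(void)
{
	uint64_t active           = 0x03; /* CPUs 0-1 online */
	uint64_t parent_requested = 0x0F;
	uint64_t child_requested  = 0x0F;
	uint64_t parent_allowed   = parent_requested & active;

	/* Checked against cpus_allowed, re-writing "0-3" to the child
	 * fails even though the user's configuration is consistent;
	 * checked against cpus_requested it passes, and the allowed
	 * mask can widen again when the CPUs return. */
	printf("subset of parent allowed:   %s\n",
	       (child_requested & ~parent_allowed) ? "no" : "yes");
	printf("subset of parent requested: %s\n",
	       (child_requested & ~parent_requested) ? "no" : "yes");
	return 0;
}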
@@ -945,17 +946,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (!*buf) {
 		cpumask_clear(trialcs->cpus_allowed);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;

-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
+		if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
 			return -EINVAL;
+
+		cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
 	}

 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;

 	retval = validate_change(cs, trialcs);
@@ -964,6 +966,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 	spin_unlock_irq(&callback_lock);

 	/* use trialcs->cpus_allowed as a temp variable */
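
Taken together, the two update_cpumask() hunks make the write path parse into cpus_requested, bound it by cpu_present_mask (naming an offline CPU is acceptable, naming a nonexistent one is not), and derive cpus_allowed by masking with cpu_active_mask. A hedged userspace sketch of that flow, using flat integers instead of cpumasks:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the reworked write path (not the kernel code): 'present'
 * models cpu_present_mask, 'active' models cpu_active_mask. */
static int update_cpus(uint64_t *requested, uint64_t *allowed,
		       uint64_t new_requested,
		       uint64_t present, uint64_t active)
{
	if (new_requested & ~present)	/* may name offline CPUs,    */
		return -EINVAL;		/* but not nonexistent ones. */

	if (new_requested == *requested)
		return 0;		/* nothing to do */

	*requested = new_requested;
	*allowed   = new_requested & active;
	return 0;
}

int main(void)
{
	uint64_t requested = 0, allowed = 0;
	uint64_t present = 0xFF, active = 0x0F; /* 8 CPUs, 4 online */

	/* Requesting CPUs 0-7 succeeds even though 4-7 are offline. */
	if (update_cpus(&requested, &allowed, 0xFF, present, active) == 0)
		printf("requested=%#lx allowed=%#lx\n",
		       (unsigned long)requested, (unsigned long)allowed);

	/* Requesting CPU 8 fails: it does not exist on this system. */
	if (update_cpus(&requested, &allowed, 0x100, present, active) != 0)
		printf("rejected nonexistent CPU\n");
	return 0;
}

The real code does the same with cpumask_var_t, and copies both masks into the live cpuset under callback_lock, as the second hunk shows.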
@@ -1754,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
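
With this hunk, reading cpuset.cpus reports the requested list, so userspace sees a stable configuration across hotplug cycles. For readers unfamiliar with the %*pbl specifier, a rough userspace equivalent of the cpulist formatting (illustrative only, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

/* Print a bitmask as a comma-separated list of ranges, the way
 * "%*pbl" renders a cpumask (e.g. 0xF5 -> "0,2,4-7"). */
static void print_cpulist(uint64_t mask)
{
	int first = 1;

	for (int cpu = 0; cpu < 64; cpu++) {
		if (!(mask & (1ULL << cpu)))
			continue;
		int end = cpu;

		while (end + 1 < 64 && (mask & (1ULL << (end + 1))))
			end++;
		printf("%s%d", first ? "" : ",", cpu);
		if (end > cpu)
			printf("-%d", end);
		first = 0;
		cpu = end;
	}
	printf("\n");
}

int main(void)
{
	print_cpulist(0xF5); /* prints "0,2,4-7" */
	return 0;
}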
@@ -1942,12 +1945,15 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
-		goto free_cs;
+		goto error_allowed;
 	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-		goto free_cpus;
+		goto error_effective;
+	if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto error_requested;

 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	cpumask_clear(cs->cpus_allowed);
+	cpumask_clear(cs->cpus_requested);
 	nodes_clear(cs->mems_allowed);
 	cpumask_clear(cs->effective_cpus);
 	nodes_clear(cs->effective_mems);
@@ -1956,9 +1962,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 	return &cs->css;

-free_cpus:
+error_requested:
 	free_cpumask_var(cs->effective_cpus);
+error_effective:
 	free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
 	kfree(cs);
 	return ERR_PTR(-ENOMEM);
 }
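
The relabeled error path keeps the usual kernel unwind idiom: each label frees everything allocated before the step that failed, in reverse order of allocation. A self-contained demonstration of the same shape, with plain malloc() standing in for alloc_cpumask_var():

#include <stdio.h>
#include <stdlib.h>

struct demo {
	void *allowed, *effective, *requested;
};

/* Allocate three members; on failure, unwind only what succeeded. */
static struct demo *demo_alloc(void)
{
	struct demo *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	if (!(d->allowed = malloc(8)))
		goto error_allowed;
	if (!(d->effective = malloc(8)))
		goto error_effective;
	if (!(d->requested = malloc(8)))
		goto error_requested;
	return d;

error_requested:
	free(d->effective);
error_effective:
	free(d->allowed);
error_allowed:
	free(d);
	return NULL;
}

int main(void)
{
	struct demo *d = demo_alloc();

	printf("%s\n", d ? "allocated" : "failed");
	return 0;
}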
@@ -2019,6 +2027,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2053,6 +2062,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 	free_cpumask_var(cs->effective_cpus);
 	free_cpumask_var(cs->cpus_allowed);
+	free_cpumask_var(cs->cpus_requested);
 	kfree(cs);
 }
@@ -2120,8 +2130,11 @@ int __init cpuset_init(void)
 		BUG();
 	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
 		BUG();
+	if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+		BUG();

 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
@@ -2255,7 +2268,8 @@ retry:
 		goto retry;
 	}

-	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+	cpumask_and(&new_cpus, cs->cpus_requested,
+		    parent_cs(cs)->effective_cpus);
 	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);

 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
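
This last hunk is the heart of the change: on a hotplug event the new effective set is recomputed from cpus_requested, clipped by the parent's effective CPUs, so a cpuset regains CPUs that come back online instead of staying permanently shrunk. A compact model of that restore behavior (again plain integers, not kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cs {
	uint64_t requested;
	uint64_t effective;
};

/* Model of the hotplug update: effective = requested clipped by the
 * parent's effective mask. */
static void hotplug_update(struct cs *cs, const struct cs *parent)
{
	cs->effective = cs->requested & parent->effective;
}

int main(void)
{
	struct cs root  = { .requested = 0xFF, .effective = 0xFF };
	struct cs child = { .requested = 0xF0 };

	hotplug_update(&child, &root);
	assert(child.effective == 0xF0);

	root.effective = 0x0F;		/* CPUs 4-7 go offline */
	hotplug_update(&child, &root);
	assert(child.effective == 0x00);/* child loses its CPUs... */

	root.effective = 0xFF;		/* ...they come back online */
	hotplug_update(&child, &root);
	assert(child.effective == 0xF0);/* ...and the cpuset restores */

	printf("restored effective=%#lx\n", (unsigned long)child.effective);
	return 0;
}

Before this patch, the equivalent line used cs->cpus_allowed, which had already been trimmed by the offline event, so the cpuset could never grow back; that is exactly the behavior the commit title fixes.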