cgroup: Disable IRQs while holding css_set_lock

While testing the deadline scheduler + cgroup setup I hit this
warning.

[  132.612935] ------------[ cut here ]------------
[  132.612951] WARNING: CPU: 5 PID: 0 at kernel/softirq.c:150 __local_bh_enable_ip+0x6b/0x80
[  132.612952] Modules linked in: (a ton of modules...)
[  132.612981] CPU: 5 PID: 0 Comm: swapper/5 Not tainted 4.7.0-rc2 #2
[  132.612981] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.2-20150714_191134- 04/01/2014
[  132.612982]  0000000000000086 45c8bb5effdd088b ffff88013fd43da0 ffffffff813d229e
[  132.612984]  0000000000000000 0000000000000000 ffff88013fd43de0 ffffffff810a652b
[  132.612985]  00000096811387b5 0000000000000200 ffff8800bab29d80 ffff880034c54c00
[  132.612986] Call Trace:
[  132.612987]  <IRQ>  [<ffffffff813d229e>] dump_stack+0x63/0x85
[  132.612994]  [<ffffffff810a652b>] __warn+0xcb/0xf0
[  132.612997]  [<ffffffff810e76a0>] ? push_dl_task.part.32+0x170/0x170
[  132.612999]  [<ffffffff810a665d>] warn_slowpath_null+0x1d/0x20
[  132.613000]  [<ffffffff810aba5b>] __local_bh_enable_ip+0x6b/0x80
[  132.613008]  [<ffffffff817d6c8a>] _raw_write_unlock_bh+0x1a/0x20
[  132.613010]  [<ffffffff817d6c9e>] _raw_spin_unlock_bh+0xe/0x10
[  132.613015]  [<ffffffff811388ac>] put_css_set+0x5c/0x60
[  132.613016]  [<ffffffff8113dc7f>] cgroup_free+0x7f/0xa0
[  132.613017]  [<ffffffff810a3912>] __put_task_struct+0x42/0x140
[  132.613018]  [<ffffffff810e776a>] dl_task_timer+0xca/0x250
[  132.613027]  [<ffffffff810e76a0>] ? push_dl_task.part.32+0x170/0x170
[  132.613030]  [<ffffffff8111371e>] __hrtimer_run_queues+0xee/0x270
[  132.613031]  [<ffffffff81113ec8>] hrtimer_interrupt+0xa8/0x190
[  132.613034]  [<ffffffff81051a58>] local_apic_timer_interrupt+0x38/0x60
[  132.613035]  [<ffffffff817d9b0d>] smp_apic_timer_interrupt+0x3d/0x50
[  132.613037]  [<ffffffff817d7c5c>] apic_timer_interrupt+0x8c/0xa0
[  132.613038]  <EOI>  [<ffffffff81063466>] ? native_safe_halt+0x6/0x10
[  132.613043]  [<ffffffff81037a4e>] default_idle+0x1e/0xd0
[  132.613044]  [<ffffffff810381cf>] arch_cpu_idle+0xf/0x20
[  132.613046]  [<ffffffff810e8fda>] default_idle_call+0x2a/0x40
[  132.613047]  [<ffffffff810e92d7>] cpu_startup_entry+0x2e7/0x340
[  132.613048]  [<ffffffff81050235>] start_secondary+0x155/0x190
[  132.613049] ---[ end trace f91934d162ce9977 ]---

The warning is triggered by spin_(lock|unlock)_bh(&css_set_lock) being
used in hard interrupt context: dl_task_timer() runs as an hrtimer
handler and can drop the last reference to a task, so put_css_set()
ends up calling spin_unlock_bh() with hard IRQs disabled, which trips
the check in __local_bh_enable_ip(). Convert the spin_lock_bh() calls
to spin_lock_irq()/spin_lock_irqsave() to avoid this problem - and the
other problems of sharing a spinlock with an interrupt.
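
For reference, a minimal sketch (not part of the patch; example_lock and
the two functions are stand-ins for illustration only) of the locking
rule being applied:

#include <linux/spinlock.h>

/*
 * spin_lock_bh() only disables softirqs, so a hard IRQ (such as the
 * hrtimer that runs dl_task_timer() above) can still interrupt the
 * lock holder; worse, spin_unlock_bh() ends up in
 * __local_bh_enable_ip(), which warns when hard IRQs are disabled.
 */
static DEFINE_SPINLOCK(example_lock);

/* Safe from any context, including hard IRQ: save and disable IRQs. */
static void may_run_in_irq_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data also reachable from interrupt handlers ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Process context with IRQs known to be enabled: plain _irq suffices. */
static void runs_in_process_context_only(void)
{
	spin_lock_irq(&example_lock);
	/* ... */
	spin_unlock_irq(&example_lock);
}

This is the pattern the patch follows: put_css_set() uses the
irqsave/irqrestore variants because it can be reached from IRQ context,
while the known process-context call sites use the cheaper _irq variants.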

Change-Id: I2064d3c21863e50ee1a70e57f7915d04f2ba0407
Cc: Tejun Heo <tj@kernel.org>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: cgroups@vger.kernel.org
Cc: stable@vger.kernel.org # 4.5+
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com>
Signed-off-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Git-commit: 82d6489d0fed2ec8a8c48c19e8d8a04ac8e5bb26
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
[runminw@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Runmin Wang <runminw@codeaurora.org>

--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -781,6 +781,8 @@ static void put_css_set_locked(struct css_set *cset)
 
 static void put_css_set(struct css_set *cset)
 {
+	unsigned long flags;
+
 	/*
 	 * Ensure that the refcount doesn't hit zero while any readers
 	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -789,9 +791,9 @@ static void put_css_set(struct css_set *cset)
 	if (atomic_add_unless(&cset->refcount, -1, 1))
 		return;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irqsave(&css_set_lock, flags);
 	put_css_set_locked(cset);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irqrestore(&css_set_lock, flags);
 }
 
 /*
@@ -1014,11 +1016,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	cset = find_existing_css_set(old_cset, cgrp, template);
 	if (cset)
 		get_css_set(cset);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	if (cset)
 		return cset;
@@ -1046,7 +1048,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	 * find_existing_css_set() */
 	memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	/* Add reference counts and links from the new css_set. */
 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
 		struct cgroup *c = link->cgrp;
@@ -1072,7 +1074,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 		css_get(css);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return cset;
 }
@@ -1136,7 +1138,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
 	 * Release all the links from cset_links to this hierarchy's
 	 * root cgroup
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
 		list_del(&link->cset_link);
@@ -1144,7 +1146,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
 		kfree(link);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	if (!list_empty(&root->root_list)) {
 		list_del(&root->root_list);
@@ -1548,11 +1550,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
 		ss->root = dst_root;
 		css->cgroup = dcgrp;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		hash_for_each(css_set_table, i, cset, hlist)
 			list_move_tail(&cset->e_cset_node[ss->id],
 				       &dcgrp->e_csets[ss->id]);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		src_root->subsys_mask &= ~(1 << ssid);
 		scgrp->subtree_control &= ~(1 << ssid);
@@ -1829,7 +1831,7 @@ static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	if (use_task_css_set_links)
 		goto out_unlock;
@@ -1854,8 +1856,12 @@ static void cgroup_enable_task_cg_lists(void)
 		 * entry won't be deleted though the process has exited.
 		 * Do it while holding siglock so that we don't end up
 		 * racing against cgroup_exit().
+		 *
+		 * Interrupts were already disabled while acquiring
+		 * the css_set_lock, so we do not need to disable it
+		 * again when acquiring the sighand->siglock here.
 		 */
-		spin_lock_irq(&p->sighand->siglock);
+		spin_lock(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING)) {
 			struct css_set *cset = task_css_set(p);
 
@@ -1864,11 +1870,11 @@ static void cgroup_enable_task_cg_lists(void)
 			list_add_tail(&p->cg_list, &cset->tasks);
 			get_css_set(cset);
 		}
-		spin_unlock_irq(&p->sighand->siglock);
+		spin_unlock(&p->sighand->siglock);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 out_unlock:
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -1973,13 +1979,13 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
 	 * Link the root cgroup in this hierarchy into all the css_set
 	 * objects.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	hash_for_each(css_set_table, i, cset, hlist) {
 		link_css_set(&tmp_links, cset, root_cgrp);
 		if (css_set_populated(cset))
 			cgroup_update_populated(root_cgrp, true);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	BUG_ON(!list_empty(&root_cgrp->self.children));
 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2212,7 +2218,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 	char *path = NULL;
 
 	mutex_lock(&cgroup_mutex);
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2225,7 +2231,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 		path = buf;
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	mutex_unlock(&cgroup_mutex);
 	return path;
 }
@@ -2400,7 +2406,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
 	 * the new cgroup. There are no failure cases after here, so this
 	 * is the commit point.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
 			struct css_set *from_cset = task_css_set(task);
@@ -2411,7 +2417,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
 			put_css_set_locked(from_cset);
 		}
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/*
 	 * Migration is committed, all target tasks are now on dst_csets.
@@ -2440,13 +2446,13 @@ out_cancel_attach:
 		}
 	}
 out_release_tset:
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_splice_init(&tset->dst_csets, &tset->src_csets);
 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
 		list_del_init(&cset->mg_node);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	return ret;
 }
 
@@ -2463,14 +2469,14 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
 		cset->mg_src_cgrp = NULL;
 		cset->mg_dst_cset = NULL;
 		list_del_init(&cset->mg_preload_node);
 		put_css_set_locked(cset);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -2620,7 +2626,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
 	 * already PF_EXITING could be freed from underneath us unless we
 	 * take an rcu_read_lock.
 	 */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	task = leader;
 	do {
@@ -2629,7 +2635,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
 			break;
 	} while_each_thread(leader, task);
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return cgroup_taskset_migrate(&tset, cgrp);
 }
@@ -2650,7 +2656,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
 	int ret;
 
 	/* look up all src csets */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	task = leader;
 	do {
@@ -2660,7 +2666,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
 			break;
 	} while_each_thread(leader, task);
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* prepare dst csets and commit */
 	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
@@ -2748,9 +2754,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
 		struct cgroup *cgrp;
 		struct inode *inode;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		while (!cgroup_is_descendant(dst_cgrp, cgrp))
 			cgrp = cgroup_parent(cgrp);
@@ -2851,9 +2857,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 		if (root == &cgrp_dfl_root)
 			continue;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		from_cgrp = task_cgroup_from_root(from, root);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 
 		retval = cgroup_attach_task(from_cgrp, tsk, false);
 		if (retval)
@@ -2978,7 +2984,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	percpu_down_write(&cgroup_threadgroup_rwsem);
 
 	/* look up all csses currently attached to @cgrp's subtree */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
 		struct cgrp_cset_link *link;
 
@@ -2990,14 +2996,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 			cgroup_migrate_add_src(link->cset, cgrp,
 					       &preloaded_csets);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* NULL dst indicates self on default hierarchy */
 	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
 	if (ret)
 		goto out_finish;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
 		struct task_struct *task, *ntask;
 
@@ -3009,7 +3015,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
 			cgroup_taskset_add(task, &tset);
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	ret = cgroup_taskset_migrate(&tset, cgrp);
 out_finish:
@@ -3692,10 +3698,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
 	int count = 0;
 	struct cgrp_cset_link *link;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 		count += atomic_read(&link->cset->refcount);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return count;
 }
@@ -4033,7 +4039,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
 	memset(it, 0, sizeof(*it));
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	it->ss = css->ss;
 
@@ -4046,7 +4052,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
 	css_task_iter_advance_css_set(it);
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -4064,7 +4070,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 		it->cur_task = NULL;
 	}
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	if (it->task_pos) {
 		it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4073,7 +4079,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 		css_task_iter_advance(it);
 	}
 
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	return it->cur_task;
 }
@@ -4087,10 +4093,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 void css_task_iter_end(struct css_task_iter *it)
 {
 	if (it->cur_cset) {
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		list_del(&it->iters_node);
 		put_css_set_locked(it->cur_cset);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 	}
 
 	if (it->cur_task)
@@ -4119,10 +4125,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 	mutex_lock(&cgroup_mutex);
 
 	/* all tasks in @from are being moved, all csets are source */
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &from->cset_links, cset_link)
 		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
 	if (ret)
@@ -5226,10 +5232,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 */
 	cgrp->self.flags &= ~CSS_ONLINE;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 		link->cset->dead = true;
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 
 	/* initiate massacre of all css's */
 	for_each_css(css, ssid, cgrp)
@@ -5488,7 +5494,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 		goto out;
 
 	mutex_lock(&cgroup_mutex);
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 
 	for_each_root(root) {
 		struct cgroup_subsys *ss;
@@ -5540,7 +5546,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 
 	retval = 0;
 out_unlock:
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	mutex_unlock(&cgroup_mutex);
 	kfree(buf);
 out:
@@ -5701,13 +5707,13 @@ void cgroup_post_fork(struct task_struct *child,
 	if (use_task_css_set_links) {
 		struct css_set *cset;
 
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		cset = task_css_set(current);
 		if (list_empty(&child->cg_list)) {
 			get_css_set(cset);
 			css_set_move_task(child, NULL, cset, false);
 		}
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 	}
 
 	/*
@@ -5751,9 +5757,9 @@ void cgroup_exit(struct task_struct *tsk)
 	cset = task_css_set(tsk);
 
 	if (!list_empty(&tsk->cg_list)) {
-		spin_lock_bh(&css_set_lock);
+		spin_lock_irq(&css_set_lock);
 		css_set_move_task(tsk, cset, NULL, false);
-		spin_unlock_bh(&css_set_lock);
+		spin_unlock_irq(&css_set_lock);
 	} else {
 		get_css_set(cset);
 	}
@@ -5819,7 +5825,9 @@ static void cgroup_release_agent(struct work_struct *work)
 	if (!pathbuf || !agentbuf)
 		goto out;
 
+	spin_lock_irq(&css_set_lock);
 	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
+	spin_unlock_irq(&css_set_lock);
 	if (!path)
 		goto out;
 
@@ -5966,7 +5974,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 	if (!name_buf)
 		return -ENOMEM;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	rcu_read_lock();
 	cset = rcu_dereference(current->cgroups);
 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -5977,7 +5985,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 			   c->root->hierarchy_id, name_buf);
 	}
 	rcu_read_unlock();
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	kfree(name_buf);
 	return 0;
 }
@@ -5988,7 +5996,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
 	struct cgroup_subsys_state *css = seq_css(seq);
 	struct cgrp_cset_link *link;
 
-	spin_lock_bh(&css_set_lock);
+	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
 		struct css_set *cset = link->cset;
 		struct task_struct *task;
@@ -6011,7 +6019,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
 	overflow:
 		seq_puts(seq, " ...\n");
 	}
-	spin_unlock_bh(&css_set_lock);
+	spin_unlock_irq(&css_set_lock);
 	return 0;
 }