}
}
if (ss->can_attach_task) {
- retval = ss->can_attach_task(cgrp, tsk);
+ retval = ss->can_attach_task(cgrp, oldcgrp, tsk);
if (retval) {
failed_ss = ss;
goto out;
if (ss->pre_attach)
ss->pre_attach(cgrp);
if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
+ ss->attach_task(cgrp, oldcgrp, tsk);
if (ss->attach)
ss->attach(ss, cgrp, oldcgrp, tsk);
}
* remaining subsystems.
*/
break;
+
+ if (ss->cancel_attach_task)
+ ss->cancel_attach_task(cgrp, oldcgrp, tsk);
if (ss->cancel_attach)
ss->cancel_attach(ss, cgrp, tsk);
}
return 0;
}
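
For reference, the single-task attach path above assumes the cgroup_subsys task callbacks now receive the source cgroup as well as the destination. Judging only from the call sites in these hunks, the prototypes would look roughly like the sketch below (the old_cgrp argument is the new part; cancel_attach_task appears to return nothing, since its result is never checked here):

	int  (*can_attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
				struct task_struct *tsk);
	void (*attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
			    struct task_struct *tsk);
	void (*cancel_attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
				   struct task_struct *tsk);
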
+struct task_cgroup {
+ struct task_struct *tsk;
+ struct cgroup *oldcgrp;
+};
+
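
The snapshot array now stores these small structs by value rather than bare task pointers, pairing each thread with the cgroup it is migrating from so the new callbacks can see the source. A self-contained illustration of the flex_array usage this relies on (hypothetical helper, not part of the patch):

	/* Hypothetical example: store one task_cgroup entry by value. */
	static int example_snapshot_one(struct task_struct *tsk,
					struct cgroup *oldcgrp)
	{
		struct flex_array *fa;
		struct task_cgroup ent, *p;
		int ret;

		fa = flex_array_alloc(sizeof(struct task_cgroup), 1, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		ent.tsk = tsk;
		ent.oldcgrp = oldcgrp;
		/* flex_array_put() copies the whole struct into the array... */
		ret = flex_array_put(fa, 0, &ent, GFP_KERNEL);
		if (!ret) {
			/* ...and flex_array_get() returns a pointer to that copy. */
			p = flex_array_get(fa, 0);
			WARN_ON(p->tsk != tsk || p->oldcgrp != oldcgrp);
		}
		flex_array_free(fa);
		return ret;
	}
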
/**
* cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
* @cgrp: the cgroup to attach to
{
int retval, i, group_size;
struct cgroup_subsys *ss, *failed_ss = NULL;
- bool cancel_failed_ss = false;
+ struct task_struct *failed_task = NULL;
/* guaranteed to be initialized later, but the compiler needs this */
struct cgroup *oldcgrp = NULL;
struct css_set *oldcg;
/* threadgroup list cursor and array */
struct task_struct *tsk;
struct flex_array *group;
+ struct task_cgroup *tc;
/*
* we need to make sure we have css_sets for all the tasks we're
* going to move -before- we actually start moving them, so that in
*/
group_size = get_nr_threads(leader);
/* flex_array supports very large thread-groups better than kmalloc. */
- group = flex_array_alloc(sizeof(struct task_struct *), group_size,
+ group = flex_array_alloc(sizeof(struct task_cgroup), group_size,
GFP_KERNEL);
if (!group)
return -ENOMEM;
goto out_free_group_list;
/* prevent changes to the threadgroup list while we take a snapshot. */
- rcu_read_lock();
+ read_lock(&tasklist_lock);
if (!thread_group_leader(leader)) {
/*
* a race with de_thread from another thread's exec() may strip
* throw this task away and try again (from cgroup_procs_write);
* this is "double-double-toil-and-trouble-check locking".
*/
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
retval = -EAGAIN;
goto out_free_group_list;
}
tsk = leader;
i = 0;
do {
+ struct task_cgroup tsk_cgrp;
+
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
get_task_struct(tsk);
+ tsk_cgrp.tsk = tsk;
+ tsk_cgrp.oldcgrp = task_cgroup_from_root(tsk, root);
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
*/
- retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+ retval = flex_array_put(group, i, &tsk_cgrp, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
} while_each_thread(leader, tsk);
/* remember the number of threads in the array for later. */
group_size = i;
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
/*
* step 1: check that we can legitimately attach to the cgroup.
if (ss->can_attach_task) {
/* run on each task in the threadgroup. */
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- retval = ss->can_attach_task(cgrp, tsk);
+ tc = flex_array_get(group, i);
+ retval = ss->can_attach_task(cgrp,
+ tc->oldcgrp,
+ tc->tsk);
if (retval) {
failed_ss = ss;
- cancel_failed_ss = true;
+ failed_task = tc->tsk;
goto out_cancel_attach;
}
}
*/
INIT_LIST_HEAD(&newcg_list);
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
+ tc = flex_array_get(group, i);
+ tsk = tc->tsk;
/* nothing to do if this task is already in the cgroup */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
+ if (cgrp == tc->oldcgrp)
continue;
/* get old css_set pointer */
task_lock(tsk);
ss->pre_attach(cgrp);
}
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
+ tc = flex_array_get(group, i);
+ tsk = tc->tsk;
+ oldcgrp = tc->oldcgrp;
/* leave current thread as it is if it's already there */
- oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
continue;
- /* attach each task to each subsystem */
- for_each_subsys(root, ss) {
- if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
- }
/* if the thread is PF_EXITING, it can just get skipped. */
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
- BUG_ON(retval != 0 && retval != -ESRCH);
+ if (retval == 0) {
+ /* attach each task to each subsystem */
+ for_each_subsys(root, ss) {
+ if (ss->attach_task)
+ ss->attach_task(cgrp, oldcgrp, tsk);
+ }
+ } else if (retval == -ESRCH) {
+ if (ss->cancel_attach_task)
+ ss->cancel_attach_task(cgrp, oldcgrp, tsk);
+ } else {
+ BUG();
+ }
}
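
This per-task branching is where cancel_attach_task() earns its keep: a subsystem that charges something per task in can_attach_task() needs a per-task undo when migration of that particular thread is abandoned, e.g. when cgroup_task_migrate() returns -ESRCH for a PF_EXITING thread. A hedged sketch of how a subsystem might pair the two callbacks (made-up subsystem, only meant to show the shape):

	static atomic_t example_task_count;	/* illustrative state only */

	static int example_can_attach_task(struct cgroup *cgrp,
					   struct cgroup *old_cgrp,
					   struct task_struct *tsk)
	{
		/* speculatively charge the destination for this task */
		atomic_inc(&example_task_count);
		return 0;
	}

	static void example_cancel_attach_task(struct cgroup *cgrp,
					       struct cgroup *old_cgrp,
					       struct task_struct *tsk)
	{
		/* this one task will not move after all: drop the charge */
		atomic_dec(&example_task_count);
	}
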
/* nothing is sensitive to fork() after this point. */
/* same deal as in cgroup_attach_task */
if (retval) {
for_each_subsys(root, ss) {
+ if (ss->cancel_attach_task && (ss != failed_ss ||
+ failed_task)) {
+ for (i = 0; i < group_size; i++) {
+ tc = flex_array_get(group, i);
+ if (tc->tsk == failed_task)
+ break;
+ ss->cancel_attach_task(cgrp,
+ tc->oldcgrp, tc->tsk);
+ }
+ }
+
if (ss == failed_ss) {
- if (cancel_failed_ss && ss->cancel_attach)
+ if (failed_task && ss->cancel_attach)
ss->cancel_attach(ss, cgrp, leader);
break;
}
}
/* clean up the array of referenced threads in the group. */
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- put_task_struct(tsk);
+ tc = flex_array_get(group, i);
+ put_task_struct(tc->tsk);
}
out_free_group_list:
flex_array_free(group);
* tasklist. No need to take any locks since no-one can
* be operating on this task.
*/
-void cgroup_fork_callbacks(struct task_struct *child)
+int cgroup_fork_callbacks(struct task_struct *child,
+ struct cgroup_subsys **failed_ss)
{
+ int err;
+
if (need_forkexit_callback) {
int i;
/*
*/
for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
- if (ss->fork)
- ss->fork(ss, child);
+ if (ss->fork) {
+ err = ss->fork(ss, child);
+ if (err) {
+ *failed_ss = ss;
+ return err;
+ }
+ }
}
}
+
+ return 0;
}
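
Since ->fork() can now veto a new task, the caller is expected to capture which subsystem failed and unwind only the subsystems that already ran. A hedged sketch of the intended call pattern on the fork path (the surrounding copy_process() plumbing is assumed and is not shown in these hunks):

	struct cgroup_subsys *failed_ss = NULL;
	int err;

	err = cgroup_fork_callbacks(child, &failed_ss);
	if (err) {
		/*
		 * cgroup_exit() stops at failed_ss (see the change to
		 * cgroup_exit() further down), so only subsystems whose
		 * ->fork() succeeded get an ->exit() rollback.
		 */
		cgroup_exit(child, 1, failed_ss);
		return err;
	}
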
/**
* which wards off any cgroup_attach_task() attempts, or task is a failed
* fork, never visible to cgroup_attach_task.
*/
-void cgroup_exit(struct task_struct *tsk, int run_callbacks)
+void cgroup_exit(struct task_struct *tsk, int run_callbacks,
+ struct cgroup_subsys *failed_ss)
{
struct css_set *cg;
int i;
*/
for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
+
+ if (ss == failed_ss)
+ break;
+
if (ss->exit) {
struct cgroup *old_cgrp =
rcu_dereference_raw(cg->subsys[i])->cgroup;
rcu_assign_pointer(id->css, NULL);
rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
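
The id_lock hunks above and below convert the css id lock from a spinlock to an rwlock: pure lookups such as the idr_get_next() scan in css_get_next() take it for reading and may run concurrently, while id allocation and removal still take it exclusively. A minimal sketch of the pattern with made-up names (rwlock_init()/idr_init() assumed done at setup, as in the init hunk further down):

	static rwlock_t example_id_lock;
	static struct idr example_idr;

	static void *example_scan(int *id)
	{
		void *entry;

		read_lock(&example_id_lock);	/* readers no longer serialize */
		entry = idr_get_next(&example_idr, id);
		read_unlock(&example_id_lock);
		return entry;
	}

	static void example_remove(int id)
	{
		write_lock(&example_id_lock);	/* writers still exclude all */
		idr_remove(&example_idr, id);
		write_unlock(&example_id_lock);
	}
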
error = -ENOMEM;
goto err_out;
}
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
error = idr_get_new_above(&ss->idr, newid, 1, &myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
/* Returns error when there are no free spaces for new ID.*/
if (error) {
return newid;
remove_idr:
error = -ENOSPC;
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
err_out:
kfree(newid);
return ERR_PTR(error);
{
struct css_id *newid;
- spin_lock_init(&ss->id_lock);
+ rwlock_init(&ss->id_lock);
idr_init(&ss->idr);
newid = get_new_cssid(ss, 0);
* scan next entry from bitmap(tree), tmpid is updated after
* idr_get_next().
*/
- spin_lock(&ss->id_lock);
+ read_lock(&ss->id_lock);
tmp = idr_get_next(&ss->idr, &tmpid);
- spin_unlock(&ss->id_lock);
+ read_unlock(&ss->id_lock);
if (!tmp)
break;