diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f6b33c6962243ee54c955df76f77becf01a1f057..116a4164720a08f235c1fc53be47be4213c16226 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1181,7 +1181,13 @@ done:
 
 int current_cpuset_is_being_rebound(void)
 {
-       return task_cs(current) == cpuset_being_rebound;
+       int ret;
+
+       rcu_read_lock();
+       ret = task_cs(current) == cpuset_being_rebound;
+       rcu_read_unlock();
+
+       return ret;
 }
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
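For context on the hunk above: task_cs() resolves a task to its cpuset through the task's cgroup subsystem state, an RCU-managed pointer that may be freed a grace period after the task migrates to another cpuset. The comparison is therefore only safe inside an RCU read-side critical section, which is exactly what the change adds. A minimal sketch of the rule, assuming task_cs() as defined earlier in kernel/cpuset.c; the helper current_task_in_cs() is hypothetical, for illustration only:

#include <linux/rcupdate.h>

/* Hypothetical illustration: any use of task_cs() must sit inside an
 * RCU read section, because task_cs() chases current's RCU-protected
 * css pointers, which may be freed once a grace period elapses. */
static bool current_task_in_cs(struct cpuset *cs)
{
        bool ret;

        rcu_read_lock();                /* task_cs() dereferences RCU-protected pointers */
        ret = task_cs(current) == cs;   /* compare the result, don't dereference it */
        rcu_read_unlock();

        return ret;
}
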
@@ -1617,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * resources, wait for the previously scheduled operations before
         * proceeding, so that we don't end up repeatedly removing tasks
         * added after execution capability is restored.
+        *
+        * cpuset_hotplug_work calls back into cgroup core via
+        * cgroup_transfer_tasks(), and waiting for it from a cgroupfs
+        * operation like this one can lead to a deadlock through kernfs
+        * active_ref protection.  Let's break the protection.  Losing the
+        * protection is okay as we check whether @cs is online after
+        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * hierarchies.
         */
+       css_get(&cs->css);
+       kernfs_break_active_protection(of->kn);
        flush_work(&cpuset_hotplug_work);
 
        mutex_lock(&cpuset_mutex);
@@ -1645,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
        free_trial_cpuset(trialcs);
 out_unlock:
        mutex_unlock(&cpuset_mutex);
+       kernfs_unbreak_active_protection(of->kn);
+       css_put(&cs->css);
        return retval ?: nbytes;
 }
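
The two hunks above form one pattern: the css reference pins @cs so it cannot be freed while kernfs active protection, which normally keeps the open node and the cpuset behind it alive for the duration of the write, is broken across the blocking flush_work(). A condensed sketch of the resulting shape of cpuset_write_resmask(), with the mask-parsing body elided; this is an outline reconstructed from the hunks above, not the file's full code:

static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        struct cpuset *cs = css_cs(of_css(of));
        int retval = -ENODEV;

        /* Pin @cs, then drop active protection so cpuset_hotplug_work,
         * which calls back into cgroup core, cannot deadlock against
         * this writer's kernfs active_ref. */
        css_get(&cs->css);
        kernfs_break_active_protection(of->kn);
        flush_work(&cpuset_hotplug_work);

        mutex_lock(&cpuset_mutex);
        if (!is_cpuset_online(cs))      /* recheck: @cs may have gone offline */
                goto out_unlock;

        /* ... parse buf and apply the new cpus/mems mask here ... */
        retval = 0;
out_unlock:
        mutex_unlock(&cpuset_mutex);
        kernfs_unbreak_active_protection(of->kn);
        css_put(&cs->css);
        return retval ?: nbytes;
}

Note that the exit path mirrors the entry path in reverse: the active protection is restored before the css reference is dropped, matching the break/get ordering at the top of the function.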