 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/rwsem.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
  * cgroup_mutex is the master lock.  Any modification to cgroup or its
  * hierarchy must be performed while holding it.
  *
- * css_set_rwsem protects task->cgroups pointer, the list of css_set
+ * css_set_lock protects task->cgroups pointer, the list of css_set
  * objects, and the chain of tasks off each css_set.
  *
  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
  * cgroup.h can use them for lockdep annotations.
  */
 #ifdef CONFIG_PROVE_RCU
 DEFINE_MUTEX(cgroup_mutex);
-DECLARE_RWSEM(css_set_rwsem);
+DEFINE_SPINLOCK(css_set_lock);
 EXPORT_SYMBOL_GPL(cgroup_mutex);
-EXPORT_SYMBOL_GPL(css_set_rwsem);
+EXPORT_SYMBOL_GPL(css_set_lock);
 #else
 static DEFINE_MUTEX(cgroup_mutex);
-static DECLARE_RWSEM(css_set_rwsem);
+static DEFINE_SPINLOCK(css_set_lock);
 #endif
 
 /*
  */
 static bool css_set_populated(struct css_set *cset)
 {
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
 }
  */
 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
 {
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        do {
                bool trigger;
 {
        struct cgrp_cset_link *link;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
                cgroup_update_populated(link->cgrp, populated);
                              struct css_set *from_cset, struct css_set *to_cset,
                              bool use_mg_tasks)
 {
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        if (from_cset) {
                struct css_task_iter *it, *pos;
        struct cgroup_subsys *ss;
        int ssid;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        if (!atomic_dec_and_test(&cset->refcount))
                return;
        if (atomic_add_unless(&cset->refcount, -1, 1))
                return;
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        put_css_set_locked(cset);
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 /*
 
        /* First see if we already have a cgroup group that matches
         * the desired set */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        cset = find_existing_css_set(old_cset, cgrp, template);
        if (cset)
                get_css_set(cset);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        if (cset)
                return cset;
         * find_existing_css_set() */
        memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
                list_add_tail(&cset->e_cset_node[ssid],
                              &cset->subsys[ssid]->cgroup->e_csets[ssid]);
 
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        return cset;
 }
         * Release all the links from cset_links to this hierarchy's
         * root cgroup
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
                kfree(link);
        }
-       up_write(&css_set_rwsem);
+
+       spin_unlock_bh(&css_set_lock);
 
        if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
        struct cgroup *res = NULL;
 
        lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        if (cset == &init_css_set) {
                res = &root->cgrp;
 
 /*
  * Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex and css_set_rwsem held.
+ * called with cgroup_mutex and css_set_lock held.
  */
 static struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                            struct cgroup_root *root)
                ss->root = dst_root;
                css->cgroup = dcgrp;
 
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                hash_for_each(css_set_table, i, cset, hlist)
                        list_move_tail(&cset->e_cset_node[ss->id],
                                       &dcgrp->e_csets[ss->id]);
-               up_write(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
 
                src_root->subsys_mask &= ~(1 << ssid);
                scgrp->subtree_control &= ~(1 << ssid);
 {
        struct task_struct *p, *g;
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        if (use_task_css_set_links)
                goto out_unlock;
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
 out_unlock:
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
                goto out;
 
        /*
-        * We're accessing css_set_count without locking css_set_rwsem here,
+        * We're accessing css_set_count without locking css_set_lock here,
         * but that's OK - it can only be increased by someone holding
         * cgroup_lock, and that's us. The worst that can happen is that we
         * have some link structures left over
         * Link the root cgroup in this hierarchy into all the css_set
         * objects.
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        hash_for_each(css_set_table, i, cset, hlist) {
                link_css_set(&tmp_links, cset, root_cgrp);
                if (css_set_populated(cset))
                        cgroup_update_populated(root_cgrp, true);
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        BUG_ON(!list_empty(&root_cgrp->self.children));
        BUG_ON(atomic_read(&root->nr_cgrps) != 1);
        char *path = NULL;
 
        mutex_lock(&cgroup_mutex);
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
                        path = buf;
        }
 
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        return path;
 }
 {
        struct css_set *cset;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        /* @task either already exited or can't exit until the end */
        if (task->flags & PF_EXITING)
         * the new cgroup.  There are no failure cases after here, so this
         * is the commit point.
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(cset, &tset->src_csets, mg_node) {
                list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
                        struct css_set *from_cset = task_css_set(task);
                        put_css_set_locked(from_cset);
                }
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        /*
         * Migration is committed, all target tasks are now on dst_csets.
                        css->ss->cancel_attach(css, tset);
        }
 out_release_tset:
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_splice_init(&tset->dst_csets, &tset->src_csets);
        list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
                list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
                list_del_init(&cset->mg_node);
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        return ret;
 }
 
 
        lockdep_assert_held(&cgroup_mutex);
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
                cset->mg_src_cgrp = NULL;
                cset->mg_dst_cset = NULL;
                list_del_init(&cset->mg_preload_node);
                put_css_set_locked(cset);
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 /**
        struct cgroup *src_cgrp;
 
        lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
 
         * already PF_EXITING could be freed from underneath us unless we
         * take an rcu_read_lock.
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        return cgroup_taskset_migrate(&tset, cgrp);
 }
        int ret;
 
        /* look up all src csets */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        /* prepare dst csets and commit */
        ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
                struct cgroup *cgrp;
                struct inode *inode;
 
-               down_read(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-               up_read(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
 
                while (!cgroup_is_descendant(dst_cgrp, cgrp))
                        cgrp = cgroup_parent(cgrp);
                if (root == &cgrp_dfl_root)
                        continue;
 
-               down_read(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
-               up_read(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
 
                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
        percpu_down_write(&cgroup_threadgroup_rwsem);
 
        /* look up all csses currently attached to @cgrp's subtree */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
                struct cgrp_cset_link *link;
 
                        cgroup_migrate_add_src(link->cset, cgrp,
                                               &preloaded_csets);
        }
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        /* NULL dst indicates self on default hierarchy */
        ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
        if (ret)
                goto out_finish;
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
                struct task_struct *task, *ntask;
 
                list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
                        cgroup_taskset_add(task, &tset);
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        ret = cgroup_taskset_migrate(&tset, cgrp);
 out_finish:
        int count = 0;
        struct cgrp_cset_link *link;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += atomic_read(&link->cset->refcount);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        return count;
 }
 
        struct cgrp_cset_link *link;
        struct css_set *cset;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        /* Advance to the next non-empty css_set */
        do {
 {
        struct list_head *l = it->task_pos;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
        WARN_ON_ONCE(!l);
 
        /*
 
        memset(it, 0, sizeof(*it));
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        it->ss = css->ss;
 
 
        css_task_iter_advance_css_set(it);
 
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 /**
        if (it->cur_task)
                put_task_struct(it->cur_task);
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        it->cur_task = list_entry(it->task_pos, struct task_struct, cg_list);
        get_task_struct(it->cur_task);
 
        css_task_iter_advance(it);
 
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        return it->cur_task;
 }
 void css_task_iter_end(struct css_task_iter *it)
 {
        if (it->cur_cset) {
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                list_del(&it->iters_node);
                put_css_set_locked(it->cur_cset);
-               up_write(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
        }
 
        if (it->cur_task)
        mutex_lock(&cgroup_mutex);
 
        /* all tasks in @from are being moved, all csets are source */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
        if (ret)
                goto out;
 
        mutex_lock(&cgroup_mutex);
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        for_each_root(root) {
                struct cgroup_subsys *ss;
 
        retval = 0;
 out_unlock:
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        kfree(buf);
 out:
         * @child during its iteration.
         *
         * If we won the race, @child is associated with %current's
-        * css_set.  Grabbing css_set_rwsem guarantees both that the
+        * css_set.  Grabbing css_set_lock guarantees both that the
         * association is stable, and, on completion of the parent's
         * migration, @child is visible in the source of migration or
         * already in the destination cgroup.  This guarantee is necessary
        if (use_task_css_set_links) {
                struct css_set *cset;
 
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                cset = task_css_set(current);
                if (list_empty(&child->cg_list)) {
                        get_css_set(cset);
                        css_set_move_task(child, NULL, cset, false);
                }
-               up_write(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
        }
 
        /*
        cset = task_css_set(tsk);
 
        if (!list_empty(&tsk->cg_list)) {
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
-               up_write(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
                put_cset = true;
        }
 
        if (!name_buf)
                return -ENOMEM;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        kfree(name_buf);
        return 0;
 }
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
        overflow:
                seq_puts(seq, "  ...\n");
        }
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        return 0;
 }
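
For reference, a minimal sketch of the resulting usage pattern (illustrative only,
not part of the diff above): because a plain spinlock does not distinguish readers
from writers, the former down_read() and down_write() call sites all take the same
spin_lock_bh()/spin_unlock_bh() pair. Here "task" stands for any struct task_struct
pointer the caller already holds a reference to:

	/* sketch: read task->cgroups under the new lock */
	struct css_set *cset;

	spin_lock_bh(&css_set_lock);
	cset = task_css_set(task);	/* stable while css_set_lock is held */
	/* ... inspect cset ... */
	spin_unlock_bh(&css_set_lock);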