pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
                        }
                }
-       }
- 
-       kfree(opts.release_agent);
-       kfree(opts.name);
-       return dget(sb->s_root);
- 
-  rm_base_files:
-       free_cgrp_cset_links(&tmp_links);
-       cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
-       revert_creds(cred);
-  unlock_drop:
-       cgroup_exit_root_id(root);
-       mutex_unlock(&cgroup_root_mutex);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&inode->i_mutex);
-  drop_new_super:
-       deactivate_locked_super(sb);
-  out_err:
-       kfree(opts.release_agent);
-       kfree(opts.name);
-       return ERR_PTR(ret);
- }
  
- static void cgroup_kill_sb(struct super_block *sb)
- {
-       struct cgroupfs_root *root = sb->s_fs_info;
-       struct cgroup *cgrp = &root->top_cgroup;
-       struct cgrp_cset_link *link, *tmp_link;
-       int ret;
+               /*
+                * A root's lifetime is governed by its root cgroup.  Zero
+                * ref indicates that the root is being destroyed.  Wait for
+                * destruction to complete so that the subsystems are free.
+                * We can use wait_queue for the wait but this path is
+                * super cold.  Let's just sleep for a bit and retry.
+                */
+               if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
+                       mutex_unlock(&cgroup_mutex);
+                       mutex_unlock(&cgroup_tree_mutex);
+                       kfree(opts.release_agent);
+                       kfree(opts.name);
+                       msleep(10);
+                       goto retry;
+               }
  
-       BUG_ON(!root);
- 
-       BUG_ON(root->number_of_cgroups != 1);
-       BUG_ON(!list_empty(&cgrp->children));
- 
-       mutex_lock(&cgrp->dentry->d_inode->i_mutex);
-       mutex_lock(&cgroup_mutex);
-       mutex_lock(&cgroup_root_mutex);
- 
-       /* Rebind all subsystems back to the default hierarchy */
-       if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
-               ret = rebind_subsystems(root, 0, root->subsys_mask);
-               /* Shouldn't be able to fail ... */
-               BUG_ON(ret);
-       }
+               ret = 0;
+               goto out_unlock;
+       }
  
        /*
-        * Release all the links from cset_links to this hierarchy's
-        * root cgroup
+        * No such thing, create a new one.  name= matching without subsys
+        * specification is allowed for already existing hierarchies but we
+                * can't create a new one without a subsys specification.
         */
-       write_lock(&css_set_lock);
- 
-       list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
-               list_del(&link->cset_link);
-               list_del(&link->cgrp_link);
-               kfree(link);
+       if (!opts.subsys_mask && !opts.none) {
+               ret = -EINVAL;
+               goto out_unlock;
        }
-       write_unlock(&css_set_lock);
  
-       if (!list_empty(&root->root_list)) {
-               list_del(&root->root_list);
-               cgroup_root_count--;
+       root = kzalloc(sizeof(*root), GFP_KERNEL);
+       if (!root) {
+               ret = -ENOMEM;
+               goto out_unlock;
        }
  
-       cgroup_exit_root_id(root);
+       init_cgroup_root(root, &opts);
+ 
+       ret = cgroup_setup_root(root, opts.subsys_mask);
+       if (ret)
+               cgroup_free_root(root);
  
-       mutex_unlock(&cgroup_root_mutex);
+ out_unlock:
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
  
-       simple_xattrs_free(&cgrp->xattrs);
+       kfree(opts.release_agent);
+       kfree(opts.name);
  
-       kill_litter_super(sb);
-       cgroup_free_root(root);
+       if (ret)
+               return ERR_PTR(ret);
+ 
 -      dentry = kernfs_mount(fs_type, flags, root->kf_root);
++      dentry = kernfs_mount(fs_type, flags, root->kf_root, NULL);
+       if (IS_ERR(dentry))
+               cgroup_put(&root->cgrp);
+       return dentry;
+ }
+ 
+ static void cgroup_kill_sb(struct super_block *sb)
+ {
+       struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
+       struct cgroup_root *root = cgroup_root_from_kf(kf_root);
+ 
+       cgroup_put(&root->cgrp);
+       kernfs_kill_sb(sb);
  }
  
  static struct file_system_type cgroup_fs_type = {
  
        init_css(css, ss, cgrp);
  
-       err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
+       err = cgroup_populate_dir(cgrp, 1 << ss->id);
        if (err)
 -              goto err_free;
 +              goto err_free_percpu_ref;
  
        err = online_css(css);
        if (err)
 -              goto err_free;
 +              goto err_clear_dir;
  
-       dget(cgrp->dentry);
+       cgroup_get(cgrp);
        css_get(css->parent);
  
+       cgrp->subsys_mask |= 1 << ss->id;
+ 
        if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
            parent->parent) {
                pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
  
        return 0;
  
 -err_free:
 +err_clear_dir:
-       cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
++      cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
 +err_free_percpu_ref:
        percpu_ref_cancel_init(&css->refcnt);
 +err_free_css:
        ss->css_free(css);
        return err;
  }
 
   */
  void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  {
-       /*
-        * protects memcg_name and makes sure that parallel ooms do not
-        * interleave
-        */
+       /* oom_info_lock ensures that parallel ooms do not interleave */
 -      static DEFINE_SPINLOCK(oom_info_lock);
 +      static DEFINE_MUTEX(oom_info_lock);
-       struct cgroup *task_cgrp;
-       struct cgroup *mem_cgrp;
-       static char memcg_name[PATH_MAX];
-       int ret;
        struct mem_cgroup *iter;
        unsigned int i;
  
        if (!p)
                return;
  
 -      spin_lock(&oom_info_lock);
 +      mutex_lock(&oom_info_lock);
        rcu_read_lock();
  
-       mem_cgrp = memcg->css.cgroup;
-       task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
- 
-       ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
-       if (ret < 0) {
-               /*
-                * Unfortunately, we are unable to convert to a useful name
-                * But we'll still print out the usage information
-                */
-               rcu_read_unlock();
-               goto done;
-       }
-       rcu_read_unlock();
- 
-       pr_info("Task in %s killed", memcg_name);
+       pr_info("Task in ");
+       pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+       pr_info(" killed as a result of limit of ");
+       pr_cont_cgroup_path(memcg->css.cgroup);
+       pr_info("\n");
  
-       rcu_read_lock();
-       ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
-       if (ret < 0) {
-               rcu_read_unlock();
-               goto done;
-       }
        rcu_read_unlock();
  
-       /*
-        * Continues from above, so we don't need an KERN_ level
-        */
-       pr_cont(" as a result of limit of %s\n", memcg_name);
- done:
- 
        pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,