--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
 */
 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp);
 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
 void cgroup_rstat_flush_release(void);
 
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
 
 __diag_pop();
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
        __releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
        int cpu;
                }
                raw_spin_unlock_irqrestore(cpu_lock, flags);
 
-               /* if @may_sleep, play nice and yield if necessary */
-               if (may_sleep && (need_resched() ||
-                                 spin_needbreak(&cgroup_rstat_lock))) {
+               /* play nice and yield if necessary */
+               if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
                        spin_unlock_irq(&cgroup_rstat_lock);
                        if (!cond_resched())
                                cpu_relax();
        might_sleep();
 
        spin_lock_irq(&cgroup_rstat_lock);
-       cgroup_rstat_flush_locked(cgrp, true);
+       cgroup_rstat_flush_locked(cgrp);
        spin_unlock_irq(&cgroup_rstat_lock);
 }
 
-/**
- * cgroup_rstat_flush_atomic- atomic version of cgroup_rstat_flush()
- * @cgrp: target cgroup
- *
- * This function can be called from any context.
- */
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cgroup_rstat_lock, flags);
-       cgroup_rstat_flush_locked(cgrp, false);
-       spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
-}
-
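Aside: with the atomic entry point removed, a caller that used to flush from non-sleepable context has to move the flush into process context, since cgroup_rstat_flush() now unconditionally calls might_sleep() and can drop cgroup_rstat_lock to reschedule mid-walk. A minimal sketch of such a converted caller, assuming a hypothetical accessor some_subsys_stat() that is not part of this patch:

	/* Hypothetical caller, converted from cgroup_rstat_flush_atomic().
	 * Must run in process context: cgroup_rstat_flush() may now sleep,
	 * dropping cgroup_rstat_lock and rescheduling while it walks the
	 * per-CPU updated lists.
	 */
	static u64 read_flushed_stat(struct cgroup *cgrp)
	{
		cgroup_rstat_flush(cgrp);	/* may sleep */
		return some_subsys_stat(cgrp);	/* hypothetical accessor */
	}
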
 /**
  * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
  * @cgrp: target cgroup
 {
        might_sleep();
        spin_lock_irq(&cgroup_rstat_lock);
-       cgroup_rstat_flush_locked(cgrp, true);
+       cgroup_rstat_flush_locked(cgrp);
 }
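
Aside: the hold/release pair keeps cgroup_rstat_lock held after the flush, so a caller can sample several counters that stay mutually consistent; like cgroup_rstat_flush(), it may only be called from sleepable context. A sketch with hypothetical accessors:

	/* Hypothetical reader using the hold/release API: flush once, then
	 * sample two counters under cgroup_rstat_lock so they are read
	 * against the same flushed state.
	 */
	static void read_stat_pair(struct cgroup *cgrp, u64 *a, u64 *b)
	{
		cgroup_rstat_flush_hold(cgrp);	/* returns with lock held */
		*a = some_stat_a(cgrp);		/* hypothetical accessors */
		*b = some_stat_b(cgrp);
		cgroup_rstat_flush_release();	/* drops cgroup_rstat_lock */
	}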
 
 /**