root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_EXT_GROUP_SCHED
+               root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
+#endif /* CONFIG_EXT_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);
        if (!alloc_rt_sched_group(tg, parent))
                goto err;
 
+       scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
        alloc_uclamp_sched_group(tg, parent);
 
        return tg;
                put_prev_task(rq, tsk);
 
        sched_change_group(tsk, group);
+       scx_move_task(tsk);
 
        if (queued)
                enqueue_task(rq, tsk, queue_flags);
 {
        struct task_group *tg = css_tg(css);
        struct task_group *parent = css_tg(css->parent);
+       int ret;
+
+       ret = scx_tg_online(tg);
+       if (ret)
+               return ret;
 
        if (parent)
                sched_online_group(tg, parent);
        return 0;
 }
 
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+{
+       struct task_group *tg = css_tg(css);
+
+       scx_tg_offline(tg);
+}
+
 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
 {
        struct task_group *tg = css_tg(css);
        sched_unregister_group(tg);
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
+#ifdef CONFIG_RT_GROUP_SCHED
        struct task_struct *task;
        struct cgroup_subsys_state *css;
 
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
        }
-       return 0;
-}
 #endif
+       return scx_cgroup_can_attach(tset);
+}
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
 
        cgroup_taskset_for_each(task, css, tset)
                sched_move_task(task);
+
+       scx_cgroup_finish_attach();
+}
+
+static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+       scx_cgroup_cancel_attach(tset);
 }
 
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 #ifdef CONFIG_GROUP_SCHED_WEIGHT
 static unsigned long tg_weight(struct task_group *tg)
 {
+#ifdef CONFIG_FAIR_GROUP_SCHED
        return scale_load_down(tg->shares);
+#else
+       return sched_weight_from_cgroup(tg->scx_weight);
+#endif
 }
 
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                struct cftype *cftype, u64 shareval)
 {
+       int ret;
+
        if (shareval > scale_load_down(ULONG_MAX))
                shareval = MAX_SHARES;
-       return sched_group_set_shares(css_tg(css), scale_load(shareval));
+       ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
+       if (!ret)
+               scx_group_set_weight(css_tg(css),
+                                    sched_weight_to_cgroup(shareval));
+       return ret;
 }
 
 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
                                struct cftype *cft, s64 idle)
 {
-       return sched_group_set_idle(css_tg(css), idle);
+       int ret;
+
+       ret = sched_group_set_idle(css_tg(css), idle);
+       if (!ret)
+               scx_group_set_idle(css_tg(css), idle);
+       return ret;
 }
 #endif
 
                                struct cftype *cft, u64 cgrp_weight)
 {
        unsigned long weight;
+       int ret;
 
        if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
                return -ERANGE;
 
        weight = sched_weight_from_cgroup(cgrp_weight);
 
-       return sched_group_set_shares(css_tg(css), scale_load(weight));
+       ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+       if (!ret)
+               scx_group_set_weight(css_tg(css), cgrp_weight);
+       return ret;
 }
 
 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
                                     struct cftype *cft, s64 nice)
 {
        unsigned long weight;
-       int idx;
+       int idx, ret;
 
        if (nice < MIN_NICE || nice > MAX_NICE)
                return -ERANGE;
        idx = array_index_nospec(idx, 40);
        weight = sched_prio_to_weight[idx];
 
-       return sched_group_set_shares(css_tg(css), scale_load(weight));
+       ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+       if (!ret)
+               scx_group_set_weight(css_tg(css),
+                                    sched_weight_to_cgroup(weight));
+       return ret;
 }
 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
 
 struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_online     = cpu_cgroup_css_online,
+       .css_offline    = cpu_cgroup_css_offline,
        .css_released   = cpu_cgroup_css_released,
        .css_free       = cpu_cgroup_css_free,
        .css_extra_stat_show = cpu_extra_stat_show,
        .css_local_stat_show = cpu_local_stat_show,
-#ifdef CONFIG_RT_GROUP_SCHED
        .can_attach     = cpu_cgroup_can_attach,
-#endif
        .attach         = cpu_cgroup_attach,
+       .cancel_attach  = cpu_cgroup_cancel_attach,
        .legacy_cftypes = cpu_legacy_files,
        .dfl_cftypes    = cpu_files,
        .early_init     = true,
 
         */
        SCX_OPS_SWITCH_PARTIAL  = 1LLU << 3,
 
+       /*
+        * CPU cgroup support flags
+        */
+       SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
+
        SCX_OPS_ALL_FLAGS       = SCX_OPS_KEEP_BUILTIN_IDLE |
                                  SCX_OPS_ENQ_LAST |
                                  SCX_OPS_ENQ_EXITING |
-                                 SCX_OPS_SWITCH_PARTIAL,
+                                 SCX_OPS_SWITCH_PARTIAL |
+                                 SCX_OPS_HAS_CGROUP_WEIGHT,
 };
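
A BPF scheduler advertises that it honors cpu.weight by setting this flag in its
ops. A minimal sketch of the BPF-side declaration, using hypothetical scheduler
and callback names and assuming the usual struct_ops conventions from the
sched_ext BPF headers (scx/common.bpf.h):

    SEC(".struct_ops.link")
    struct sched_ext_ops hypo_cgrp_ops = {
            /* hypothetical cgroup callbacks; the other ops are omitted */
            .cgroup_init            = (void *)hypo_cgroup_init,
            .cgroup_set_weight      = (void *)hypo_cgroup_set_weight,
            /* claim cpu.weight support and suppress the missing-weight warning */
            .flags                  = SCX_OPS_HAS_CGROUP_WEIGHT,
            .name                   = "hypo_cgrp",
    };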
 
 /* argument container for ops.init_task() */
         * to the scheduler transition path.
         */
        bool                    fork;
+#ifdef CONFIG_EXT_GROUP_SCHED
+       /* the cgroup the task is joining */
+       struct cgroup           *cgroup;
+#endif
 };
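
For illustration, ops.init_task() can consume the new field directly; a sketch
with hypothetical names, assuming the BPF_STRUCT_OPS() helper from the sched_ext
BPF headers:

    s32 BPF_STRUCT_OPS(hypo_init_task, struct task_struct *p,
                       struct scx_init_task_args *args)
    {
            /*
             * @args->cgroup is the cgroup @p is joining (present only when
             * CONFIG_EXT_GROUP_SCHED is enabled). Log it on the fork path.
             */
            if (args->fork)
                    bpf_printk("task %d forking into cgroup %llu",
                               p->pid, args->cgroup->kn->id);
            return 0;
    }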
 
 /* argument container for ops.exit_task() */
        bool cancelled;
 };
 
+/* argument container for ops.cgroup_init() */
+struct scx_cgroup_init_args {
+       /* the weight of the cgroup [1..10000] */
+       u32                     weight;
+};
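
A matching ops.cgroup_init() might seed per-cgroup state with the initial
weight, and ops.cgroup_set_weight() keep it current. A sketch assuming a
hypothetical BPF hash map and the BPF_STRUCT_OPS*() helpers from the sched_ext
BPF headers; cgroup_init() may block, so it is declared sleepable:

    /* hypothetical map: cgroup ID -> current cpu.weight */
    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 16384);
            __type(key, u64);
            __type(value, u32);
    } hypo_cgrp_weights SEC(".maps");

    s32 BPF_STRUCT_OPS_SLEEPABLE(hypo_cgroup_init, struct cgroup *cgrp,
                                 struct scx_cgroup_init_args *args)
    {
            u64 cgid = cgrp->kn->id;
            u32 weight = args->weight;      /* [1..10000] */

            if (bpf_map_update_elem(&hypo_cgrp_weights, &cgid, &weight, BPF_ANY))
                    return -ENOMEM;
            return 0;
    }

    void BPF_STRUCT_OPS(hypo_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
    {
            u64 cgid = cgrp->kn->id;

            /* cpu.weight changed; update the stored value */
            bpf_map_update_elem(&hypo_cgrp_weights, &cgid, &weight, BPF_ANY);
    }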
+
 enum scx_cpu_preempt_reason {
        /* next task is being scheduled by &sched_class_rt */
        SCX_CPU_PREEMPT_RT,
         */
        void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
 
+#ifdef CONFIG_EXT_GROUP_SCHED
+       /**
+        * cgroup_init - Initialize a cgroup
+        * @cgrp: cgroup being initialized
+        * @args: init arguments, see the struct definition
+        *
+        * Either the BPF scheduler is being loaded or @cgrp is being created;
+        * initialize @cgrp for sched_ext. This operation may block.
+        *
+        * Return 0 for success, -errno for failure. An error return while
+        * loading will abort loading of the BPF scheduler. During cgroup
+        * creation, it will abort the specific cgroup creation.
+        */
+       s32 (*cgroup_init)(struct cgroup *cgrp,
+                          struct scx_cgroup_init_args *args);
+
+       /**
+        * cgroup_exit - Exit a cgroup
+        * @cgrp: cgroup being exited
+        *
+        * Either the BPF scheduler is being unloaded or @cgrp is being
+        * destroyed; exit @cgrp for sched_ext. This operation may block.
+        */
+       void (*cgroup_exit)(struct cgroup *cgrp);
+
+       /**
+        * cgroup_prep_move - Prepare a task to be moved to a different cgroup
+        * @p: task being moved
+        * @from: cgroup @p is being moved from
+        * @to: cgroup @p is being moved to
+        *
+        * Prepare @p for move from cgroup @from to @to. This operation may
+        * block and can be used for allocations.
+        *
+        * Return 0 for success, -errno for failure. An error return aborts the
+        * migration.
+        */
+       s32 (*cgroup_prep_move)(struct task_struct *p,
+                               struct cgroup *from, struct cgroup *to);
+
+       /**
+        * cgroup_move - Commit cgroup move
+        * @p: task being moved
+        * @from: cgroup @p is being moved from
+        * @to: cgroup @p is being moved to
+        *
+        * Commit the move. @p is dequeued during this operation.
+        */
+       void (*cgroup_move)(struct task_struct *p,
+                           struct cgroup *from, struct cgroup *to);
+
+       /**
+        * cgroup_cancel_move - Cancel cgroup move
+        * @p: task whose cgroup move is being canceled
+        * @from: cgroup @p was being moved from
+        * @to: cgroup @p was being moved to
+        *
+        * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
+        * Undo the preparation.
+        */
+       void (*cgroup_cancel_move)(struct task_struct *p,
+                                  struct cgroup *from, struct cgroup *to);
+
+       /**
+        * cgroup_set_weight - A cgroup's weight is being changed
+        * @cgrp: cgroup whose weight is being updated
+        * @weight: new weight [1..10000]
+        *
+        * Update @cgrp's weight to @weight.
+        */
+       void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
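
The prep/commit/cancel callbacks above follow a two-phase pattern: prepare (may
block, may fail), then either commit or undo. A sketch of how the three pair up,
with hypothetical helpers standing in for the per-task bookkeeping:

    s32 BPF_STRUCT_OPS_SLEEPABLE(hypo_cgroup_prep_move, struct task_struct *p,
                                 struct cgroup *from, struct cgroup *to)
    {
            /* may block: reserve whatever @p will need in the destination */
            return hypo_reserve(p, to);
    }

    void BPF_STRUCT_OPS(hypo_cgroup_move, struct task_struct *p,
                        struct cgroup *from, struct cgroup *to)
    {
            /* @p is dequeued here; switch its accounting from @from to @to */
            hypo_commit(p, from, to);
    }

    void BPF_STRUCT_OPS(hypo_cgroup_cancel_move, struct task_struct *p,
                        struct cgroup *from, struct cgroup *to)
    {
            /* prep succeeded but the migration failed; drop the reservation */
            hypo_unreserve(p, to);
    }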
        /*
         * All online ops must come before ops.cpu_online().
         */
        SCX_KICK_WAIT           = 1LLU << 2,
 };
 
+enum scx_tg_flags {
+       SCX_TG_ONLINE           = 1U << 0,
+       SCX_TG_INITED           = 1U << 1,
+};
+
 enum scx_ops_enable_state {
        SCX_OPS_PREPPING,
        SCX_OPS_ENABLING,
                resched_curr(rq);
 }
 
+#ifdef CONFIG_EXT_GROUP_SCHED
+static struct cgroup *tg_cgrp(struct task_group *tg)
+{
+       /*
+        * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
+        * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
+        * root cgroup.
+        */
+       if (tg && tg->css.cgroup)
+               return tg->css.cgroup;
+       else
+               return &cgrp_dfl_root.cgrp;
+}
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg)          .cgroup = tg_cgrp(tg),
+
+#else  /* CONFIG_EXT_GROUP_SCHED */
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg)
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
 {
        return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
 
        if (SCX_HAS_OP(init_task)) {
                struct scx_init_task_args args = {
+                       SCX_INIT_TASK_ARGS_CGROUP(tg)
                        .fork = fork,
                };
 
        scx_set_task_state(p, SCX_TASK_ENABLED);
 
        if (SCX_HAS_OP(set_weight))
-               SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight);
+               SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
 }
 
 static void scx_ops_disable_task(struct task_struct *p)
 }
 #endif
 
+#ifdef CONFIG_EXT_GROUP_SCHED
+
+DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
+static bool cgroup_warned_missing_weight;
+static bool cgroup_warned_missing_idle;
+
+static void scx_cgroup_warn_missing_weight(struct task_group *tg)
+{
+       if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
+           cgroup_warned_missing_weight)
+               return;
+
+       if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
+               return;
+
+       pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
+               scx_ops.name);
+       cgroup_warned_missing_weight = true;
+}
+
+static void scx_cgroup_warn_missing_idle(struct task_group *tg)
+{
+       if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
+           cgroup_warned_missing_idle)
+               return;
+
+       if (!tg->idle)
+               return;
+
+       pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
+               scx_ops.name);
+       cgroup_warned_missing_idle = true;
+}
+
+int scx_tg_online(struct task_group *tg)
+{
+       int ret = 0;
+
+       WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
+
+       percpu_down_read(&scx_cgroup_rwsem);
+
+       scx_cgroup_warn_missing_weight(tg);
+
+       if (SCX_HAS_OP(cgroup_init)) {
+               struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+
+               ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+                                     tg->css.cgroup, &args);
+               if (!ret)
+                       tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
+               else
+                       ret = ops_sanitize_err("cgroup_init", ret);
+       } else {
+               tg->scx_flags |= SCX_TG_ONLINE;
+       }
+
+       percpu_up_read(&scx_cgroup_rwsem);
+       return ret;
+}
+
+void scx_tg_offline(struct task_group *tg)
+{
+       WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
+
+       percpu_down_read(&scx_cgroup_rwsem);
+
+       if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
+               SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
+       tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
+
+       percpu_up_read(&scx_cgroup_rwsem);
+}
+
+int scx_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys_state *css;
+       struct task_struct *p;
+       int ret;
+
+       /* released in scx_cgroup_finish_attach() or scx_cgroup_cancel_attach() */
+       percpu_down_read(&scx_cgroup_rwsem);
+
+       if (!scx_enabled())
+               return 0;
+
+       cgroup_taskset_for_each(p, css, tset) {
+               struct cgroup *from = tg_cgrp(task_group(p));
+               struct cgroup *to = tg_cgrp(css_tg(css));
+
+               WARN_ON_ONCE(p->scx.cgrp_moving_from);
+
+               /*
+                * sched_move_task() omits identity migrations. Let's match the
+                * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
+                * always match one-to-one.
+                */
+               if (from == to)
+                       continue;
+
+               if (SCX_HAS_OP(cgroup_prep_move)) {
+                       ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
+                                             p, from, css->cgroup);
+                       if (ret)
+                               goto err;
+               }
+
+               p->scx.cgrp_moving_from = from;
+       }
+
+       return 0;
+
+err:
+       cgroup_taskset_for_each(p, css, tset) {
+               if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+                       SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+                                   p->scx.cgrp_moving_from, css->cgroup);
+               p->scx.cgrp_moving_from = NULL;
+       }
+
+       percpu_up_read(&scx_cgroup_rwsem);
+       return ops_sanitize_err("cgroup_prep_move", ret);
+}
+
+void scx_move_task(struct task_struct *p)
+{
+       if (!scx_enabled())
+               return;
+
+       /*
+        * We're called from sched_move_task() which handles both cgroup and
+        * autogroup moves. Ignore the latter.
+        *
+        * Also ignore exiting tasks, because in the exit path tasks transition
+        * from the autogroup to the root group, so task_group_is_autogroup()
+        * alone isn't able to catch exiting autogroup tasks. This is safe for
+        * cgroup_move(), because cgroup migrations never happen for PF_EXITING
+        * tasks.
+        */
+       if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
+               return;
+
+       /*
+        * @p must have ops.cgroup_prep_move() called on it and thus
+        * cgrp_moving_from set.
+        */
+       if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
+               SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
+                       p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
+       p->scx.cgrp_moving_from = NULL;
+}
+
+void scx_cgroup_finish_attach(void)
+{
+       percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys_state *css;
+       struct task_struct *p;
+
+       if (!scx_enabled())
+               goto out_unlock;
+
+       cgroup_taskset_for_each(p, css, tset) {
+               if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+                       SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+                                   p->scx.cgrp_moving_from, css->cgroup);
+               p->scx.cgrp_moving_from = NULL;
+       }
+out_unlock:
+       percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_weight(struct task_group *tg, unsigned long weight)
+{
+       percpu_down_read(&scx_cgroup_rwsem);
+
+       if (tg->scx_weight != weight) {
+               if (SCX_HAS_OP(cgroup_set_weight))
+                       SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
+                                   tg_cgrp(tg), weight);
+               tg->scx_weight = weight;
+       }
+
+       percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_idle(struct task_group *tg, bool idle)
+{
+       percpu_down_read(&scx_cgroup_rwsem);
+       scx_cgroup_warn_missing_idle(tg);
+       percpu_up_read(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_lock(void)
+{
+       percpu_down_write(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_unlock(void)
+{
+       percpu_up_write(&scx_cgroup_rwsem);
+}
+
+#else  /* CONFIG_EXT_GROUP_SCHED */
+
+static inline void scx_cgroup_lock(void) {}
+static inline void scx_cgroup_unlock(void) {}
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
 /*
  * Omitted operations:
  *
        rcu_read_unlock();
 }
 
+#ifdef CONFIG_EXT_GROUP_SCHED
+static void scx_cgroup_exit(void)
+{
+       struct cgroup_subsys_state *css;
+
+       percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+       /*
+        * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+        * cgroups and exit all the inited ones, all online cgroups are exited.
+        */
+       rcu_read_lock();
+       css_for_each_descendant_post(css, &root_task_group.css) {
+               struct task_group *tg = css_tg(css);
+
+               if (!(tg->scx_flags & SCX_TG_INITED))
+                       continue;
+               tg->scx_flags &= ~SCX_TG_INITED;
+
+               if (!scx_ops.cgroup_exit)
+                       continue;
+
+               if (WARN_ON_ONCE(!css_tryget(css)))
+                       continue;
+               rcu_read_unlock();
+
+               SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
+
+               rcu_read_lock();
+               css_put(css);
+       }
+       rcu_read_unlock();
+}
+
+static int scx_cgroup_init(void)
+{
+       struct cgroup_subsys_state *css;
+       int ret;
+
+       percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+       cgroup_warned_missing_weight = false;
+       cgroup_warned_missing_idle = false;
+
+       /*
+        * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+        * cgroups and init the online ones, all online cgroups are initialized.
+        */
+       rcu_read_lock();
+       css_for_each_descendant_pre(css, &root_task_group.css) {
+               struct task_group *tg = css_tg(css);
+               struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+
+               scx_cgroup_warn_missing_weight(tg);
+               scx_cgroup_warn_missing_idle(tg);
+
+               if ((tg->scx_flags &
+                    (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
+                       continue;
+
+               if (!scx_ops.cgroup_init) {
+                       tg->scx_flags |= SCX_TG_INITED;
+                       continue;
+               }
+
+               if (WARN_ON_ONCE(!css_tryget(css)))
+                       continue;
+               rcu_read_unlock();
+
+               ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+                                     css->cgroup, &args);
+               if (ret) {
+                       css_put(css);
+                       return ret;
+               }
+               tg->scx_flags |= SCX_TG_INITED;
+
+               rcu_read_lock();
+               css_put(css);
+       }
+       rcu_read_unlock();
+
+       return 0;
+}
+
+#else
+static void scx_cgroup_exit(void) {}
+static int scx_cgroup_init(void) { return 0; }
+#endif
+
 
 /********************************************************************************
  * Sysfs interface and ops enable/disable.
        WRITE_ONCE(scx_switching_all, false);
 
        /*
-        * Avoid racing against fork. See scx_ops_enable() for explanation on
-        * the locking order.
+        * Avoid racing against fork and cgroup changes. See scx_ops_enable()
+        * for explanation on the locking order.
         */
        percpu_down_write(&scx_fork_rwsem);
        cpus_read_lock();
+       scx_cgroup_lock();
 
        spin_lock_irq(&scx_tasks_lock);
        scx_task_iter_init(&sti);
        static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
        synchronize_rcu();
 
+       scx_cgroup_exit();
+
+       scx_cgroup_unlock();
        cpus_read_unlock();
        percpu_up_write(&scx_fork_rwsem);
 
                           scx_watchdog_timeout / 2);
 
        /*
-        * Lock out forks before opening the floodgate so that they don't wander
-        * into the operations prematurely.
+        * Lock out forks, cgroup on/offlining and moves before opening the
+        * floodgate so that they don't wander into the operations prematurely.
+        *
+        * We don't need to keep the CPUs stable, but static_branch_*() requires
+        * cpus_read_lock(), and scx_cgroup_rwsem must nest inside
+        * cpu_hotplug_lock because of the following dependency chain:
+        *
+        *   cpu_hotplug_lock --> cgroup_threadgroup_rwsem --> scx_cgroup_rwsem
         *
-        * We don't need to keep the CPUs stable but grab cpus_read_lock() to
-        * ease future locking changes for cgroup suport.
+        * So, we need to do cpus_read_lock() before scx_cgroup_lock() and use
+        * static_branch_*_cpuslocked().
         *
         * Note that cpu_hotplug_lock must nest inside scx_fork_rwsem due to the
         * following dependency chain:
         */
        percpu_down_write(&scx_fork_rwsem);
        cpus_read_lock();
+       scx_cgroup_lock();
 
        check_hotplug_seq(ops);
 
                static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
        }
 
+       /*
+        * All cgroups should be initialized before letting in tasks. cgroup
+        * on/offlining and task migrations are already locked out.
+        */
+       ret = scx_cgroup_init();
+       if (ret)
+               goto err_disable_unlock_all;
+
        static_branch_enable_cpuslocked(&__scx_ops_enabled);
 
        /*
 
        spin_unlock_irq(&scx_tasks_lock);
        preempt_enable();
+       scx_cgroup_unlock();
        cpus_read_unlock();
        percpu_up_write(&scx_fork_rwsem);
 
        return ret;
 
 err_disable_unlock_all:
+       scx_cgroup_unlock();
        percpu_up_write(&scx_fork_rwsem);
 err_disable_unlock_cpus:
        cpus_read_unlock();
 
        switch (moff) {
        case offsetof(struct sched_ext_ops, init_task):
+#ifdef CONFIG_EXT_GROUP_SCHED
+       case offsetof(struct sched_ext_ops, cgroup_init):
+       case offsetof(struct sched_ext_ops, cgroup_exit):
+       case offsetof(struct sched_ext_ops, cgroup_prep_move):
+#endif
        case offsetof(struct sched_ext_ops, cpu_online):
        case offsetof(struct sched_ext_ops, cpu_offline):
        case offsetof(struct sched_ext_ops, init):
 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
 static void enable_stub(struct task_struct *p) {}
 static void disable_stub(struct task_struct *p) {}
+#ifdef CONFIG_EXT_GROUP_SCHED
+static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
+static void cgroup_exit_stub(struct cgroup *cgrp) {}
+static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
+static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
+#endif
 static void cpu_online_stub(s32 cpu) {}
 static void cpu_offline_stub(s32 cpu) {}
 static s32 init_stub(void) { return -EINVAL; }
        .exit_task = exit_task_stub,
        .enable = enable_stub,
        .disable = disable_stub,
+#ifdef CONFIG_EXT_GROUP_SCHED
+       .cgroup_init = cgroup_init_stub,
+       .cgroup_exit = cgroup_exit_stub,
+       .cgroup_prep_move = cgroup_prep_move_stub,
+       .cgroup_move = cgroup_move_stub,
+       .cgroup_cancel_move = cgroup_cancel_move_stub,
+       .cgroup_set_weight = cgroup_set_weight_stub,
+#endif
        .cpu_online = cpu_online_stub,
        .cpu_offline = cpu_offline_stub,
        .init = init_stub,
         * definitions so that BPF scheduler implementations can use them
         * through the generated vmlinux.h.
         */
-       WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT);
+       WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
+                  SCX_TG_ONLINE);
 
        BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
        init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL);
        return cpu_rq(cpu);
 }
 
+/**
+ * scx_bpf_task_cgroup - Return the sched cgroup of a task
+ * @p: task of interest
+ *
+ * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
+ * from the scheduler's POV. SCX operations should use this function to
+ * determine @p's current cgroup as, unlike following @p->cgroups,
+ * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
+ * rq-locked operations. Can be called on the parameter tasks of rq-locked
+ * operations. The restriction guarantees that @p's rq is locked by the caller.
+ */
+#ifdef CONFIG_CGROUP_SCHED
+__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
+{
+       struct task_group *tg = p->sched_task_group;
+       struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
+
+       if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
+               goto out;
+
+       /*
+        * A task_group may either be a cgroup or an autogroup. In the latter
+        * case, @tg->css.cgroup is %NULL. A task_group can't become the other
+        * kind once created.
+        */
+       if (tg && tg->css.cgroup)
+               cgrp = tg->css.cgroup;
+out:
+       cgroup_get(cgrp);
+       return cgrp;
+}
+#endif
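
For example, an enqueue path can use the kfunc to key per-cgroup dispatch
queues. A sketch assuming a hypothetical DSQ-per-cgroup scheme (cgroup ID used
as the DSQ ID, with the DSQs created elsewhere); the acquired reference must be
dropped with bpf_cgroup_release():

    void BPF_STRUCT_OPS(hypo_enqueue, struct task_struct *p, u64 enq_flags)
    {
            struct cgroup *cgrp;
            u64 cgid;

            /* @p is an rq-locked argument task, so the call is permitted */
            cgrp = scx_bpf_task_cgroup(p);
            cgid = cgrp->kn->id;
            bpf_cgroup_release(cgrp);

            /* hypothetical: one DSQ per cgroup, keyed by cgroup ID */
            scx_bpf_dispatch(p, cgid, SCX_SLICE_DFL, enq_flags);
    }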
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(scx_kfunc_ids_any)
 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
+#ifdef CONFIG_CGROUP_SCHED
+BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
+#endif
 BTF_KFUNCS_END(scx_kfunc_ids_any)
 
 static const struct btf_kfunc_id_set scx_kfunc_set_any = {