www.infradead.org Git - users/hch/misc.git/commitdiff
cpuset: decouple tmpmasks and cpumasks freeing in cgroup
author: Chen Ridong <chenridong@huawei.com>
Mon, 25 Aug 2025 03:23:50 +0000 (03:23 +0000)
committer: Tejun Heo <tj@kernel.org>
Mon, 25 Aug 2025 18:19:24 +0000 (08:19 -1000)
Currently, free_cpumasks() can free both tmpmasks and cpumasks of a cpuset
(cs). However, these two operations are not logically coupled. To improve
code clarity:
1. Move cpumask freeing to free_cpuset()
2. Rename free_cpumasks() to free_tmpmasks()

This change enforces the single responsibility principle.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/cgroup/cpuset.c

index 3466ebbf10164ab5775d13479fa02138a5be8545..aebda14cc67ff3f550b28a75bc5b58dac0595a50 100644 (file)
@@ -459,23 +459,14 @@ free_one:
 }
 
 /**
- * free_cpumasks - free cpumasks in a tmpmasks structure
- * @cs:  the cpuset that have cpumasks to be free.
+ * free_tmpmasks - free cpumasks in a tmpmasks structure
  * @tmp: the tmpmasks structure pointer
  */
-static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
+static inline void free_tmpmasks(struct tmpmasks *tmp)
 {
-       if (cs) {
-               free_cpumask_var(cs->cpus_allowed);
-               free_cpumask_var(cs->effective_cpus);
-               free_cpumask_var(cs->effective_xcpus);
-               free_cpumask_var(cs->exclusive_cpus);
-       }
-       if (tmp) {
-               free_cpumask_var(tmp->new_cpus);
-               free_cpumask_var(tmp->addmask);
-               free_cpumask_var(tmp->delmask);
-       }
+       free_cpumask_var(tmp->new_cpus);
+       free_cpumask_var(tmp->addmask);
+       free_cpumask_var(tmp->delmask);
 }
 
 /**
@@ -508,7 +499,10 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
  */
 static inline void free_cpuset(struct cpuset *cs)
 {
-       free_cpumasks(cs, NULL);
+       free_cpumask_var(cs->cpus_allowed);
+       free_cpumask_var(cs->effective_cpus);
+       free_cpumask_var(cs->effective_xcpus);
+       free_cpumask_var(cs->exclusive_cpus);
        kfree(cs);
 }
 
@@ -2427,7 +2421,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (cs->partition_root_state)
                update_partition_sd_lb(cs, old_prs);
 out_free:
-       free_cpumasks(NULL, &tmp);
+       free_tmpmasks(&tmp);
        return retval;
 }
 
@@ -2530,7 +2524,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (cs->partition_root_state)
                update_partition_sd_lb(cs, old_prs);
 
-       free_cpumasks(NULL, &tmp);
+       free_tmpmasks(&tmp);
        return 0;
 }
 
@@ -2983,7 +2977,7 @@ out:
        notify_partition_change(cs, old_prs);
        if (force_sd_rebuild)
                rebuild_sched_domains_locked();
-       free_cpumasks(NULL, &tmpmask);
+       free_tmpmasks(&tmpmask);
        return 0;
 }
 
@@ -4006,7 +4000,7 @@ static void cpuset_handle_hotplug(void)
        if (force_sd_rebuild)
                rebuild_sched_domains_cpuslocked();
 
-       free_cpumasks(NULL, ptmp);
+       free_tmpmasks(ptmp);
 }
 
 void cpuset_update_active_cpus(void)