www.infradead.org Git - users/hch/xfs.git/commitdiff
workqueue: Add wq_online_cpumask
author: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Thu, 11 Jul 2024 08:35:41 +0000 (16:35 +0800)
committer: Tejun Heo <tj@kernel.org>
Thu, 11 Jul 2024 22:50:34 +0000 (12:50 -1000)
The new wq_online_cpumask mirrors the cpu_online_mask except during
hotplugging; specifically, it differs between the hotplugging stages
of workqueue_offline_cpu() and workqueue_online_cpu(), during which
the transitioning CPU is not represented in the mask.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index 4337910cc0348f8638981f1b290bb69bd4033b8b..e49afcb3d90a7d4a146763a54e5abfafa95c9c7b 100644 (file)
@@ -444,6 +444,9 @@ static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
 
+/* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
+static cpumask_var_t wq_online_cpumask;
+
 /* PL&A: allowable cpus for unbound wqs and work items */
 static cpumask_var_t wq_unbound_cpumask;
 
@@ -6574,6 +6577,8 @@ int workqueue_online_cpu(unsigned int cpu)
 
        mutex_lock(&wq_pool_mutex);
 
+       cpumask_set_cpu(cpu, wq_online_cpumask);
+
        for_each_pool(pool, pi) {
                /* BH pools aren't affected by hotplug */
                if (pool->flags & POOL_BH)
@@ -6620,6 +6625,9 @@ int workqueue_offline_cpu(unsigned int cpu)
 
        /* update pod affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
+
+       cpumask_clear_cpu(cpu, wq_online_cpumask);
+
        list_for_each_entry(wq, &workqueues, list) {
                struct workqueue_attrs *attrs = wq->unbound_attrs;
 
@@ -7649,10 +7657,12 @@ void __init workqueue_init_early(void)
 
        BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
+       BUG_ON(!alloc_cpumask_var(&wq_online_cpumask, GFP_KERNEL));
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
        BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
        BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
 
+       cpumask_copy(wq_online_cpumask, cpu_online_mask);
        cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
        restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
        restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));