cpumask: replace cpumask_next_* with cpumask_first_* where appropriate
author    Yury Norov <yury.norov@gmail.com>
          Mon, 23 Aug 2021 23:59:57 +0000 (09:59 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 25 Aug 2021 23:34:50 +0000 (09:34 +1000)
cpumask_first() is a more efficient analogue of the 'next' version when n == -1
(which means the search starts at bit 0).  This patch replaces 'next' with
'first' where the conversion is trivial.

There's no cpumask_first_zero() function, so create it.
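
For illustration only (not part of the patch): a minimal, standalone C sketch
of the equivalence the conversions rely on.  The next_set_bit() and
first_set_bit() helpers below are hypothetical single-word stand-ins that
mirror the semantics of cpumask_next() and cpumask_first(); a 'first' search
returns the same bit as a 'next' search started at n == -1, which is why the
callers below can be converted.

#include <assert.h>
#include <stdio.h>

#define NR_BITS (8 * sizeof(unsigned long))

/* Lowest set bit strictly after position n (n may be -1), or NR_BITS if
 * there is none - mirrors the semantics of cpumask_next(). */
static unsigned int next_set_bit(int n, unsigned long mask)
{
	unsigned int i;

	for (i = n + 1; i < NR_BITS; i++)
		if (mask & (1UL << i))
			return i;
	return NR_BITS;
}

/* Lowest set bit, or NR_BITS if the mask is empty - mirrors cpumask_first(). */
static unsigned int first_set_bit(unsigned long mask)
{
	unsigned int i;

	for (i = 0; i < NR_BITS; i++)
		if (mask & (1UL << i))
			return i;
	return NR_BITS;
}

int main(void)
{
	unsigned long online = 0xF0UL;	/* pretend CPUs 4-7 are online */

	/* The equivalence the patch relies on: 'first' == 'next' from -1. */
	assert(first_set_bit(online) == next_set_bit(-1, online));
	printf("first online cpu: %u\n", first_set_bit(online));
	return 0;
}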

Link: https://lkml.kernel.org/r/20210814211713.180533-10-yury.norov@gmail.com
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexey Klimov <aklimov@redhat.com>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
arch/powerpc/include/asm/cputhreads.h
block/blk-mq.c
drivers/net/virtio_net.c
drivers/soc/fsl/qbman/bman_portal.c
drivers/soc/fsl/qbman/qman_portal.c
include/linux/cpumask.h
kernel/time/clocksource.c

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index b167186aaee4af09e4743142c71cd752892d3d1e..44286df21d2aefe394b2819ec15b92dc31663191 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -52,7 +52,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
        for (i = 0; i < NR_CPUS; i += threads_per_core) {
                cpumask_shift_left(&tmp, &threads_core_mask, i);
                if (cpumask_intersects(threads, &tmp)) {
-                       cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
+                       cpu = cpumask_first_and(&tmp, cpu_online_mask);
                        if (cpu < nr_cpu_ids)
                                cpumask_set_cpu(cpu, &res);
                }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9d4fdc2be88a5e5eb7d995fc1592cbd932eed2f9..2bf57e0cce507eda6928fe031c642bc5dd9ed6a3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2524,7 +2524,7 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
                struct blk_mq_hw_ctx *hctx)
 {
-       if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
+       if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
                return false;
        if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
                return false;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index eee493685aad5d70b0824b5441c495cd63284518..4d04caca09bb04b8dd6edea77c855b2d6499aa99 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2091,7 +2091,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
        stragglers = num_cpu >= vi->curr_queue_pairs ?
                        num_cpu % vi->curr_queue_pairs :
                        0;
-       cpu = cpumask_next(-1, cpu_online_mask);
+       cpu = cpumask_first(cpu_online_mask);
 
        for (i = 0; i < vi->curr_queue_pairs; i++) {
                group_size = stride + (i < stragglers ? 1 : 0);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index acda8a5637c52066891aa3ab1f480a397cc8372a..4d7b9caee1c4719675191b8348099e410a756b3a 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -155,7 +155,7 @@ static int bman_portal_probe(struct platform_device *pdev)
        }
 
        spin_lock(&bman_lock);
-       cpu = cpumask_next_zero(-1, &portal_cpus);
+       cpu = cpumask_first_zero(&portal_cpus);
        if (cpu >= nr_cpu_ids) {
                __bman_portals_probed = 1;
                /* unassigned portal, skip init */
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 96f74a1dc60355dcf01f33641cba79c66f393567..e23b60618c1a15aa7f89f655ded6bdbb732ae2a5 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -248,7 +248,7 @@ static int qman_portal_probe(struct platform_device *pdev)
        pcfg->pools = qm_get_pools_sdqcr();
 
        spin_lock(&qman_lock);
-       cpu = cpumask_next_zero(-1, &portal_cpus);
+       cpu = cpumask_first_zero(&portal_cpus);
        if (cpu >= nr_cpu_ids) {
                __qman_portals_probed = 1;
                /* unassigned portal, skip init */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4a03f6636e6ceed65233f4e2eb86098ed0dd4fbf..f5883a8f28caaf643a9c5c261fe4ad7c5f3fe8c9 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -123,6 +123,11 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
        return 0;
 }
 
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+{
+       return 0;
+}
+
 static inline unsigned int cpumask_first_and(const struct cpumask *srcp1,
                                             const struct cpumask *srcp2)
 {
@@ -201,6 +206,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
        return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
 }
 
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if all cpus are set.
+ */
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+{
+       return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
 /**
  * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
  * @src1p: the first input
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index b89c76e1c02c4763453de19d2c5a3c8540bb01a6..fbbee6fca1ba81b147c8c2f8f183486d73f7e05e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -257,7 +257,7 @@ static void clocksource_verify_choose_cpus(void)
                return;
 
        /* Make sure to select at least one CPU other than the current CPU. */
-       cpu = cpumask_next(-1, cpu_online_mask);
+       cpu = cpumask_first(cpu_online_mask);
        if (cpu == smp_processor_id())
                cpu = cpumask_next(cpu, cpu_online_mask);
        if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
@@ -279,7 +279,7 @@ static void clocksource_verify_choose_cpus(void)
                cpu = prandom_u32() % nr_cpu_ids;
                cpu = cpumask_next(cpu - 1, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
-                       cpu = cpumask_next(-1, cpu_online_mask);
+                       cpu = cpumask_first(cpu_online_mask);
                if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
                        cpumask_set_cpu(cpu, &cpus_chosen);
        }
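
As a further illustration (again a standalone sketch, not kernel code):
first_zero_bit() below is a hypothetical single-word stand-in for the new
cpumask_first_zero(), used the way the bman/qman portal probe code above uses
it, i.e. to claim the lowest-numbered CPU that does not yet have a portal
assigned.

#include <stdio.h>

#define NR_BITS (8 * sizeof(unsigned long))

/* Lowest clear bit, or NR_BITS if every bit is set - mirrors the
 * semantics of the new cpumask_first_zero(). */
static unsigned int first_zero_bit(unsigned long mask)
{
	unsigned int i;

	for (i = 0; i < NR_BITS; i++)
		if (!(mask & (1UL << i)))
			return i;
	return NR_BITS;
}

int main(void)
{
	unsigned long portal_cpus = 0UL;	/* no portal assigned to any CPU yet */
	unsigned int cpu;
	int i;

	/* Paraphrase of the qbman probe pattern above: claim the first CPU
	 * without a portal, then mark it as used. */
	for (i = 0; i < 3; i++) {
		cpu = first_zero_bit(portal_cpus);
		if (cpu >= NR_BITS)
			break;			/* every CPU already has a portal */
		portal_cpus |= 1UL << cpu;
		printf("portal %d -> cpu %u\n", i, cpu);
	}
	return 0;
}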