hv_balloon: Use kernel macros to simplify open coded sequences
author    Michael Kelley <mhklinux@outlook.com>
          Fri, 3 May 2024 15:43:11 +0000 (08:43 -0700)
committer Wei Liu <wei.liu@kernel.org>
          Tue, 28 May 2024 05:26:01 +0000 (05:26 +0000)
Code sequences equivalent to ALIGN(), ALIGN_DOWN(), and umin() are
currently open-coded. Change these to use the kernel macros to
improve code clarity. ALIGN() and ALIGN_DOWN() require the
alignment value to be a power of 2, which is the case here.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/20240503154312.142466-1-mhklinux@outlook.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <20240503154312.142466-1-mhklinux@outlook.com>
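
For reference, a minimal userspace sketch of the three equivalences the
patch relies on. The macro bodies are local stand-ins that mirror the
kernel's power-of-2 definitions (the kernel's umin() additionally
type-checks its arguments), and the HA_CHUNK value here is illustrative,
not the driver's actual constant:

	#include <assert.h>

	/* Local stand-ins for the kernel macros; valid only when 'a' is a power of 2. */
	#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define umin(a, b)		((a) < (b) ? (a) : (b))

	#define HA_CHUNK		0x8000UL	/* illustrative power-of-2 chunk size */

	int main(void)
	{
		unsigned long residual = 0x9001UL;

		/* Old open-coded round-up, as removed from pfn_covered() below */
		unsigned long new_inc = (residual / HA_CHUNK) * HA_CHUNK;
		if (residual % HA_CHUNK)
			new_inc += HA_CHUNK;

		assert(new_inc == ALIGN(residual, HA_CHUNK));		/* round up */
		assert((residual / HA_CHUNK) * HA_CHUNK ==
		       ALIGN_DOWN(residual, HA_CHUNK));			/* round down */
		assert(umin(residual, HA_CHUNK) == HA_CHUNK);		/* smaller value */
		return 0;
	}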

drivers/hv/hv_balloon.c

index e000fa3b9f978cfc195babd258b1d17f8cb83995..9f45b8a6762c87bd82d9f324f27f68cd8f76f52f 100644
@@ -729,15 +729,8 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 
                scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
                        has->ha_end_pfn +=  HA_CHUNK;
-
-                       if (total_pfn > HA_CHUNK) {
-                               processed_pfn = HA_CHUNK;
-                               total_pfn -= HA_CHUNK;
-                       } else {
-                               processed_pfn = total_pfn;
-                               total_pfn = 0;
-                       }
-
+                       processed_pfn = umin(total_pfn, HA_CHUNK);
+                       total_pfn -= processed_pfn;
                        has->covered_end_pfn +=  processed_pfn;
                }
 
@@ -800,7 +793,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 {
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
-       unsigned long residual, new_inc;
+       unsigned long residual;
        int ret = 0;
 
        guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +829,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
                 * our current limit; extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
+                       /* Extend the region by multiples of HA_CHUNK */
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
-                       /*
-                        * Extend the region by multiples of HA_CHUNK.
-                        */
-                       new_inc = (residual / HA_CHUNK) * HA_CHUNK;
-                       if (residual % HA_CHUNK)
-                               new_inc += HA_CHUNK;
-
-                       has->end_pfn += new_inc;
+                       has->end_pfn += ALIGN(residual, HA_CHUNK);
                }
 
                ret = 1;
@@ -915,9 +902,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
-                               size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
-                               if (pfn_cnt % HA_CHUNK)
-                                       size += HA_CHUNK;
+                               size = ALIGN(pfn_cnt, HA_CHUNK);
                        } else {
                                pfn_cnt = size;
                        }
@@ -1011,9 +996,6 @@ static void hot_add_req(struct work_struct *dummy)
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
 
        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
-               unsigned long region_size;
-               unsigned long region_start;
-
                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
@@ -1021,14 +1003,8 @@ static void hot_add_req(struct work_struct *dummy)
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
-               region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
-               if (pfn_cnt % HA_CHUNK)
-                       region_size += HA_CHUNK;
-
-               region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
-
-               rg_start = region_start;
-               rg_sz = region_size;
+               rg_start = ALIGN_DOWN(pg_start, HA_CHUNK);
+               rg_sz = ALIGN(pfn_cnt, HA_CHUNK);
        }
 
        if (do_hot_add)
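
As a sanity check on the first hunk, a small sketch (again using local
stand-ins for umin() and HA_CHUNK, illustrative rather than the driver's
definitions) confirming the old branchy chunking and the new umin() form
compute the same values over a range of inputs:

	#include <assert.h>

	#define HA_CHUNK	0x8000UL		/* illustrative chunk size */
	#define umin(a, b)	((a) < (b) ? (a) : (b))	/* stand-in for the kernel macro */

	int main(void)
	{
		unsigned long total_pfn, old_total, new_total, old_proc, new_proc;

		for (total_pfn = 1; total_pfn < 4 * HA_CHUNK; total_pfn += 777) {
			old_total = new_total = total_pfn;

			/* Old open-coded form */
			if (old_total > HA_CHUNK) {
				old_proc = HA_CHUNK;
				old_total -= HA_CHUNK;
			} else {
				old_proc = old_total;
				old_total = 0;
			}

			/* New form from the patch */
			new_proc = umin(new_total, HA_CHUNK);
			new_total -= new_proc;

			assert(old_proc == new_proc && old_total == new_total);
		}
		return 0;
	}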