this_cpu_write(*si->cluster_next_cpu, next);
 }
 
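+/*
+ * Lockless check of whether the swap slot at @offset is free, or holds a
+ * cache-only (SWAP_HAS_CACHE) entry that can be reused when swap is nearly
+ * full.  If so, take si->lock and return true so the caller can revalidate
+ * and claim the slot under the lock; otherwise return false with si->lock
+ * not held.
+ */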
+static bool swap_offset_available_and_locked(struct swap_info_struct *si,
+                                            unsigned long offset)
+{
+       if (data_race(!si->swap_map[offset])) {
+               spin_lock(&si->lock);
+               return true;
+       }
+
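+       /* Also allow reuse of a cache-only swap entry when swap is nearly full. */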
+       if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
+               spin_lock(&si->lock);
+               return true;
+       }
+
+       return false;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
                               unsigned char usage, int nr,
                               swp_entry_t slots[])
 scan:
        spin_unlock(&si->lock);
        while (++offset <= READ_ONCE(si->highest_bit)) {
-               if (data_race(!si->swap_map[offset])) {
-                       spin_lock(&si->lock);
+               if (swap_offset_available_and_locked(si, offset))
                        goto checks;
-               }
-               if (vm_swap_full() &&
-                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-                       spin_lock(&si->lock);
-                       goto checks;
-               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
        }
        offset = si->lowest_bit;
        while (offset < scan_base) {
-               if (data_race(!si->swap_map[offset])) {
-                       spin_lock(&si->lock);
+               if (swap_offset_available_and_locked(si, offset))
                        goto checks;
-               }
-               if (vm_swap_full() &&
-                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-                       spin_lock(&si->lock);
-                       goto checks;
-               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;