 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
                              unsigned int nr_entries);
 static bool folio_swapcache_freeable(struct folio *folio);
-static struct swap_cluster_info *lock_cluster_or_swap_info(
-               struct swap_info_struct *si, unsigned long offset);
-static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
-                                       struct swap_cluster_info *ci);
+static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
+                                             unsigned long offset);
+static void unlock_cluster(struct swap_cluster_info *ci);
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
         * swap_map is HAS_CACHE only, which means the slots have no page table
         * reference or pending writeback, and can't be allocated to others.
         */
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        need_reclaim = swap_is_has_cache(si, offset, nr_pages);
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        if (!need_reclaim)
                goto out_unlock;
 
 {
        struct swap_cluster_info *ci;
 
-       ci = si->cluster_info;
-       if (ci) {
-               ci += offset / SWAPFILE_CLUSTER;
-               spin_lock(&ci->lock);
-       }
-       return ci;
-}
-
-static inline void unlock_cluster(struct swap_cluster_info *ci)
-{
-       if (ci)
-               spin_unlock(&ci->lock);
-}
-
-/*
- * Determine the locking method in use for this device.  Return
- * swap_cluster_info if SSD-style cluster-based locking is in place.
- */
-static inline struct swap_cluster_info *lock_cluster_or_swap_info(
-               struct swap_info_struct *si, unsigned long offset)
-{
-       struct swap_cluster_info *ci;
-
-       /* Try to use fine-grained SSD-style locking if available: */
-       ci = lock_cluster(si, offset);
-       /* Otherwise, fall back to traditional, coarse locking: */
-       if (!ci)
-               spin_lock(&si->lock);
+       ci = &si->cluster_info[offset / SWAPFILE_CLUSTER];
+       spin_lock(&ci->lock);
 
        return ci;
 }
 
-static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
-                                              struct swap_cluster_info *ci)
+static inline void unlock_cluster(struct swap_cluster_info *ci)
 {
-       if (ci)
-               unlock_cluster(ci);
-       else
-               spin_unlock(&si->lock);
+       spin_unlock(&ci->lock);
 }
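
With every swap device now maintaining cluster_info, the conditional
wrappers collapse into the unconditional pair above. As a minimal sketch
of what call sites reduce to (swap_map_peek is a hypothetical helper for
illustration, not part of the patch):

/* Hypothetical example: the per-cluster lock is taken unconditionally;
 * the old NULL-check fallback to si->lock is gone. */
static unsigned char swap_map_peek(struct swap_info_struct *si,
                                   unsigned long offset)
{
        struct swap_cluster_info *ci;
        unsigned char count;

        ci = lock_cluster(si, offset);  /* acquires ci->lock */
        count = si->swap_map[offset];   /* stable under ci->lock */
        unlock_cluster(ci);
        return count;
}
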
 
 /* Add a cluster to discard list and schedule it to do discard */
        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
        struct swap_cluster_info *ci;
 
-       if (!cluster_info)
-               return;
-
        ci = cluster_info + idx;
        ci->count++;
 
 static void dec_cluster_info_page(struct swap_info_struct *si,
                                  struct swap_cluster_info *ci, int nr_pages)
 {
-       if (!si->cluster_info)
-               return;
-
        VM_BUG_ON(ci->count < nr_pages);
        VM_BUG_ON(cluster_is_free(ci));
        lockdep_assert_held(&si->lock);
                si->highest_bit = 0;
                del_from_avail_list(si);
 
-               if (si->cluster_info && vm_swap_full())
+               if (vm_swap_full())
                        schedule_work(&si->reclaim_work);
        }
 }
 {
        int n_ret = 0;
 
-       VM_BUG_ON(!si->cluster_info);
-
        si->flags += SWP_SCANNING;
 
        while (n_ret < nr) {
                }
 
                /*
-                * Swapfile is not block device or not using clusters so unable
+                * Swapfile is not a block device, so it is unable
                 * to allocate large entries.
                 */
-               if (!(si->flags & SWP_BLKDEV) || !si->cluster_info)
+               if (!(si->flags & SWP_BLKDEV))
                        return 0;
        }
 
        unsigned long offset = swp_offset(entry);
        unsigned char usage;
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        usage = __swap_entry_free_locked(si, offset, 1);
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        if (!usage)
                free_swap_slot(entry);
 
        if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
                goto fallback;
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        if (!swap_is_last_map(si, offset, nr, &has_cache)) {
-               unlock_cluster_or_swap_info(si, ci);
+               unlock_cluster(ci);
                goto fallback;
        }
        for (i = 0; i < nr; i++)
                WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
 
        if (!has_cache) {
                for (i = 0; i < nr; i++)
        DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
        int i, nr;
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        while (nr_pages) {
                nr = min(BITS_PER_LONG, nr_pages);
                for (i = 0; i < nr; i++) {
                                bitmap_set(to_free, i, 1);
                }
                if (!bitmap_empty(to_free, BITS_PER_LONG)) {
-                       unlock_cluster_or_swap_info(si, ci);
+                       unlock_cluster(ci);
                        for_each_set_bit(i, to_free, BITS_PER_LONG)
                                free_swap_slot(swp_entry(si->type, offset + i));
                        if (nr == nr_pages)
                                return;
                        bitmap_clear(to_free, 0, BITS_PER_LONG);
-                       ci = lock_cluster_or_swap_info(si, offset);
+                       ci = lock_cluster(si, offset);
                }
                offset += nr;
                nr_pages -= nr;
        }
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
 }
 
 /*
        if (!si)
                return;
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        if (size > 1 && swap_is_has_cache(si, offset, size)) {
-               unlock_cluster_or_swap_info(si, ci);
+               unlock_cluster(ci);
                spin_lock(&si->lock);
                swap_entry_range_free(si, entry, size);
        spin_unlock(&si->lock);
                return;
        }
        for (int i = 0; i < size; i++, entry.val++) {
                if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
-                       unlock_cluster_or_swap_info(si, ci);
+                       unlock_cluster(ci);
                        free_swap_slot(entry);
                        if (i == size - 1)
                                return;
-                       lock_cluster_or_swap_info(si, offset);
+                       lock_cluster(si, offset);
                }
        }
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
 }
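
The re-lock at the bottom of the loop above deliberately discards
lock_cluster()'s return value: all entries of the folio live in a single
cluster (note the VM_WARN_ON on nr versus SWAPFILE_CLUSTER in the
duplicate path below), so the previously obtained ci is still the right
pointer. A sketch of that invariant (hypothetical helper name, not in
the patch):

/* Hypothetical helper: a folio's swap range never crosses a cluster
 * boundary, which is what makes the unassigned re-lock above safe. */
static inline bool swap_range_in_one_cluster(unsigned long offset,
                                             unsigned int nr)
{
        return offset / SWAPFILE_CLUSTER ==
               (offset + nr - 1) / SWAPFILE_CLUSTER;
}
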
 
 static int swp_entry_cmp(const void *ent1, const void *ent2)
        struct swap_cluster_info *ci;
        int count;
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
        count = swap_count(si->swap_map[offset]);
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        return count;
 }
 
 
        offset = swp_offset(entry);
 
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
 
        count = swap_count(si->swap_map[offset]);
        if (!(count & COUNT_CONTINUED))
                n *= (SWAP_CONT_MAX + 1);
        } while (tmp_count & COUNT_CONTINUED);
 out:
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        return count;
 }
 
        int i;
        bool ret = false;
 
-       ci = lock_cluster_or_swap_info(si, offset);
-       if (!ci || nr_pages == 1) {
+       ci = lock_cluster(si, offset);
+       if (nr_pages == 1) {
                if (swap_count(map[roffset]))
                        ret = true;
                goto unlock_out;
                }
        }
 unlock_out:
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        return ret;
 }
 
        offset = swp_offset(entry);
        VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
        VM_WARN_ON(usage == 1 && nr > 1);
-       ci = lock_cluster_or_swap_info(si, offset);
+       ci = lock_cluster(si, offset);
 
        err = 0;
        for (i = 0; i < nr; i++) {
        }
 
 unlock_out:
-       unlock_cluster_or_swap_info(si, ci);
+       unlock_cluster(ci);
        return err;
 }