mm/swapfile: fold scan_swap_map() into the only caller get_swap_page_of_type()
author    Miaohe Lin <linmiaohe@huawei.com>
          Wed, 2 Jun 2021 03:52:11 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 2 Jun 2021 03:52:11 +0000 (13:52 +1000)
Fold scan_swap_map() into its only caller, get_swap_page_of_type(), and
update the comments that still refer to it, to make the code more
succinct.
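
For illustration, a rough user-space sketch of the resulting shape (stub
types and a fake allocator; nothing below is real kernel code, and the
plain counter stands in for the atomic_long_dec(&nr_swap_pages) in the
patch):

#include <stdio.h>

/* Minimal stand-ins (assumptions, not the kernel's definitions),
 * just enough to show the shape of the fold. */
typedef struct { unsigned long val; } swp_entry_t;
struct swap_info_struct { unsigned int flags; };
#define SWP_WRITEOK 0x1

static long nr_swap_pages = 100;

/* Stub playing the role of scan_swap_map_slots(): report one
 * allocated slot by filling in slots[0]. */
static int scan_swap_map_slots(struct swap_info_struct *si,
                               unsigned char usage, int nr,
                               swp_entry_t slots[])
{
        (void)si; (void)usage; (void)nr;
        slots[0].val = 42;      /* pretend offset 42 was free */
        return 1;               /* one slot handed out */
}

/* After the fold: no scan_swap_map() wrapper that unpacks the entry
 * into an offset only for the caller to repack it; the caller asks
 * for one slot and returns the entry directly. */
static swp_entry_t get_swap_page_of_type(struct swap_info_struct *si)
{
        swp_entry_t entry = {0};

        if ((si->flags & SWP_WRITEOK) &&
            scan_swap_map_slots(si, 1, 1, &entry))
                nr_swap_pages--;        /* mirrors atomic_long_dec() */
        return entry;
}

int main(void)
{
        struct swap_info_struct si = { .flags = SWP_WRITEOK };

        printf("allocated entry: %lu (pages left: %ld)\n",
               get_swap_page_of_type(&si).val, nr_swap_pages);
        return 0;
}

The failure contract is unchanged: entry stays all-zero when no slot is
found, matching the old "return (swp_entry_t) {0};".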

Link: https://lkml.kernel.org/r/20210527120328.3935132-1-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 53d2161edfc0861dccb4511f8fd58010e6f5a21b..166be489664d62d3d713b783dc251acca6bbc0ff 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -453,10 +453,10 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                unsigned int idx)
 {
        /*
-        * If scan_swap_map() can't find a free cluster, it will check
+        * If scan_swap_map_slots() can't find a free cluster, it will check
         * si->swap_map directly. To make sure the discarding cluster isn't
-        * taken by scan_swap_map(), mark the swap entries bad (occupied). It
-        * will be cleared after discard
+        * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
+        * It will be cleared after discard
         */
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
@@ -589,7 +589,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
 }
 
 /*
- * It's possible scan_swap_map() uses a free cluster in the middle of free
+ * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
  * cluster list. Avoiding such abuse to avoid list corruption.
  */
 static bool
@@ -1099,14 +1099,14 @@ start_over:
 nextsi:
                /*
                 * if we got here, it's likely that si was almost full before,
-                * and since scan_swap_map() can drop the si->lock, multiple
-                * callers probably all tried to get a page from the same si
-                * and it filled up before we could get one; or, the si filled
-                * up between us dropping swap_avail_lock and taking si->lock.
-                * Since we dropped the swap_avail_lock, the swap_avail_head
-                * list may have been modified; so if next is still in the
-                * swap_avail_head list then try it, otherwise start over
-                * if we have not gotten any slots.
+                * and since scan_swap_map_slots() can drop the si->lock,
+                * multiple callers probably all tried to get a page from the
+                * same si and it filled up before we could get one; or, the si
+                * filled up between us dropping swap_avail_lock and taking
+                * si->lock. Since we dropped the swap_avail_lock, the
+                * swap_avail_head list may have been modified; so if next is
+                * still in the swap_avail_head list then try it, otherwise
+                * start over if we have not gotten any slots.
                 */
                if (plist_node_empty(&next->avail_lists[node]))
                        goto start_over;
@@ -1774,42 +1774,21 @@ int free_swap_and_cache(swp_entry_t entry)
 
 #ifdef CONFIG_HIBERNATION
 
-static unsigned long scan_swap_map(struct swap_info_struct *si,
-                                  unsigned char usage)
-{
-       swp_entry_t entry;
-       int n_ret;
-
-       n_ret = scan_swap_map_slots(si, usage, 1, &entry);
-
-       if (n_ret)
-               return swp_offset(entry);
-       else
-               return 0;
-
-}
-
 swp_entry_t get_swap_page_of_type(int type)
 {
        struct swap_info_struct *si = swap_type_to_swap_info(type);
-       pgoff_t offset;
+       swp_entry_t entry = {0};
 
        if (!si)
                goto fail;
 
+       /* This is called for allocating swap entry, not cache */
        spin_lock(&si->lock);
-       if (si->flags & SWP_WRITEOK) {
-               /* This is called for allocating swap entry, not cache */
-               offset = scan_swap_map(si, 1);
-               if (offset) {
-                       atomic_long_dec(&nr_swap_pages);
-                       spin_unlock(&si->lock);
-                       return swp_entry(type, offset);
-               }
-       }
+       if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
+               atomic_long_dec(&nr_swap_pages);
        spin_unlock(&si->lock);
 fail:
-       return (swp_entry_t) {0};
+       return entry;
 }
 
 /*
@@ -2649,7 +2628,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        spin_lock(&p->lock);
        drain_mmlist();
 
-       /* wait for anyone still in scan_swap_map */
+       /* wait for anyone still in scan_swap_map_slots */
        p->highest_bit = 0;             /* cuts scans short */
        while (p->flags >= SWP_SCANNING) {
                spin_unlock(&p->lock);
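
The last hunk only renames the function in swapoff's drain comment, but
the pattern it documents is worth spelling out: clearing highest_bit
makes any in-flight scan give up quickly, and swapoff then spins until
the SWP_SCANNING flag drops. A toy user-space analogue, using pthreads
and C11 atomics (none of these names or values are from the kernel;
build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int highest_bit = 100;    /* 0 cuts scans short */
static atomic_int scanning = 1;         /* one scan already in flight */

static void *scanner(void *arg)
{
        (void)arg;
        /* examine "slots" until highest_bit is cleared */
        for (int i = 0; i < atomic_load(&highest_bit); i++)
                usleep(1000);
        atomic_store(&scanning, 0);     /* like clearing SWP_SCANNING */
        return NULL;
}

int main(void)
{
        pthread_t t;

        if (pthread_create(&t, NULL, scanner, NULL))
                return 1;

        atomic_store(&highest_bit, 0);  /* "cuts scans short" */
        while (atomic_load(&scanning))  /* wait for the scanner to leave */
                usleep(1000);
        pthread_join(t, NULL);
        puts("no scanners left; teardown can proceed");
        return 0;
}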