mm: shmem: return number of pages being freed in shmem_free_swap
author Daniel Gomez <da.gomez@samsung.com>
Mon, 12 Aug 2024 07:42:04 +0000 (15:42 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:34 +0000 (21:15 -0700)
Both shmem_free_swap callers expect the number of pages being freed.  In
the large folios context, the return value needs to express more than the
old convention of 0 (meaning one page freed) and -ENOENT (meaning zero
pages freed).  In preparation for large folios adoption, make the
shmem_free_swap routine return the number of pages being freed, so that a
return of 0 now means zero pages were freed.
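
For illustration only, a minimal userspace sketch of the old and new
calling conventions (old_free_swap/new_free_swap are hypothetical
stand-ins, not the kernel functions, which live in mm/shmem.c):

    #include <errno.h>
    #include <stdio.h>

    /* Old convention: 0 on success (one page freed), -ENOENT if the
     * swap entry was not found (zero pages freed). */
    static int old_free_swap(int found)
    {
            return found ? 0 : -ENOENT;
    }

    /* New convention: return the number of pages freed directly;
     * 0 now means "entry not found, nothing freed". */
    static long new_free_swap(int found, int order)
    {
            return found ? 1L << order : 0;
    }

    int main(void)
    {
            long nr_swaps_freed = 0;

            /* Old callers inverted the error code into a page count: */
            nr_swaps_freed += !old_free_swap(1);    /* +1 page  */
            nr_swaps_freed += !old_free_swap(0);    /* +0 pages */

            /* New callers accumulate the count directly, so an
             * order-4 large folio contributes 16 pages: */
            nr_swaps_freed += new_free_swap(1, 4);  /* +16 pages */
            nr_swaps_freed += new_free_swap(0, 4);  /* +0 pages  */

            printf("freed %ld pages\n", nr_swaps_freed); /* prints 17 */
            return 0;
    }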

While we are at it, switch to free_swap_and_cache_nr() so that a
large-order swap entry is freed in a single call (change by Baolin Wang).
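
The page count comes from the XArray entry's order: an order-N entry
covers 1 << N consecutive indices, hence 1 << N pages.  A trivial plain-C
illustration of that arithmetic (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            /* An order-N (large) entry spans 1 << N pages. */
            for (int order = 0; order <= 4; order++)
                    printf("order %d -> %d pages\n", order, 1 << order);
            return 0;
    }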

Link: https://lkml.kernel.org/r/9623e863c83d749d5ab407f6fdf0a8e5a3bdf052.1723434324.git.baolin.wang@linux.alibaba.com
Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 02fb188d627f48ba1f32a8ea8f7b8d08e8de7861..d0d54939da4803e1e96a96b86acec3ec5034cc4f 100644
@@ -856,18 +856,22 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
 }
 
 /*
- * Remove swap entry from page cache, free the swap and its page cache.
+ * Remove swap entry from page cache, free the swap and its page cache. Returns
+ * the number of pages being freed. 0 means entry not found in XArray (0 pages
+ * being freed).
  */
-static int shmem_free_swap(struct address_space *mapping,
-                          pgoff_t index, void *radswap)
+static long shmem_free_swap(struct address_space *mapping,
+                           pgoff_t index, void *radswap)
 {
+       int order = xa_get_order(&mapping->i_pages, index);
        void *old;
 
        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
-               return -ENOENT;
-       free_swap_and_cache(radix_to_swp_entry(radswap));
-       return 0;
+               return 0;
+       free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
+
+       return 1 << order;
 }
 
 /*
@@ -1019,7 +1023,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (xa_is_value(folio)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
+                               nr_swaps_freed += shmem_free_swap(mapping,
                                                        indices[i], folio);
                                continue;
                        }
@@ -1086,14 +1090,17 @@ whole_folios:
                        folio = fbatch.folios[i];
 
                        if (xa_is_value(folio)) {
+                               long swaps_freed;
+
                                if (unfalloc)
                                        continue;
-                               if (shmem_free_swap(mapping, indices[i], folio)) {
+                               swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+                               if (!swaps_freed) {
                                        /* Swap was replaced by page: retry */
                                        index = indices[i];
                                        break;
                                }
-                               nr_swaps_freed++;
+                               nr_swaps_freed += swaps_freed;
                                continue;
                        }