mm: shmem: use swap_free_nr() to free shmem swap entries
author Baolin Wang <baolin.wang@linux.alibaba.com>
Mon, 12 Aug 2024 07:42:06 +0000 (15:42 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:34 +0000 (21:15 -0700)
As a preparation for supporting shmem large folio swapout, use
swap_free_nr() to free the contiguous swap entries of a shmem large
folio when the large folio is swapped in from the swap cache.  In
addition, the index should also be rounded down to a multiple of the
folio's page count when adding the swapin folio into the pagecache.
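
For illustration only (not part of the commit): a minimal userspace C
sketch of the two ideas above, aligning the faulting index down to the
large folio's head index (as the kernel's round_down() does for a
power-of-two nr_pages) and freeing all nr_pages contiguous swap entries
with one batched call.  round_down_pow2(), sketch_swap_free_nr() and
the toy swap_map are hypothetical stand-ins, not kernel APIs.

/*
 * Illustrative sketch only, not kernel code: models the index alignment
 * and the batched swap-entry free described in the commit message.
 */
#include <assert.h>
#include <stdio.h>

/*
 * Mirrors the kernel's round_down() for a power-of-two size: clear the
 * low bits so the index points at the large folio's head page.
 */
static unsigned long round_down_pow2(unsigned long x, unsigned long n)
{
	assert(n && (n & (n - 1)) == 0);	/* folio sizes are powers of two */
	return x & ~(n - 1);
}

/*
 * Hypothetical stand-in for swap_free_nr(): release nr contiguous swap
 * slots starting at offset in one call, rather than nr single frees.
 */
static void sketch_swap_free_nr(unsigned char *swap_map, unsigned long offset,
				int nr)
{
	for (int i = 0; i < nr; i++)
		swap_map[offset + i]--;		/* drop one reference per slot */
}

int main(void)
{
	unsigned char swap_map[32] = { 0 };	/* toy swap map */
	unsigned long index = 19;		/* faulting index (hypothetical) */
	int nr_pages = 16;			/* order-4 large folio */

	for (int i = 16; i < 32; i++)
		swap_map[i] = 1;		/* the folio's 16 slots in use */

	/* The folio must be added to the page cache at its head index. */
	unsigned long head = round_down_pow2(index, nr_pages);
	printf("index %lu -> head index %lu\n", index, head);	/* 19 -> 16 */

	/* Free the whole folio's swap entries in one batched call. */
	sketch_swap_free_nr(swap_map, head, nr_pages);
	return 0;
}

The batched call lets the swap layer process the contiguous range at
once instead of taking the single-entry free path nr_pages times.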

Link: https://lkml.kernel.org/r/342207fa679fc88a447dac2e101ad79e6050fe79.1723434324.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index d0d54939da4803e1e96a96b86acec3ec5034cc4f..f6bab42180ea7e4ee3f27078b791a2df516ec6fa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1961,6 +1961,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        swp_entry_t swapin_error;
        void *old;
+       int nr_pages;
 
        swapin_error = make_poisoned_swp_entry();
        old = xa_cmpxchg_irq(&mapping->i_pages, index,
@@ -1969,6 +1970,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
        if (old != swp_to_radix_entry(swap))
                return;
 
+       nr_pages = folio_nr_pages(folio);
        folio_wait_writeback(folio);
        delete_from_swap_cache(folio);
        /*
@@ -1976,8 +1978,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
         * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
         * in shmem_evict_inode().
         */
-       shmem_recalc_inode(inode, -1, -1);
-       swap_free(swap);
+       shmem_recalc_inode(inode, -nr_pages, -nr_pages);
+       swap_free_nr(swap, nr_pages);
 }
 
 /*
@@ -1996,7 +1998,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
        struct swap_info_struct *si;
        struct folio *folio = NULL;
        swp_entry_t swap;
-       int error;
+       int error, nr_pages;
 
        VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
        swap = radix_to_swp_entry(*foliop);
@@ -2043,6 +2045,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                goto failed;
        }
        folio_wait_writeback(folio);
+       nr_pages = folio_nr_pages(folio);
 
        /*
         * Some architectures may have to restore extra metadata to the
@@ -2056,19 +2059,20 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                        goto failed;
        }
 
-       error = shmem_add_to_page_cache(folio, mapping, index,
+       error = shmem_add_to_page_cache(folio, mapping,
+                                       round_down(index, nr_pages),
                                        swp_to_radix_entry(swap), gfp);
        if (error)
                goto failed;
 
-       shmem_recalc_inode(inode, 0, -1);
+       shmem_recalc_inode(inode, 0, -nr_pages);
 
        if (sgp == SGP_WRITE)
                folio_mark_accessed(folio);
 
        delete_from_swap_cache(folio);
        folio_mark_dirty(folio);
-       swap_free(swap);
+       swap_free_nr(swap, nr_pages);
        put_swap_device(si);
 
        *foliop = folio;