mm: shmem: use swap_free_nr() to free shmem swap entries
Author:     Baolin Wang <baolin.wang@linux.alibaba.com>
AuthorDate: Mon, 12 Aug 2024 07:42:06 +0000 (15:42 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sat, 17 Aug 2024 00:53:14 +0000 (17:53 -0700)
As a preparation for supporting shmem large folio swapout, use
swap_free_nr() to free the contiguous swap entries of a shmem large
folio when the large folio is swapped in from the swap cache.  In
addition, the index should also be rounded down to a multiple of the
folio's page count when adding the swapin folio into the pagecache.
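
For illustration only (not part of this patch): swap_free_nr() is
semantically equivalent to freeing each of the nr contiguous swap
entries in turn.  A hedged sketch of those semantics, using a
hypothetical helper name; the in-tree implementation in mm/swapfile.c
batches the frees rather than looping per entry like this:

/*
 * Sketch only: free 'nr_pages' contiguous swap entries starting at
 * 'entry', one swap_free() call at a time.  swap_free_nr() has the
 * same effect without the per-entry overhead.
 */
static void swap_free_nr_sketch(swp_entry_t entry, int nr_pages)
{
	unsigned long offset = swp_offset(entry);
	int i;

	for (i = 0; i < nr_pages; i++)
		swap_free(swp_entry(swp_type(entry), offset + i));
}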

Link: https://lkml.kernel.org/r/342207fa679fc88a447dac2e101ad79e6050fe79.1723434324.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/shmem.c b/mm/shmem.c
index e9bdc53d70006ba870450881d8c059ef22cf878f..18422db2cd5846a960eb26e14522e1c61c0cb211 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1957,6 +1957,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        swp_entry_t swapin_error;
        void *old;
+       int nr_pages;
 
        swapin_error = make_poisoned_swp_entry();
        old = xa_cmpxchg_irq(&mapping->i_pages, index,
@@ -1965,6 +1966,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
        if (old != swp_to_radix_entry(swap))
                return;
 
+       nr_pages = folio_nr_pages(folio);
        folio_wait_writeback(folio);
        delete_from_swap_cache(folio);
        /*
@@ -1972,8 +1974,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
         * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
         * in shmem_evict_inode().
         */
-       shmem_recalc_inode(inode, -1, -1);
-       swap_free(swap);
+       shmem_recalc_inode(inode, -nr_pages, -nr_pages);
+       swap_free_nr(swap, nr_pages);
 }
 
 /*
@@ -1992,7 +1994,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
        struct swap_info_struct *si;
        struct folio *folio = NULL;
        swp_entry_t swap;
-       int error;
+       int error, nr_pages;
 
        VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
        swap = radix_to_swp_entry(*foliop);
@@ -2039,6 +2041,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                goto failed;
        }
        folio_wait_writeback(folio);
+       nr_pages = folio_nr_pages(folio);
 
        /*
         * Some architectures may have to restore extra metadata to the
@@ -2052,19 +2055,20 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                        goto failed;
        }
 
-       error = shmem_add_to_page_cache(folio, mapping, index,
+       error = shmem_add_to_page_cache(folio, mapping,
+                                       round_down(index, nr_pages),
                                        swp_to_radix_entry(swap), gfp);
        if (error)
                goto failed;
 
-       shmem_recalc_inode(inode, 0, -1);
+       shmem_recalc_inode(inode, 0, -nr_pages);
 
        if (sgp == SGP_WRITE)
                folio_mark_accessed(folio);
 
        delete_from_swap_cache(folio);
        folio_mark_dirty(folio);
-       swap_free(swap);
+       swap_free_nr(swap, nr_pages);
        put_swap_device(si);
 
        *foliop = folio;
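
A note on the round_down(index, nr_pages) above: a large folio
occupies nr_pages naturally aligned slots in the pagecache, so the
faulting index has to be aligned back to the folio's head index
before insertion.  A standalone, hedged illustration (userspace
restatement of the kernel macro for power-of-two sizes, with assumed
example values):

#include <stdio.h>

/* Userspace restatement of the kernel's round_down() (power-of-two y). */
#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	unsigned long index = 19, nr_pages = 16;

	/* Index 19 lies inside the large folio covering [16, 32). */
	printf("head index = %lu\n", round_down(index, nr_pages));
	return 0;
}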