        struct address_space *mapping = inode->i_mapping;
        swp_entry_t swapin_error;
        void *old;
+       int nr_pages;
 
        swapin_error = make_poisoned_swp_entry();
        old = xa_cmpxchg_irq(&mapping->i_pages, index,
                             swp_to_radix_entry(swap),
                             swp_to_radix_entry(swapin_error), 0);
        if (old != swp_to_radix_entry(swap))
                return;
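+       /*
+        * The folio may be a large folio backed by several contiguous swap
+        * entries; record its size so that all of them are freed and the
+        * inode accounting is adjusted by the same amount below.
+        */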
 
+       nr_pages = folio_nr_pages(folio);
        folio_wait_writeback(folio);
        delete_from_swap_cache(folio);
        /*
         * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
         * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
         * in shmem_evict_inode().
         */
-       shmem_recalc_inode(inode, -1, -1);
-       swap_free(swap);
+       shmem_recalc_inode(inode, -nr_pages, -nr_pages);
+       swap_free_nr(swap, nr_pages);
 }
 
 /*
  * Swap in the folio pointed to by *foliop.
  * Caller has to make sure that *foliop contains a valid swapped folio.
  * Returns 0 and the folio in foliop if success. On failure, returns
  * the error code and NULL in *foliop.
  */
        struct swap_info_struct *si;
        struct folio *folio = NULL;
        swp_entry_t swap;
-       int error;
+       int error, nr_pages;
 
        VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
        swap = radix_to_swp_entry(*foliop);
                goto failed;
        }
        folio_wait_writeback(folio);
+       nr_pages = folio_nr_pages(folio);
 
        /*
         * Some architectures may have to restore extra metadata to the
         * folio after reading from swap.
         */
                        goto failed;
        }
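+       /*
+        * A large folio occupies nr_pages consecutive indexes, so insert it
+        * into the page cache at an index aligned to the folio size.
+        */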
 
-       error = shmem_add_to_page_cache(folio, mapping, index,
+       error = shmem_add_to_page_cache(folio, mapping,
+                                       round_down(index, nr_pages),
                                        swp_to_radix_entry(swap), gfp);
        if (error)
                goto failed;
 
-       shmem_recalc_inode(inode, 0, -1);
+       shmem_recalc_inode(inode, 0, -nr_pages);
 
        if (sgp == SGP_WRITE)
                folio_mark_accessed(folio);
 
        delete_from_swap_cache(folio);
        folio_mark_dirty(folio);
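+       /* Release every swap entry backing the folio, not just the first one. */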
-       swap_free(swap);
+       swap_free_nr(swap, nr_pages);
        put_swap_device(si);
 
        *foliop = folio;