mm: add pfn_swap_entry_folio()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Thu, 11 Jan 2024 15:24:20 +0000 (15:24 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Thu, 22 Feb 2024 00:00:03 +0000 (16:00 -0800)
Patch series "mm: convert mm counter to take a folio", v3.

Make sure all mm_counter() and mm_counter_file() callers have a folio,
then convert mm counter functions to take a folio, which saves some
compound_head() calls.
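
As a rough sketch of where the series ends up (the final form in mm may
differ), the counter helpers take a folio and test its flags directly,
where the page-based versions had to go through compound_head() for each
flag test:

    static inline int mm_counter_file(struct folio *folio)
    {
            /*
             * folio_test_swapbacked() reads the flag from the folio
             * itself; the old PageSwapBacked(page) first had to find
             * the head page via compound_head().
             */
            if (folio_test_swapbacked(folio))
                    return MM_SHMEMPAGES;
            return MM_FILEPAGES;
    }

    static inline int mm_counter(struct folio *folio)
    {
            if (folio_test_anon(folio))
                    return MM_ANONPAGES;
            return mm_counter_file(folio);
    }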

This patch (of 10):

Thanks to the compound_head() hidden inside PageLocked(), this saves a
call to compound_head() over calling page_folio(pfn_swap_entry_to_page()).
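
Concretely, a sketch of the two call paths (illustrative, not taken
verbatim from this patch):

    struct folio *folio;

    /*
     * Before: pfn_swap_entry_to_page() checks PageLocked(p), which
     * resolves the head page via compound_head(); page_folio() then
     * performs the same resolution a second time.
     */
    folio = page_folio(pfn_swap_entry_to_page(entry));

    /*
     * After: pfn_folio() resolves the folio once up front, and
     * folio_test_locked() operates on the folio directly, with no
     * further compound_head() call.
     */
    folio = pfn_swap_entry_folio(entry);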

Link: https://lkml.kernel.org/r/20240111152429.3374566-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240111152429.3374566-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swapops.h
mm/filemap.c
mm/huge_memory.c

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index bff1e8d97de0e089a70ed3f9aa872bf050955087..48b700ba1d188a798209d4de4693173bfc6b98af 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -468,6 +468,19 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
        return p;
 }
 
+static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
+{
+       struct folio *folio = pfn_folio(swp_offset_pfn(entry));
+
+       /*
+        * Any use of migration entries may only occur while the
+        * corresponding folio is locked
+        */
+       BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
+
+       return folio;
+}
+
 /*
  * A pfn swap entry is a special type of swap entry that always has a pfn stored
  * in the swap offset. They are used to represent unaddressable device memory
diff --git a/mm/filemap.c b/mm/filemap.c
index 0d7e20edf46f59801db10abeb7bf1e1c56bfc113..142864338ca4f2600bd54a411ffba6fbb558fcde 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1354,7 +1354,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
        unsigned long pflags;
        bool in_thrashing;
        wait_queue_head_t *q;
-       struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
+       struct folio *folio = pfn_swap_entry_folio(entry);
 
        q = folio_waitqueue(folio);
        if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 94c958f7ebb50dd925070157c0d0b2432dfc0483..5468b2f97cbf70c7bf581d51a606a1fcdc32f82c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2045,7 +2045,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
-               struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
+               struct folio *folio = pfn_swap_entry_folio(entry);
                pmd_t newpmd;
 
                VM_BUG_ON(!is_pmd_migration_entry(*pmd));