mm/page-writeback: drop usage of folio_index
author Kairui Song <kasong@tencent.com>
Mon, 25 Aug 2025 16:37:21 +0000 (00:37 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:17 +0000 (17:25 -0700)
folio_index is only needed for mixed usage of the page cache and swap cache.
The remaining three callers in page-writeback are for page cache tag
marking.  Swap cache space doesn't use tags (it explicitly calls
mapping_set_no_writeback_tags), so use folio->index here directly.

Link: https://lkml.kernel.org/r/20250825163721.17734-1-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
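
For context, folio_index() exists only to hide the fact that swap cache
folios are not indexed by folio->index but by their position in the swap
cache space.  A rough sketch of its shape (hedged: the exact body and the
name of the swap-cache helper vary across trees; swap_cache_index() here
stands in for the helper declared in mm's private "swap.h"):

	/*
	 * Sketch of folio_index(): swap cache folios are indexed by their
	 * swap entry, which needs a helper from mm/swap.h; page cache
	 * folios can use folio->index directly.
	 */
	static inline pgoff_t folio_index(struct folio *folio)
	{
		if (unlikely(folio_test_swapcache(folio)))
			return swap_cache_index(folio->swap);
		return folio->index;
	}

That swap-cache branch is what pulled mm's private "swap.h" into
page-writeback.c; with the last folio_index() callers gone, the include
can be dropped, while the new sanity check below needs <linux/shmem_fs.h>
for shmem_mapping().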
mm/page-writeback.c

index 7e1e798e72133c8a53fda9ba06f191793b333ff4..5f90fd6a713793339339674cdbc75950aa8f9172 100644
 #include <linux/sched/rt.h>
 #include <linux/sched/signal.h>
 #include <linux/mm_inline.h>
+#include <linux/shmem_fs.h>
 #include <trace/events/writeback.h>
 
 #include "internal.h"
-#include "swap.h"
 
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
@@ -2705,12 +2705,18 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 {
        unsigned long flags;
 
+       /*
+        * Shmem writeback relies on swap, and swap writeback is LRU based,
+        * not using the dirty mark.
+        */
+       VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
+
        xa_lock_irqsave(&mapping->i_pages, flags);
        if (folio->mapping) {   /* Race with truncate? */
                WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
                folio_account_dirtied(folio, mapping);
-               __xa_set_mark(&mapping->i_pages, folio_index(folio),
-                               PAGECACHE_TAG_DIRTY);
+               __xa_set_mark(&mapping->i_pages, folio->index,
+                             PAGECACHE_TAG_DIRTY);
        }
        xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
@@ -2989,7 +2995,7 @@ bool __folio_end_writeback(struct folio *folio)
 
                xa_lock_irqsave(&mapping->i_pages, flags);
                ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
-               __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+               __xa_clear_mark(&mapping->i_pages, folio->index,
                                        PAGECACHE_TAG_WRITEBACK);
                if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
                        struct bdi_writeback *wb = inode_to_wb(inode);
@@ -3026,7 +3032,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
        if (mapping && mapping_use_writeback_tags(mapping)) {
-               XA_STATE(xas, &mapping->i_pages, folio_index(folio));
+               XA_STATE(xas, &mapping->i_pages, folio->index);
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                unsigned long flags;
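
The mapping_use_writeback_tags() check above is what makes the conversion
safe: writeback tags are opt-out per address_space, and swap address
spaces opt out when they are created via mapping_set_no_writeback_tags().
The two pagemap helpers are essentially the following (a sketch of the
include/linux/pagemap.h bit helpers built on the AS_NO_WRITEBACK_TAGS
flag):

	/* Opt a mapping out of xarray writeback tags (done for swap cache). */
	static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
	{
		set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
	}

	/* True for mappings that maintain dirty/writeback tags in i_pages. */
	static inline bool mapping_use_writeback_tags(struct address_space *mapping)
	{
		return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
	}

Since a folio can only reach these tag-marking paths through a mapping
that keeps writeback tags, it is never a swap cache folio here, and
folio->index is always the correct xarray index.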