mm: Make __folio_end_writeback() return void
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 18 Jul 2023 16:47:25 +0000 (12:47 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Wed, 4 Oct 2023 01:18:59 +0000 (21:18 -0400)
Rather than check the result of test-and-clear, just check that we have
the writeback bit set at the start.  This wouldn't catch every case, but
it's good enough (and enables the next patch).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
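
[Editor's note: a minimal standalone C sketch of the pattern change described above.  The names writeback_bit, end_writeback_old() and end_writeback_new() are stand-ins, not kernel symbols; assert() stands in for BUG()/VM_BUG_ON_FOLIO().  The old flow derived "was writeback set?" from the return value of the test-and-clear; the new flow asserts the bit at entry and clears it without consuming the result.]

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for the folio's PG_writeback bit. */
static atomic_bool writeback_bit;

/* Old shape: the sanity check hangs off the test-and-clear result. */
static void end_writeback_old(void)
{
	bool was_set = atomic_exchange(&writeback_bit, false);

	if (!was_set)
		assert(0);			/* kernel: BUG() */
}

/* New shape: assert the precondition up front, then clear regardless. */
static void end_writeback_new(void)
{
	assert(atomic_load(&writeback_bit));	/* kernel: VM_BUG_ON_FOLIO() */
	atomic_exchange(&writeback_bit, false);	/* result intentionally ignored */
}
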
mm/filemap.c
mm/internal.h
mm/page-writeback.c

diff --git a/mm/filemap.c b/mm/filemap.c
index 3dad2615af414f1009ab891d8b70757ee2c7d725..ddcced4638b53133c0eee1fa136717e2061e5cc0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1595,9 +1595,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+       VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
        /*
         * folio_test_clear_reclaim() could be used here but it is an
         * atomic operation and overkill in this particular case. Failing
@@ -1617,8 +1623,7 @@ void folio_end_writeback(struct folio *folio)
         * reused before the folio_wake().
         */
        folio_get(folio);
-       if (!__folio_end_writeback(folio))
-               BUG();
+       __folio_end_writeback(folio);
 
        smp_mb__after_atomic();
        folio_wake(folio, PG_writeback);
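
[Editor's note: a hedged illustration of the contract documented in the kernel-doc hunk above, not part of this patch.  myfs_writeback_done() is a made-up filesystem completion handler; the folio was marked earlier with folio_start_writeback(), so the new VM_BUG_ON_FOLIO() in folio_end_writeback() holds, and per the new comment this may run from interrupt context.]

#include <linux/pagemap.h>

/* Hypothetical writeback completion path for an imaginary filesystem. */
static void myfs_writeback_done(struct folio *folio, int error)
{
	if (error)
		mapping_set_error(folio->mapping, error);
	folio_end_writeback(folio);	/* folio must still be under writeback */
}
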
diff --git a/mm/internal.h b/mm/internal.h
index 30cf724ddbce3399999d6a9e9816fa133e9c5c4b..ccb08dd9b5ec7b47a827ead5327941eceffca8f6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b8d3d7040a506a01c9fdc7743a2f0ca6795575fb..410b53e888e3b649986d7b4c9c23695e3464bace 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2940,11 +2940,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
        spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
        long nr = folio_nr_pages(folio);
        struct address_space *mapping = folio_mapping(folio);
-       bool ret;
 
        folio_memcg_lock(folio);
        if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2953,19 +2952,16 @@ bool __folio_end_writeback(struct folio *folio)
                unsigned long flags;
 
                xa_lock_irqsave(&mapping->i_pages, flags);
-               ret = folio_test_clear_writeback(folio);
-               if (ret) {
-                       __xa_clear_mark(&mapping->i_pages, folio_index(folio),
-                                               PAGECACHE_TAG_WRITEBACK);
-                       if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                               struct bdi_writeback *wb = inode_to_wb(inode);
-
-                               wb_stat_mod(wb, WB_WRITEBACK, -nr);
-                               __wb_writeout_add(wb, nr);
-                               if (!mapping_tagged(mapping,
-                                                   PAGECACHE_TAG_WRITEBACK))
-                                       wb_inode_writeback_end(wb);
-                       }
+               folio_test_clear_writeback(folio);
+               __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+                                       PAGECACHE_TAG_WRITEBACK);
+               if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                       struct bdi_writeback *wb = inode_to_wb(inode);
+
+                       wb_stat_mod(wb, WB_WRITEBACK, -nr);
+                       __wb_writeout_add(wb, nr);
+                       if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+                               wb_inode_writeback_end(wb);
                }
 
                if (mapping->host && !mapping_tagged(mapping,
@@ -2974,15 +2970,13 @@ bool __folio_end_writeback(struct folio *folio)
 
                xa_unlock_irqrestore(&mapping->i_pages, flags);
        } else {
-               ret = folio_test_clear_writeback(folio);
-       }
-       if (ret) {
-               lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-               node_stat_mod_folio(folio, NR_WRITTEN, nr);
+               folio_test_clear_writeback(folio);
        }
+
+       lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+       zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+       node_stat_mod_folio(folio, NR_WRITTEN, nr);
        folio_memcg_unlock(folio);
-       return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)
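
[Editor's note: for readability, the resulting __folio_end_writeback() with this patch applied, reconstructed from the three mm/page-writeback.c hunks above.  Lines that fall between hunks (local variable declarations and the per-inode writeback handling) are unchanged by the patch and are elided here rather than guessed at.]

void __folio_end_writeback(struct folio *folio)
{
	long nr = folio_nr_pages(folio);
	struct address_space *mapping = folio_mapping(folio);

	folio_memcg_lock(folio);
	if (mapping && mapping_use_writeback_tags(mapping)) {
		/* ... inode/bdi local variables elided (unchanged) ... */
		unsigned long flags;

		xa_lock_irqsave(&mapping->i_pages, flags);
		folio_test_clear_writeback(folio);
		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
					PAGECACHE_TAG_WRITEBACK);
		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
			struct bdi_writeback *wb = inode_to_wb(inode);

			wb_stat_mod(wb, WB_WRITEBACK, -nr);
			__wb_writeout_add(wb, nr);
			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
				wb_inode_writeback_end(wb);
		}

		/* ... per-inode writeback tag handling elided (unchanged) ... */

		xa_unlock_irqrestore(&mapping->i_pages, flags);
	} else {
		folio_test_clear_writeback(folio);
	}

	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
	node_stat_mod_folio(folio, NR_WRITTEN, nr);
	folio_memcg_unlock(folio);
}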