www.infradead.org Git - users/willy/pagecache.git/commitdiff
writeback: Factor out writeback_finish()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 22 Jun 2023 19:42:22 +0000 (15:42 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 28 Jun 2023 19:06:38 +0000 (15:06 -0400)
Instead of having a 'done' variable that controls the nested loops,
have a writeback_finish() that can be returned directly.  This involves
keeping more things in writeback_control, but it's just moving stuff
allocated on the stack to being allocated slightly earlier on the stack.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
include/linux/writeback.h
mm/page-writeback.c

index fba937999fbfd344ceda7b52f61ff6b57c81ce38..5b7d11f540132e4c42cf9ca3c403195a5b0bc793 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/flex_proportions.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/blk_types.h>
+#include <linux/pagevec.h>
 
 struct bio;
 
@@ -52,6 +53,10 @@ struct writeback_control {
        loff_t range_start;
        loff_t range_end;
 
+       struct folio_batch fbatch;
+       pgoff_t done_index;
+       int err;
+
        enum writeback_sync_modes sync_mode;
 
        unsigned for_kupdate:1;         /* A kupdate writeback */
@@ -59,6 +64,7 @@ struct writeback_control {
        unsigned tagged_writepages:1;   /* tag-and-write to avoid livelock */
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned range_cyclic:1;        /* range_start is cyclic */
+       unsigned range_whole:1;         /* entire file */
        unsigned for_sync:1;            /* sync(2) WB_SYNC_ALL writeback */
        unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
 
index d3f42009bb702e2e62a0f69c985b2466bd83b208..b5421510b593cce517bc08d7a9731432e1c363ec 100644 (file)
@@ -2360,6 +2360,24 @@ void tag_pages_for_writeback(struct address_space *mapping,
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
+static int writeback_finish(struct address_space *mapping,
+               struct writeback_control *wbc, bool done)
+{
+       folio_batch_release(&wbc->fbatch);
+
+       /*
+        * If we hit the last page and there is more work to be done:
+        * wrap the index back to the start of the file for the next
+        * time we are called.
+        */
+       if (wbc->range_cyclic && !done)
+               wbc->done_index = 0;
+       if (wbc->range_cyclic || (wbc->range_whole && wbc->nr_to_write > 0))
+               mapping->writeback_index = wbc->done_index;
+
+       return wbc->err;
+}
+
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2395,18 +2413,12 @@ int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data)
 {
-       int ret = 0;
-       int done = 0;
        int error;
-       struct folio_batch fbatch;
        int nr_folios;
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
-       pgoff_t done_index;
-       int range_whole = 0;
        xa_mark_t tag;
 
-       folio_batch_init(&fbatch);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* prev offset */
                end = -1;
@@ -2414,7 +2426,7 @@ int write_cache_pages(struct address_space *mapping,
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
-                       range_whole = 1;
+                       wbc->range_whole = 1;
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
                tag_pages_for_writeback(mapping, index, end);
@@ -2422,21 +2434,25 @@ int write_cache_pages(struct address_space *mapping,
        } else {
                tag = PAGECACHE_TAG_DIRTY;
        }
-       done_index = index;
-       while (!done && (index <= end)) {
+
+       wbc->done_index = index;
+       folio_batch_init(&wbc->fbatch);
+       wbc->err = 0;
+
+       while (index <= end) {
                int i;
 
                nr_folios = filemap_get_folios_tag(mapping, &index, end,
-                               tag, &fbatch);
+                               tag, &wbc->fbatch);
 
                if (nr_folios == 0)
                        break;
 
                for (i = 0; i < nr_folios; i++) {
-                       struct folio *folio = fbatch.folios[i];
+                       struct folio *folio = wbc->fbatch.folios[i];
                        unsigned long nr;
 
-                       done_index = folio->index;
+                       wbc->done_index = folio->index;
 
                        folio_lock(folio);
 
@@ -2490,13 +2506,13 @@ continue_unlock:
                                        folio_unlock(folio);
                                        error = 0;
                                } else if (wbc->sync_mode != WB_SYNC_ALL) {
-                                       ret = error;
-                                       done_index = folio->index + nr;
-                                       done = 1;
-                                       break;
+                                       wbc->err = error;
+                                       wbc->done_index = folio->index + nr;
+                                       return writeback_finish(mapping,
+                                                       wbc, true);
                                }
-                               if (!ret)
-                                       ret = error;
+                               if (!wbc->err)
+                                       wbc->err = error;
                        }
 
                        /*
@@ -2507,26 +2523,14 @@ continue_unlock:
                         */
                        wbc->nr_to_write -= nr;
                        if (wbc->nr_to_write <= 0 &&
-                           wbc->sync_mode == WB_SYNC_NONE) {
-                               done = 1;
-                               break;
-                       }
+                           wbc->sync_mode == WB_SYNC_NONE)
+                               return writeback_finish(mapping, wbc, true);
                }
-               folio_batch_release(&fbatch);
+               folio_batch_release(&wbc->fbatch);
                cond_resched();
        }
 
-       /*
-        * If we hit the last page and there is more work to be done: wrap
-        * back the index back to the start of the file for the next
-        * time we are called.
-        */
-       if (wbc->range_cyclic && !done)
-               done_index = 0;
-       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-               mapping->writeback_index = done_index;
-
-       return ret;
+       return writeback_finish(mapping, wbc, false);
 }
 EXPORT_SYMBOL(write_cache_pages);