mm/writeback: Add folio_account_cleaned()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 4 May 2021 20:12:09 +0000 (16:12 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Sun, 15 Aug 2021 11:01:56 +0000 (07:01 -0400)
Get the statistics right; compound pages were being accounted as a
single page.  This didn't matter before now as no filesystem which
supported compound pages did writeback.  Also move the declaration
to pagemap.h since this is part of the page cache.  Add a wrapper for
account_page_cleaned().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/mm.h
include/linux/pagemap.h
mm/page-writeback.c
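
The statistics fix is easy to see in isolation. Below is a minimal, self-contained userspace C sketch (illustrative only, not kernel code; the 16-page folio size is an arbitrary example) contrasting the old per-page accounting with the new per-folio accounting:

#include <stdio.h>

#define PAGE_SIZE 4096L

/* Old accounting: a compound page was treated as a single page. */
static void account_cleaned_old(long *nr_file_dirty, long *cancelled_bytes)
{
	*nr_file_dirty -= 1;
	*cancelled_bytes += PAGE_SIZE;
}

/* New accounting: scale every counter by the number of pages in the folio. */
static void account_cleaned_new(long nr, long *nr_file_dirty, long *cancelled_bytes)
{
	*nr_file_dirty -= nr;
	*cancelled_bytes += nr * PAGE_SIZE;
}

int main(void)
{
	long nr = 16;	/* e.g. an order-4 folio; arbitrary for illustration */
	long dirty_old = nr, dirty_new = nr;
	long cancelled_old = 0, cancelled_new = 0;

	account_cleaned_old(&dirty_old, &cancelled_old);
	account_cleaned_new(nr, &dirty_new, &cancelled_new);

	printf("old: NR_FILE_DIRTY %ld -> %ld, %ld bytes cancelled\n", nr, dirty_old, cancelled_old);
	printf("new: NR_FILE_DIRTY %ld -> %ld, %ld bytes cancelled\n", nr, dirty_new, cancelled_new);
	return 0;
}

For that 16-page folio the old path leaves NR_FILE_DIRTY overstated by 15 pages and under-reports the cancelled writeback by 61440 bytes.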

index c322a6e692c353c80f9d62412b939225fc73156b..a554d792e82b507f08e101c60ef9ea671efcc33d 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -39,7 +39,6 @@ struct anon_vma_chain;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct bdi_writeback;
 struct pt_regs;
 
 extern int sysctl_page_lock_unfairness;
@@ -2006,8 +2005,6 @@ extern void do_invalidatepage(struct page *page, unsigned int offset,
 
 int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
-                         struct bdi_writeback *wb);
 bool folio_mark_dirty(struct folio *folio);
 bool set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
index 3d88c17fedc92269bb7b445454cc1fa86f88df2b..665ba6a673853d8982b9b3a25c095f77b98da258 100644 (file)
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -779,6 +779,13 @@ static inline void __set_page_dirty(struct page *page,
 {
        __folio_mark_dirty(page_folio(page), mapping, warn);
 }
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
+                         struct bdi_writeback *wb);
+static inline void account_page_cleaned(struct page *page,
+               struct address_space *mapping, struct bdi_writeback *wb)
+{
+       return folio_account_cleaned(page_folio(page), mapping, wb);
+}
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
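
A hypothetical caller (function names invented here for illustration; they are not part of the patch) can keep passing a struct page through the wrapper or hand over a folio directly, assuming it holds lock_page_memcg() as folio_account_cleaned() requires:

#include <linux/pagemap.h>

/* Hypothetical callers, for illustration only. */
static void example_cancel_dirty_page(struct page *page,
				      struct address_space *mapping,
				      struct bdi_writeback *wb)
{
	/* Legacy path: the wrapper converts the page to its folio. */
	account_page_cleaned(page, mapping, wb);
}

static void example_cancel_dirty_folio(struct folio *folio,
				       struct address_space *mapping,
				       struct bdi_writeback *wb)
{
	/* Folio-native path: stats are adjusted by folio_nr_pages() pages. */
	folio_account_cleaned(folio, mapping, wb);
}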
index bd97c461d4998a90e3befa113fe99f6502d9837f..9b91cf90a33f4e54e80676408b38c89de7efc904 100644 (file)
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2453,14 +2453,15 @@ static void folio_account_dirtied(struct folio *folio,
  *
  * Caller must hold lock_page_memcg().
  */
-void account_page_cleaned(struct page *page, struct address_space *mapping,
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
                          struct bdi_writeback *wb)
 {
        if (mapping_can_writeback(mapping)) {
-               dec_lruvec_page_state(page, NR_FILE_DIRTY);
-               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-               dec_wb_stat(wb, WB_RECLAIMABLE);
-               task_io_account_cancelled_write(PAGE_SIZE);
+               long nr = folio_nr_pages(folio);
+               lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+               wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+               task_io_account_cancelled_write(nr * PAGE_SIZE);
        }
 }
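
With this change, cancelling the dirty state of (for example) a 16-page folio adjusts NR_FILE_DIRTY, NR_ZONE_WRITE_PENDING and WB_RECLAIMABLE by -16 and reports 16 * PAGE_SIZE cancelled bytes to the task's I/O accounting, where the old code always accounted one page and PAGE_SIZE bytes regardless of folio size. The *_stat_mod_folio() and wb_stat_mod() helpers take signed deltas, which is why the dec_* calls become -nr adjustments.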