Most callers of wbc_account_cgroup_owner() convert a folio to a page
before calling the function, and wbc_account_cgroup_owner() then
converts the page back to a folio to call mem_cgroup_css_from_folio().
Convert wbc_account_cgroup_owner() to take a folio instead of a page,
and convert all callers to pass a folio directly, except f2fs.
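
A simplified sketch of the round-trip being removed (taken from the
iomap caller and the wbc_account_cgroup_owner() body in the diff
below):

	/* caller, before this patch: folio converted to a page */
	wbc_account_cgroup_owner(wbc, &folio->page, len);

	/* callee, before this patch: page converted back to a folio */
	folio = page_folio(page);
	css = mem_cgroup_css_from_folio(folio);
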
In f2fs, convert the page to a folio at each call site, as f2fs
contained the only callers passing wbc_account_cgroup_owner() a page.
Since f2fs is already in the process of converting to folios, these
call sites will likely pass a folio directly in the future.
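
The f2fs call sites now wrap the page at the call site instead, e.g.:

	wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
				 PAGE_SIZE);
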
No functional changes. Only compile tested.
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20240926140121.203821-1-kernel@pankajraghav.com
Acked-by: David Sterba <dsterba@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
        a queue (device) has been associated with the bio and
        before submission.
 
-  wbc_account_cgroup_owner(@wbc, @page, @bytes)
+  wbc_account_cgroup_owner(@wbc, @folio, @bytes)
        Should be called for each data segment being written out.
        While this function doesn't care exactly when it's called
        during the writeback session, it's the easiest and most
 
                }
 
                if (bio_ctrl->wbc)
-                       wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
-                                                len);
+                       wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len);
 
                size -= len;
                ret = bio_add_folio(&bbio->bio, folio, eb->len,
                                    eb->start - folio_pos(folio));
                ASSERT(ret);
-               wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
+               wbc_account_cgroup_owner(wbc, folio, eb->len);
                folio_unlock(folio);
        } else {
                int num_folios = num_extent_folios(eb);
                        folio_start_writeback(folio);
                        ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
                        ASSERT(ret);
-                       wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
-                                                eb->folio_size);
+                       wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
                        wbc->nr_to_write -= folio_nr_pages(folio);
                        folio_unlock(folio);
                }
 
                         * need full accuracy.  Just account the whole thing
                         * against the first page.
                         */
-                       wbc_account_cgroup_owner(wbc, &locked_folio->page,
+                       wbc_account_cgroup_owner(wbc, locked_folio,
                                                 cur_end - start);
                        async_chunk[i].locked_folio = locked_folio;
                        locked_folio = NULL;
 
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_write_hint = write_hint;
 
-       __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+       bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
 
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
 
        if (wbc) {
                wbc_init_bio(wbc, bio);
-               wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
+               wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
        }
 
        submit_bio(bio);
 
                io_submit_init_bio(io, bh);
        if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
                goto submit_and_retry;
-       wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
+       wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
        io->io_next_block++;
 }
 
 
        }
 
        if (fio->io_wbc && !is_read_io(fio->op))
-               wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+               wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
+                                        PAGE_SIZE);
 
        inc_page_count(fio->sbi, is_read_io(fio->op) ?
                        __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
        }
 
        if (fio->io_wbc)
-               wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+               wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
+                                        PAGE_SIZE);
 
        inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
 
        }
 
        if (fio->io_wbc)
-               wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+               wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
+                                        PAGE_SIZE);
 
        io->last_block_in_bio = fio->new_blkaddr;
 
 
 /**
  * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
  * @wbc: writeback_control of the writeback in progress
- * @page: page being written out
+ * @folio: folio being written out
  * @bytes: number of bytes being written out
  *
- * @bytes from @page are about to written out during the writeback
+ * @bytes from @folio are about to be written out during the writeback
  * controlled by @wbc.  Keep the book for foreign inode detection.  See
  * wbc_detach_inode().
  */
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
                              size_t bytes)
 {
-       struct folio *folio;
        struct cgroup_subsys_state *css;
        int id;
 
        if (!wbc->wb || wbc->no_cgroup_owner)
                return;
 
-       folio = page_folio(page);
        css = mem_cgroup_css_from_folio(folio);
        /* dead cgroups shouldn't contribute to inode ownership arbitration */
        if (!(css->flags & CSS_ONLINE))
 
        if (ifs)
                atomic_add(len, &ifs->write_bytes_pending);
        wpc->ioend->io_size += len;
-       wbc_account_cgroup_owner(wbc, &folio->page, len);
+       wbc_account_cgroup_owner(wbc, folio, len);
        return 0;
 }
 
 
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
-       wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
+       wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
        length = first_unmapped << blkbits;
        if (!bio_add_folio(bio, folio, length, 0)) {
                bio = mpage_bio_submit_write(bio);
 
                                 struct inode *inode)
        __releases(&inode->i_lock);
 void wbc_detach_inode(struct writeback_control *wbc);
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
                              size_t bytes);
 int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
                           enum wb_reason reason, struct wb_completion *done);
 }
 
 static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
-                                           struct page *page, size_t bytes)
+                                           struct folio *folio, size_t bytes)
 {
 }