www.infradead.org Git - users/hch/block.git/commitdiff
block: remove blk_needs_flush_plug
author     Christoph Hellwig <hch@lst.de>
           Tue, 11 Jan 2022 09:08:49 +0000 (10:08 +0100)
committer  Christoph Hellwig <hch@lst.de>
           Tue, 11 Jan 2022 09:12:30 +0000 (10:12 +0100)
blk_needs_flush_plug fails to account for the callback list and is one
of a few reasons why blkdev.h needs to pull in sched.h.  Remove it and
just make blk_flush_plug check for a non-NULL plug before calling out
of line, which gets us 90% of the advantages without poking into plug
details in the header.

Signed-off-by: Christoph Hellwig <hch@lst.de>
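
The shape this leaves behind is a common kernel pattern: a trivial inline
guard in the header hides the out-of-line work, so callers can call
unconditionally and the header never needs the structure layout.  Below is
a minimal, self-contained userspace sketch of that split; the names
(flush_plug/__flush_plug, struct plug's pending field) are illustrative
stand-ins, not the kernel's API.

/*
 * Sketch of the inline-guard pattern this patch applies.  The "header"
 * half sees only an opaque pointer and a cheap NULL check; the helper
 * that actually walks the plug lives out of line in the ".c" half.
 */
#include <stdio.h>

/* "header" side: the type stays opaque */
struct plug;

void __flush_plug(struct plug *plug);	/* out of line, in the ".c" file */

static inline void flush_plug(struct plug *plug)
{
	if (plug)			/* fast path: no plug, no call */
		__flush_plug(plug);
}

/* ".c" side: the only place that knows the layout */
struct plug {
	int pending;
};

void __flush_plug(struct plug *plug)
{
	printf("flushing %d pending requests\n", plug->pending);
	plug->pending = 0;
}

int main(void)
{
	struct plug p = { .pending = 3 };

	flush_plug(NULL);		/* costs only the pointer test */
	flush_plug(&p);
	return 0;
}

Callers then invoke flush_plug() unconditionally, exactly as the hunks
below do for blk_flush_plug(); when no plug is set, the call compiles
down to a single pointer test.
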
block/blk-core.c
fs/fs-writeback.c
include/linux/blkdev.h
kernel/exit.c
kernel/sched/core.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 97f8bc8d3a7916c4b3fde840ea6aa55eaf3f82f5..f5f0d45841a47443e70ff5083935c593a4f722f0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -991,8 +991,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;
 
-       if (current->plug)
-               blk_flush_plug(current->plug, false);
+       blk_flush_plug(current->plug, false);
 
        if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
                return 0;
@@ -1261,7 +1260,7 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 }
 EXPORT_SYMBOL(blk_check_plugged);
 
-void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
        if (!list_empty(&plug->cb_list))
                flush_plug_callbacks(plug, from_schedule);
@@ -1290,7 +1289,7 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 void blk_finish_plug(struct blk_plug *plug)
 {
        if (plug == current->plug) {
-               blk_flush_plug(plug, false);
+               __blk_flush_plug(plug, false);
                current->plug = NULL;
        }
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 67f0e88eed01ae3035d7593f21057ccfc47db856..148b5a40c0038f66432368900949e9d4d1c974e6 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1893,8 +1893,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                         * unplug, so get our IOs out the door before we
                         * give up the CPU.
                         */
-                       if (current->plug)
-                               blk_flush_plug(current->plug, false);
+                       blk_flush_plug(current->plug, false);
                        cond_resched();
                }
 
@@ -2291,8 +2290,7 @@ void wakeup_flusher_threads(enum wb_reason reason)
        /*
         * If we are expecting writeback progress we must submit plugged IO.
         */
-       if (blk_needs_flush_plug(current))
-               blk_flush_plug(current->plug, true);
+       blk_flush_plug(current->plug, true);
 
        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9c95df26fc26b1fec99c5325f8f22ceeef59d175..56351ac489096ff36c4c0cd21ee28a2b1081e445 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -2,7 +2,6 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H
 
-#include <linux/sched.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
 #include <linux/llist.h>
@@ -790,15 +789,11 @@ extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
 extern void blk_finish_plug(struct blk_plug *);
-
-void blk_flush_plug(struct blk_plug *plug, bool from_schedule);
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
-       struct blk_plug *plug = tsk->plug;
-
-       return plug &&
-                (plug->mq_list || !list_empty(&plug->cb_list));
+       if (plug)
+               __blk_flush_plug(plug, from_schedule);
 }
 
 int blkdev_issue_flush(struct block_device *bdev);
@@ -824,11 +819,6 @@ static inline void blk_flush_plug(struct blk_plug *plug, bool async)
 {
 }
 
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
-{
-       return false;
-}
-
 static inline int blkdev_issue_flush(struct block_device *bdev)
 {
        return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index f702a6a63686ea7c463ea5b73a7f5b665bb2d91e..a1e073254e14781833620b84474c00f637fd627c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -742,7 +742,7 @@ void __noreturn do_exit(long code)
         * Then do everything else.
         */
 
-       WARN_ON(blk_needs_flush_plug(tsk));
+       WARN_ON(tsk->plug);
 
        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 76f9deeaa942099798f4851fbd85feab7c55d244..a99eb2d016ab7f950d05477946a692998682957d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6302,8 +6302,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
         * If we are going to sleep and we have plugged IO queued,
         * make sure to submit it to avoid deadlocks.
         */
-       if (blk_needs_flush_plug(tsk))
-               blk_flush_plug(tsk->plug, true);
+       blk_flush_plug(tsk->plug, true);
 }
 
 static void sched_update_worker(struct task_struct *tsk)
@@ -8335,8 +8334,7 @@ int io_schedule_prepare(void)
        int old_iowait = current->in_iowait;
 
        current->in_iowait = 1;
-       if (current->plug)
-               blk_flush_plug(current->plug, true);
+       blk_flush_plug(current->plug, true);
 
        return old_iowait;
 }