bcachefs: Split out btree_write_submit_wq
author		Kent Overstreet <kent.overstreet@linux.dev>
		Wed, 5 Jun 2024 15:08:20 +0000 (11:08 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
		Mon, 10 Jun 2024 17:17:15 +0000 (13:17 -0400)
Split the workqueues for btree read completions and btree write
submissions; we don't want concurrency control on btree read
completions, but we do want concurrency control on write submissions,
else blocking in submit_bio() will cause a ton of kworkers to be
allocated.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_io.c
fs/bcachefs/super.c
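
The concurrency control the message refers to is the max_active argument to
alloc_workqueue(). As a minimal sketch of the two allocation patterns the
patch sets up (the example_* names below are hypothetical stand-ins for the
bch_fs fields, not code from this commit):

    /*
     * Minimal sketch, not the committed code: example_* names are made up.
     * The concurrency control lives in alloc_workqueue()'s max_active
     * argument: a large value lets read completions run in parallel, while
     * max_active == 1 caps how many of these work items execute
     * concurrently, so blocking in submit_bio() can't fan out into a pile
     * of kworkers.
     */
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_read_complete_wq;
    static struct workqueue_struct *example_write_submit_wq;

    static int example_alloc_workqueues(void)
    {
            /* read completions: high max_active, effectively unthrottled */
            example_read_complete_wq =
                    alloc_workqueue("example_read_complete",
                                    WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512);

            /* write submissions: serialized, so blocking stays bounded */
            example_write_submit_wq =
                    alloc_workqueue("example_write_submit",
                                    WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1);

            if (!example_read_complete_wq || !example_write_submit_wq)
                    return -ENOMEM;
            return 0;
    }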

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 2a538eb2af110c6e8bc5fbef4301b7eed29841c0..2992a644d822c025449933a3da2c253855b56097 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -790,7 +790,8 @@ struct bch_fs {
 
        /* BTREE CACHE */
        struct bio_set          btree_bio;
-       struct workqueue_struct *io_complete_wq;
+       struct workqueue_struct *btree_read_complete_wq;
+       struct workqueue_struct *btree_write_submit_wq;
 
        struct btree_root       btree_roots_known[BTREE_ID_NR];
        DARRAY(struct btree_root) btree_roots_extra;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 829c1b91477d796bf1e3176807271ff8760e6cf7..7bca15c604f539ffd15962c1073c749bfd49cbba 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1389,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio)
                bch2_latency_acct(ca, rb->start_time, READ);
        }
 
-       queue_work(c->io_complete_wq, &rb->work);
+       queue_work(c->btree_read_complete_wq, &rb->work);
 }
 
 struct btree_node_read_all {
@@ -1656,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
                btree_node_read_all_replicas_done(&ra->cl.work);
        } else {
                continue_at(&ra->cl, btree_node_read_all_replicas_done,
-                           c->io_complete_wq);
+                           c->btree_read_complete_wq);
        }
 
        return 0;
@@ -1737,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
                if (sync)
                        btree_node_read_work(&rb->work);
                else
-                       queue_work(c->io_complete_wq, &rb->work);
+                       queue_work(c->btree_read_complete_wq, &rb->work);
        }
 }
 
@@ -2229,7 +2229,7 @@ do_write:
        atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
 
        INIT_WORK(&wbio->work, btree_write_submit);
-       queue_work(c->io_complete_wq, &wbio->work);
+       queue_work(c->btree_write_submit_wq, &wbio->work);
        return;
 err:
        set_btree_node_noevict(b);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index df2bea38e83f0ff8996b4df57fec86374dfdf34c..65e239d329157c1cebe43ab4541af2beb6af45a9 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -582,8 +582,10 @@ static void __bch2_fs_free(struct bch_fs *c)
 
        if (c->write_ref_wq)
                destroy_workqueue(c->write_ref_wq);
-       if (c->io_complete_wq)
-               destroy_workqueue(c->io_complete_wq);
+       if (c->btree_write_submit_wq)
+               destroy_workqueue(c->btree_write_submit_wq);
+       if (c->btree_read_complete_wq)
+               destroy_workqueue(c->btree_read_complete_wq);
        if (c->copygc_wq)
                destroy_workqueue(c->copygc_wq);
        if (c->btree_io_complete_wq)
@@ -878,8 +880,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
            !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
-           !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
+           !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
+           !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
            !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
                                WQ_FREEZABLE, 0)) ||
 #ifndef BCH_WRITE_REF_DEBUG