blk-ioc: don't hold queue_lock for ioc_lookup_icq()
author    Yu Kuai <yukuai3@huawei.com>
Tue, 29 Jul 2025 02:32:29 +0000 (10:32 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 29 Jul 2025 12:26:34 +0000 (06:26 -0600)
Currently, issuing IO can grab the queue_lock three times, from
bfq_bio_merge(), bfq_limit_depth() and bfq_prepare_request(). The
queue_lock is not necessary if the icq is already created, because
neither the queue nor the ioc can be freed before IO issuing is done.
Hence remove the unnecessary queue_lock and use RCU to protect the
radix tree lookup.

Note that this is also a prep patch to support request batch dispatching [1].

[1] https://lore.kernel.org/all/20250722072431.610354-1-yukuai1@huaweicloud.com/

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20250729023229.2944898-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bfq-iosched.c
block/blk-ioc.c

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0cb1e9873aabb2d3d61a25970e003b965893335e..f71ec0887733ca16d316763ddd92ab66b09126fc 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -454,17 +454,10 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
  */
 static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
 {
-       struct bfq_io_cq *icq;
-       unsigned long flags;
-
        if (!current->io_context)
                return NULL;
 
-       spin_lock_irqsave(&q->queue_lock, flags);
-       icq = icq_to_bic(ioc_lookup_icq(q));
-       spin_unlock_irqrestore(&q->queue_lock, flags);
-
-       return icq;
+       return icq_to_bic(ioc_lookup_icq(q));
 }
 
 /*
@@ -2457,15 +2450,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
        struct bfq_data *bfqd = q->elevator->elevator_data;
-       struct request *free = NULL;
-       /*
-        * bfq_bic_lookup grabs the queue_lock: invoke it now and
-        * store its return value for later use, to avoid nesting
-        * queue_lock inside the bfqd->lock. We assume that the bic
-        * returned by bfq_bic_lookup does not go away before
-        * bfqd->lock is taken.
-        */
        struct bfq_io_cq *bic = bfq_bic_lookup(q);
+       struct request *free = NULL;
        bool ret;
 
        spin_lock_irq(&bfqd->lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index ce82770c72abad3f64db13cfb9ac8d222ee88389..9fda3906e5f5d6c5a51378cfc42ec74c776975f4 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -308,24 +308,23 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
 
 #ifdef CONFIG_BLK_ICQ
 /**
- * ioc_lookup_icq - lookup io_cq from ioc
+ * ioc_lookup_icq - lookup io_cq from ioc in io issue path
  * @q: the associated request_queue
  *
  * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
- * with @q->queue_lock held.
+ * from the IO issue path: it returns NULL if current is issuing IO to @q for
+ * the first time, otherwise it returns a valid icq.
  */
 struct io_cq *ioc_lookup_icq(struct request_queue *q)
 {
        struct io_context *ioc = current->io_context;
        struct io_cq *icq;
 
-       lockdep_assert_held(&q->queue_lock);
-
        /*
         * icq's are indexed from @ioc using radix tree and hint pointer,
-        * both of which are protected with RCU.  All removals are done
-        * holding both q and ioc locks, and we're holding q lock - if we
-        * find a icq which points to us, it's guaranteed to be valid.
+        * both of which are protected with RCU. The IO issue path ensures
+        * that both the request_queue and the current task stay valid, so a
+        * found icq is guaranteed to be valid until the IO is done.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
@@ -419,10 +418,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
                task_unlock(current);
        } else {
                get_io_context(ioc);
-
-               spin_lock_irq(&q->queue_lock);
                icq = ioc_lookup_icq(q);
-               spin_unlock_irq(&q->queue_lock);
        }
 
        if (!icq) {