blk-mq: Record active_queues_shared_sbitmap per tag_set for when using shared sbitmap
author John Garry <john.garry@huawei.com>
Wed, 19 Aug 2020 15:20:27 +0000 (23:20 +0800)
committer Jens Axboe <axboe@kernel.dk>
Thu, 3 Sep 2020 21:20:47 +0000 (15:20 -0600)
When using a shared sbitmap, the number of active request queues per hctx
should no longer be relied on when judging how to share the tag bitmap.

Instead, maintain the number of active request queues per tag_set, and make
the judgement based on that.
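
Note: to make the sharing judgement concrete, below is a minimal user-space C
sketch of the principle that hctx_may_queue() (modified in the block/blk-mq.h
hunk of this patch) applies: the total tag depth is split by ceiling division
among the queues currently marked active, and a queue may only allocate
another tag while its in-flight count stays below that fair share. The names
fair_share_depth, may_queue and MIN_SHARE are illustrative only, not kernel
symbols, and the minimum-share value is an assumption made for the example.

  /*
   * Illustrative user-space sketch of the tag fair-sharing judgement.
   * Only the ceil-divide-among-active-users idea mirrors hctx_may_queue();
   * all identifiers here are made up for the example.
   */
  #include <stdio.h>

  #define MIN_SHARE 4u	/* always leave a queue a few tags (assumed value) */

  /* Ceiling division of the total tag depth among the active queues. */
  static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
  {
  	unsigned int share;

  	if (!users)
  		return depth;	/* no queue is marked active: no limit */

  	share = (depth + users - 1) / users;
  	return share < MIN_SHARE ? MIN_SHARE : share;
  }

  /* A queue may take another tag while it is below its fair share. */
  static int may_queue(unsigned int depth, unsigned int users,
  		     unsigned int active_requests)
  {
  	return active_requests < fair_share_depth(depth, users);
  }

  int main(void)
  {
  	/* 256 tags shared by 8 active request queues -> 32 tags each. */
  	printf("share = %u\n", fair_share_depth(256, 8));
  	printf("may_queue with 31 in flight = %d\n", may_queue(256, 8, 31));
  	printf("may_queue with 32 in flight = %d\n", may_queue(256, 8, 32));
  	return 0;
  }

With a shared sbitmap the "users" count above must come from the tag_set-wide
counter added by this patch, not from the per-hctx counter, which is exactly
what the hctx_may_queue() hunk below switches over.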

Originally-from: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Don Brace <don.brace@microsemi.com> #SCSI resv cmds patches used
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
include/linux/blk-mq.h
include/linux/blkdev.h

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c6d7ebc62bdb669a4df6a204d51d6c5e35b74b69..c31c4a0478a556aa2be4fc2b28026c96bc7935ec 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-           !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               atomic_inc(&hctx->tags->active_queues);
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               struct request_queue *q = hctx->queue;
+               struct blk_mq_tag_set *set = q->tag_set;
+
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
+                   !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                       atomic_inc(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+                   !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       atomic_inc(&hctx->tags->active_queues);
+       }
 
        return true;
 }
@@ -47,11 +56,19 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_tags *tags = hctx->tags;
-
-       if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               return;
-
-       atomic_dec(&tags->active_queues);
+       struct request_queue *q = hctx->queue;
+       struct blk_mq_tag_set *set = q->tag_set;
+
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
+                                       &q->queue_flags))
+                       return;
+               atomic_dec(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       return;
+               atomic_dec(&tags->active_queues);
+       }
 
        blk_mq_tag_wakeup_all(tags, false);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ffc5ad0c91b7a7d114ba0956daca631c97ffef94..eff9d987f85b70675b5ec9e05223914c07fdbe39 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3442,6 +3442,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                goto out_free_mq_map;
 
        if (blk_mq_is_sbitmap_shared(set->flags)) {
+               atomic_set(&set->active_queues_shared_sbitmap, 0);
+
                if (blk_mq_init_shared_sbitmap(set, set->flags)) {
                        ret = -ENOMEM;
                        goto out_free_mq_rq_maps;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 25ec73078e956afbd95d9d79756cc39c0eab658e..a52703c98b7736ef69b9678bcee90691861ce2de 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -292,8 +292,6 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 
        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;
-       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               return true;
 
        /*
         * Don't try dividing an ant
@@ -301,7 +299,19 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
        if (bt->sb.depth == 1)
                return true;
 
-       users = atomic_read(&hctx->tags->active_queues);
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               struct request_queue *q = hctx->queue;
+               struct blk_mq_tag_set *set = q->tag_set;
+
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                       return true;
+               users = atomic_read(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       return true;
+               users = atomic_read(&hctx->tags->active_queues);
+       }
+
        if (!users)
                return true;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index df7b903ce7aea44d10004bc497a68fb5c7da2976..8279c807e1f366277defbc53a969f0ad19807647 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -252,6 +252,7 @@ struct blk_mq_tag_set {
        unsigned int            timeout;
        unsigned int            flags;
        void                    *driver_data;
+       atomic_t                active_queues_shared_sbitmap;
 
        struct sbitmap_queue    __bitmap_tags;
        struct sbitmap_queue    __breserved_tags;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6277aee2aeaa970abca8ab8cb46ef50b16c865cf..7d82959e7b8619ab1d38b428cc1e394bbb9db967 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -618,6 +618,7 @@ struct request_queue {
 #define QUEUE_FLAG_PCI_P2PDMA  25      /* device supports PCI p2p requests */
 #define QUEUE_FLAG_ZONE_RESETALL 26    /* supports Zone Reset All */
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27    /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28      /* at least one blk-mq hctx is active */
 
 #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_SAME_COMP))