www.infradead.org Git - nvme.git/commitdiff
block: track disk DEAD state automatically for modeling queue freeze lockdep
authorMing Lei <ming.lei@redhat.com>
Wed, 27 Nov 2024 13:51:28 +0000 (21:51 +0800)
committerJens Axboe <axboe@kernel.dk>
Mon, 23 Dec 2024 15:17:22 +0000 (08:17 -0700)
Now we only verify the outermost freeze & unfreeze in the current context in
case that !q->mq_freeze_depth, so it is reliable to save the disk DEAD state
when we want to lock the freeze queue, since the state is now a per-task
variable.

Doing it this way can eliminate lots of false positives when the freeze queue
is called before adding the disk [1].

[1] https://lore.kernel.org/linux-block/6741f6b2.050a0220.1cc393.0017.GAE@google.com/

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241127135133.3952153-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
block/blk.h
block/elevator.c
block/genhd.c
include/linux/blkdev.h

index 60f457f62913fcb147cb68820871c54e529b3e88..0c6a319fb9369785801805596c1988e1a5baab4d 100644 (file)
@@ -131,6 +131,9 @@ static bool blk_freeze_set_owner(struct request_queue *q,
        if (!q->mq_freeze_depth) {
                q->mq_freeze_owner = owner;
                q->mq_freeze_owner_depth = 1;
+               q->mq_freeze_disk_dead = !q->disk ||
+                       test_bit(GD_DEAD, &q->disk->state) ||
+                       !blk_queue_registered(q);
                return true;
        }
 
@@ -187,7 +190,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
 void blk_freeze_queue_start(struct request_queue *q)
 {
        if (__blk_freeze_queue_start(q, current))
-               blk_freeze_acquire_lock(q, false, false);
+               blk_freeze_acquire_lock(q, false);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
@@ -235,7 +238,7 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        if (__blk_mq_unfreeze_queue(q, false))
-               blk_unfreeze_release_lock(q, false, false);
+               blk_unfreeze_release_lock(q, false);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
index 2c26abf505b8778987a60fb70a75b0af9dda716f..8708168d50e438c1f9d0da49ada42bc994d205e3 100644 (file)
@@ -720,22 +720,29 @@ void blk_integrity_verify(struct bio *bio);
 void blk_integrity_prepare(struct request *rq);
 void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
 
-static inline void blk_freeze_acquire_lock(struct request_queue *q, bool
-               disk_dead, bool queue_dying)
+#ifdef CONFIG_LOCKDEP
+static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
 {
-       if (!disk_dead)
+       if (!q->mq_freeze_disk_dead)
                rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
        if (!queue_dying)
                rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
 }
 
-static inline void blk_unfreeze_release_lock(struct request_queue *q, bool
-               disk_dead, bool queue_dying)
+static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
 {
        if (!queue_dying)
                rwsem_release(&q->q_lockdep_map, _RET_IP_);
-       if (!disk_dead)
+       if (!q->mq_freeze_disk_dead)
                rwsem_release(&q->io_lockdep_map, _RET_IP_);
 }
+#else
+static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
+{
+}
+static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
+{
+}
+#endif
 
 #endif /* BLK_INTERNAL_H */
index 7c3ba80e5ff4a390e62bb4318d364e95da92cd3c..ca0a74369f1c13e82cf5be907328d5250e7a1044 100644 (file)
@@ -602,14 +602,14 @@ void elevator_init_mq(struct request_queue *q)
         * Disk isn't added yet, so verifying queue lock only manually.
         */
        blk_freeze_queue_start_non_owner(q);
-       blk_freeze_acquire_lock(q, true, false);
+       blk_freeze_acquire_lock(q, false);
        blk_mq_freeze_queue_wait(q);
 
        blk_mq_cancel_work_sync(q);
 
        err = blk_mq_init_sched(q, e);
 
-       blk_unfreeze_release_lock(q, true, false);
+       blk_unfreeze_release_lock(q, false);
        blk_mq_unfreeze_queue_non_owner(q);
 
        if (err) {
index 79230c109fca036816667f4cf0c9839fa120c562..59ac299909b3044b203565b5d7fa2998a6dc3d99 100644 (file)
@@ -692,7 +692,7 @@ void del_gendisk(struct gendisk *disk)
        start_drain = __blk_mark_disk_dead(disk);
        queue_dying = blk_queue_dying(q);
        if (start_drain)
-               blk_freeze_acquire_lock(q, true, queue_dying);
+               blk_freeze_acquire_lock(q, queue_dying);
        xa_for_each_start(&disk->part_tbl, idx, part, 1)
                drop_partition(part);
        mutex_unlock(&disk->open_mutex);
@@ -748,7 +748,7 @@ void del_gendisk(struct gendisk *disk)
                blk_mq_exit_queue(q);
 
        if (start_drain)
-               blk_unfreeze_release_lock(q, true, queue_dying);
+               blk_unfreeze_release_lock(q, queue_dying);
 }
 EXPORT_SYMBOL(del_gendisk);
 
index 378d3a1a22fca66375bb757e1a42d38e4e9af701..522cf8eef66c1f9aefb45671a5848ed0b32cc67d 100644 (file)
@@ -581,6 +581,8 @@ struct request_queue {
 #ifdef CONFIG_LOCKDEP
        struct task_struct      *mq_freeze_owner;
        int                     mq_freeze_owner_depth;
+       /* Records disk state in current context, used in unfreeze queue */
+       bool                    mq_freeze_disk_dead;
 #endif
        wait_queue_head_t       mq_freeze_wq;
        /*