www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
block: use an atomic_t for mq_freeze_depth
author: Christoph Hellwig <hch@lst.de>
Thu, 7 May 2015 07:38:13 +0000 (09:38 +0200)
committer: Dan Duval <dan.duval@oracle.com>
Thu, 8 Dec 2016 18:35:24 +0000 (13:35 -0500)
Orabug: 22913653

lockdep gets unhappy about the not disabling irqs when using the queue_lock
around it.  Instead of trying to fix that up just switch to an atomic_t
and get rid of the lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 4ecd4fef3a074c8bb43c391a57742c422469ebbd)
Signed-off-by: Dan Duval <dan.duval@oracle.com>
block/blk-mq.c
include/linux/blkdev.h

index d720a7992616e71e2cadd6fc7b707feed65833f6..a6b17e2922abc6de6644dc3697eacbe6cf9d0ac5 100644 (file)
@@ -89,7 +89,8 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
                        return -EBUSY;
 
                ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !q->mq_freeze_depth || blk_queue_dying(q));
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
                if (ret)
@@ -112,13 +113,10 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
-       bool freeze;
+       int freeze_depth;
 
-       spin_lock_irq(q->queue_lock);
-       freeze = !q->mq_freeze_depth++;
-       spin_unlock_irq(q->queue_lock);
-
-       if (freeze) {
+       freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+       if (freeze_depth == 1) {
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
@@ -143,13 +141,11 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-       bool wake;
+       int freeze_depth;
 
-       spin_lock_irq(q->queue_lock);
-       wake = !--q->mq_freeze_depth;
-       WARN_ON_ONCE(q->mq_freeze_depth < 0);
-       spin_unlock_irq(q->queue_lock);
-       if (wake) {
+       freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+       WARN_ON_ONCE(freeze_depth < 0);
+       if (!freeze_depth) {
                percpu_ref_reinit(&q->mq_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
@@ -2071,7 +2067,7 @@ void blk_mq_free_queue(struct request_queue *q)
 static void blk_mq_queue_reinit(struct request_queue *q,
                                const struct cpumask *online_mask)
 {
-       WARN_ON_ONCE(!q->mq_freeze_depth);
+       WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
        blk_mq_sysfs_unregister(q);
 
index 4d02db8837c153a0e0da1d74aceda5304fcd2436..f85197afe2570285dcb9e27bb0cad0dd647216e4 100644 (file)
@@ -477,7 +477,7 @@ struct request_queue {
        struct mutex            sysfs_lock;
 
        int                     bypass_depth;
-       int                     mq_freeze_depth;
+       atomic_t                mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
        bsg_job_fn              *bsg_job_fn;