block: Fix mismerge in queue freeze logic
author    Martin K. Petersen <martin.petersen@oracle.com>    Sun, 11 Jun 2017 01:55:44 +0000 (18:55 -0700)
committer Chuck Anderson <chuck.anderson@oracle.com>         Sun, 18 Jun 2017 21:03:43 +0000 (14:03 -0700)
Commit 7466bf8e2078 ("blk-mq: fix freeze queue race") introduced a mutex
to protect the queue freeze/unfreeze logic.

The locking requirement was obsoleted by commit c03fa711de6a ("block:
use an atomic_t for mq_freeze_depth") but the mutex was left in place in
our backport.
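
To illustrate why the mutex became unnecessary: atomic_inc_return()
and atomic_dec_return() are single read-modify-write operations, so
exactly one caller can observe the 0 -> 1 or 1 -> 0 depth transition
and perform the kill/reinit. A minimal userspace sketch of this
pattern (illustrative only, C11 atomics standing in for the kernel
primitives; not the actual blk-mq source):

/* sketch.c: the atomic RMW alone serializes the depth transitions */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int freeze_depth;

static void freeze_start(void)
{
	/* atomic_fetch_add() returns the old value; +1 gives the new depth */
	int depth = atomic_fetch_add(&freeze_depth, 1) + 1;

	if (depth == 1)
		puts("first freezer: percpu_ref_kill()");
}

static void unfreeze(void)
{
	int depth = atomic_fetch_sub(&freeze_depth, 1) - 1;

	if (depth == 0)
		puts("last unfreezer: percpu_ref_reinit() + wake_up_all()");
}

int main(void)
{
	freeze_start();
	freeze_start();	/* nested freeze: depth == 2, no kill */
	unfreeze();
	unfreeze();	/* depth back to 0: reinit fires once */
	return 0;
}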

During the c311ca8a3d93 merge of the pmem tree, conflicts arose in
blk-mq. The mutex_lock() calls were removed but the matching
mutex_unlock() calls were left in place. This led to NVMe controller
reset failures in ED testing. Remove the last remnants of commit
7466bf8e2078.
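
For background on the failure mode: with the mutex_lock() calls gone,
each freeze/unfreeze path executed an unbalanced mutex_unlock(), which
is undefined behavior for a mutex the caller does not hold, presumably
contributing to the observed failures. A hypothetical userspace
reduction of the mismerge (pthreads standing in for kernel mutexes;
names are illustrative only):

/* mismerge.c: an unlock whose matching lock was dropped in a merge */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freeze_lock;

static void freeze_start_mismerged(void)
{
	/* pthread_mutex_lock(&freeze_lock);  <- removed by the merge */

	/* ... freeze bookkeeping would go here ... */

	/* ... but the matching unlock survived the merge: */
	int err = pthread_mutex_unlock(&freeze_lock);

	if (err)
		fprintf(stderr, "unbalanced unlock, error %d\n", err);
}

int main(void)
{
	pthread_mutexattr_t attr;

	/* error-checking type so the bug reports EPERM instead of
	 * silently corrupting the mutex state */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&freeze_lock, &attr);

	freeze_start_mismerged();
	return 0;
}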

Orabug: 26254388

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
block/blk-core.c
block/blk-mq.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 87f278d9e8dd453ebfefb889072ab41cb3bcd16a..5c451b5ee89e7ac29589c28dac2e2c8b5700c905 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -694,7 +694,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
        init_waitqueue_head(&q->mq_freeze_wq);
-       mutex_init(&q->mq_freeze_lock);
 
        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7b9fab7d97f65c4137394367d005a811a5191115..1a9dba2f5a464870c3ae28594466de46a158833d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -81,13 +81,11 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
-       mutex_lock(&q->mq_freeze_lock);
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
-       mutex_unlock(&q->mq_freeze_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
@@ -127,14 +125,12 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        int freeze_depth;
 
-       mutex_lock(&q->mq_freeze_lock);
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
-       mutex_unlock(&q->mq_freeze_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b119db52d1620e5134f164d9b57ccb6dfec683f2..143578081b32fc029abb94e5d6de527c5a751f50 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,14 +509,6 @@ struct request_queue {
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
-#ifndef __GENKSYMS__
-       /*
-        * Protect concurrent access to mq_usage_counter by
-        * percpu_ref_switch_to_percpu(), percpu_ref_kill(), and
-        * percpu_ref_reinit().
-        */
-       struct mutex            mq_freeze_lock;
-#endif
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */