*
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
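For reference, a minimal sketch of the two calling conventions described by the updated kernel-doc above. The wrapper function and its name are illustrative only and not part of this patch; with force_kblockd set, the actual queue run is punted to kblockd, which takes the queue lock itself.

#include <linux/blkdev.h>

/* Illustrative wrapper only (not in this patch): shows the two legal
 * ways to call __blk_run_queue() under the documented locking rules. */
static void example_run_queue(struct request_queue *q, bool punt_to_kblockd)
{
        if (punt_to_kblockd) {
                /* force_kblockd == true: no queue lock required. */
                __blk_run_queue(q, true);
        } else {
                /* force_kblockd == false: queue lock held, IRQs disabled. */
                spin_lock_irq(q->queue_lock);
                __blk_run_queue(q, false);
                spin_unlock_irq(q->queue_lock);
        }
}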
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
                            bool from_schedule)
+       __releases(q->queue_lock)
 {
        trace_block_unplug(q, depth, !from_schedule);
-       __blk_run_queue(q, from_schedule);
+
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               __blk_run_queue(q, true);
+       } else {
+               __blk_run_queue(q, false);
+               spin_unlock(q->queue_lock);
+       }
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug)
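The __releases(q->queue_lock) annotation added to queue_unplugged() above is a sparse hint that the function returns with the lock dropped on every path, matching the new unlock-in-callee behaviour. A small sketch of the same pattern with a hypothetical helper (finish_and_unlock() is not part of this patch):

#include <linux/spinlock.h>

/* Hypothetical helper mirroring the queue_unplugged() pattern: the
 * caller acquires 'lock', and this function releases it on every
 * return path, which is what __releases() lets sparse verify. */
static void finish_and_unlock(spinlock_t *lock, bool deferred)
        __releases(lock)
{
        if (deferred) {
                /* Drop the lock first; the deferred work (e.g. a
                 * workqueue item) takes it again by itself. */
                spin_unlock(lock);
                /* ... kick the deferred work here ... */
        } else {
                /* ... do the work while still holding the lock ... */
                spin_unlock(lock);
        }
}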
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
                                queue_unplugged(q, depth, from_schedule);
-                               spin_unlock(q->queue_lock);
-                       }
                        q = rq->q;
                        depth = 0;
                        spin_lock(q->queue_lock);
                depth++;
        }
 
-       if (q) {
+       /*
+        * This drops the queue lock
+        */
+       if (q)
                queue_unplugged(q, depth, from_schedule);
-               spin_unlock(q->queue_lock);
-       }
 
        local_irq_restore(flags);
 }
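Taken together, the hunks above change the flushing loop that calls queue_unplugged() so that it never unlocks a queue itself: the lock is taken when the loop switches to a new queue, and queue_unplugged() drops it. A simplified, consolidated sketch of the resulting shape (not meant to build standalone; request re-insertion and the plug bookkeeping are omitted, and example_flush_list() is an illustrative name):

#include <linux/blkdev.h>

static void example_flush_list(struct list_head *list, bool from_schedule)
{
        struct request_queue *q = NULL;
        unsigned int depth = 0;
        unsigned long flags;

        /* One IRQ save/restore around the whole loop; the per-queue
         * locks below can then use plain spin_lock(). */
        local_irq_save(flags);
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                if (rq->q != q) {
                        /* queue_unplugged() drops q->queue_lock. */
                        if (q)
                                queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
                        depth = 0;
                        spin_lock(q->queue_lock);
                }
                /* ... re-insert rq onto q here ... */
                depth++;
        }

        /* Flush the last queue; again drops its lock. */
        if (q)
                queue_unplugged(q, depth, from_schedule);

        local_irq_restore(flags);
}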