 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        if (likely(!blk_queue_dead(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work,
 void blk_start_queue_async(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        blk_run_queue_async(q);
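
Each hunk in this patch adds the same one-shot assertion. WARN_ON_ONCE() prints a warning with a backtrace the first time its condition evaluates true at a given call site and stays silent afterwards, so a misbehaving caller is flagged once rather than flooding the log. Simplified sketch of the generic definition (the real macro in include/asm-generic/bug.h differs in details):

        #define WARN_ON_ONCE(condition) ({                      \
                static bool __warned;                           \
                int __ret = !!(condition);                      \
                                                                \
                if (unlikely(__ret && !__warned)) {             \
                        __warned = true;                        \
                        WARN_ON(1);                             \
                }                                               \
                unlikely(__ret);                                \
        })
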
 {
        lockdep_assert_held(q->queue_lock);
        WARN_ON(!irqs_disabled());
+       WARN_ON_ONCE(q->mq_ops);
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
 void blk_stop_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        if (unlikely(blk_queue_dead(q)))
                return;
 void __blk_run_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        if (unlikely(blk_queue_stopped(q)))
                return;
 void blk_run_queue_async(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 {
        unsigned long flags;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
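
The run-queue helpers above only make sense for legacy (request_fn) queues; a set q->mq_ops means the queue belongs to blk-mq and must be kicked through its own interface. A caller that can see both queue types is expected to dispatch explicitly, along these lines (illustrative sketch, not part of this patch):

        if (q->mq_ops)
                blk_mq_run_hw_queues(q, true);  /* blk-mq path */
        else
                blk_run_queue(q);               /* legacy path; takes queue_lock itself */
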
        int i;
 
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        while (true) {
                bool drain = false;
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irq(q->queue_lock);
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
  * @q: queue of interest
  *
  * Leave bypass mode and restore the normal queueing behavior.
+ *
+ * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
+ * this function is called for both blk-sq and blk-mq queues.
  */
 void blk_queue_bypass_end(struct request_queue *q)
 {
 
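
The asymmetry described in the note stems from allocation: every queue starts life with bypass_depth == 1, and that initial bypass is ended at registration time regardless of queue type, which is why blk_queue_bypass_end() cannot carry the mq_ops assertion. A rough sketch of the registration-time caller (hedged reconstruction, not part of this patch):

        /* End the allocation-time bypass for blk-sq and blk-mq alike. */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                blk_queue_bypass_end(q);
        }
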
 int blk_init_allocated_queue(struct request_queue *q)
 {
+       WARN_ON_ONCE(q->mq_ops);
+
        q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
        if (!q->fq)
                return -ENOMEM;
        struct request_list *rl;
        int on_thresh, off_thresh;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
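
blk_update_nr_requests() is the legacy half of the nr_requests sysfs knob; the store handler picks it or blk_mq_update_nr_requests() depending on the queue type, roughly:

        if (q->mq_ops)
                err = blk_mq_update_nr_requests(q, nr);
        else
                err = blk_update_nr_requests(q, nr);
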
        struct request *rq;
 
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
 {
        struct request *rq;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        /* create ioc upfront */
        create_io_context(gfp_mask, q->node);
 
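
blk_old_get_request() is normally reached only through blk_get_request(), which already dispatches on q->mq_ops; the new assertion backstops direct misuse. That dispatch looks roughly like:

        if (q->mq_ops)
                return blk_mq_alloc_request(q, op,
                                (gfp_mask & __GFP_DIRECT_RECLAIM) ?
                                0 : BLK_MQ_REQ_NOWAIT);
        return blk_old_get_request(q, op, gfp_mask);
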
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        int ret;
 
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        while ((rq = __elv_next_request(q)) != NULL) {
 
 void blk_start_request(struct request *req)
 {
        lockdep_assert_held(req->q->queue_lock);
+       WARN_ON_ONCE(req->q->mq_ops);
 
        blk_dequeue_request(req);
 
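
blk_start_request() and blk_fetch_request() implement the legacy dequeue model; a blk-mq driver never calls them and instead marks a request in flight from its ->queue_rq() handler. Illustrative comparison (my_dispatch() is a hypothetical driver helper):

        /* legacy request_fn driver: */
        while ((rq = blk_fetch_request(q)) != NULL)
                my_dispatch(rq);

        /* blk-mq driver, inside ->queue_rq(): */
        blk_mq_start_request(rq);
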
        struct request *rq;
 
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        rq = blk_peek_request(q);
        if (rq)
        struct request_queue *q = req->q;
 
        lockdep_assert_held(req->q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        if (req->rq_flags & RQF_STATS)
                blk_stat_add(req);
        struct request_queue *q = rq->q;
        unsigned long flags;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
 
                                   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
 
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
 bool blk_end_request(struct request *rq, blk_status_t error,
                unsigned int nr_bytes)
 {
+       WARN_ON_ONCE(rq->q->mq_ops);
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(blk_end_request);
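
The blk_end_request() family is likewise legacy-only; blk-mq completions go through blk_mq_end_request(), which takes no byte count because partial completions are handled via blk_update_request() first. Side-by-side sketch:

        /* legacy: */
        blk_end_request(rq, BLK_STS_OK, blk_rq_bytes(rq));

        /* blk-mq: */
        blk_mq_end_request(rq, BLK_STS_OK);
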
                unsigned int nr_bytes)
 {
        lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
 
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
        unsigned int bidi_bytes = 0;
 
        lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
 
        if (unlikely(blk_bidi_rq(rq)))
                bidi_bytes = blk_rq_bytes(rq->next_rq);