blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list);
-
-static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
-               blk_qc_t qc)
-{
-       return xa_load(&q->hctx_table, qc);
-}
-
-static inline blk_qc_t blk_rq_to_qc(struct request *rq)
-{
-       return rq->mq_hctx->queue_num;
-}
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                        struct io_comp_batch *iob, unsigned int flags);
 
 /*
  * Check if any of the ctx, dispatch list or elevator
                q->integrity.profile->prepare_fn(rq);
 #endif
        if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
-               WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
+               WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
        do {
-               blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+               blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
                cond_resched();
        } while (!completion_done(wait));
 }
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
-               unsigned int flags)
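+/*
+ * Polling loop shared by blk_mq_poll() and blk_rq_poll(); the callers
+ * resolve the hardware queue to poll before calling in.
+ */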
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                        struct io_comp_batch *iob, unsigned int flags)
 {
-       struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
        long state = get_current_state();
        int ret;
 
        return 0;
 }
 
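+/* Resolve the bio cookie back to its hardware queue and poll it. */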
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
+               struct io_comp_batch *iob, unsigned int flags)
+{
+       struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
+
+       return blk_hctx_poll(q, hctx, iob, flags);
+}
+
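+/*
+ * blk_rq_poll - poll for completions on the hardware queue of a request
+ * @rq: request to poll for
+ * @iob: batch of completions to fill, may be NULL
+ * @poll_flags: BLK_POLL_* flags
+ *
+ * Returns 0 if @rq was not submitted as a polled request or if the queue
+ * reference cannot be taken, otherwise the result of polling @rq's
+ * hardware queue.
+ */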
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+               unsigned int poll_flags)
+{
+       struct request_queue *q = rq->q;
+       int ret;
+
+       if (!blk_rq_is_poll(rq))
+               return 0;
+       if (!percpu_ref_tryget(&q->q_usage_counter))
+               return 0;
+
+       ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
+       blk_queue_exit(q);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
 unsigned int blk_mq_rq_cpu(struct request *rq)
 {
        return rq->mq_ctx->cpu;