}
 
        spin_lock_irqsave(&ns->head->requeue_lock, flags);
-       for (bio = req->bio; bio; bio = bio->bi_next)
+       for (bio = req->bio; bio; bio = bio->bi_next) {
                bio_set_dev(bio, ns->head->disk->part0);
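+               /*
+                * These bios will be resubmitted from the ns_head requeue
+                * work, outside the context of the original submitter, so
+                * nobody will ever poll for their completion.  Clear
+                * REQ_POLLED and the now stale cookie so the requeued bios
+                * complete through interrupts instead of hanging.
+                */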
+               if (bio->bi_opf & REQ_POLLED) {
+                       bio->bi_opf &= ~REQ_POLLED;
+                       bio->bi_cookie = BLK_QC_T_NONE;
+               }
+       }
        blk_steal_bios(&ns->head->requeue_list, req);
        spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
 
 
        blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+       /*
+        * This assumes that for a given namespace, either all controllers
+        * referring to it support poll queues or none of them do.  That is
+        * not a strict guarantee, but if the assumption is wrong the only
+        * effect is suboptimal performance, not a correctness problem.
+        */
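+       /*
+        * nr_maps > HCTX_TYPE_POLL means the tagset was created with a
+        * separate poll queue map; a non-zero nr_queues means the
+        * controller actually allocated poll queues.
+        */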
+       if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
+           ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
+               blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
 
        /* set to a default value of 512 until the disk is validated */
        blk_queue_logical_block_size(head->disk->queue, 512);