        pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
 
        /*
-        * For iopoll, complete it directly.
+        * For iopoll, complete it directly. Note that using the uring_cmd
+        * helper for this is safe only because we check blk_rq_is_poll().
+        * Since that returns false if we're not on a polled queue, it's
+        * safe to use the polled completion helper.
+        *
         * Otherwise, move the completion to task work.
         */
-       if (blk_rq_is_poll(req))
-               nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
-       else
+       if (blk_rq_is_poll(req)) {
+               if (pdu->bio)
+                       blk_rq_unmap_user(pdu->bio);
+               io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+       } else {
                io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+       }
 
        return RQ_END_IO_FREE;
 }
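
The safety argument in the comment above leans entirely on blk_rq_is_poll(). As a rough sketch of what that check amounts to (a paraphrase of the block layer helper in block/blk-mq.c, not a verbatim quote; the exact upstream body may differ between kernel versions), it only returns true for a request mapped to a poll-type hardware context, so a request from an interrupt-driven queue can never take the direct-completion branch:

#include <linux/blk-mq.h>

/*
 * Paraphrase of blk_rq_is_poll() (block/blk-mq.c); illustrative only,
 * not the verbatim upstream implementation.
 */
static bool blk_rq_is_poll_sketch(struct request *rq)
{
        /* No hardware context mapped: cannot be a polled request */
        if (!rq->mq_hctx)
                return false;
        /* Only requests on a HCTX_TYPE_POLL hardware queue are polled */
        if (rq->mq_hctx->type != HCTX_TYPE_POLL)
                return false;
        return true;
}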
 
 }
 #endif
 
+/*
+ * Polled completions must come from a poll queue, and hence are
+ * completed inside the usual poll handling loops.
+ */
+static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
+                                           ssize_t ret, ssize_t res2)
+{
+       lockdep_assert(in_task());
+       io_uring_cmd_done(ioucmd, ret, res2, 0);
+}
+
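
To see the polled path end to end: it only fires when userspace issues the passthrough command on a ring created with IORING_SETUP_IOPOLL against a device that actually has poll queues. Below is a minimal, illustrative liburing sketch, not part of the patch. It assumes an NVMe generic character device at /dev/ng0n1, namespace ID 1, a 4096-byte logical block size, a controller loaded with nvme.poll_queues > 0, and sufficient privileges; error handling is kept to a bare minimum:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct nvme_uring_cmd *cmd;
        void *buf;
        int fd, ret;

        /*
         * NVMe passthrough needs 128-byte SQEs and 32-byte CQEs;
         * IORING_SETUP_IOPOLL is what routes completions through the
         * polled path ending in io_uring_cmd_iopoll_done().
         */
        ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL |
                                  IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
        if (ret < 0)
                return 1;

        fd = open("/dev/ng0n1", O_RDONLY);      /* assumed device node */
        if (fd < 0)
                return 1;

        if (posix_memalign(&buf, 4096, 4096))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        if (!sqe)
                return 1;
        memset(sqe, 0, 128);            /* SQE128: each slot is 128 bytes */
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = fd;
        sqe->cmd_op = NVME_URING_CMD_IO;

        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        cmd->opcode = 0x02;             /* NVMe read */
        cmd->nsid = 1;                  /* assumed namespace id */
        cmd->addr = (uintptr_t)buf;
        cmd->data_len = 4096;
        cmd->cdw10 = 0;                 /* starting LBA, low 32 bits */
        cmd->cdw11 = 0;                 /* starting LBA, high 32 bits */
        cmd->cdw12 = 0;                 /* number of LBAs - 1: one block */

        io_uring_submit(&ring);

        /*
         * With IOPOLL there is no interrupt to wait for; this call spins
         * on the device's poll queue until the command completes.
         */
        ret = io_uring_wait_cqe(&ring, &cqe);
        if (!ret) {
                printf("res %d\n", cqe->res);   /* driver-reported status */
                io_uring_cqe_seen(&ring, cqe);
        }

        free(buf);
        close(fd);
        io_uring_queue_exit(&ring);
        return 0;
}

This flow also shows why the lockdep_assert(in_task()) above holds: with IOPOLL, completions are reaped by the submitting task as it polls from io_uring_enter(), not from interrupt context, which is what makes completing the command inline safe.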
 /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
 static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
                        void (*task_work_cb)(struct io_uring_cmd *, unsigned))