        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+       struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
        bool has_metadata = meta_buffer && meta_len;
        struct bio *bio = NULL;
        int ret;
 
-       if (has_metadata && !supports_metadata)
-               return -EINVAL;
+       if (!nvme_ctrl_sgl_supported(ctrl))
+               dev_warn_once(ctrl->device, "using unchecked data buffer\n");
+       if (has_metadata) {
+               if (!supports_metadata)
+                       return -EINVAL;
+               if (!nvme_ctrl_meta_sgl_supported(ctrl))
+                       dev_warn_once(ctrl->device,
+                                     "using unchecked metadata buffer\n");
+       }
 
        if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
                struct iov_iter iter;
 
 static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
                                               struct request *req)
 {
        if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
                return false;
-       return req->nr_integrity_segments > 1;
+       return req->nr_integrity_segments > 1 ||
+               nvme_req(req)->flags & NVME_REQ_USERCMD;
 }
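The NVME_REQ_USERCMD flag tested here marks passthrough requests; it is set
when the request is allocated in the ioctl path, abridged here from
nvme_alloc_user_request() in drivers/nvme/host/ioctl.c:

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

With the change above, a user command carrying metadata takes a metadata
SGL whenever the controller advertises support, even for a single integrity
segment, so the device can check the metadata length against the command.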
 
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
                                      int nseg)
 {
         unsigned int avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
 
        if (nvme_pci_metadata_use_sgls(dev, req))
                return true;
        if (!sgl_threshold || avg_seg_size < sgl_threshold)
-               return false;
+               return nvme_req(req)->flags & NVME_REQ_USERCMD;
        return true;
 }
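Taken together, the two pci.c hunks make user passthrough requests prefer
SGLs even when the size heuristic would pick PRPs. Here is a self-contained
mirror of the final decision with a worked example; the parameters are
stand-ins for fields the real code reads from struct request, and 32768
bytes is the driver's default sgl_threshold:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int sgl_threshold = 32768;	/* bytes, module param default */

	/* Mirrors the patched nvme_pci_use_sgls() decision on plain values. */
	static bool use_sgls(bool ctrl_sgls, bool meta_sgls, bool usercmd,
			     unsigned int payload_bytes, unsigned int nseg)
	{
		unsigned int avg_seg_size = (payload_bytes + nseg - 1) / nseg;

		if (!ctrl_sgls)
			return false;
		if (meta_sgls)
			return true;
		if (!sgl_threshold || avg_seg_size < sgl_threshold)
			return usercmd;	/* new: user commands still take SGLs */
		return true;
	}

	int main(void)
	{
		/* 8 KiB single-segment read, below the 32 KiB threshold:
		 * kernel-originated I/O keeps PRPs, user passthrough gets SGLs. */
		printf("kernel I/O: %s\n",
		       use_sgls(true, false, false, 8192, 1) ? "SGL" : "PRP");
		printf("user   I/O: %s\n",
		       use_sgls(true, false, true, 8192, 1) ? "SGL" : "PRP");
		return 0;
	}

The point of forcing SGLs: an SGL data block descriptor encodes the
expected transfer length, so the controller can fail a user command whose
buffer is shorter than the command claims instead of transferring past it.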