www.infradead.org Git - users/hch/block.git/commitdiff
nvme: move the io_uring command pdu handling further down in ioctl.c
author    Christoph Hellwig <hch@lst.de>
          Fri, 6 May 2022 06:50:00 +0000 (08:50 +0200)
committer Christoph Hellwig <hch@lst.de>
          Fri, 6 May 2022 06:58:24 +0000 (08:58 +0200)
drivers/nvme/host/ioctl.c

index 2a56c9fb5ddffbf9449012f45f543a741692b503..ee69a9d89039a890e1f37968b7f1156ede7f5880 100644 (file)
@@ -19,83 +19,6 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
        return (void __user *)ptrval;
 }
 
-/*
- * This overlays struct io_uring_cmd pdu.
- * Expect build errors if this grows larger than that.
- */
-struct nvme_uring_cmd_pdu {
-       union {
-               struct bio *bio;
-               struct request *req;
-       };
-       void *meta; /* kernel-resident buffer */
-       void __user *meta_buffer;
-       u32 meta_len;
-};
-
-static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
-               struct io_uring_cmd *ioucmd)
-{
-       return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
-}
-
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
-{
-       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-       struct request *req = pdu->req;
-       struct bio *bio = req->bio;
-       int status;
-       u64 result;
-
-       if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-               status = -EINTR;
-       else
-               status = nvme_req(req)->status;
-
-       result = le64_to_cpu(nvme_req(req)->result.u64);
-
-       if (pdu->meta) {
-               bool write = req_op(req) == REQ_OP_DRV_OUT;
-
-               if (!status && !write &&
-                   copy_to_user(pdu->meta_buffer, pdu->meta, pdu->meta_len))
-                       status = -EFAULT;
-               kfree(pdu->meta);
-       }
-       if (bio)
-               blk_rq_unmap_user(bio);
-       blk_mq_free_request(req);
-
-       io_uring_cmd_done(ioucmd, status, result);
-}
-
-static void nvme_end_async_pt(struct request *req, blk_status_t err)
-{
-       struct io_uring_cmd *ioucmd = req->end_io_data;
-       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-       /* extract bio before reusing the same field for request */
-       struct bio *bio = pdu->bio;
-
-       pdu->req = req;
-       req->bio = bio;
-       /* this takes care of moving rest of completion-work to task context */
-       io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
-}
-
-static void nvme_setup_uring_cmd_data(struct request *rq,
-               struct io_uring_cmd *ioucmd, void __user *meta_buffer,
-               u32 meta_len, void *meta)
-{
-       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-
-       /* to free bio on completion, as req->bio will be null at that time */
-       pdu->bio = rq->bio;
-       pdu->meta = meta;
-       pdu->meta_buffer = meta_buffer;
-       pdu->meta_len = meta_len;
-       rq->end_io_data = ioucmd;
-}
-
 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
                unsigned len, u32 seed, bool write)
 {
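The struct being moved here overlays the pdu scratch area embedded in struct io_uring_cmd, which is what the "Expect build errors if this grows larger than that" comment refers to. A minimal sketch of how such a size constraint can be enforced at compile time, assuming BUILD_BUG_ON from <linux/build_bug.h>; the assertion placement is illustrative and not part of this commit:

	static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
			struct io_uring_cmd *ioucmd)
	{
		/* refuse to compile if the driver pdu outgrows the scratch area */
		BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
		return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
	}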
@@ -416,6 +339,83 @@ struct nvme_uring_data {
        __u32   timeout_ms;
 };
 
+/*
+ * This overlays struct io_uring_cmd pdu.
+ * Expect build errors if this grows larger than that.
+ */
+struct nvme_uring_cmd_pdu {
+       union {
+               struct bio *bio;
+               struct request *req;
+       };
+       void *meta; /* kernel-resident buffer */
+       void __user *meta_buffer;
+       u32 meta_len;
+};
+
+static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+               struct io_uring_cmd *ioucmd)
+{
+       return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+       struct request *req = pdu->req;
+       struct bio *bio = req->bio;
+       int status;
+       u64 result;
+
+       if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+               status = -EINTR;
+       else
+               status = nvme_req(req)->status;
+
+       result = le64_to_cpu(nvme_req(req)->result.u64);
+
+       if (pdu->meta) {
+               bool write = req_op(req) == REQ_OP_DRV_OUT;
+
+               if (!status && !write &&
+                   copy_to_user(pdu->meta_buffer, pdu->meta, pdu->meta_len))
+                       status = -EFAULT;
+               kfree(pdu->meta);
+       }
+       if (bio)
+               blk_rq_unmap_user(bio);
+       blk_mq_free_request(req);
+
+       io_uring_cmd_done(ioucmd, status, result);
+}
+
+static void nvme_end_async_pt(struct request *req, blk_status_t err)
+{
+       struct io_uring_cmd *ioucmd = req->end_io_data;
+       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+       /* extract bio before reusing the same field for request */
+       struct bio *bio = pdu->bio;
+
+       pdu->req = req;
+       req->bio = bio;
+       /* this takes care of moving rest of completion-work to task context */
+       io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+}
+
+static void nvme_setup_uring_cmd_data(struct request *rq,
+               struct io_uring_cmd *ioucmd, void __user *meta_buffer,
+               u32 meta_len, void *meta)
+{
+       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+       /* to free bio on completion, as req->bio will be null at that time */
+       pdu->bio = rq->bio;
+       pdu->meta = meta;
+       pdu->meta_buffer = meta_buffer;
+       pdu->meta_len = meta_len;
+       rq->end_io_data = ioucmd;
+}
+
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 {
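The completion path in the code above runs in two stages: nvme_end_async_pt is invoked by the block layer, potentially in interrupt context, and defers the remaining work to nvme_uring_task_cb through io_uring_cmd_complete_in_task, because the copy_to_user of the metadata buffer must run in the context of the submitting task. For orientation, a sketch of how nvme_uring_cmd_io (whose body continues beyond this excerpt) is expected to arm that path; the call sequence is an assumption inferred from the helpers in this diff, not quoted from it:

	/* stash the bio and metadata state in the ioucmd pdu */
	nvme_setup_uring_cmd_data(req, ioucmd, meta_buffer, meta_len, meta);
	/* fire and forget; nvme_end_async_pt runs on command completion */
	blk_execute_rq_nowait(req, 0, nvme_end_async_pt);
	return -EIOCBQUEUED;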