sizeof(struct io_async_rw),
offsetof(struct io_async_rw, clear));
ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_uring_cmd_data), 0);
+ sizeof(struct io_async_cmd), 0);
spin_lock_init(&ctx->msg_lock);
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_kiocb), 0);
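The size change above only keeps the cmd_cache element large enough for the new wrapper; since struct io_async_cmd currently embeds nothing but struct io_uring_cmd_data, the element size is effectively unchanged today. A build-time check along these lines (not part of the patch, purely illustrative) captures the requirement:

/* Illustrative only: cached cmd_cache elements are handed out as struct io_async_cmd. */
BUILD_BUG_ON(sizeof(struct io_async_cmd) < sizeof(struct io_uring_cmd_data));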
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct io_uring_cmd_data *cache = req->async_data;
+ struct io_async_cmd *ac = req->async_data;
+ struct io_uring_cmd_data *cache = &ac->data;
if (cache->op_data) {
kfree(cache->op_data);
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct io_uring_cmd_data *cache;
+ struct io_async_cmd *ac;
- cache = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
- if (!cache)
+ /* see io_uring_cmd_get_async_data() */
+ BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);
+
+ ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+ if (!ac)
return -ENOMEM;
- cache->op_data = NULL;
+ ac->data.op_data = NULL;
/*
* Unconditionally cache the SQE for now - this is only needed for
* requests that go async, but prep handlers must ensure that any
* sqe data is stable beyond prep. Since uring_cmd is special in
* that it doesn't read in per-op data, play it safe and ensure that
* any SQE data is stable beyond prep. This can later get relaxed.
*/
- memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = cache->sqes;
+ memcpy(ac->data.sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->data.sqes;
return 0;
}
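The BUILD_BUG_ON added in the prep path exists so that req->async_data can still be handed out as a plain struct io_uring_cmd_data pointer on the driver-facing side. A minimal sketch of the accessor the comment refers to, assuming it simply returns req->async_data as-is (the exact body in include/linux/io_uring/cmd.h may differ):

/*
 * Sketch only: with 'data' as the first member of struct io_async_cmd,
 * the async_data pointer doubles as a struct io_uring_cmd_data pointer,
 * so no container_of() arithmetic is needed here.
 */
static inline struct io_uring_cmd_data *
io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}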
// SPDX-License-Identifier: GPL-2.0
+#include <linux/io_uring/cmd.h>
+
+struct io_async_cmd {
+ struct io_uring_cmd_data data;
+};
+
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
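For now the wrapper only holds the one member, so it may look redundant, but it gives the io_uring core a private place to grow per-command async state later without changing what drivers see through the io_uring_cmd_data pointer. A hypothetical extension (the extra field is invented purely for illustration) would keep data first so the BUILD_BUG_ON added in the prep path continues to hold:

/*
 * Hypothetical illustration only: additional core-private fields could be
 * appended after 'data'; drivers would still only see the leading
 * io_uring_cmd_data through req->async_data.
 */
struct io_async_cmd {
	struct io_uring_cmd_data	data;		/* must stay the first member */
	struct iovec			*free_iov;	/* invented example field */
};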