return 0;
}
-static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_fsync(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
if (!req->file)
	return -EBADF;
return 0;
}
-static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
+ const struct io_uring_sqe *sqe = req->sqe;
loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len);
loff_t end = sqe_off + sqe_len;
unsigned fsync_flags;
int ret;

fsync_flags = READ_ONCE(sqe->fsync_flags);
if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
	return -EINVAL;
- ret = io_prep_fsync(req, sqe);
+ ret = io_prep_fsync(req);
if (ret)
return ret;
return 0;
}
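The same mechanical change repeats for every handler below: the sqe argument is dropped and the pointer is recovered from the request itself. A minimal sketch of the before/after shape (bodies elided; this assumes, as the patch does, that req->sqe is populated at submission time and stays valid for the life of the request):

	/* before: the sqe is threaded through as an explicit argument */
	static int io_prep_fsync(struct io_kiocb *req,
				 const struct io_uring_sqe *sqe);

	/* after: only the request travels; the sqe rides inside it */
	static int io_prep_fsync(struct io_kiocb *req)
	{
		const struct io_uring_sqe *sqe = req->sqe;
		/* ... same sqe field validation as before ... */
		return 0;
	}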
-static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_sfr(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
int ret = 0;
return ret;
}
-static int io_sync_file_range(struct io_kiocb *req,
- const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt,
+static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
+ const struct io_uring_sqe *sqe = req->sqe;
loff_t sqe_off;
loff_t sqe_len;
unsigned flags;
int ret;
- ret = io_prep_sfr(req, sqe);
+ ret = io_prep_sfr(req);
if (ret)
return ret;
#endif
}
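Where the sqe comes from changes, but how it is read does not: the SQE sits in a ring mapped into userspace, so each field is still fetched exactly once with READ_ONCE() and every check runs against the local snapshot, as in io_fsync above. The idiom in isolation:

	/* fetch once from shared memory; userspace can rewrite the field
	 * at any time, so never re-read it after validating */
	unsigned fsync_flags = READ_ONCE(sqe->fsync_flags);

	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;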
-static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
#if defined(CONFIG_NET)
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_async_msghdr *kmsg = NULL;
struct socket *sock;
int ret;
#endif
}
-static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
#if defined(CONFIG_NET)
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_async_msghdr *kmsg = NULL;
struct socket *sock;
int ret;
#endif
}
-static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
#if defined(CONFIG_NET)
+ const struct io_uring_sqe *sqe = req->sqe;
struct sockaddr __user *addr;
int __user *addr_len;
unsigned file_flags;
#endif
}
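The networking handlers keep their CONFIG_NET guards, so the new local lands inside the guarded block where it is actually used; with CONFIG_NET disabled each of them reduces to a stub. Roughly, for the accept handler (body elided, stub shape as in this tree):

	static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
			     bool force_nonblock)
	{
	#if defined(CONFIG_NET)
		const struct io_uring_sqe *sqe = req->sqe;
		/* ... accept4() path elided; returns its result ... */
	#else
		return -EOPNOTSUPP;
	#endif
	}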
-static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
#if defined(CONFIG_NET)
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_async_ctx __io, *io;
unsigned file_flags;
int addr_len, ret;
/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
-static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_poll_remove(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
int ret;
hlist_add_head(&req->hash_node, list);
}
-static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt)
+static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
struct io_poll_table ipt;
/*
* Remove or update an existing timeout command
*/
-static int io_timeout_remove(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_timeout_remove(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int ret;
return 0;
}
-static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_timeout(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->sqe;
unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct io_timeout_data *data;
io_put_req_find_next(req, nxt);
}
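After the conversion a handler needs nothing beyond the request: the ring context and the user-supplied fields all hang off req. The cancel handler shows the end state compactly; a sketch with the sqe validation elided (io_async_find_and_cancel is the helper whose tail appears just above):

	static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
	{
		const struct io_uring_sqe *sqe = req->sqe;
		struct io_ring_ctx *ctx = req->ctx;

		/* sqe->addr identifies the request to cancel */
		io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr),
					 nxt, 0);
		return 0;
	}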
-static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt)
+static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
{
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
	return -EINVAL;

case IORING_OP_WRITEV:
	ret = io_write(req, nxt, force_nonblock);
	break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, req->sqe, nxt, force_nonblock);
+ ret = io_fsync(req, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, req->sqe, nxt);
+ ret = io_poll_add(req, nxt);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove(req, req->sqe);
+ ret = io_poll_remove(req);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock);
+ ret = io_sync_file_range(req, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, req->sqe, nxt, force_nonblock);
+ ret = io_sendmsg(req, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, req->sqe, nxt, force_nonblock);
+ ret = io_recvmsg(req, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
- ret = io_timeout(req, req->sqe);
+ ret = io_timeout(req);
break;
case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove(req, req->sqe);
+ ret = io_timeout_remove(req);
break;
case IORING_OP_ACCEPT:
- ret = io_accept(req, req->sqe, nxt, force_nonblock);
+ ret = io_accept(req, nxt, force_nonblock);
break;
case IORING_OP_CONNECT:
- ret = io_connect(req, req->sqe, nxt, force_nonblock);
+ ret = io_connect(req, nxt, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel(req, req->sqe, nxt);
+ ret = io_async_cancel(req, nxt);
break;
default:
ret = -EINVAL;