REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_NODE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
/* buffer ring head needs incrementing on put */
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* buf node is valid */
+ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
};
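The new flag follows the existing pattern here: a REQ_F_*_BIT enum entry paired with a mask generated by IO_REQ_FLAG(). For reference, assuming the macro is still defined as in include/linux/io_uring_types.h:

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))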
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
* REQ_F_BUFFER_RING is set.
*/
struct io_buffer_list *buf_list;
+
+ struct io_rsrc_node *buf_node;
};
union {
__poll_t apoll_events;
};
- struct io_rsrc_node *rsrc_nodes[2];
+ struct io_rsrc_node *file_node;
atomic_t refs;
bool cancel_seq_set;
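Note that buf_node shares this union with buf_list (documented above as valid only while REQ_F_BUFFER_RING is set), which is why the separate REQ_F_BUF_NODE flag is needed; file_node, by contrast, becomes a dedicated field and can simply be tested against NULL. Given the neighbouring members carry "valid IFF <flag> is set" comments, the new member could be annotated the same way, e.g.:

	/* registered buffer node, valid IFF REQ_F_BUF_NODE is set */
	struct io_rsrc_node	*buf_node;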
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
req->ctx = ctx;
- req->rsrc_nodes[IORING_RSRC_FILE] = NULL;
- req->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
+ req->buf_node = NULL;
+ req->file_node = NULL;
req->link = NULL;
req->async_data = NULL;
/* not necessary, but safer to zero */
io_ring_submit_lock(ctx, issue_flags);
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
if (node) {
- io_req_assign_rsrc_node(req, node);
+ io_req_assign_rsrc_node(&req->file_node, node);
req->flags |= io_slot_flags(node);
file = io_slot_file(node);
}
io_ring_submit_lock(ctx, issue_flags);
node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
if (node) {
- io_req_assign_rsrc_node(sr->notif, node);
+ io_req_assign_rsrc_node(&sr->notif->buf_node, node);
+ sr->notif->flags |= REQ_F_BUF_NODE;
ret = 0;
}
io_ring_submit_unlock(ctx, issue_flags);
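This hunk appears to be the send-zc prep path: the buffer node is attached to the notification request rather than the send request itself, presumably so the registered buffer stays pinned until the zero-copy notification completes, at which point the notif's generic put path drops it. A rough user-space view of what exercises this, assuming liburing's io_uring_register_buffers() and io_uring_prep_send_zc_fixed() helpers (error handling trimmed):

#include <liburing.h>
#include <sys/uio.h>
#include <errno.h>

/* register one buffer, then issue a zero-copy send from it */
static int send_one_fixed(struct io_uring *ring, int sock, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	/* buf_index 0 here is what sr->buf_index resolves to in the hunk above */
	io_uring_prep_send_zc_fixed(sqe, sock, buf, len, 0, 0, 0);
	return io_uring_submit(ring);
}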
io_ring_submit_lock(ctx, issue_flags);
node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
if (node) {
- io_req_assign_rsrc_node(req, node);
+ io_req_assign_rsrc_node(&req->buf_node, node);
+ req->flags |= REQ_F_BUF_NODE;
ret = 0;
}
io_ring_submit_unlock(ctx, issue_flags);
notif->file = NULL;
notif->task = current;
io_get_task_refs(1);
- notif->rsrc_nodes[IORING_RSRC_FILE] = NULL;
- notif->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
+ notif->file_node = NULL;
+ notif->buf_node = NULL;
nd = io_notif_to_data(notif);
nd->zc_report = false;
static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
{
- io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_FILE]);
- io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_BUFFER]);
- req->rsrc_nodes[IORING_RSRC_FILE] = NULL;
- req->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
+ if (req->file_node) {
+ io_put_rsrc_node(req->file_node);
+ req->file_node = NULL;
+ }
+ if (req->flags & REQ_F_BUF_NODE) {
+ io_put_rsrc_node(req->buf_node);
+ req->buf_node = NULL;
+ }
}
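Worth spelling out the asymmetry above: file_node is a dedicated pointer zeroed in io_preinit_req(), so a NULL test is enough, while buf_node lives in a union and may alias buf_list, so only REQ_F_BUF_NODE says whether it holds a counted reference. A call site that assigns the node without setting the flag leaks it; setting the flag without a node would make this put dereference garbage. A stand-alone model of that invariant (names are illustrative, not kernel code):

#include <assert.h>

#define REQ_F_BUF_NODE	(1u << 0)	/* illustrative: "buf_node member is live" */

struct node { int refs; };

struct req {
	unsigned int flags;
	struct node *file_node;		/* dedicated field: NULL check suffices */
	union {
		void *buf_list;		/* other users of the same slot */
		struct node *buf_node;	/* only meaningful with REQ_F_BUF_NODE */
	};
};

static void assign_buf_node(struct req *r, struct node *n)
{
	n->refs++;			/* mirrors io_req_assign_rsrc_node() */
	r->buf_node = n;
	r->flags |= REQ_F_BUF_NODE;	/* pointer and flag always move together */
}

static void put_nodes(struct req *r)
{
	if (r->file_node)		/* plain pointer test is safe here */
		r->file_node->refs--;
	if (r->flags & REQ_F_BUF_NODE)	/* a NULL test would not be: the union aliases */
		r->buf_node->refs--;
}

int main(void)
{
	struct node file = { .refs = 1 }, buf = { .refs = 1 };
	struct req r = { .file_node = &file };

	assign_buf_node(&r, &buf);
	put_nodes(&r);
	assert(file.refs == 0 && buf.refs == 1);
	return 0;
}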
static inline int io_rsrc_node_type(struct io_rsrc_node *node)
{
	return node->ctx_ptr & IORING_RSRC_TYPE_MASK;
}
-static inline void io_req_assign_rsrc_node(struct io_kiocb *req,
+static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
struct io_rsrc_node *node)
{
node->refs++;
- req->rsrc_nodes[io_rsrc_node_type(node)] = node;
+ *dst_node = node;
}
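With the helper reduced to bumping the reference and storing the pointer, every buffer call site now repeats the same two lines (assign into &req->buf_node, then set REQ_F_BUF_NODE). A small wrapper could fold that pattern if the repetition bothers anyone; purely a sketch, not part of this diff:

static inline void io_req_assign_buf_node(struct io_kiocb *req,
					   struct io_rsrc_node *node)
{
	io_req_assign_rsrc_node(&req->buf_node, node);
	req->flags |= REQ_F_BUF_NODE;
}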
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
if (!node)
return -EFAULT;
- io_req_assign_rsrc_node(req, node);
+ io_req_assign_rsrc_node(&req->buf_node, node);
+ req->flags |= REQ_F_BUF_NODE;
io = req->async_data;
ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
* being called. This prevents destruction of the mapped buffer
* we'll need at actual import time.
*/
- io_req_assign_rsrc_node(req, node);
+ io_req_assign_rsrc_node(&req->buf_node, node);
}
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
struct iov_iter *iter, void *ioucmd)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- struct io_rsrc_node *node = req->rsrc_nodes[IORING_RSRC_BUFFER];
+ struct io_rsrc_node *node = req->buf_node;
/* Must have had rsrc_node assigned at prep time */
if (node)