unsigned                not_supported : 1;
        /* skip auditing */
        unsigned                audit_skip : 1;
+       /* supports ioprio */
+       unsigned                ioprio : 1;
+       /* supports iopoll */
+       unsigned                iopoll : 1;
        /* size of async data needed, if any */
        unsigned short          async_size;
 };
 
 static const struct io_op_def io_op_defs[] = {
-       [IORING_OP_NOP] = {},
+       [IORING_OP_NOP] = {
+               .audit_skip             = 1,
+               .iopoll                 = 1,
+       },
        [IORING_OP_READV] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .needs_async_setup      = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_WRITEV] = {
                .needs_async_setup      = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_FSYNC] = {
                .pollin                 = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_WRITE_FIXED] = {
                .pollout                = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_POLL_ADD] = {
        [IORING_OP_CLOSE] = {},
        [IORING_OP_FILES_UPDATE] = {
                .audit_skip             = 1,
+               .iopoll                 = 1,
        },
        [IORING_OP_STATX] = {
                .audit_skip             = 1,
                .buffer_select          = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_WRITE] = {
                .pollout                = 1,
                .plug                   = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
+               .iopoll                 = 1,
                .async_size             = sizeof(struct io_async_rw),
        },
        [IORING_OP_FADVISE] = {
        },
        [IORING_OP_PROVIDE_BUFFERS] = {
                .audit_skip             = 1,
+               .iopoll                 = 1,
        },
        [IORING_OP_REMOVE_BUFFERS] = {
                .audit_skip             = 1,
+               .iopoll                 = 1,
        },
        [IORING_OP_TEE] = {
                .needs_file             = 1,
        [IORING_OP_LINKAT] = {},
        [IORING_OP_MSG_RING] = {
                .needs_file             = 1,
+               .iopoll                 = 1,
        },
 };
 
        struct io_rename *ren = &req->rename;
        const char __user *oldf, *newf;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+       if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
        struct io_unlink *un = &req->unlink;
        const char __user *fname;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
-           sqe->splice_fd_in)
+       if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
        struct io_mkdir *mkd = &req->mkdir;
        const char __user *fname;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
-           sqe->splice_fd_in)
+       if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
        struct io_symlink *sl = &req->symlink;
        const char __user *oldpath, *newpath;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
-           sqe->splice_fd_in)
+       if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
        struct io_hardlink *lnk = &req->hardlink;
        const char __user *oldf, *newf;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+       if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
                            const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
+       if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
                     sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
 
        struct io_splice *sp = &req->splice;
        unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-
        sp->len = READ_ONCE(sqe->len);
        sp->flags = READ_ONCE(sqe->splice_flags);
        if (unlikely(sp->flags & ~valid_flags))
  */
 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-
+       /* IOPOLL compatibility is now enforced generically in io_init_req() */
        __io_req_complete(req, issue_flags, 0, 0);
        return 0;
 }
 static int io_msg_ring_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
-                    sqe->splice_fd_in || sqe->buf_index || sqe->personality))
+       if (unlikely(sqe->addr || sqe->rw_flags || sqe->splice_fd_in ||
+                    sqe->buf_index || sqe->personality))
                return -EINVAL;
 
        req->msg.user_data = READ_ONCE(sqe->off);
 
 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
-                    sqe->splice_fd_in))
+       if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.flags = READ_ONCE(sqe->fsync_flags);
 static int io_fallocate_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
-           sqe->splice_fd_in)
-               return -EINVAL;
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        req->sync.off = READ_ONCE(sqe->off);
        const char __user *fname;
        int ret;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (unlikely(sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->buf_index))
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
+       if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
            sqe->splice_fd_in)
                return -EINVAL;
 
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
+       if (sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        tmp = READ_ONCE(sqe->fd);
                             const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_EPOLL)
-       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
-               return -EINVAL;
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
 
        req->epoll.epfd = READ_ONCE(sqe->fd);
 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-       if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
-               return -EINVAL;
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
                return -EINVAL;
 
        req->madvise.addr = READ_ONCE(sqe->addr);
 
 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
-               return -EINVAL;
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
                return -EINVAL;
 
        req->fadvise.offset = READ_ONCE(sqe->off);
 {
        const char __user *path;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+       if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index)
+       if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
-                    sqe->splice_fd_in))
+       if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.off = READ_ONCE(sqe->off);
 {
        struct io_sr_msg *sr = &req->sr_msg;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (unlikely(sqe->addr2 || sqe->file_index))
                return -EINVAL;
 
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 {
        struct io_sr_msg *sr = &req->sr_msg;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (unlikely(sqe->addr2 || sqe->file_index))
                return -EINVAL;
 
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 {
        struct io_accept *accept = &req->accept;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index)
+       if (sqe->len || sqe->buf_index)
                return -EINVAL;
 
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 {
        struct io_connect *conn = &req->connect;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
-           sqe->splice_fd_in)
+       if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        struct io_poll_update *upd = &req->poll_update;
        u32 flags;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+       if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
        struct io_poll_iocb *poll = &req->poll;
        u32 flags;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
+       if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
 {
        struct io_timeout_rem *tr = &req->timeout_rem;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
+       if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
                return -EINVAL;
 
        tr->ltimeout = false;
        unsigned flags;
        u32 off = READ_ONCE(sqe->off);
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
-           sqe->splice_fd_in)
+       if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
                return -EINVAL;
        if (off && is_timeout_link)
                return -EINVAL;
 static int io_async_cancel_prep(struct io_kiocb *req,
                                const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
        if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
+       if (sqe->off || sqe->len || sqe->splice_fd_in)
                return -EINVAL;
 
        req->cancel.addr = READ_ONCE(sqe->addr);
 {
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
+       if (sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        req->rsrc_update.offset = READ_ONCE(sqe->off);
                }
        }
 
+       if (!io_op_defs[opcode].ioprio && sqe->ioprio)
+               return -EINVAL;
+       if (!io_op_defs[opcode].iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+
        if (io_op_defs[opcode].needs_file) {
                struct io_submit_state *state = &ctx->submit_state;