        req->flags &= ~IO_REQ_CLEAN_FLAGS;
 }
 
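+/*
+ * Resolve the request's file at issue time rather than at prep time. The
+ * fd was stashed in req->work.fd during prep; deferring the lookup means
+ * a linked request (e.g. a read from a fixed slot filled by a preceding
+ * direct open) gets the file that exists when it actually runs.
+ */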
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+{
+       if (req->file || !io_op_defs[req->opcode].needs_file)
+               return true;
+
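+       /* fixed files resolve against the ctx's fixed file table; anything
+        * else goes through a normal fd lookup */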
+       if (req->flags & REQ_F_FIXED_FILE)
+               req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
+       else
+               req->file = io_file_get_normal(req, req->work.fd);
+       if (req->file)
+               return true;
+
+       req_set_fail(req);
+       req->result = -EBADF;
+       return false;
+}
+
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
        const struct cred *creds = NULL;
 
        if (!io_op_defs[req->opcode].audit_skip)
                audit_uring_entry(req->opcode);
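+       /* file assignment was deferred from prep time; resolve it now,
+        * just before the request is issued */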
+       if (unlikely(!io_assign_file(req, issue_flags)))
+               return -EBADF;
 
        switch (req->opcode) {
        case IORING_OP_NOP:
 static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       const struct io_op_def *def = &io_op_defs[req->opcode];
        unsigned int issue_flags = IO_URING_F_UNLOCKED;
        bool needs_poll = false;
        struct io_kiocb *timeout;
-       int ret = 0;
+       int ret = 0, err = -ECANCELED;
 
        /* one will be dropped by ->io_free_work() after returning to io-wq */
        if (!(req->flags & REQ_F_REFCOUNT))
                __io_req_set_refcount(req, 2);
        else
                req_ref_get(req);
 
        timeout = io_prep_linked_timeout(req);
        if (timeout)
                io_queue_linked_timeout(timeout);
 
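+       /* a failed file lookup replaces the default -ECANCELED with -EBADF
+        * and routes the request through the cancellation path below */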
+       if (!io_assign_file(req, issue_flags)) {
+               err = -EBADF;
+               work->flags |= IO_WQ_WORK_CANCEL;
+       }
+
        /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
        if (work->flags & IO_WQ_WORK_CANCEL) {
-               io_req_task_queue_fail(req, -ECANCELED);
+               io_req_task_queue_fail(req, err);
                return;
        }
 
        if (req->flags & REQ_F_FORCE_ASYNC) {
-               const struct io_op_def *def = &io_op_defs[req->opcode];
                bool opcode_poll = def->pollin || def->pollout;
 
                if (opcode_poll && file_can_poll(req->file)) {
        if (io_op_defs[opcode].needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
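+               /* only the fd is recorded at prep time; the file itself is
+                * assigned later by io_assign_file() */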
+               req->work.fd = READ_ONCE(sqe->fd);
+
                /*
                 * Plug now if we have more than 2 IO left after this, and the
                 * target is potentially a read/write to block based storage.
                 */
                if (state->need_plug && io_op_defs[opcode].plug) {
                        state->plug_started = true;
                        state->need_plug = false;
                        blk_start_plug_nr_ios(&state->plug, state->submit_nr);
                }
-
-               if (req->flags & REQ_F_FIXED_FILE)
-                       req->file = io_file_get_fixed(req, READ_ONCE(sqe->fd), 0);
-               else
-                       req->file = io_file_get_normal(req, READ_ONCE(sqe->fd));
-               if (unlikely(!req->file))
-                       return -EBADF;
        }
 
        personality = READ_ONCE(sqe->personality);
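
For context, the pattern this deferral enables is a direct open linked to
a fixed-file read: the read's file slot only becomes valid once the open
has completed, so the file must not be looked up at prep time. A minimal
liburing sketch of that chain (ring, file_slot, buf and buf_size are
assumed to be set up by the caller):

	struct io_uring_sqe *sqe;

	/* open "data" directly into fixed-file slot file_slot */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_openat_direct(sqe, AT_FDCWD, "data", O_RDONLY, 0, file_slot);
	sqe->flags |= IOSQE_IO_LINK | IOSQE_CQE_SKIP_SUCCESS;

	/* linked read from the same slot; with deferred assignment, the
	 * slot is resolved only when the read runs, after the open */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, file_slot, buf, buf_size, 0);
	sqe->flags |= IOSQE_FIXED_FILE;

	io_uring_submit(ring);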