        struct io_wq_work *cur_work;
        spinlock_t lock;
 
-       const struct cred *cur_creds;
-       const struct cred *saved_creds;
-
        struct completion ref_done;
        struct completion started;
 
        worker->flags = 0;
        preempt_enable();
 
-       if (worker->saved_creds) {
-               revert_creds(worker->saved_creds);
-               worker->cur_creds = worker->saved_creds = NULL;
-       }
-
        raw_spin_lock_irq(&wqe->lock);
        if (flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
 
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }
-       if (worker->saved_creds) {
-               revert_creds(worker->saved_creds);
-               worker->cur_creds = worker->saved_creds = NULL;
-       }
 }
 
 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 {
        return work->flags >> IO_WQ_HASH_SHIFT;
 }
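A side note on the surviving helper just above: the hash rides in the upper bits of work->flags, put there by io_wq_hash_work() when prep decides a request must serialize (for example, buffered writes to the same file). A minimal illustrative sketch; io_wq_hash_work() is the real io-wq.h helper, while hash_by_inode() is a name invented for this example:

    /* Illustrative only: pack a serialization key into work->flags so
     * io-wq runs same-key work items one at a time; io_get_work_hash()
     * above recovers the key from the upper flag bits.
     */
    static void hash_by_inode(struct io_wq_work *work, struct inode *inode)
    {
            io_wq_hash_work(work, inode);
    }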
 
-static void io_wq_switch_creds(struct io_worker *worker,
-                              struct io_wq_work *work)
-{
-       const struct cred *old_creds = override_creds(work->creds);
-
-       worker->cur_creds = work->creds;
-       if (worker->saved_creds)
-               put_cred(old_creds); /* creds set by previous switch */
-       else
-               worker->saved_creds = old_creds;
-}
-
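With io_wq_switch_creds() deleted, a worker never caches foreign credentials between work items; every override is paired with a revert inside a single issue, as the io_issue_sqe() hunk later in this patch shows. A minimal sketch of that scoping, assuming a hypothetical do_one_work() in place of the real request issue:

    /* Sketch of the per-work credential scope this patch adopts.
     * override_creds()/revert_creds() are the real cred APIs;
     * do_one_work() is a placeholder for issuing one request.
     */
    static int run_with_creds(const struct cred *new_creds)
    {
            const struct cred *old = override_creds(new_creds);
            int ret;

            ret = do_one_work();
            revert_creds(old);      /* worker is credential-neutral again */
            return ret;
    }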
 static void io_assign_current_work(struct io_worker *worker,
                                   struct io_wq_work *work)
 {
                        unsigned int hash = io_get_work_hash(work);
 
                        next_hashed = wq_next_work(work);
-                       if (work->creds && worker->cur_creds != work->creds)
-                               io_wq_switch_creds(worker, work);
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);
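After this hunk, nothing credential-related remains between dequeue and wq->do_work(). For orientation, an editorial abridgement of the handler's inner loop shape (not the verbatim kernel loop, which also handles linked and hashed work):

    /* Abridged sketch: the worker just runs work; io_issue_sqe(), called
     * via wq->do_work(), now scopes any credential override itself.
     */
    do {
            struct io_wq_work *next_hashed = wq_next_work(work);

            wq->do_work(work);
            io_assign_current_work(worker, NULL);
            work = next_hashed;
    } while (work);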
 
 
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_NO_FILE_TABLE_BIT,
-       REQ_F_WORK_INITIALIZED_BIT,
        REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
 
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
        /* doesn't need file table for this request */
        REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
-       /* io_wq_work is initialized */
-       REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
        /* linked timeout is active, i.e. prepared by link's head */
        REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
                req->flags |= REQ_F_FAIL_LINK;
 }
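The two flag hunks above preserve the usual pairing: every REQ_F_*_BIT enumerator has a matching REQ_F_* mask built from it, so retiring REQ_F_WORK_INITIALIZED must remove both halves. A toy illustration of the convention, with invented names:

    enum {
            DEMO_FIRST_BIT,
            DEMO_SECOND_BIT,        /* renumbers itself if a bit above is dropped */
    };

    enum {
            DEMO_FIRST      = BIT(DEMO_FIRST_BIT),
            DEMO_SECOND     = BIT(DEMO_SECOND_BIT),
    };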
 
-static inline void __io_req_init_async(struct io_kiocb *req)
-{
-       memset(&req->work, 0, sizeof(req->work));
-       req->flags |= REQ_F_WORK_INITIALIZED;
-}
-
-/*
- * Note: must call io_req_init_async() for the first time you
- * touch any members of io_wq_work.
- */
-static inline void io_req_init_async(struct io_kiocb *req)
-{
-       if (req->flags & REQ_F_WORK_INITIALIZED)
-               return;
-
-       __io_req_init_async(req);
-}
-
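For anyone tracking where the initialization went: the flag-guarded memset deleted above is replaced by unconditional per-field stores in io_init_req() (the last hunk of this patch). A hedged condensation, with a helper name invented for illustration:

    /* Hypothetical condensation of what io_init_req() now does for every
     * request instead of testing REQ_F_WORK_INITIALIZED and memsetting on
     * first touch: three stores, no branch, no tracking flag.
     */
    static inline void io_init_work(struct io_wq_work *work, u16 personality)
    {
            work->list.next = NULL;         /* not on any io-wq list yet */
            work->flags = 0;                /* no stale IO_WQ_WORK_* state */
            work->personality = personality;/* 0 means submitter's own creds */
    }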
 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
        struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
 
 static void io_req_clean_work(struct io_kiocb *req)
 {
-       if (!(req->flags & REQ_F_WORK_INITIALIZED))
-               return;
-
-       if (req->work.creds) {
-               put_cred(req->work.creds);
-               req->work.creds = NULL;
-       }
        if (req->flags & REQ_F_INFLIGHT) {
                struct io_ring_ctx *ctx = req->ctx;
                struct io_uring_task *tctx = req->task->io_uring;
                if (atomic_read(&tctx->in_idle))
                        wake_up(&tctx->wait);
        }
-
-       req->flags &= ~REQ_F_WORK_INITIALIZED;
 }
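The branch deleted above dropped the reference io_prep_async_work() used to take via get_current_cred(); since credentials are now looked up rather than stored, there is no reference left to put at cleanup. The refcount contract that used to span prep and cleanup, shown with the real get/put APIs in an illustrative function:

    /* Illustrative only: the pairing req->work.creds used to require. */
    static void cred_ref_demo(void)
    {
            const struct cred *creds = get_current_cred();  /* takes a ref */

            /* ... stash creds across an async punt ... */
            put_cred(creds);        /* must balance, or the cred leaks */
    }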
 
 static void io_req_track_inflight(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
        if (!(req->flags & REQ_F_INFLIGHT)) {
-               io_req_init_async(req);
                req->flags |= REQ_F_INFLIGHT;
 
                spin_lock_irq(&ctx->inflight_lock);
 static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
 
-       io_req_init_async(req);
-
        if (req->flags & REQ_F_FORCE_ASYNC)
                req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
-       if (!req->work.creds)
-               req->work.creds = get_current_cred();
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
 static int __io_splice_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
        if (sp->flags & SPLICE_F_FD_IN_FIXED) {
                /*
-                * Splice operation will be punted aync, and here need to
-                * modify io_wq_work.flags, so initialize io_wq_work firstly.
+                * Splice operation will be punted async, and here we need
+                * to modify io_wq_work.flags.
                 */
-               io_req_init_async(req);
                req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       const struct cred *creds = NULL;
        int ret;
 
+       if (req->work.personality) {
+               const struct cred *new_creds;
+
+               if (!(issue_flags & IO_URING_F_NONBLOCK))
+                       mutex_lock(&ctx->uring_lock);
+               new_creds = idr_find(&ctx->personality_idr, req->work.personality);
+               if (!(issue_flags & IO_URING_F_NONBLOCK))
+                       mutex_unlock(&ctx->uring_lock);
+               if (!new_creds)
+                       return -EINVAL;
+               creds = override_creds(new_creds);
+       }
+
        switch (req->opcode) {
        case IORING_OP_NOP:
                ret = io_nop(req, issue_flags);
                break;
        }
 
+       if (creds)
+               revert_creds(creds);
+
        if (ret)
                return ret;
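Net effect of the new block at the top of io_issue_sqe(): a personality id recorded at SQE init time is resolved to credentials at issue time, taking uring_lock only when the caller may block (the nonblocking path is inline submission, which can be expected to hold the lock already). Userspace is unchanged; for reference, a hedged liburing-based usage sketch (io_uring_register_personality() is liburing's real API; error handling trimmed):

    #include <liburing.h>

    /* Register the calling task's creds as a personality, then tag an
     * SQE so the kernel issues it under those creds via the idr_find()
     * added above.
     */
    static int submit_with_personality(struct io_uring *ring)
    {
            struct io_uring_sqe *sqe;
            int id = io_uring_register_personality(ring);

            if (id < 0)
                    return id;
            sqe = io_uring_get_sqe(ring);
            io_uring_prep_nop(sqe);
            sqe->personality = id;  /* consumed by io_init_req() */
            return io_uring_submit(ring);
    }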
 
 static void __io_queue_sqe(struct io_kiocb *req)
 {
        struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
-       const struct cred *old_creds = NULL;
        int ret;
 
-       if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
-           req->work.creds != current_cred())
-               old_creds = override_creds(req->work.creds);
-
        ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
-       if (old_creds)
-               revert_creds(old_creds);
-
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
         * doesn't support non-blocking read/write attempts
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                        const struct io_uring_sqe *sqe)
 {
        struct io_submit_state *state;
        unsigned int sqe_flags;
-       int id, ret = 0;
+       int ret = 0;
 
        req->opcode = READ_ONCE(sqe->opcode);
        /* same numerical values with corresponding REQ_F_*, safe to copy */
        req->flags = sqe_flags = READ_ONCE(sqe->flags);
 
        if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
            !io_op_defs[req->opcode].buffer_select)
                return -EOPNOTSUPP;
 
-       id = READ_ONCE(sqe->personality);
-       if (id) {
-               __io_req_init_async(req);
-               req->work.creds = idr_find(&ctx->personality_idr, id);
-               if (unlikely(!req->work.creds))
-                       return -EINVAL;
-               get_cred(req->work.creds);
-       }
-
+       req->work.list.next = NULL;
+       req->work.flags = 0;
+       req->work.personality = READ_ONCE(sqe->personality);
        state = &ctx->submit_state;
 
        /*