return __io_put_kbuf(req);
 }
 
-static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
-{
-       bool got = percpu_ref_tryget(ref);
-
-       /* already at zero, wait for ->release() */
-       if (!got)
-               wait_for_completion(compl);
-       percpu_ref_resurrect(ref);
-       if (got)
-               percpu_ref_put(ref);
-}
-
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
        __must_hold(&req->ctx->timeout_lock)
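Illustration, not part of the patch: the deleted helper serviced the error half of a kill -> drain -> resurrect lifecycle on ctx->refs (if tryget succeeded, refs had not yet hit zero; if it failed, the wait for ->release() was still needed). A minimal sketch of the underlying lifecycle, with hypothetical demo_* names standing in for ctx->refs and ctx->ref_comp:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct demo_ctx {
	struct percpu_ref refs;
	struct completion ref_comp;
};

/* the ->release() hook passed to percpu_ref_init(); fires at zero */
static void demo_ref_release(struct percpu_ref *ref)
{
	struct demo_ctx *ctx = container_of(ref, struct demo_ctx, refs);

	complete(&ctx->ref_comp);
}

static void demo_quiesce_then_revive(struct demo_ctx *ctx)
{
	percpu_ref_kill(&ctx->refs);		/* new tryget()s now fail */
	wait_for_completion(&ctx->ref_comp);	/* drain existing refs */
	percpu_ref_resurrect(&ctx->refs);	/* back to percpu mode */
	reinit_completion(&ctx->ref_comp);	/* re-arm for next cycle */
}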
@@ ... @@
        return ret;
 }
 
-static bool io_register_op_must_quiesce(int op)
-{
-       switch (op) {
-       case IORING_REGISTER_BUFFERS:
-       case IORING_UNREGISTER_BUFFERS:
-       case IORING_REGISTER_FILES:
-       case IORING_UNREGISTER_FILES:
-       case IORING_REGISTER_FILES_UPDATE:
-       case IORING_REGISTER_EVENTFD:
-       case IORING_REGISTER_EVENTFD_ASYNC:
-       case IORING_UNREGISTER_EVENTFD:
-       case IORING_REGISTER_PROBE:
-       case IORING_REGISTER_PERSONALITY:
-       case IORING_UNREGISTER_PERSONALITY:
-       case IORING_REGISTER_ENABLE_RINGS:
-       case IORING_REGISTER_RESTRICTIONS:
-       case IORING_REGISTER_FILES2:
-       case IORING_REGISTER_FILES_UPDATE2:
-       case IORING_REGISTER_BUFFERS2:
-       case IORING_REGISTER_BUFFERS_UPDATE:
-       case IORING_REGISTER_IOWQ_AFF:
-       case IORING_UNREGISTER_IOWQ_AFF:
-       case IORING_REGISTER_IOWQ_MAX_WORKERS:
-               return false;
-       default:
-               return true;
-       }
-}
-
-static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx)
-{
-       long ret;
-
-       percpu_ref_kill(&ctx->refs);
-
-       /*
-        * Drop uring mutex before waiting for references to exit. If another
-        * thread is currently inside io_uring_enter() it might need to grab the
-        * uring_lock to make progress. If we hold it here across the drain
-        * wait, then we can deadlock. It's safe to drop the mutex here, since
-        * no new references will come in after we've killed the percpu ref.
-        */
-       mutex_unlock(&ctx->uring_lock);
-       do {
-               ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ);
-               if (ret) {
-                       ret = min(0L, ret);
-                       break;
-               }
-
-               ret = io_run_task_work_sig();
-               io_req_caches_free(ctx);
-       } while (ret >= 0);
-       mutex_lock(&ctx->uring_lock);
-
-       if (ret)
-               io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
-       return ret;
-}
-
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
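Two details worth noting in the deleted io_ctx_quiesce() above: the mutex is dropped across the sleep (a task being waited on may need uring_lock to drop its last reference), and wait_for_completion_interruptible_timeout() returns >0 on completion, 0 on timeout and -ERESTARTSYS on a signal, so min(0L, ret) maps "completed" to success while passing the error through. A condensed sketch of that pattern, with a hypothetical demo_quiesce struct standing in for the ctx fields (task-work processing from the original loop omitted):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/minmax.h>
#include <linux/mutex.h>

struct demo_quiesce {
	struct mutex lock;	/* stands in for ctx->uring_lock */
	struct completion done;	/* stands in for ctx->ref_comp */
};

/* call with q->lock held; 0 on success, -ERESTARTSYS on signal */
static long demo_wait_for_drain(struct demo_quiesce *q)
{
	long ret;

	mutex_unlock(&q->lock);		/* avoid deadlocking waiters */
	do {
		ret = wait_for_completion_interruptible_timeout(&q->done, HZ);
		/* ret == 0 means timed out: retry */
	} while (!ret);
	mutex_lock(&q->lock);

	return min(0L, ret);		/* >0 (completed) becomes 0 */
}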
@@ ... @@
                        return -EACCES;
        }
 
-       if (io_register_op_must_quiesce(opcode)) {
-               ret = io_ctx_quiesce(ctx);
-               if (ret)
-                       return ret;
-       }
-
        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
                ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
                break;
@@ ... @@
        }
 
-       if (io_register_op_must_quiesce(opcode)) {
-               /* bring the ctx back to life */
-               percpu_ref_reinit(&ctx->refs);
-               reinit_completion(&ctx->ref_comp);
-       }
        return ret;
 }
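
For reference, the userspace entry into this path. A minimal sketch assuming liburing: io_uring_register_buffers() issues the IORING_REGISTER_BUFFERS opcode handled first in the switch above, and with this patch applied no register opcode goes through the kill/drain/reinit cycle any more.

#include <liburing.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct iovec iov;
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return 1;
	memset(iov.iov_base, 0, iov.iov_len);

	/* ends up in __io_uring_register(IORING_REGISTER_BUFFERS) */
	ret = io_uring_register_buffers(&ring, &iov, 1);

	io_uring_queue_exit(&ring);
	free(iov.iov_base);
	return ret ? 1 : 0;
}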