 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+       lockdep_assert_held(&req->ctx->completion_lock);
+
        if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return 0;
        return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
@@ ... @@
                        }
                }
                io_req_put_rsrc(req, ctx);
+               /*
+                * Selected buffer deallocation in io_clean_op() assumes that
+                * we don't hold ->completion_lock. Clean them here to avoid
+                * deadlocks.
+                */
+               io_put_kbuf_comp(req);
                io_dismantle_req(req);
                io_put_task(req->task, 1);
                wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
@@ ... @@
 
 static void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & REQ_F_BUFFER_SELECTED)
+       if (req->flags & REQ_F_BUFFER_SELECTED) {
+               spin_lock(&req->ctx->completion_lock);
                io_put_kbuf_comp(req);
+               spin_unlock(&req->ctx->completion_lock);
+       }
 
        if (req->flags & REQ_F_NEED_CLEANUP) {
                switch (req->opcode) {