        __poll_t                        events;
        bool                            done;
        bool                            canceled;
-       struct wait_queue_entry         wait;
+       struct wait_queue_entry         *wait;
 };
 
 struct io_timeout_data {
 
        spin_lock(&poll->head->lock);
        WRITE_ONCE(poll->canceled, true);
-       if (!list_empty(&poll->wait.entry)) {
-               list_del_init(&poll->wait.entry);
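+       /* still on the waitqueue: remove the entry and punt to async completion */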
+       if (!list_empty(&poll->wait->entry)) {
+               list_del_init(&poll->wait->entry);
                io_queue_async_work(req);
        }
        spin_unlock(&poll->head->lock);
        struct io_ring_ctx *ctx = req->ctx;
 
        req->poll.done = true;
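+       /* the request is done with its allocated wait entry, free it */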
+       kfree(req->poll.wait);
        if (error)
                io_cqring_fill_event(req, error);
        else
         */
        spin_lock_irq(&ctx->completion_lock);
        if (!mask && ret != -ECANCELED) {
-               add_wait_queue(poll->head, &poll->wait);
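+               /* no events yet and not canceled: re-arm the wait entry */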
+               add_wait_queue(poll->head, poll->wait);
                spin_unlock_irq(&ctx->completion_lock);
                return;
        }
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
 {
-       struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
-                                                       wait);
+       struct io_poll_iocb *poll = wait->private;
        struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
        struct io_ring_ctx *ctx = req->ctx;
        __poll_t mask = key_to_poll(key);
        if (mask && !(mask & poll->events))
                return 0;
 
-       list_del_init(&poll->wait.entry);
+       list_del_init(&poll->wait->entry);
 
        /*
         * Run completion inline if we can. We're using trylock here because
 
        pt->error = 0;
        pt->req->poll.head = head;
-       add_wait_queue(head, &pt->req->poll.wait);
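+       /* register the allocated wait entry on the waitqueue head */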
+       add_wait_queue(head, pt->req->poll.wait);
 }
 
 static void io_poll_req_insert(struct io_kiocb *req)
        if (!poll->file)
                return -EBADF;
 
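+       /* the wait entry is now allocated separately instead of embedded */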
+       poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
+       if (!poll->wait)
+               return -ENOMEM;
+
        req->sqe = NULL;
        INIT_IO_WORK(&req->work, io_poll_complete_work);
        events = READ_ONCE(sqe->poll_events);
        ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
        /* initialize the list so that we can do list_empty checks */
-       INIT_LIST_HEAD(&poll->wait.entry);
-       init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+       INIT_LIST_HEAD(&poll->wait->entry);
+       init_waitqueue_func_entry(poll->wait, io_poll_wake);
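+       /* io_poll_wake() finds the iocb through wait->private */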
+       poll->wait->private = poll;
 
        INIT_LIST_HEAD(&req->list);
 
        spin_lock_irq(&ctx->completion_lock);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
-               if (unlikely(list_empty(&poll->wait.entry))) {
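+               /* entry already removed: a wakeup ran while we were arming */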
+               if (unlikely(list_empty(&poll->wait->entry))) {
                        if (ipt.error)
                                cancel = true;
                        ipt.error = 0;
                        mask = 0;
                }
                if (mask || ipt.error)
-                       list_del_init(&poll->wait.entry);
+                       list_del_init(&poll->wait->entry);
                else if (cancel)
                        WRITE_ONCE(poll->canceled, true);
                else if (!poll->done) /* actually waiting for an event */