void *priv)
 {
        struct epitem *epi, *tmp;
+       poll_table pt;
 
+       init_poll_funcptr(&pt, NULL);
        list_for_each_entry_safe(epi, tmp, head, rdllink) {
-               if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
+               pt._key = epi->event.events;
+               if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
                    epi->event.events)
                        return POLLIN | POLLRDNORM;
                else {
        /* Initialize the poll table using the queue callback */
        epq.epi = epi;
        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+       epq.pt._key = event->events;
 
        /*
         * Attach the item to the poll hooks and get current event bits.
 {
        int pwake = 0;
        unsigned int revents;
+       poll_table pt;
+
+       init_poll_funcptr(&pt, NULL);
 
        /*
         * Set the new event interest mask before calling f_op->poll();
         * f_op->poll() call and the new event set registering.
         */
        epi->event.events = event->events;
+       pt._key = event->events;
        epi->event.data = event->data; /* protected by mtx */
 
        /*
         * Get current event bits. We can safely use the file* here because
         * its usage count has been increased by the caller of this function.
         */
-       revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
+       revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);
 
        /*
         * If the item is "hot" and it is not registered inside the ready
        unsigned int revents;
        struct epitem *epi;
        struct epoll_event __user *uevent;
+       poll_table pt;
+
+       init_poll_funcptr(&pt, NULL);
 
        /*
         * We can loop without lock because we are passed a task private list.
 
                list_del_init(&epi->rdllink);
 
-               revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
+               pt._key = epi->event.events;
+               revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
                        epi->event.events;
 
                /*
 
        get_file(filp);
        entry->filp = filp;
        entry->wait_address = wait_address;
-       entry->key = p->key;
+       entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
 static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit)
 {
-       if (wait) {
-               wait->key = POLLEX_SET;
-               if (in & bit)
-                       wait->key |= POLLIN_SET;
-               if (out & bit)
-                       wait->key |= POLLOUT_SET;
-       }
+       wait->_key = POLLEX_SET;
+       if (in & bit)
+               wait->_key |= POLLIN_SET;
+       if (out & bit)
+               wait->_key |= POLLOUT_SET;
 }
 
 int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
-               wait = NULL;
+               wait->_qproc = NULL;
                timed_out = 1;
        }
 
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
-                                               wait = NULL;
+                                               wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
-                                               wait = NULL;
+                                               wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
-                                               wait = NULL;
+                                               wait->_qproc = NULL;
                                        }
                                }
                        }
                                *rexp = res_ex;
                        cond_resched();
                }
-               wait = NULL;
+               wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
  * interested in events matching the pollfd->events mask, and the result
  * matching that mask is both recorded in pollfd->revents and returned. The
  * pwait poll_table will be used by the fd-provided poll handler for waiting,
- * if non-NULL.
+ * if pwait->_qproc is non-NULL.
  */
 static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
 {
                if (file != NULL) {
                        mask = DEFAULT_POLLMASK;
                        if (file->f_op && file->f_op->poll) {
-                               if (pwait)
-                                       pwait->key = pollfd->events |
-                                                       POLLERR | POLLHUP;
+                               pwait->_key = pollfd->events|POLLERR|POLLHUP;
                                mask = file->f_op->poll(file, pwait);
                        }
                        /* Mask out unneeded events. */
 
        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
-               pt = NULL;
+               pt->_qproc = NULL;
                timed_out = 1;
        }
 
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
-                                * and kill the poll_table, so we don't
+                                * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt)) {
                                        count++;
-                                       pt = NULL;
+                                       pt->_qproc = NULL;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
-                * a poll_table to them on the next loop iteration.
+                * a poll_table->_qproc to them on the next loop iteration.
                 */
-               pt = NULL;
+               pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
 
  */
 typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
 
+/*
+ * Do not touch the structure directly; use the access functions
+ * poll_does_not_wait() and poll_requested_events() instead.
+ */
 typedef struct poll_table_struct {
-       poll_queue_proc qproc;
-       unsigned long key;
+       poll_queue_proc _qproc;
+       unsigned long _key;
 } poll_table;
 
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
 {
-       if (p && wait_address)
-               p->qproc(filp, wait_address, p);
+       if (p && p->_qproc && wait_address)
+               p->_qproc(filp, wait_address, p);
+}
+
+/*
+ * Return true if it is guaranteed that poll will not wait. This is the case
+ * if the poll() of another file descriptor in the set already returned an
+ * event, so there is no need to wait.
+ */
+static inline bool poll_does_not_wait(const poll_table *p)
+{
+       return p == NULL || p->_qproc == NULL;
+}
+
+/*
+ * Return the set of events that the application wants to poll for.
+ * This is useful for drivers that need to know whether a DMA transfer has
+ * to be started implicitly on poll(). You typically only want to do that
+ * if the application is actually polling for POLLIN and/or POLLOUT.
+ */
+static inline unsigned long poll_requested_events(const poll_table *p)
+{
+       return p ? p->_key : ~0UL;
 }
 
 static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 {
-       pt->qproc = qproc;
-       pt->key   = ~0UL; /* all events enabled */
+       pt->_qproc = qproc;
+       pt->_key   = ~0UL; /* all events enabled */
 }
 
 struct poll_table_entry {
 
 static inline void sock_poll_wait(struct file *filp,
                wait_queue_head_t *wait_address, poll_table *p)
 {
-       if (p && wait_address) {
+       if (!poll_does_not_wait(p) && wait_address) {
                poll_wait(filp, wait_address, p);
                /*
                 * We need to be sure we are in sync with the
 
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
+       if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
                return mask;
 
        writable = unix_writable(sk);