if ((data & priv->highalarm) ||
            (~data & priv->lowalarm)) {
-               mask = POLLIN|POLLRDNORM;
+               mask = EPOLLIN|EPOLLRDNORM;
        }
 
 out:
 
        poll_wait(file, &port->in_wait_q, wait);
        /* Some room to write */
        if (port->out_count < OUT_BUFFER_SIZE)
-               mask |=  POLLOUT | POLLWRNORM;
+               mask |=  EPOLLOUT | EPOLLWRNORM;
        /* At least an inbufchunk of data */
        if (sync_data_avail(port) >= port->inbufchunk)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        DEBUGPOLL(if (mask != prev_mask)
                printk(KERN_DEBUG "sync_serial_poll: mask 0x%08X %s %s\n",
                        mask,
-                       mask & POLLOUT ? "POLLOUT" : "",
-                       mask & POLLIN ? "POLLIN" : "");
+                       mask & EPOLLOUT ? "POLLOUT" : "",
+                       mask & EPOLLIN ? "POLLIN" : "");
                prev_mask = mask;
        );
        return mask;
 
 
        /* No active transfer, descriptors are available */
        if (port->output && !port->tr_running)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        /* Descriptor and buffer space available. */
        if (port->output &&
            port->active_tr_descr != port->catch_tr_descr &&
            port->out_buf_count < OUT_BUFFER_SIZE)
-               mask |=  POLLOUT | POLLWRNORM;
+               mask |=  EPOLLOUT | EPOLLWRNORM;
 
        /* At least an inbufchunk of data */
        if (port->input && sync_data_avail(port) >= port->inbufchunk)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        DEBUGPOLL(
        if (mask != prev_mask)
                pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
                        mask,
-                       mask & POLLOUT ? "POLLOUT" : "",
-                       mask & POLLIN ? "POLLIN" : "");
+                       mask & EPOLLOUT ? "POLLOUT" : "",
+                       mask & EPOLLIN ? "POLLIN" : "");
                prev_mask = mask;
        );
        return mask;
 
        PROTECT_CTX(ctx, flags);
 
        if (PFM_CTXQ_EMPTY(ctx) == 0)
-               mask =  POLLIN | POLLRDNORM;
+               mask =  EPOLLIN | EPOLLRDNORM;
 
        UNPROTECT_CTX(ctx, flags);
 
 
 
        /* data available to read? */
        if (rtlx_read_poll(minor, 0))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* space to write */
        if (rtlx_write_poll(minor))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        return mask;
 }
 
 {
        poll_wait(file, &rtas_log_wait, wait);
        if (rtas_log_size)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */
 
-       if (events & (POLLIN | POLLRDNORM)) {
+       if (events & (EPOLLIN | EPOLLRDNORM)) {
                if (stat & 0xff0000)
-                       ret |= POLLIN | POLLRDNORM;
+                       ret |= EPOLLIN | EPOLLRDNORM;
                else {
                        ctx->csa.priv1.int_stat_class2_RW &=
                                ~CLASS2_MAILBOX_INTR;
                        ctx->csa.priv1.int_mask_class2_RW |=
                                CLASS2_ENABLE_MAILBOX_INTR;
                }
        }
-       if (events & (POLLOUT | POLLWRNORM)) {
+       if (events & (EPOLLOUT | EPOLLWRNORM)) {
                if (stat & 0x00ff00)
-                       ret = POLLOUT | POLLWRNORM;
+                       ret = EPOLLOUT | EPOLLWRNORM;
                else {
                        ctx->csa.priv1.int_stat_class2_RW &=
                                ~CLASS2_MAILBOX_THRESHOLD_INTR;
 
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
-       mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
+       mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
        spu_release(ctx);
 
        return mask;
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
-       mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
+       mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
        spu_release(ctx);
 
        return mask;
 
        mask = 0;
        if (free_elements & 0xffff)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        if (tagstatus & ctx->tagwait)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
                free_elements, tagstatus, ctx->tagwait);
                return rc;
 
        if (spufs_switch_log_used(ctx) > 0)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        spu_release(ctx);
 
 
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */
 
-       if (events & (POLLIN | POLLRDNORM)) {
+       if (events & (EPOLLIN | EPOLLRDNORM)) {
                if (stat & 0xff0000)
-                       ret |= POLLIN | POLLRDNORM;
+                       ret |= EPOLLIN | EPOLLRDNORM;
                else {
                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                }
        }
-       if (events & (POLLOUT | POLLWRNORM)) {
+       if (events & (EPOLLOUT | EPOLLWRNORM)) {
                if (stat & 0x00ff00)
-                       ret = POLLOUT | POLLWRNORM;
+                       ret = EPOLLOUT | EPOLLWRNORM;
                else {
                        spu_int_stat_clear(spu, 2,
                                        CLASS2_MAILBOX_THRESHOLD_INTR);
 
        poll_wait(file, &opal_prd_msg_wait, wait);
 
        if (!opal_msg_queue_empty())
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
                return 0;
        poll_wait(fp, &apm_waitqueue, wait);
        if (!queue_empty(as))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 {
        poll_wait(file, &mce_chrdev_wait, wait);
        if (READ_ONCE(mcelog.next))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        if (!mce_apei_read_done && apei_check_mce())
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        spin_lock_irq(&bd->lock);
        if (!list_empty(&bd->done_list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (bd->queued_cmds < bd->max_queue)
-               mask |= POLLOUT;
+               mask |= EPOLLOUT;
        spin_unlock_irq(&bd->lock);
 
        return mask;
 
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-                                                          POLLRDNORM |
-                                                          POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+                                                          EPOLLRDNORM |
+                                                          EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
 }
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-                                                          POLLRDNORM |
-                                                          POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+                                                          EPOLLRDNORM |
+                                                          EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
 }
        mask = 0;
 
        if (!ctx->more || ctx->used)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (af_alg_writable(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
 }
 
 
        poll_wait(file, &acpi_aml_io.wait, wait);
        if (acpi_aml_user_readable())
-               masks |= POLLIN | POLLRDNORM;
+               masks |= EPOLLIN | EPOLLRDNORM;
        if (acpi_aml_user_writable())
-               masks |= POLLOUT | POLLWRNORM;
+               masks |= EPOLLOUT | EPOLLWRNORM;
 
        return masks;
 }
 
         */
        if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
            waitqueue_active(&thread->wait)) {
-               wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+               wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
        }
 
        binder_inner_proc_unlock(thread->proc);
        poll_wait(filp, &thread->wait, wait);
 
        if (binder_has_work(thread, wait_for_proc_work))
-               return POLLIN;
+               return EPOLLIN;
 
        return 0;
 }
 
        poll_wait(file, &data->read_wait, wait);
 
        if (!skb_queue_empty(&data->readq))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
-       return POLLOUT | POLLWRNORM;
+       return EPOLLOUT | EPOLLWRNORM;
 }
 
 static void vhci_open_timeout(struct work_struct *work)
 
        struct apm_user *as = fp->private_data;
 
        poll_wait(fp, &apm_waitqueue, wait);
-       return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+       return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
 }
 
 /*
 
        {
        case DSP56K_DEV_56001:
                /* poll_wait(file, ???, wait); */
-               return POLLIN | POLLRDNORM | POLLOUT;
+               return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
 
        default:
                printk("DSP56k driver: Unknown minor device: %d\n", dev);
 
 #include <linux/uaccess.h>     /* for get_user, etc. */
 #include <linux/wait.h>                /* for wait_queue */
 #include <linux/init.h>                /* for __init, module_{init,exit} */
-#include <linux/poll.h>                /* for POLLIN, etc. */
+#include <linux/poll.h>                /* for EPOLLIN, etc. */
 #include <linux/dtlk.h>                /* local header file for DoubleTalk values */
 
 #ifdef TRACING
 
        if (dtlk_has_indexing && dtlk_readable()) {
                del_timer(&dtlk_timer);
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
        }
        if (dtlk_writeable()) {
                del_timer(&dtlk_timer);
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        }
        /* there are no exception conditions */
 
 
        spin_unlock_irq(&hpet_lock);
 
        if (v != 0)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
        ctrl = bt_inb(bt_bmc, BT_CTRL);
 
        if (ctrl & BT_CTRL_H2B_ATN)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
-               mask |= POLLOUT;
+               mask |= EPOLLOUT;
 
        return mask;
 }
 
        spin_lock_irqsave(&priv->recv_msg_lock, flags);
 
        if (!list_empty(&(priv->recv_msgs)))
-               mask |= (POLLIN | POLLRDNORM);
+               mask |= (EPOLLIN | EPOLLRDNORM);
 
        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
 
 
 
        spin_lock(&ipmi_read_lock);
        if (data_to_read)
-               mask |= (POLLIN | POLLRDNORM);
+               mask |= (EPOLLIN | EPOLLRDNORM);
        spin_unlock(&ipmi_read_lock);
 
        return mask;
 
        poll_wait(filp, &dev->poll_wait, wait);
 
        if (test_and_clear_bit(BS_READABLE, &dev->buffer_status))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask);
 
 
 
        poll_wait(file, &pp->irq_wait, wait);
        if (atomic_read(&pp->irqc))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 }
 
 
        spin_unlock_irq(&rtc_lock);
 
        if (l != 0)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 #endif
 
 
        if (status > 0) {
                if (status & SAL_IROUTER_INTR_RECV) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                }
                if (status & SAL_IROUTER_INTR_XMIT) {
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
                }
        }
 
 
 {
        poll_wait(file, &sonypi_device.fifo_proc_list, wait);
        if (kfifo_len(&sonypi_device.fifo))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        poll_wait(filp, &proxy_dev->wq, wait);
 
-       ret = POLLOUT;
+       ret = EPOLLOUT;
 
        mutex_lock(&proxy_dev->buf_lock);
 
        if (proxy_dev->req_len)
-               ret |= POLLIN | POLLRDNORM;
+               ret |= EPOLLIN | EPOLLRDNORM;
 
        if (!(proxy_dev->state & STATE_OPENED_FLAG))
-               ret |= POLLHUP;
+               ret |= EPOLLHUP;
 
        mutex_unlock(&proxy_dev->buf_lock);
 
 
 
        if (!port->guest_connected) {
                /* Port got unplugged */
-               return POLLHUP;
+               return EPOLLHUP;
        }
        ret = 0;
        if (!will_read_block(port))
-               ret |= POLLIN | POLLRDNORM;
+               ret |= EPOLLIN | EPOLLRDNORM;
        if (!will_write_block(port))
-               ret |= POLLOUT;
+               ret |= EPOLLOUT;
        if (!port->host_connected)
-               ret |= POLLHUP;
+               ret |= EPOLLHUP;
 
        return ret;
 }
 
 
                spin_lock_irqsave(&channel->wr_spinlock, flags);
                if (!channel->wr_empty || channel->wr_ready)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
 
                if (channel->wr_hangup)
                        /*
-                        * Not POLLHUP, because its behavior is in the
-                        * mist, and POLLIN does what we want: Wake up
+                        * Not EPOLLHUP, because its behavior is in the
+                        * mist, and EPOLLIN does what we want: Wake up
                         * the read file descriptor so it sees EOF.
                         */
-                       mask |=  POLLIN | POLLRDNORM;
+                       mask |=  EPOLLIN | EPOLLRDNORM;
                spin_unlock_irqrestore(&channel->wr_spinlock, flags);
        }
 
 
                spin_lock_irqsave(&channel->rd_spinlock, flags);
                if (!channel->rd_full)
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
                spin_unlock_irqrestore(&channel->rd_spinlock, flags);
        }
 
        if (channel->endpoint->fatal_error)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
 
        return mask;
 }
 
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
  *
- * - Checking for POLLIN, i.e. read access, can be use to query the state of the
+ * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the
  *   most recent write or exclusive fence.
  *
- * - Checking for POLLOUT, i.e. write access, can be used to query the state of
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
  *   all attached fences, shared and exclusive ones.
  *
  * Note that this only signals the completion of the respective fences, i.e. the
 
        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
-               return POLLERR;
+               return EPOLLERR;
 
        resv = dmabuf->resv;
 
        poll_wait(file, &dmabuf->poll, poll);
 
-       events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+       events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
        if (!events)
                return 0;
 
                goto retry;
        }
 
-       if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+       if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-               __poll_t pevents = POLLIN;
+               __poll_t pevents = EPOLLIN;
 
                if (shared_count == 0)
-                       pevents |= POLLOUT;
+                       pevents |= EPOLLOUT;
 
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                }
        }
 
-       if ((events & POLLOUT) && shared_count > 0) {
+       if ((events & EPOLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;
 
                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
-                       events &= ~POLLOUT;
+                       events &= ~EPOLLOUT;
                else
-                       dcb->active = POLLOUT;
+                       dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);
 
-               if (!(events & POLLOUT))
+               if (!(events & EPOLLOUT))
                        goto out;
 
                for (i = 0; i < shared_count; ++i) {
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
-                               events &= ~POLLOUT;
+                               events &= ~EPOLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!dma_fence_add_callback(fence, &dcb->cb,
                                                    dma_buf_poll_cb)) {
                                dma_fence_put(fence);
-                               events &= ~POLLOUT;
+                               events &= ~EPOLLOUT;
                                break;
                        }
                        dma_fence_put(fence);
 
                        wake_up_all(&sync_file->wq);
        }
 
-       return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+       return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
 
        poll_wait(file, &client->wait, pt);
 
        if (fw_device_is_shutdown(client->device))
-               mask |= POLLHUP | POLLERR;
+               mask |= EPOLLHUP | EPOLLERR;
        if (!list_empty(&client->event_list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(file, &client->buffer.wait, pt);
 
        if (atomic_read(&client->buffer.size) > 0)
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
 
        if (list_empty(&client->lynx->link))
-               ret |= POLLHUP;
+               ret |= EPOLLHUP;
 
        return ret;
 }
 
        poll_wait(filep, &le->wait, wait);
 
        if (!kfifo_is_empty(&le->events))
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
 
        return events;
 }
 
        ret = kfifo_put(&le->events, ge);
        if (ret != 0)
-               wake_up_poll(&le->wait, POLLIN);
+               wake_up_poll(&le->wait, EPOLLIN);
 
        return IRQ_HANDLED;
 }
 
        poll_wait(filp, &file_priv->event_wait, wait);
 
        if (!list_empty(&file_priv->event_list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
  * The two separate pointers let us decouple read()s from tail pointer aging.
  *
  * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering POLLIN events)
+ * callback (the same callback that is used for delivering EPOLLIN events)
  *
  * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
  * indicates that an updated tail pointer is needed.
                mutex_unlock(&dev_priv->perf.lock);
        }
 
-       /* We allow the poll checking to sometimes report false positive POLLIN
+       /* We allow the poll checking to sometimes report false positive EPOLLIN
         * events where we might actually report EAGAIN on read() if there's
         * not really any data available. In this situation though we don't
-        * want to enter a busy loop between poll() reporting a POLLIN event
+        * want to enter a busy loop between poll() reporting a EPOLLIN event
         * and read() returning -EAGAIN. Clearing the oa.pollin state here
         * effectively ensures we back off until the next hrtimer callback
-        * before reporting another POLLIN event.
+        * before reporting another EPOLLIN event.
         */
        if (ret >= 0 || ret == -EAGAIN) {
                /* Maybe make ->pollin per-stream state if we support multiple
         * samples to read.
         */
        if (dev_priv->perf.oa.pollin)
-               events |= POLLIN;
+               events |= EPOLLIN;
 
        return events;
 }
 
        pr_debug("%s\n", __func__);
 
        poll_wait(file, &vga_wait_queue, wait);
-       return POLLIN;
+       return EPOLLIN;
 }
 
 static int vga_arb_open(struct inode *inode, struct file *file)
 
 
        poll_wait(file, &list->hdev->debug_wait, wait);
        if (list->head != list->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        if (!list->hdev->debug)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        return 0;
 }
 
 
        struct roccat_reader *reader = file->private_data;
        poll_wait(file, &reader->device->wait, wait);
        if (reader->cbuf_start != reader->device->cbuf_end)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        if (!reader->device->exist)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        return 0;
 }
 
 
        poll_wait(file, &sensor_inst->wait, wait);
 
        if (!kfifo_is_empty(&sensor_inst->data_fifo))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
 
        poll_wait(file, &list->hidraw->wait, wait);
        if (list->head != list->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        if (!list->hidraw->exist)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        return 0;
 }
 
 
        poll_wait(file, &uhid->waitq, wait);
 
        if (uhid->head != uhid->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
 
        poll_wait(file, &list->hiddev->wait, wait);
        if (list->head != list->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        if (!list->hiddev->exist)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        return 0;
 }
 
 
        poll_wait(file, &cs_char_data.wait, wait);
        spin_lock_bh(&csdata->lock);
        if (!list_empty(&csdata->chardev_queue))
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
        else if (!list_empty(&csdata->dataind_queue))
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock_bh(&csdata->lock);
 
        return ret;
 
        poll_wait(file, &hvt->outmsg_q, wait);
 
        if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        if (hvt->outmsg_len > 0)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
        spin_unlock_irqrestore(&queue->list_lock, flags);
 
        iio_buffer_block_put_atomic(block);
-       wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM);
+       wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
 
        }
        spin_unlock_irqrestore(&queue->list_lock, flags);
 
-       wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM);
+       wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
 
 
  * @wait:      Poll table structure pointer for which the driver adds
  *             a wait queue
  *
- * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
  *        or 0 for other cases
  */
 __poll_t iio_buffer_poll(struct file *filp,
 
        poll_wait(filp, &rb->pollq, wait);
        if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
         * We can't just test for watermark to decide if we wake the poll queue
         * because read may request less samples than the watermark.
         */
-       wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
+       wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
        return 0;
 }
 
 
 
                copied = kfifo_put(&ev_int->det_events, ev);
                if (copied != 0)
-                       wake_up_poll(&ev_int->wait, POLLIN);
+                       wake_up_poll(&ev_int->wait, EPOLLIN);
        }
 
        return 0;
  * @filep:     File structure pointer to identify the device
  * @wait:      Poll table pointer to add the wait queue on
  *
- * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
  *        or a negative error code on failure
  */
 static __poll_t iio_event_poll(struct file *filep,
        poll_wait(filep, &ev_int->wait, wait);
 
        if (!kfifo_is_empty(&ev_int->det_events))
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
 
        return events;
 }
 
        poll_wait(filp, &file->poll_wait, wait);
 
        if (!list_empty(&file->events))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(filp, &file->poll_wait, wait);
 
        if (!list_empty(&file->event_list))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        struct ib_umad_file *file = filp->private_data;
 
        /* we will always be able to post a MAD send */
-       __poll_t mask = POLLOUT | POLLWRNORM;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM;
 
        poll_wait(filp, &file->recv_wait, wait);
 
        if (!list_empty(&file->recv_list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
 
        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
-               pollflags = POLLIN | POLLRDNORM;
+               pollflags = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irq(&ev_queue->lock);
 
        return pollflags;
 
 
        uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
        if (!uctxt)
-               pollflag = POLLERR;
+               pollflag = EPOLLERR;
        else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
                pollflag = poll_urgent(fp, pt);
        else  if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
                pollflag = poll_next(fp, pt);
        else /* invalid */
-               pollflag = POLLERR;
+               pollflag = EPOLLERR;
 
        return pollflag;
 }
 
        spin_lock_irq(&dd->uctxt_lock);
        if (uctxt->urgent != uctxt->urgent_poll) {
-               pollflag = POLLIN | POLLRDNORM;
+               pollflag = EPOLLIN | EPOLLRDNORM;
                uctxt->urgent_poll = uctxt->urgent;
        } else {
                pollflag = 0;
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
                pollflag = 0;
        } else {
-               pollflag = POLLIN | POLLRDNORM;
+               pollflag = EPOLLIN | EPOLLRDNORM;
        }
        spin_unlock_irq(&dd->uctxt_lock);
 
 
 
        spin_lock_irq(&dd->uctxt_lock);
        if (rcd->urgent != rcd->urgent_poll) {
-               pollflag = POLLIN | POLLRDNORM;
+               pollflag = EPOLLIN | EPOLLRDNORM;
                rcd->urgent_poll = rcd->urgent;
        } else {
                pollflag = 0;
                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
                pollflag = 0;
        } else
-               pollflag = POLLIN | POLLRDNORM;
+               pollflag = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irq(&dd->uctxt_lock);
 
        return pollflag;
 
        rcd = ctxt_fp(fp);
        if (!rcd)
-               pollflag = POLLERR;
+               pollflag = EPOLLERR;
        else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
                pollflag = qib_poll_urgent(rcd, fp, pt);
        else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
                pollflag = qib_poll_next(rcd, fp, pt);
        else /* invalid */
-               pollflag = POLLERR;
+               pollflag = EPOLLERR;
 
        return pollflag;
 }
 
        iser_info("iser conn %p rc = %d\n", iser_conn, rc);
 
        if (rc > 0)
-               return 1; /* success, this is the equivalent of POLLOUT */
+               return 1; /* success, this is the equivalent of EPOLLOUT */
        else if (!rc)
                return 0; /* timeout */
        else
 
        poll_wait(file, &evdev->wait, wait);
 
        if (evdev->exist && !client->revoked)
-               mask = POLLOUT | POLLWRNORM;
+               mask = EPOLLOUT | EPOLLWRNORM;
        else
-               mask = POLLHUP | POLLERR;
+               mask = EPOLLHUP | EPOLLERR;
 
        if (client->packet_head != client->tail)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(file, &input_devices_poll_wait, wait);
        if (file->f_version != input_devices_state) {
                file->f_version = input_devices_state;
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        }
 
        return 0;
 
        struct joydev *joydev = client->joydev;
 
        poll_wait(file, &joydev->wait, wait);
-       return (joydev_data_pending(client) ? (POLLIN | POLLRDNORM) : 0) |
-               (joydev->exist ?  0 : (POLLHUP | POLLERR));
+       return (joydev_data_pending(client) ? (EPOLLIN | EPOLLRDNORM) : 0) |
+               (joydev->exist ?  0 : (EPOLLHUP | EPOLLERR));
 }
 
 static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
 
 
        l = 0;
         if (l != 0)
-                return POLLIN | POLLRDNORM;
+                return EPOLLIN | EPOLLRDNORM;
         return 0;
 }
 
 
        poll_wait(file, &udev->waitq, wait);
 
        if (udev->head != udev->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
 
        poll_wait(file, &mousedev->wait, wait);
 
-       mask = mousedev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
+       mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR;
        if (client->ready || client->buffer)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
 
        poll_wait(file, &serio_raw->wait, wait);
 
-       mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM;
+       mask = serio_raw->dead ? EPOLLHUP | EPOLLERR : EPOLLOUT | EPOLLWRNORM;
        if (serio_raw->head != serio_raw->tail)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(file, &userio->waitq, wait);
 
        if (userio->head != userio->tail)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
        __poll_t mask = 0;
 
        if (!cdev->ap.applid)
-               return POLLERR;
+               return EPOLLERR;
 
        poll_wait(file, &(cdev->recvwait), wait);
-       mask = POLLOUT | POLLWRNORM;
+       mask = EPOLLOUT | EPOLLWRNORM;
        if (!skb_queue_empty(&cdev->recvqueue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
 }
 
 
        __poll_t mask = 0;
 
        poll_wait(file, &(rd_queue), wait);
-       /* mask = POLLOUT | POLLWRNORM; */
+       /* mask = EPOLLOUT | EPOLLWRNORM; */
        if (*((struct divert_info **) file->private_data)) {
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
        return mask;
 }                              /* isdn_divert_poll */
 
        __poll_t mask = 0;
 
        poll_wait(file, &msgwaitq, wait);
-       mask = POLLOUT | POLLWRNORM;
+       mask = EPOLLOUT | EPOLLWRNORM;
        if (file->private_data || diva_dbg_q_length()) {
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
        return (mask);
 }
 
        diva_um_idi_os_context_t *p_os;
 
        if (!file->private_data) {
-               return (POLLERR);
+               return (EPOLLERR);
        }
 
        if ((!(p_os =
               (diva_um_idi_os_context_t *)
               diva_um_id_get_os_context(file->private_data)))
            || p_os->aborted) {
-               return (POLLERR);
+               return (EPOLLERR);
        }
 
        poll_wait(file, &p_os->read_wait, wait);
 
        if (p_os->aborted) {
-               return (POLLERR);
+               return (EPOLLERR);
        }
 
        switch (diva_user_mode_idi_ind_ready(file->private_data, file)) {
        case (-1):
-               return (POLLERR);
+               return (EPOLLERR);
 
        case 0:
                return (0);
        }
 
-       return (POLLIN | POLLRDNORM);
+       return (EPOLLIN | EPOLLRDNORM);
 }
 
 static int um_idi_open(struct inode *inode, struct file *file)
 
 static __poll_t divas_poll(struct file *file, poll_table *wait)
 {
        if (!file->private_data) {
-               return (POLLERR);
+               return (EPOLLERR);
        }
-       return (POLLIN | POLLRDNORM);
+       return (EPOLLIN | EPOLLRDNORM);
 }
 
 static const struct file_operations divas_fops = {
 
 
 static __poll_t divas_poll(struct file *file, poll_table *wait)
 {
-       return (POLLERR);
+       return (EPOLLERR);
 }
 
 static int divas_open(struct inode *inode, struct file *file)
 
        poll_wait(file, &(pd->rd_queue), wait);
 
        if (*((struct log_data **) file->private_data))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }                              /* hysdn_log_poll */
 
        mutex_lock(&isdn_mutex);
        if (minor == ISDN_MINOR_STATUS) {
                poll_wait(file, &(dev->info_waitq), wait);
-               /* mask = POLLOUT | POLLWRNORM; */
+               /* mask = EPOLLOUT | EPOLLWRNORM; */
                if (file->private_data) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                }
                goto out;
        }
        if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) {
                if (drvidx < 0) {
                        /* driver deregistered while file open */
-                       mask = POLLHUP;
+                       mask = EPOLLHUP;
                        goto out;
                }
                poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait);
-               mask = POLLOUT | POLLWRNORM;
+               mask = EPOLLOUT | EPOLLWRNORM;
                if (dev->drv[drvidx]->stavail) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                }
                goto out;
        }
                goto out;
        }
 #endif
-       mask = POLLERR;
+       mask = EPOLLERR;
 out:
        mutex_unlock(&isdn_mutex);
        return mask;
 
 
        if (!(is->state & IPPP_OPEN)) {
                if (is->state == IPPP_CLOSEWAIT)
-                       return POLLHUP;
+                       return EPOLLHUP;
                printk(KERN_DEBUG "isdn_ppp: device not open\n");
-               return POLLERR;
+               return EPOLLERR;
        }
        /* we're always ready to send .. */
-       mask = POLLOUT | POLLWRNORM;
+       mask = EPOLLOUT | EPOLLWRNORM;
 
        spin_lock_irqsave(&is->buflock, flags);
        bl = is->last;
         */
        if (bf->next != bl || (is->state & IPPP_NOBLOCK)) {
                is->state &= ~IPPP_NOBLOCK;
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
        spin_unlock_irqrestore(&is->buflock, flags);
        return mask;
 
 mISDN_poll(struct file *filep, poll_table *wait)
 {
        struct mISDNtimerdev    *dev = filep->private_data;
-       __poll_t                mask = POLLERR;
+       __poll_t                mask = EPOLLERR;
 
        if (*debug & DEBUG_TIMER)
                printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
                poll_wait(filep, &dev->wait, wait);
                mask = 0;
                if (dev->work || !list_empty(&dev->expired))
-                       mask |= (POLLIN | POLLRDNORM);
+                       mask |= (EPOLLIN | EPOLLRDNORM);
                if (*debug & DEBUG_TIMER)
                        printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
                               dev->work, list_empty(&dev->expired));
 
        poll_wait(file, &udev->waitq, wait);
 
        if (udev->new_data)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
 
                spin_lock_irqsave(&pp->lock, flags);
                if (pp->busy && pp->cmd.status != 1)
-                       mask |= POLLIN;
+                       mask |= EPOLLIN;
                spin_unlock_irqrestore(&pp->lock, flags);
        }
        if (pp->mode == smu_file_events) {
 
        poll_wait(filp, &pp->wait, wait);
        spin_lock_irqsave(&pp->lock, flags);
        if (pp->rb_get != pp->rb_put)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
        spin_unlock_irqrestore(&pp->lock, flags);
        return mask;
 }
 
        poll_wait(filp, &tdev->waitq, wait);
 
        if (mbox_test_message_data_ready(tdev))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
        poll_wait(filp, &dm_global_eventq, wait);
 
        if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        return mask;
 }
 
        __poll_t mask;
 
        if (md_unloading)
-               return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
+               return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
        poll_wait(filp, &md_event_waiters, wait);
 
        /* always allow read */
-       mask = POLLIN | POLLRDNORM;
+       mask = EPOLLIN | EPOLLRDNORM;
 
        if (seq->poll_event != atomic_read(&md_event_count))
-               mask |= POLLERR | POLLPRI;
+               mask |= EPOLLERR | EPOLLPRI;
        return mask;
 }
 
 
        __poll_t res = 0;
 
        if (!cec_is_registered(adap))
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        mutex_lock(&adap->lock);
        if (adap->is_configured &&
            adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
-               res |= POLLOUT | POLLWRNORM;
+               res |= EPOLLOUT | EPOLLWRNORM;
        if (fh->queued_msgs)
-               res |= POLLIN | POLLRDNORM;
+               res |= EPOLLIN | EPOLLRDNORM;
        if (fh->total_queued_events)
-               res |= POLLPRI;
+               res |= EPOLLPRI;
        poll_wait(filp, &fh->wait, poll);
        mutex_unlock(&adap->lock);
        return res;
 
 
        if (vdev->vfl_type == VFL_TYPE_VBI) {
                if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT)
-                       return res | POLLOUT | POLLWRNORM;
+                       return res | EPOLLOUT | EPOLLWRNORM;
                if( 0 == fh->vbi_q.streaming )
                        return res | videobuf_poll_stream(file, &fh->vbi_q, wait);
                q = &fh->vbi_q;
 
        if (!buf) {
                DEB_D("buf == NULL!\n");
-               return res | POLLERR;
+               return res | EPOLLERR;
        }
 
        poll_wait(file, &buf->done, wait);
        if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
                DEB_D("poll succeeded!\n");
-               return res | POLLIN | POLLRDNORM;
+               return res | EPOLLIN | EPOLLRDNORM;
        }
 
        DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
 
        rc = smsdvb_stats_wait_read(debug_data);
        kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
 
-       return rc > 0 ? POLLIN | POLLRDNORM : 0;
+       return rc > 0 ? EPOLLIN | EPOLLRDNORM : 0;
 }
 
 static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
 
        struct vb2_buffer *vb = NULL;
        unsigned long flags;
 
-       if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
+       if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
                return 0;
-       if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
+       if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
                return 0;
 
        /*
         */
        if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
                if (!q->is_output && (q->io_modes & VB2_READ) &&
-                               (req_events & (POLLIN | POLLRDNORM))) {
+                               (req_events & (EPOLLIN | EPOLLRDNORM))) {
                        if (__vb2_init_fileio(q, 1))
-                               return POLLERR;
+                               return EPOLLERR;
                }
                if (q->is_output && (q->io_modes & VB2_WRITE) &&
-                               (req_events & (POLLOUT | POLLWRNORM))) {
+                               (req_events & (EPOLLOUT | EPOLLWRNORM))) {
                        if (__vb2_init_fileio(q, 0))
-                               return POLLERR;
+                               return EPOLLERR;
                        /*
                         * Write to OUTPUT queue can be done immediately.
                         */
-                       return POLLOUT | POLLWRNORM;
+                       return EPOLLOUT | EPOLLWRNORM;
                }
        }
 
         * error flag is set.
         */
        if (!vb2_is_streaming(q) || q->error)
-               return POLLERR;
+               return EPOLLERR;
 
        /*
         * If this quirk is set and QBUF hasn't been called yet then
-        * return POLLERR as well. This only affects capture queues, output
+        * return EPOLLERR as well. This only affects capture queues, output
         * queues will always initialize waiting_for_buffers to false.
         * This quirk is set by V4L2 for backwards compatibility reasons.
         */
        if (q->quirk_poll_must_check_waiting_for_buffers &&
-           q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
-               return POLLERR;
+           q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
+               return EPOLLERR;
 
        /*
         * For output streams you can call write() as long as there are fewer
         * buffers queued than there are buffers available.
         */
        if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
-               return POLLOUT | POLLWRNORM;
+               return EPOLLOUT | EPOLLWRNORM;
 
        if (list_empty(&q->done_list)) {
                /*
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (q->last_buffer_dequeued)
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
 
                poll_wait(file, &q->done_wq, wait);
        }
        if (vb && (vb->state == VB2_BUF_STATE_DONE
                        || vb->state == VB2_BUF_STATE_ERROR)) {
                return (q->is_output) ?
-                               POLLOUT | POLLWRNORM :
-                               POLLIN | POLLRDNORM;
+                               EPOLLOUT | EPOLLWRNORM :
+                               EPOLLIN | EPOLLRDNORM;
        }
        return 0;
 }
 
                        == V4L2_BUF_FLAG_TIMESTAMP_COPY;
        /*
         * For compatibility with vb1: if QBUF hasn't been called yet, then
-        * return POLLERR as well. This only affects capture queues, output
+        * return EPOLLERR as well. This only affects capture queues, output
         * queues will always initialize waiting_for_buffers to false.
         */
        q->quirk_poll_must_check_waiting_for_buffers = true;
                struct v4l2_fh *fh = file->private_data;
 
                if (v4l2_event_pending(fh))
-                       res = POLLPRI;
-               else if (req_events & POLLPRI)
+                       res = EPOLLPRI;
+               else if (req_events & EPOLLPRI)
                        poll_wait(file, &fh->wait, wait);
        }
 
        WARN_ON(!lock);
 
        if (lock && mutex_lock_interruptible(lock))
-               return POLLERR;
+               return EPOLLERR;
 
        fileio = q->fileio;
 
 
        __poll_t mask = 0;
 
        if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
-               return POLLERR;
+               return EPOLLERR;
        if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
                return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait);
 
                return 0;
 
        if (dmxdevfilter->buffer.error)
-               mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+               mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
 
        if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
-               mask |= (POLLIN | POLLRDNORM | POLLPRI);
+               mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
 
        return mask;
 }
        dprintk("%s\n", __func__);
 
        if (dmxdev->exit)
-               return POLLERR;
+               return EPOLLERR;
        if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
                return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait);
 
 #endif
        if (need_ringbuffer) {
                if (dmxdev->dvr_buffer.error)
-                       mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+                       mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
 
                if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
-                       mask |= (POLLIN | POLLRDNORM | POLLPRI);
+                       mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
        } else
-               mask |= (POLLOUT | POLLWRNORM | POLLPRI);
+               mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI);
 
        return mask;
 }
 
        dprintk("%s\n", __func__);
 
        if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        /* if there is something, return now */
        if (mask)
        poll_wait(file, &ca->wait_queue, wait);
 
        if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        return mask;
 }
 
        poll_wait (file, &fepriv->events.wait_queue, wait);
 
        if (fepriv->events.eventw != fepriv->events.eventr)
-               return (POLLIN | POLLRDNORM | POLLPRI);
+               return (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
 
        return 0;
 }
 
 
 static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait)
 {
-       return POLLIN;
+       return EPOLLIN;
 }
 
 static const struct file_operations fdtv_ca_fops = {
 
        case SAA6588_CMD_POLL:
                a->result = 0;
                if (s->data_available_for_read)
-                       a->result |= POLLIN | POLLRDNORM;
+                       a->result |= EPOLLIN | EPOLLRDNORM;
                poll_wait(a->instance, &s->read_queue, a->event_list);
                break;
 
 
        struct media_devnode *devnode = media_devnode_data(filp);
 
        if (!media_devnode_is_registered(devnode))
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
        if (!devnode->fops->poll)
                return DEFAULT_POLLMASK;
        return devnode->fops->poll(filp, poll);
 
        __poll_t req_events = poll_requested_events(wait);
 
        if (v4l2_event_pending(&fh->fh))
-               rc = POLLPRI;
-       else if (req_events & POLLPRI)
+               rc = EPOLLPRI;
+       else if (req_events & EPOLLPRI)
                poll_wait(file, &fh->fh.wait, wait);
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return rc;
 
        if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
                if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
-                       return rc | POLLERR;
+                       return rc | EPOLLERR;
                return rc | videobuf_poll_stream(file, &fh->vbi, wait);
        }
 
        if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
                /* streaming capture */
                if (list_empty(&fh->cap.stream))
-                       return rc | POLLERR;
+                       return rc | EPOLLERR;
                buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
        } else {
                /* read() capture */
                if (NULL == fh->cap.read_buf) {
                        /* need to capture a new frame */
                        if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM))
-                               return rc | POLLERR;
+                               return rc | EPOLLERR;
                        fh->cap.read_buf = videobuf_sg_alloc(fh->cap.msize);
                        if (NULL == fh->cap.read_buf)
-                               return rc | POLLERR;
+                               return rc | EPOLLERR;
                        fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR;
                        field = videobuf_next_field(&fh->cap);
                        if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) {
                                kfree (fh->cap.read_buf);
                                fh->cap.read_buf = NULL;
-                               return rc | POLLERR;
+                               return rc | EPOLLERR;
                        }
                        fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
                        fh->cap.read_off = 0;
        poll_wait(file, &buf->vb.done, wait);
        if (buf->vb.state == VIDEOBUF_DONE ||
            buf->vb.state == VIDEOBUF_ERROR)
-               rc = rc | POLLIN|POLLRDNORM;
+               rc = rc | EPOLLIN|EPOLLRDNORM;
        return rc;
 }
 
        __poll_t res = 0;
 
        if (v4l2_event_pending(&fh->fh))
-               res = POLLPRI;
-       else if (req_events & POLLPRI)
+               res = EPOLLPRI;
+       else if (req_events & EPOLLPRI)
                poll_wait(file, &fh->fh.wait, wait);
        radio_enable(btv);
        cmd.instance = file;
 
 
        /* Start a capture if there is none */
        if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
-                       (req_events & (POLLIN | POLLRDNORM))) {
+                       (req_events & (EPOLLIN | EPOLLRDNORM))) {
                int rc;
 
                mutex_lock(&cx->serialize_lock);
                if (rc) {
                        CX18_DEBUG_INFO("Could not start capture for %s (%d)\n",
                                        s->name, rc);
-                       return POLLERR;
+                       return EPOLLERR;
                }
                CX18_DEBUG_FILE("Encoder poll started capture\n");
        }
                __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
 
                if (v4l2_event_pending(&id->fh))
-                       res |= POLLPRI;
-               if (eof && videobuf_poll == POLLERR)
-                       return res | POLLHUP;
+                       res |= EPOLLPRI;
+               if (eof && videobuf_poll == EPOLLERR)
+                       return res | EPOLLHUP;
                return res | videobuf_poll;
        }
 
        /* add stream's waitq to the poll list */
        CX18_DEBUG_HI_FILE("Encoder poll\n");
        if (v4l2_event_pending(&id->fh))
-               res |= POLLPRI;
+               res |= EPOLLPRI;
        else
                poll_wait(filp, &s->waitq, wait);
 
        if (atomic_read(&s->q_full.depth))
-               return res | POLLIN | POLLRDNORM;
+               return res | EPOLLIN | EPOLLRDNORM;
        if (eof)
-               return res | POLLHUP;
+               return res | EPOLLHUP;
        return res;
 }
 
 
        poll_wait(file, &input->dma->wq, wait);
        poll_wait(file, &output->dma->wq, wait);
        if (ddb_input_avail(input) >= 188)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (ddb_output_free(output) >= 188)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 }
 
 
                /* Turn off the old-style vsync events */
                clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
                if (v4l2_event_pending(&id->fh))
-                       res = POLLPRI;
+                       res = EPOLLPRI;
        } else {
                /* This is the old-style API which is here only for backwards
                   compatibility. */
                set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
                if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
                    test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
-                       res = POLLPRI;
+                       res = EPOLLPRI;
        }
 
        /* Allow write if buffers are available for writing */
        if (s->q_free.buffers)
-               res |= POLLOUT | POLLWRNORM;
+               res |= EPOLLOUT | EPOLLWRNORM;
        return res;
 }
 
        /* Start a capture if there is none */
        if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
                        s->type != IVTV_ENC_STREAM_TYPE_RAD &&
-                       (req_events & (POLLIN | POLLRDNORM))) {
+                       (req_events & (EPOLLIN | EPOLLRDNORM))) {
                int rc;
 
                mutex_lock(&itv->serialize_lock);
                if (rc) {
                        IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
                                        s->name, rc);
-                       return POLLERR;
+                       return EPOLLERR;
                }
                IVTV_DEBUG_FILE("Encoder poll started capture\n");
        }
        IVTV_DEBUG_HI_FILE("Encoder poll\n");
        poll_wait(filp, &s->waitq, wait);
        if (v4l2_event_pending(&id->fh))
-               res |= POLLPRI;
+               res |= EPOLLPRI;
        else
                poll_wait(filp, &id->fh.wait, wait);
 
        if (s->q_full.length || s->q_io.length)
-               return res | POLLIN | POLLRDNORM;
+               return res | EPOLLIN | EPOLLRDNORM;
        if (eof)
-               return res | POLLHUP;
+               return res | EPOLLHUP;
        return res;
 }
 
 
        mutex_lock(&meye.lock);
        poll_wait(file, &meye.proc_list, wait);
        if (kfifo_len(&meye.doneq))
-               res |= POLLIN | POLLRDNORM;
+               res |= EPOLLIN | EPOLLRDNORM;
        mutex_unlock(&meye.lock);
        return res;
 }
 
        saa7164_histogram_update(&port->poll_interval,
                port->last_poll_msecs_diff);
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return mask;
 
        if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
                if (atomic_inc_return(&port->v4l_reader_count) == 1) {
                        if (saa7164_encoder_initialize(port) < 0)
-                               return mask | POLLERR;
+                               return mask | EPOLLERR;
                        saa7164_encoder_start_streaming(port);
                        msleep(200);
                }
 
        /* Pull the first buffer from the used list */
        if (!list_empty(&port->list_buf_used.list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
 
        /* Pull the first buffer from the used list */
        if (!list_empty(&port->list_buf_used.list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        poll_wait(file, &av7110->video_events.wait_queue, wait);
 
        if (av7110->video_events.eventw != av7110->video_events.eventr)
-               mask = POLLPRI;
+               mask = EPOLLPRI;
 
        if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
                if (av7110->playing) {
                        if (FREE_COND)
-                               mask |= (POLLOUT | POLLWRNORM);
+                               mask |= (EPOLLOUT | EPOLLWRNORM);
                } else {
                        /* if not playing: may play if asked for */
-                       mask |= (POLLOUT | POLLWRNORM);
+                       mask |= (EPOLLOUT | EPOLLWRNORM);
                }
        }
 
 
        if (av7110->playing) {
                if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
-                       mask |= (POLLOUT | POLLWRNORM);
+                       mask |= (EPOLLOUT | EPOLLWRNORM);
        } else /* if not playing: may play if asked for */
-               mask = (POLLOUT | POLLWRNORM);
+               mask = (EPOLLOUT | EPOLLWRNORM);
 
        return mask;
 }
 
        poll_wait(file, &wbuf->queue, wait);
 
        if (!dvb_ringbuffer_empty(rbuf))
-               mask |= (POLLIN | POLLRDNORM);
+               mask |= (EPOLLIN | EPOLLRDNORM);
 
        if (dvb_ringbuffer_free(wbuf) > 1024)
-               mask |= (POLLOUT | POLLWRNORM);
+               mask |= (EPOLLOUT | EPOLLWRNORM);
 
        return mask;
 }
 
 
        /* we should check whether buffers are ready to be synced on
         * (w/o waits - O_NONBLOCK) here
-        * if ready for read (sync), return POLLIN|POLLRDNORM,
-        * if ready for write (sync), return POLLOUT|POLLWRNORM,
-        * if error, return POLLERR,
-        * if no buffers queued or so, return POLLNVAL
+        * if ready for read (sync), return EPOLLIN|EPOLLRDNORM,
+        * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM,
+        * if error, return EPOLLERR,
+        * if no buffers queued or so, return EPOLLNVAL
         */
 
        switch (fh->map_mode) {
                if (fh->buffers.active != ZORAN_FREE &&
                    /* Buffer ready to DQBUF? */
                    zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE)
-                       res |= POLLIN | POLLRDNORM;
+                       res |= EPOLLIN | EPOLLRDNORM;
                spin_unlock_irqrestore(&zr->spinlock, flags);
 
                break;
                if (fh->buffers.active != ZORAN_FREE &&
                    zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) {
                        if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC)
-                               res |= POLLIN | POLLRDNORM;
+                               res |= EPOLLIN | EPOLLRDNORM;
                        else
-                               res |= POLLOUT | POLLWRNORM;
+                               res |= EPOLLOUT | EPOLLWRNORM;
                }
                spin_unlock_irqrestore(&zr->spinlock, flags);
 
                        KERN_ERR
                        "%s: %s - internal error, unknown map_mode=%d\n",
                        ZR_DEVNAME(zr), __func__, fh->map_mode);
-               res |= POLLERR;
+               res |= EPOLLERR;
        }
 
        return res;
 
        __poll_t res = v4l2_ctrl_poll(file, wait);
 
        if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
-               return POLLERR;
+               return EPOLLERR;
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return res;
 
        mutex_lock(&dev->lock);
 
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
-               rc = POLLERR;
+               rc = EPOLLERR;
                goto end;
        }
        mutex_unlock(&dev->mfc_mutex);
        poll_wait(file, &dst_q->done_wq, wait);
        mutex_lock(&dev->mfc_mutex);
        if (v4l2_event_pending(&ctx->fh))
-               rc |= POLLPRI;
+               rc |= EPOLLPRI;
        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                                || src_vb->state == VB2_BUF_STATE_ERROR))
-               rc |= POLLOUT | POLLWRNORM;
+               rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);
        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                                || dst_vb->state == VB2_BUF_STATE_ERROR))
-               rc |= POLLIN | POLLRDNORM;
+               rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 end:
        mutex_unlock(&dev->mfc_mutex);
 
 {
        struct soc_camera_device *icd = file->private_data;
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-       __poll_t res = POLLERR;
+       __poll_t res = EPOLLERR;
 
        if (icd->streamer != file)
-               return POLLERR;
+               return EPOLLERR;
 
        mutex_lock(&ici->host_lock);
        res = ici->ops->poll(file, pt);
 
 
 __poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
 {
-       return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
+       return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait);
 }
 
 int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
 
 
 __poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
 {
-       return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
+       return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait);
 }
 
 int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
 
        __poll_t res = v4l2_ctrl_poll(file, wait);
 
        poll_wait(file, &dev->read_queue, wait);
-       if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
+       if (dev->rdsstat == 0 && (req_events & (EPOLLIN | EPOLLRDNORM))) {
                mutex_lock(&dev->lock);
                if (dev->rdsstat == 0)
                        cadet_start_rds(dev);
                mutex_unlock(&dev->lock);
        }
        if (cadet_has_rds_data(dev))
-               res |= POLLIN | POLLRDNORM;
+               res |= EPOLLIN | EPOLLRDNORM;
        return res;
 }
 
 
        __poll_t req_events = poll_requested_events(pts);
        __poll_t err = v4l2_ctrl_poll(file, pts);
 
-       if (req_events & (POLLIN | POLLRDNORM)) {
+       if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                if (atomic_read(&radio->core->is_alive))
                        poll_wait(file, &radio->core->rds_read_queue, pts);
 
                if (!atomic_read(&radio->core->is_alive))
-                       err = POLLHUP;
+                       err = EPOLLHUP;
 
                if (!kfifo_is_empty(&radio->core->rds_fifo))
-                       err = POLLIN | POLLRDNORM;
+                       err = EPOLLIN | EPOLLRDNORM;
        }
 
        return err;
 
                poll_wait(file, &radio->read_queue, pts);
 
                if (radio->rd_index != radio->wr_index)
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
 
        } else if (core->mode == WL1273_MODE_TX) {
-               return POLLOUT | POLLWRNORM;
+               return EPOLLOUT | EPOLLWRNORM;
        }
 
        return 0;
 
        __poll_t req_events = poll_requested_events(pts);
        __poll_t retval = v4l2_ctrl_poll(file, pts);
 
-       if (req_events & (POLLIN | POLLRDNORM)) {
+       if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                /* switch on rds reception */
                if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
                        si470x_rds_on(radio);
                poll_wait(file, &radio->read_queue, pts);
 
                if (radio->rd_index != radio->wr_index)
-                       retval |= POLLIN | POLLRDNORM;
+                       retval |= EPOLLIN | EPOLLRDNORM;
        }
 
        return retval;
 
        ret = fmc_is_rds_data_available(fmdev, file, pts);
        mutex_unlock(&fmdev->mutex);
        if (ret < 0)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
                if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports)
                        continue;
                if (kfifo_put(&fh->rawir, sample))
-                       wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM);
+                       wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM);
        }
        spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
 }
        spin_lock_irqsave(&dev->lirc_fh_lock, flags);
        list_for_each_entry(fh, &dev->lirc_fh, list) {
                if (kfifo_put(&fh->scancodes, *lsc))
-                       wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM);
+                       wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM);
        }
        spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
 }
        poll_wait(file, &fh->wait_poll, wait);
 
        if (!rcdev->registered) {
-               events = POLLHUP | POLLERR;
+               events = EPOLLHUP | EPOLLERR;
        } else if (rcdev->driver_type != RC_DRIVER_IR_RAW_TX) {
                if (fh->rec_mode == LIRC_MODE_SCANCODE &&
                    !kfifo_is_empty(&fh->scancodes))
-                       events = POLLIN | POLLRDNORM;
+                       events = EPOLLIN | EPOLLRDNORM;
 
                if (fh->rec_mode == LIRC_MODE_MODE2 &&
                    !kfifo_is_empty(&fh->rawir))
-                       events = POLLIN | POLLRDNORM;
+                       events = EPOLLIN | EPOLLRDNORM;
        }
 
        return events;
 
        spin_lock_irqsave(&dev->lirc_fh_lock, flags);
        list_for_each_entry(fh, &dev->lirc_fh, list)
-               wake_up_poll(&fh->wait_poll, POLLHUP | POLLERR);
+               wake_up_poll(&fh->wait_poll, EPOLLHUP | EPOLLERR);
        spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
 
        cdev_device_del(&dev->lirc_cdev, &dev->lirc_dev);
 
 {
        __poll_t status = v4l2_ctrl_poll(filp, wait);
 
-       if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
+       if ((poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM)) &&
                        !cam->streaming) {
                /* Start streaming */
                cpia2_usb_stream_start(cam,
        poll_wait(filp, &cam->wq_stream, wait);
 
        if (cam->curbuff->status == FRAME_READY)
-               status |= POLLIN | POLLRDNORM;
+               status |= EPOLLIN | EPOLLRDNORM;
 
        return status;
 }
 
        __poll_t res = 0;
 
        if (v4l2_event_pending(&fh->fh))
-               res |= POLLPRI;
+               res |= EPOLLPRI;
        else
                poll_wait(file, &fh->fh.wait, wait);
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return res;
 
        mutex_lock(&dev->lock);
 
 
        rc = check_dev(dev);
        if (rc < 0)
-               return POLLERR;
+               return EPOLLERR;
 
        rc = res_get(fh);
 
        if (unlikely(rc < 0))
-               return POLLERR;
+               return EPOLLERR;
 
        if (v4l2_event_pending(&fh->fh))
-               res |= POLLPRI;
+               res |= EPOLLPRI;
        else
                poll_wait(filp, &fh->fh.wait, wait);
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return res;
 
        if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
                mutex_unlock(&dev->lock);
                return res;
        }
-       return res | POLLERR;
+       return res | EPOLLERR;
 }
 
 /*
 
 
        gspca_dbg(gspca_dev, D_FRAM, "poll\n");
 
-       if (req_events & POLLPRI)
+       if (req_events & EPOLLPRI)
                ret |= v4l2_ctrl_poll(file, wait);
 
-       if (req_events & (POLLIN | POLLRDNORM)) {
+       if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                /* if reqbufs is not done, the user would use read() */
                if (gspca_dev->memory == GSPCA_MEMORY_NO) {
                        if (read_alloc(gspca_dev, file) != 0) {
-                               ret |= POLLERR;
+                               ret |= EPOLLERR;
                                goto out;
                        }
                }
 
                /* check if an image has been received */
                if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0) {
-                       ret |= POLLERR;
+                       ret |= EPOLLERR;
                        goto out;
                }
                if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i))
-                       ret |= POLLIN | POLLRDNORM;
+                       ret |= EPOLLIN | EPOLLRDNORM;
                mutex_unlock(&gspca_dev->queue_lock);
        }
 
 out:
        if (!gspca_dev->present)
-               ret |= POLLHUP;
+               ret |= EPOLLHUP;
 
        return ret;
 }
 
        struct hdpvr_device *dev = video_drvdata(filp);
        __poll_t mask = v4l2_ctrl_poll(filp, wait);
 
-       if (!(req_events & (POLLIN | POLLRDNORM)))
+       if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
                return mask;
 
        mutex_lock(&dev->io_mutex);
                buf = hdpvr_get_next_buffer(dev);
        }
        if (buf && buf->status == BUFSTAT_READY)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
        int ret;
 
        if (fh->fw_mode_flag) {
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }
 
        if (!fh->rhp) {
                ret = pvr2_v4l2_iosetup(fh);
-               if (ret) return POLLERR;
+               if (ret) return EPOLLERR;
        }
 
        poll_wait(file,&fh->wait_data,wait);
 
        if (pvr2_ioread_avail(fh->rhp) >= 0) {
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        return mask;
 
        poll_wait(fp, &dev->wait_frame, wait);
 
        if (!is_present(dev))
-               return POLLERR;
+               return EPOLLERR;
 
        if (!list_empty(&dev->sio_full))
-               return res | POLLIN | POLLRDNORM;
+               return res | EPOLLIN | EPOLLRDNORM;
 
        return res;
 }
 
        __poll_t res = 0;
 
        if (v4l2_event_pending(&fh->fh))
-               res = POLLPRI;
-       else if (req_events & POLLPRI)
+               res = EPOLLPRI;
+       else if (req_events & EPOLLPRI)
                poll_wait(file, &fh->fh.wait, wait);
        if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
-               return res | POLLERR;
+               return res | EPOLLERR;
 
        if (!!is_res_streaming(fh->dev, fh))
-               return res | POLLERR;
+               return res | EPOLLERR;
 
        if (!is_res_read(fh->dev, fh)) {
                /* streaming capture */
                if (list_empty(&fh->vb_vidq.stream))
-                       return res | POLLERR;
+                       return res | EPOLLERR;
                buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream);
                poll_wait(file, &buf->vb.done, wait);
                if (buf->vb.state == VIDEOBUF_DONE ||
                    buf->vb.state == VIDEOBUF_ERROR)
-                       return res | POLLIN | POLLRDNORM;
-       } else if (req_events & (POLLIN | POLLRDNORM)) {
+                       return res | EPOLLIN | EPOLLRDNORM;
+       } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                /* read() capture */
                return res | videobuf_poll_stream(file, &fh->vb_vidq, wait);
        }
 
        struct v4l2_fh *fh = file->private_data;
 
        if (v4l2_event_pending(fh))
-               return POLLPRI;
+               return EPOLLPRI;
        poll_wait(file, &fh->wait, wait);
        return 0;
 }
 
 static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
 {
        struct video_device *vdev = video_devdata(filp);
-       __poll_t res = POLLERR | POLLHUP;
+       __poll_t res = EPOLLERR | EPOLLHUP;
 
        if (!vdev->fops->poll)
                return DEFAULT_POLLMASK;
 
                struct v4l2_fh *fh = file->private_data;
 
                if (v4l2_event_pending(fh))
-                       rc = POLLPRI;
-               else if (req_events & POLLPRI)
+                       rc = EPOLLPRI;
+               else if (req_events & EPOLLPRI)
                        poll_wait(file, &fh->wait, wait);
-               if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
+               if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
                        return rc;
        }
 
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
-               rc |= POLLERR;
+               rc |= EPOLLERR;
                goto end;
        }
 
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
-                       return rc | POLLIN | POLLRDNORM;
+                       return rc | EPOLLIN | EPOLLRDNORM;
                }
 
                poll_wait(file, &dst_q->done_wq, wait);
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
-               rc |= POLLOUT | POLLWRNORM;
+               rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);
 
        spin_lock_irqsave(&dst_q->done_lock, flags);
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
-               rc |= POLLIN | POLLRDNORM;
+               rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 
 end:
 
        struct v4l2_fh *fh = file->private_data;
 
        if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
-               return POLLERR;
+               return EPOLLERR;
 
        poll_wait(file, &fh->wait, wait);
 
        if (v4l2_event_pending(fh))
-               return POLLPRI;
+               return EPOLLPRI;
 
        return 0;
 }
 
                if (!list_empty(&q->stream))
                        buf = list_entry(q->stream.next,
                                         struct videobuf_buffer, stream);
-       } else if (req_events & (POLLIN | POLLRDNORM)) {
+       } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                if (!q->reading)
                        __videobuf_read_start(q);
                if (!q->reading) {
-                       rc = POLLERR;
+                       rc = EPOLLERR;
                } else if (NULL == q->read_buf) {
                        q->read_buf = list_entry(q->stream.next,
                                                 struct videobuf_buffer,
                buf = q->read_buf;
        }
        if (!buf)
-               rc = POLLERR;
+               rc = EPOLLERR;
 
        if (0 == rc) {
                poll_wait(file, &buf->done, wait);
                        case V4L2_BUF_TYPE_VBI_OUTPUT:
                        case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
                        case V4L2_BUF_TYPE_SDR_OUTPUT:
-                               rc = POLLOUT | POLLWRNORM;
+                               rc = EPOLLOUT | EPOLLWRNORM;
                                break;
                        default:
-                               rc = POLLIN | POLLRDNORM;
+                               rc = EPOLLIN | EPOLLRDNORM;
                                break;
                        }
                }
 
        if (irq_abb < num_irqs)
                irq_count[irq_abb]++;
        /*
-        * This makes it possible to use poll for events (POLLPRI | POLLERR)
+        * This makes it possible to use poll for events (EPOLLPRI | EPOLLERR)
         * from userspace on sysfs file named <irq-nr>
         */
        sprintf(buf, "%d", irq);
 
 
        spin_lock_irqsave(&ctx->lock, flags);
        if (ctx_event_pending(ctx))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        else if (ctx->status == CLOSED)
                /* Only error on closed when there are no futher events pending
                 */
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        spin_unlock_irqrestore(&ctx->lock, flags);
 
        pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
 
        poll_wait(fp, &data->ccb_waitq, wait);
 
        if (is_channel_reset(driver_ccb))
-               return POLLERR;
+               return EPOLLERR;
        else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
 
        poll_wait(file, &lis3->misc_wait, wait);
        if (atomic_read(&lis3->count))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
        bool notify_en;
 
        if (WARN_ON(!cl || !cl->dev))
-               return POLLERR;
+               return EPOLLERR;
 
        dev = cl->dev;
 
        mutex_lock(&dev->device_lock);
 
-       notify_en = cl->notify_en && (req_events & POLLPRI);
+       notify_en = cl->notify_en && (req_events & EPOLLPRI);
 
        if (dev->dev_state != MEI_DEV_ENABLED ||
            !mei_cl_is_connected(cl)) {
-               mask = POLLERR;
+               mask = EPOLLERR;
                goto out;
        }
 
        if (notify_en) {
                poll_wait(file, &cl->ev_wait, wait);
                if (cl->notify_ev)
-                       mask |= POLLPRI;
+                       mask |= EPOLLPRI;
        }
 
-       if (req_events & (POLLIN | POLLRDNORM)) {
+       if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                poll_wait(file, &cl->rx_wait, wait);
 
                if (!list_empty(&cl->rd_completed))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                else
                        mei_cl_read_start(cl, mei_cl_mtu(cl), file);
        }
 
  *    message being sent to host SCIF. SCIF_DISCNCT message processing on the
  *    host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes
  *    up the host COSM thread blocked in scif_poll(..) resulting in
- *    scif_poll(..)  returning POLLHUP.
+ *    scif_poll(..)  returning EPOLLHUP.
  * 5. On the card, scif_peer_release_dev is next called which results in an
  *    SCIF_EXIT message being sent to the host and after receiving the
  *    SCIF_EXIT_ACK from the host the peer device teardown on the card is
  *    processing. This results in the COSM endpoint on the card being closed and
  *    the SCIF host peer device on the card getting unregistered similar to
  *    steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the
- *    host returns POLLHUP as a result.
+ *    host returns EPOLLHUP as a result.
  * 4. On the host, card peer device unregister and SCIF HW remove(..) also
  *    subsequently complete.
  *
  * ----------
  * If a reset is issued after the card has crashed, there is no SCIF_DISCNT
  * message from the card which would result in scif_poll(..) returning
- * POLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE
+ * EPOLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE
  * message to itself resulting in the card SCIF peer device being unregistered,
  * this results in a scif_peer_release_dev -> scif_cleanup_scifdev->
  * scif_invalidate_ep call sequence which sets the endpoint state to
- * DISCONNECTED and results in scif_poll(..) returning POLLHUP.
+ * DISCONNECTED and results in scif_poll(..) returning EPOLLHUP.
  */
 
 #define COSM_SCIF_BACKLOG 16
 
 /*
  * Close this cosm_device's endpoint after its peer endpoint on the card has
- * been closed. In all cases except MIC card crash POLLHUP on the host is
+ * been closed. In all cases except MIC card crash EPOLLHUP on the host is
  * triggered by the client's endpoint being closed.
  */
 static void cosm_scif_close(struct cosm_device *cdev)
 
        while (1) {
                pollepd.epd = cdev->epd;
-               pollepd.events = POLLIN;
+               pollepd.events = EPOLLIN;
 
                /* Drop the mutex before blocking in scif_poll(..) */
                mutex_unlock(&cdev->cosm_mutex);
                }
 
                /* There is a message from the card */
-               if (pollepd.revents & POLLIN)
+               if (pollepd.revents & EPOLLIN)
                        cosm_scif_recv(cdev);
 
                /* The peer endpoint is closed or this endpoint disconnected */
-               if (pollepd.revents & POLLHUP) {
+               if (pollepd.revents & EPOLLHUP) {
                        cosm_scif_close(cdev);
                        break;
                }
 
 
        while (!kthread_should_stop()) {
                pollepd.epd = client_epd;
-               pollepd.events = POLLIN;
+               pollepd.events = EPOLLIN;
 
                rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC);
                if (rc < 0) {
                        continue;
                }
 
-               if (pollepd.revents & POLLIN)
+               if (pollepd.revents & EPOLLIN)
                        cosm_client_recv();
 
                msg.id = COSM_MSG_HEARTBEAT;
 
                        if (ep->state == SCIFEP_CONNECTED ||
                            ep->state == SCIFEP_DISCONNECTED ||
                            ep->conn_err)
-                               mask |= POLLOUT;
+                               mask |= EPOLLOUT;
                        goto exit;
                }
        }
                _scif_poll_wait(f, &ep->conwq, wait, ep);
                if (ep->state == SCIFEP_LISTENING) {
                        if (ep->conreqcnt)
-                               mask |= POLLIN;
+                               mask |= EPOLLIN;
                        goto exit;
                }
        }
 
        /* Endpoint is connected or disconnected */
        if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) {
-               if (poll_requested_events(wait) & POLLIN)
+               if (poll_requested_events(wait) & EPOLLIN)
                        _scif_poll_wait(f, &ep->recvwq, wait, ep);
-               if (poll_requested_events(wait) & POLLOUT)
+               if (poll_requested_events(wait) & EPOLLOUT)
                        _scif_poll_wait(f, &ep->sendwq, wait, ep);
                if (ep->state == SCIFEP_CONNECTED ||
                    ep->state == SCIFEP_DISCONNECTED) {
                        /* Data can be read without blocking */
                        if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1))
-                               mask |= POLLIN;
+                               mask |= EPOLLIN;
                        /* Data can be written without blocking */
                        if (scif_rb_space(&ep->qp_info.qp->outbound_q))
-                               mask |= POLLOUT;
-                       /* Return POLLHUP if endpoint is disconnected */
+                               mask |= EPOLLOUT;
+                       /* Return EPOLLHUP if endpoint is disconnected */
                        if (ep->state == SCIFEP_DISCONNECTED)
-                               mask |= POLLHUP;
+                               mask |= EPOLLHUP;
                        goto exit;
                }
        }
 
-       /* Return POLLERR if the endpoint is in none of the above states */
-       mask |= POLLERR;
+       /* Return EPOLLERR if the endpoint is in none of the above states */
+       mask |= EPOLLERR;
 exit:
        spin_unlock(&ep->lock);
        return mask;
        pt = &table.pt;
        while (1) {
                for (i = 0; i < nfds; i++) {
-                       pt->_key = ufds[i].events | POLLERR | POLLHUP;
+                       pt->_key = ufds[i].events | EPOLLERR | EPOLLHUP;
                        mask = __scif_pollfd(ufds[i].epd->anon,
                                             pt, ufds[i].epd);
-                       mask &= ufds[i].events | POLLERR | POLLHUP;
+                       mask &= ufds[i].events | EPOLLERR | EPOLLHUP;
                        if (mask) {
                                count++;
                                pt->_qproc = NULL;
 
 }
 
 /*
- * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * We return EPOLLIN | EPOLLOUT from poll when new buffers are enqueued, and
  * not when previously enqueued buffers may be available. This means that
  * in the card->host (TX) path, when userspace is unblocked by poll it
  * must drain all available descriptors or it can stall.
 
        mutex_lock(&vdev->vdev_mutex);
        if (vop_vdev_inited(vdev)) {
-               mask = POLLERR;
+               mask = EPOLLERR;
                goto done;
        }
        poll_wait(f, &vdev->waitq, wait);
        if (vop_vdev_inited(vdev)) {
-               mask = POLLERR;
+               mask = EPOLLERR;
        } else if (vdev->poll_wake) {
                vdev->poll_wake = 0;
-               mask = POLLIN | POLLOUT;
+               mask = EPOLLIN | EPOLLOUT;
        }
 done:
        mutex_unlock(&vdev->vdev_mutex);
 
        mutex_unlock(&ctx->status_mutex);
 
        if (afu_events_pending(ctx))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
        else if (closed)
-               mask = POLLERR;
+               mask = EPOLLERR;
 
        return mask;
 }
 
        poll_wait(file, &dev->wait, wait);
 
        if (!(dev->status & PHB_RUNNING))
-               mask = POLLERR;
+               mask = EPOLLERR;
        else if (atomic_read(&dev->counter))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
 
        pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
 
 
                if (context->pending_datagrams > 0 ||
                    vmci_handle_arr_get_size(
                                context->pending_doorbell_array) > 0) {
-                       mask = POLLIN;
+                       mask = EPOLLIN;
                }
                spin_unlock(&context->lock);
        }
 
 
        poll_wait(filp, &priv->test.readq, ptable);
        if (!kfifo_is_empty(&priv->test.up_fifo))
-               return_flags |= (POLLIN | POLLRDNORM);
+               return_flags |= (EPOLLIN | EPOLLRDNORM);
        if (wait_event_interruptible(
                priv->test.readq,
                !kfifo_is_empty(&priv->test.up_fifo))) {
-               return POLLERR;
+               return EPOLLERR;
        }
        return return_flags;
 }
 
        if (!pf)
                return 0;
        poll_wait(file, &pf->rwait, wait);
-       mask = POLLOUT | POLLWRNORM;
+       mask = EPOLLOUT | EPOLLWRNORM;
        if (skb_peek(&pf->rq))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (pf->dead)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        else if (pf->kind == INTERFACE) {
                /* see comment in ppp_read */
                struct ppp *ppp = PF_TO_PPP(pf);
                ppp_recv_lock(ppp);
                if (ppp->n_channels == 0 &&
                    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                ppp_recv_unlock(ppp);
        }
 
 
        }
 
 wake_up:
-       wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
+       wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
        return RX_HANDLER_CONSUMED;
 
 drop:
 
        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
-               wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
+               wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 }
 
 static void tap_sock_destruct(struct sock *sk)
 static __poll_t tap_poll(struct file *file, poll_table *wait)
 {
        struct tap_queue *q = file->private_data;
-       __poll_t mask = POLLERR;
+       __poll_t mask = EPOLLERR;
 
        if (!q)
                goto out;
        poll_wait(file, &q->wq.wait, wait);
 
        if (!ptr_ring_empty(&q->ring))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
 out:
        return mask;
 
        __poll_t mask = 0;
 
        if (!tun)
-               return POLLERR;
+               return EPOLLERR;
 
        sk = tfile->socket.sk;
 
        poll_wait(file, sk_sleep(sk), wait);
 
        if (!ptr_ring_empty(&tfile->tx_ring))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (tun->dev->flags & IFF_UP &&
            (sock_writeable(sk) ||
             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
              sock_writeable(sk))))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        if (tun->dev->reg_state != NETREG_REGISTERED)
-               mask = POLLERR;
+               mask = EPOLLERR;
 
        tun_put(tun);
        return mask;
 
        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
-               wake_up_interruptible_sync_poll(wqueue, POLLOUT |
-                                               POLLWRNORM | POLLWRBAND);
+               wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
+                                               EPOLLWRNORM | EPOLLWRBAND);
 
        tfile = container_of(sk, struct tun_file, sk);
        kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
 
        poll_wait(file, &intf->frame_dump_waitqueue, wait);
 
        if (!skb_queue_empty(&intf->frame_dump_skbqueue))
-               return POLLOUT | POLLWRNORM;
+               return EPOLLOUT | EPOLLWRNORM;
 
        return 0;
 }
 
        poll_wait(filp, &stdev->event_wq, wait);
 
        if (lock_mutex_and_test_alive(stdev))
-               return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;
+               return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;
 
        mutex_unlock(&stdev->mrpc_mutex);
 
        if (try_wait_for_completion(&stuser->comp))
-               ret |= POLLIN | POLLRDNORM;
+               ret |= EPOLLIN | EPOLLRDNORM;
 
        if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
-               ret |= POLLPRI | POLLRDBAND;
+               ret |= EPOLLPRI | EPOLLRDBAND;
 
        return ret;
 }
 
        if (CIRC_CNT(debug_info->log_buffer.head,
                     debug_info->log_buffer.tail,
                     LOG_SIZE))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        mutex_unlock(&debug_info->log_mutex);
 
        return mask;
 
                return -ERESTARTSYS;
 
        if (status & PIPE_POLL_IN)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (status & PIPE_POLL_OUT)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        if (status & PIPE_POLL_HUP)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
-               mask |= POLLERR;
+               mask |= EPOLLERR;
 
        return mask;
 }
 
 {
        poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
        if (kfifo_len(&sonypi_compat.fifo))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        poll_wait(file, &pps->queue, wait);
 
-       return POLLIN | POLLRDNORM;
+       return EPOLLIN | EPOLLRDNORM;
 }
 
 static int pps_cdev_fasync(int fd, struct file *file, int on)
 
 
        poll_wait(fp, &ptp->tsev_wq, wait);
 
-       return queue_cnt(&ptp->tsevq) ? POLLIN : 0;
+       return queue_cnt(&ptp->tsevq) ? EPOLLIN : 0;
 }
 
 #define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
 
 
        poll_wait(filp, &priv->event_rx_wait, wait);
        if (kfifo_len(&priv->event_fifo))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
        poll_wait(filp, &channel->fblockread_event, wait);
 
        if (qcom_smd_get_tx_avail(channel) > 20)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        return mask;
 }
 
        __poll_t mask = 0;
 
        if (!eptdev->ept)
-               return POLLERR;
+               return EPOLLERR;
 
        poll_wait(filp, &eptdev->readq, wait);
 
        if (!skb_queue_empty(&eptdev->queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        mask |= rpmsg_poll(eptdev->ept, filp, wait);
 
 
 
        data = rtc->irq_data;
 
-       return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
+       return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0;
 }
 
 static long rtc_dev_ioctl(struct file *file,
 
        poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
        spin_lock_irqsave(&bufferlock, flags);
        if (eerb->head != eerb->tail)
-               mask = POLLIN | POLLRDNORM ;
+               mask = EPOLLIN | EPOLLRDNORM ;
        else
                mask = 0;
        spin_unlock_irqrestore(&bufferlock, flags);
 
 
        poll_wait(filp, &mon_read_wait_queue, p);
        if (unlikely(atomic_read(&monpriv->iucv_severed)))
-               return POLLERR;
+               return EPOLLERR;
        if (atomic_read(&monpriv->read_ready))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
        poll_wait(file, &megasas_poll_wait, wait);
        spin_lock_irqsave(&poll_aen_lock, flags);
        if (megasas_poll_wait_aen)
-               mask = (POLLIN | POLLRDNORM);
+               mask = (EPOLLIN | EPOLLRDNORM);
        else
                mask = 0;
        megasas_poll_wait_aen = 0;
 
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
                if (ioc->aen_event_read_flag) {
                        spin_unlock(&gioc_lock);
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
                }
        }
        spin_unlock(&gioc_lock);
 
 
        sfp = filp->private_data;
        if (!sfp)
-               return POLLERR;
+               return EPOLLERR;
        sdp = sfp->parentdp;
        if (!sdp)
-               return POLLERR;
+               return EPOLLERR;
        poll_wait(filp, &sfp->read_wait, wait);
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        list_for_each_entry(srp, &sfp->rq_list, entry) {
                /* if any read waiting, flag it */
                if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
-                       res = POLLIN | POLLRDNORM;
+                       res = EPOLLIN | EPOLLRDNORM;
                ++count;
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
        if (atomic_read(&sdp->detaching))
-               res |= POLLHUP;
+               res |= EPOLLHUP;
        else if (!sfp->cmd_q) {
                if (0 == count)
-                       res |= POLLOUT | POLLWRNORM;
+                       res |= EPOLLOUT | EPOLLWRNORM;
        } else if (count < SG_MAX_QUEUE)
-               res |= POLLOUT | POLLWRNORM;
+               res |= EPOLLOUT | EPOLLWRNORM;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_poll: res=0x%x\n", (__force u32) res));
        return res;
 
                if (s->busy != file || !comedi_is_subdevice_running(s) ||
                    (s->async->cmd.flags & CMDF_WRITE) ||
                    comedi_buf_read_n_available(s) > 0)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        s = comedi_file_write_subdevice(file);
                if (s->busy != file || !comedi_is_subdevice_running(s) ||
                    !(s->async->cmd.flags & CMDF_WRITE) ||
                    comedi_buf_write_n_available(s) >= bps)
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
 
 done:
 
                __poll_t mask;
 
                mask = f->f_op->poll(f, &table.pt);
-               if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
-                           POLLHUP | POLLERR)) {
+               if (mask & (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN |
+                           EPOLLHUP | EPOLLERR)) {
                        break;
                }
                now = ktime_get();
 
 /*
  * Threshold below which the tty is woken for writing
  * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because
- *   even if the writer is woken, n_tty_poll() won't set POLLOUT until
+ *   even if the writer is woken, n_tty_poll() won't set EPOLLOUT until
  *   our fifo is below this level
  */
 #define WAKEUP_CHARS             256
 
                        goto err;
                }
                read(t->fds[fds_idx].fd, &dummy, 1);
-               t->fds[fds_idx].events = POLLERR|POLLPRI;
+               t->fds[fds_idx].events = EPOLLERR|EPOLLPRI;
                t->fds[fds_idx].revents = 0;
                fds_idx++;
        }
                }
 
                for (i = 0; i < t->poll_count; i++) {
-                       if (t->fds[i].revents & POLLPRI) {
+                       if (t->fds[i].revents & EPOLLPRI) {
                                /* Dummy read to clear the event */
                                read(t->fds[i].fd, &dummy, 1);
                                number_of_events++;
 
 
        /* Exceptional events? */
        if (sk->sk_err)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                pr_debug("%s(), POLLHUP\n", __func__);
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        }
 
        /* Readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                pr_debug("Socket is readable\n");
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        /* Connection-based need to check for termination and startup */
        case SOCK_STREAM:
                if (sk->sk_state == TCP_CLOSE) {
                        pr_debug("%s(), POLLHUP\n", __func__);
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                }
 
                if (sk->sk_state == TCP_ESTABLISHED) {
                        if ((self->tx_flow == FLOW_START) &&
                            sock_writeable(sk))
                        {
-                               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+                               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
                        }
                }
                break;
                if ((self->tx_flow == FLOW_START) &&
                    sock_writeable(sk))
                {
-                       mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+                       mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
                }
                break;
        case SOCK_DGRAM:
                if (sock_writeable(sk))
-                       mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+                       mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
                break;
        default:
                break;
 
   DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
 
   poll_wait(file, &irnet_events.rwait, wait);
-  mask = POLLOUT | POLLWRNORM;
+  mask = EPOLLOUT | EPOLLWRNORM;
   /* If there is unread events */
   if(ap->event_index != irnet_events.index)
-    mask |= POLLIN | POLLRDNORM;
+    mask |= EPOLLIN | EPOLLRDNORM;
 #ifdef INITIAL_DISCOVERY
   if(ap->disco_number != -1)
     {
        irnet_get_discovery_log(ap);
       /* Recheck */
       if(ap->disco_number != -1)
-       mask |= POLLIN | POLLRDNORM;
+       mask |= EPOLLIN | EPOLLRDNORM;
     }
 #endif /* INITIAL_DISCOVERY */
 
   DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
         file, ap);
 
-  mask = POLLOUT | POLLWRNORM;
+  mask = EPOLLOUT | EPOLLWRNORM;
   DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n");
 
   /* If we are connected to ppp_generic, let it handle the job */
 
        rt_mutex_lock(&isp->mutex);
        if (pipe->capq.streaming != 1) {
                rt_mutex_unlock(&isp->mutex);
-               return POLLERR;
+               return EPOLLERR;
        }
        rt_mutex_unlock(&isp->mutex);
 
 
        poll_wait(file, &bdev->read_queue, pts);
 
        if (bdev->rds_data_available)
-               retval = POLLIN | POLLRDNORM;
+               retval = EPOLLIN | EPOLLRDNORM;
 
        return retval;
 }
 
 
        if (c->cfg->direction == MOST_CH_RX) {
                if (!kfifo_is_empty(&c->fifo))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        } else {
                if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
        return mask;
 }
 
        if (!data_ready(mdev))
                poll_wait(filp, &mdev->wait_data, wait);
        if (data_ready(mdev))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
        if (!synth_buffer_empty() || speakup_info.flushing)
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
        return ret;
 }
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
        if (tty_hung_up_p(file))
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        if (gsm->dead)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        return mask;
 }
 
 
 
                /* set bits for operations that won't block */
                if (!list_empty(&n_hdlc->rx_buf_list.list))
-                       mask |= POLLIN | POLLRDNORM;    /* readable */
+                       mask |= EPOLLIN | EPOLLRDNORM;  /* readable */
                if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                if (tty_hung_up_p(filp))
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                if (!tty_is_writelocked(tty) &&
                                !list_empty(&n_hdlc->tx_free_buf_list.list))
-                       mask |= POLLOUT | POLLWRNORM;   /* writable */
+                       mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
        }
        return mask;
 }      /* end of n_hdlc_tty_poll() */
 
        struct r3964_client_info *pClient;
        struct r3964_message *pMsg = NULL;
        unsigned long flags;
-       __poll_t result = POLLOUT;
+       __poll_t result = EPOLLOUT;
 
        TRACE_L("POLL");
 
                pMsg = pClient->first_msg;
                spin_unlock_irqrestore(&pInfo->lock, flags);
                if (pMsg)
-                       result |= POLLIN | POLLRDNORM;
+                       result |= EPOLLIN | EPOLLRDNORM;
        } else {
                result = -EINVAL;
        }
 
                        put_tty_queue(c, ldata);
                        smp_store_release(&ldata->canon_head, ldata->read_head);
                        kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-                       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+                       wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
                        return 0;
                }
        }
 
        if (read_cnt(ldata)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-               wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+               wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
        }
 }
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
        if (input_available_p(tty, 1))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        else {
                tty_buffer_flush_work(tty->port);
                if (input_available_p(tty, 1))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
        if (tty->packet && tty->link->ctrl_status)
-               mask |= POLLPRI | POLLIN | POLLRDNORM;
+               mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM;
        if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (tty_hung_up_p(file))
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (tty->ops->write && !tty_is_writelocked(tty) &&
                        tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
                        tty_write_room(tty) > 0)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 }
 
 
                tty->ctrl_status &= ~TIOCPKT_STOP;
                tty->ctrl_status |= TIOCPKT_START;
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-               wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+               wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
        }
 }
 
                tty->ctrl_status &= ~TIOCPKT_START;
                tty->ctrl_status |= TIOCPKT_STOP;
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-               wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+               wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
        }
 }
 
 
 /* No kernel lock held - none needed ;) */
 static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
 {
-       return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+       return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM;
 }
 
 static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
                        tty_ldisc_deref(ld);
                }
        }
-       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+       wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
 }
 
 EXPORT_SYMBOL_GPL(tty_wakeup);
 static void tty_write_unlock(struct tty_struct *tty)
 {
        mutex_unlock(&tty->atomic_write_lock);
-       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+       wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
 }
 
 static int tty_write_lock(struct tty_struct *tty, int ndelay)
 
                if (tty->count <= 1) {
                        if (waitqueue_active(&tty->read_wait)) {
-                               wake_up_poll(&tty->read_wait, POLLIN);
+                               wake_up_poll(&tty->read_wait, EPOLLIN);
                                do_sleep++;
                        }
                        if (waitqueue_active(&tty->write_wait)) {
-                               wake_up_poll(&tty->write_wait, POLLOUT);
+                               wake_up_poll(&tty->write_wait, EPOLLOUT);
                                do_sleep++;
                        }
                }
                if (o_tty && o_tty->count <= 1) {
                        if (waitqueue_active(&o_tty->read_wait)) {
-                               wake_up_poll(&o_tty->read_wait, POLLIN);
+                               wake_up_poll(&o_tty->read_wait, EPOLLIN);
                                do_sleep++;
                        }
                        if (waitqueue_active(&o_tty->write_wait)) {
-                               wake_up_poll(&o_tty->write_wait, POLLOUT);
+                               wake_up_poll(&o_tty->write_wait, EPOLLOUT);
                                do_sleep++;
                        }
                }
 
                tty_ldisc_deref(ld);
        }
 
-       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
-       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+       wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+       wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
 
        /*
         * Shutdown the current line discipline, and reset it to
 
 vcs_poll(struct file *file, poll_table *wait)
 {
        struct vcs_poll_data *poll = vcs_poll_data_get(file);
-       __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+       __poll_t ret = DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
 
        if (poll) {
                poll_wait(file, &poll->waitq, wait);
 
 
        poll_wait(filep, &idev->wait, wait);
        if (listener->event_count != atomic_read(&idev->event))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        spin_lock_irqsave(&desc->iuspin, flags);
        if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
-               mask = POLLHUP | POLLERR;
+               mask = EPOLLHUP | EPOLLERR;
                spin_unlock_irqrestore(&desc->iuspin, flags);
                goto desc_out;
        }
        if (test_bit(WDM_READ, &desc->flags))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
        if (desc->rerr || desc->werr)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        if (!test_bit(WDM_IN_USE, &desc->flags))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&desc->iuspin, flags);
 
        poll_wait(file, &desc->wait, wait);
 
        poll_wait(file, &usblp->rwait, wait);
        poll_wait(file, &usblp->wwait, wait);
        spin_lock_irqsave(&usblp->lock, flags);
-       ret = ((usblp->bidir && usblp->rcomplete) ? POLLIN  | POLLRDNORM : 0) |
-          ((usblp->no_paper || usblp->wcomplete) ? POLLOUT | POLLWRNORM : 0);
+       ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN  | EPOLLRDNORM : 0) |
+          ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0);
        spin_unlock_irqrestore(&usblp->lock, flags);
        return ret;
 }
 
        mutex_lock(&data->io_mutex);
 
        if (data->zombie) {
-               mask = POLLHUP | POLLERR;
+               mask = EPOLLHUP | EPOLLERR;
                goto no_poll;
        }
 
        poll_wait(file, &data->waitq, wait);
 
-       mask = (atomic_read(&data->srq_asserted)) ? POLLIN | POLLRDNORM : 0;
+       mask = (atomic_read(&data->srq_asserted)) ? EPOLLIN | EPOLLRDNORM : 0;
 
 no_poll:
        mutex_unlock(&data->io_mutex);
 
        event_count = atomic_read(&device_event.count);
        if (file->f_version != event_count) {
                file->f_version = event_count;
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        }
 
        return 0;
 
 
        poll_wait(file, &ps->wait, wait);
        if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        if (!connected(ps))
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (list_empty(&ps->list))
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        return mask;
 }
 
 
 static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
 {
        struct ffs_data *ffs = file->private_data;
-       __poll_t mask = POLLWRNORM;
+       __poll_t mask = EPOLLWRNORM;
        int ret;
 
        poll_wait(file, &ffs->ev.waitq, wait);
        switch (ffs->state) {
        case FFS_READ_DESCRIPTORS:
        case FFS_READ_STRINGS:
-               mask |= POLLOUT;
+               mask |= EPOLLOUT;
                break;
 
        case FFS_ACTIVE:
                switch (ffs->setup_state) {
                case FFS_NO_SETUP:
                        if (ffs->ev.count)
-                               mask |= POLLIN;
+                               mask |= EPOLLIN;
                        break;
 
                case FFS_SETUP_PENDING:
                case FFS_SETUP_CANCELLED:
-                       mask |= (POLLIN | POLLOUT);
+                       mask |= (EPOLLIN | EPOLLOUT);
                        break;
                }
        case FFS_CLOSING:
 
        poll_wait(file, &hidg->write_queue, wait);
 
        if (WRITE_COND)
-               ret |= POLLOUT | POLLWRNORM;
+               ret |= EPOLLOUT | EPOLLWRNORM;
 
        if (READ_COND)
-               ret |= POLLIN | POLLRDNORM;
+               ret |= EPOLLIN | EPOLLRDNORM;
 
        return ret;
 }
 
 
        spin_lock_irqsave(&dev->lock, flags);
        if (likely(!list_empty(&dev->tx_reqs)))
-               status |= POLLOUT | POLLWRNORM;
+               status |= EPOLLOUT | EPOLLWRNORM;
 
        if (likely(dev->current_rx_bytes) ||
                        likely(!list_empty(&dev->rx_buffers)))
-               status |= POLLIN | POLLRDNORM;
+               status |= EPOLLIN | EPOLLRDNORM;
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
 
        /* report fd mode change before acting on it */
        if (dev->setup_abort) {
                dev->setup_abort = 0;
-               mask = POLLHUP;
+               mask = EPOLLHUP;
                goto out;
        }
 
        if (dev->state == STATE_DEV_SETUP) {
                if (dev->setup_in || dev->setup_can_stall)
-                       mask = POLLOUT;
+                       mask = EPOLLOUT;
        } else {
                if (dev->ev_next != 0)
-                       mask = POLLIN;
+                       mask = EPOLLIN;
        }
 out:
        spin_unlock_irq(&dev->lock);
 
        __poll_t mask = 0;
 
        if (!dev->present)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
        poll_wait(file, &dev->write_wait, wait);
 
        if (!dev->present)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        if (read_index(dev) != -1)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 }
 
 
        dev = file->private_data;
 
        if (!dev->intf)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
        poll_wait(file, &dev->write_wait, wait);
 
        if (dev->ring_head != dev->ring_tail)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (!dev->interrupt_out_busy)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        return mask;
 }
 
        dev = file->private_data;
 
        if (!dev->udev)
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
        poll_wait(file, &dev->write_wait, wait);
 
        tower_check_for_read_packet(dev);
        if (dev->read_packet_length > 0) {
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        }
        if (!dev->interrupt_out_busy) {
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        }
 
        return mask;
 
 
        spin_lock_irqsave(&rp->b_lock, flags);
        if (!MON_RING_EMPTY(rp))
-               mask |= POLLIN | POLLRDNORM;    /* readable */
+               mask |= EPOLLIN | EPOLLRDNORM;    /* readable */
        spin_unlock_irqrestore(&rp->b_lock, flags);
        return mask;
 }
 
        struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
        __poll_t flags = key_to_poll(key);
 
-       if (flags & POLLIN) {
+       if (flags & EPOLLIN) {
                /* An event has been signaled, call function */
                if ((!virqfd->handler ||
                     virqfd->handler(virqfd->opaque, virqfd->data)) &&
                        schedule_work(&virqfd->inject);
        }
 
-       if (flags & POLLHUP) {
+       if (flags & EPOLLHUP) {
                unsigned long flags;
                spin_lock_irqsave(&virqfd_lock, flags);
 
         * Check if there was an event already pending on the eventfd
         * before we registered and trigger it as if we didn't miss it.
         */
-       if (events & POLLIN) {
+       if (events & EPOLLIN) {
                if ((!handler || handler(opaque, data)) && thread)
                        schedule_work(&virqfd->inject);
        }
 
        /*
         * Do not drop the file until the irqfd is fully initialized,
-        * otherwise we might race against the POLLHUP.
+        * otherwise we might race against the EPOLLHUP.
         */
        fdput(irqfd);
 
 
        }
        vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 
-       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
-       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
+       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
+       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
 
        f->private_data = n;
 
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
-       if (mask & POLLERR) {
+       if (mask & EPOLLERR) {
                if (poll->wqh)
                        remove_wait_queue(poll->wqh, &poll->wait);
                ret = -EINVAL;
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
-                                       POLLIN, dev);
+                                       EPOLLIN, dev);
        }
 }
 EXPORT_SYMBOL_GPL(vhost_dev_init);
        vhost_umem_clean(dev->iotlb);
        dev->iotlb = NULL;
        vhost_clear_msg(dev);
-       wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+       wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
        poll_wait(file, &dev->wait, wait);
 
        if (!list_empty(&dev->read_list))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
        list_add_tail(&node->node, head);
        spin_unlock(&dev->iotlb_lock);
 
-       wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+       wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
 
 
        spin_lock_irqsave(&dbq->lock, flags);
 
        poll_wait(filp, &dbq->wait, p);
-       mask = (dbq->head == dbq->tail) ? 0 : (POLLIN | POLLRDNORM);
+       mask = (dbq->head == dbq->tail) ? 0 : (EPOLLIN | EPOLLRDNORM);
 
        spin_unlock_irqrestore(&dbq->lock, flags);
 
 
 
 static __poll_t evtchn_poll(struct file *file, poll_table *wait)
 {
-       __poll_t mask = POLLOUT | POLLWRNORM;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM;
        struct per_user_data *u = file->private_data;
 
        poll_wait(file, &u->evtchn_wait, wait);
        if (u->ring_cons != u->ring_prod)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (u->ring_overflow)
-               mask = POLLERR;
+               mask = EPOLLERR;
        return mask;
 }
 
 
        poll_wait(file, &xen_mce_chrdev_wait, wait);
 
        if (xen_mcelog.next)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
 
                if (req_id != PVCALLS_INVALID_ID &&
                    READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
 
                poll_wait(file, &map->passive.inflight_accept_req, wait);
                return 0;
 
        if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
                               (void *)&map->passive.flags))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        /*
         * First check RET, then INFLIGHT. No barriers necessary to
 
        poll_wait(file, &map->active.inflight_conn_req, wait);
        if (pvcalls_front_write_todo(map))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        if (pvcalls_front_read_todo(map))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (in_error != 0 || out_error != 0)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
 
        return mask;
 }
        pvcalls_enter();
        if (!pvcalls_front_dev) {
                pvcalls_exit();
-               return POLLNVAL;
+               return EPOLLNVAL;
        }
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
        map = (struct sock_mapping *) sock->sk->sk_send_head;
        if (!map) {
                pvcalls_exit();
-               return POLLNVAL;
+               return EPOLLNVAL;
        }
        if (map->active_socket)
                ret = pvcalls_front_poll_active(file, bedata, map, wait);
 
 
        poll_wait(file, &u->read_waitq, wait);
        if (!list_empty(&u->read_buffers))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
 /*
  * poll for culling state
- * - use POLLOUT to indicate culling state
+ * - use EPOLLOUT to indicate culling state
  */
 static __poll_t cachefiles_daemon_poll(struct file *file,
                                           struct poll_table_struct *poll)
        mask = 0;
 
        if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        if (test_bit(CACHEFILES_CULLING, &cache->flags))
-               mask |= POLLOUT;
+               mask |= EPOLLOUT;
 
        return mask;
 }
 
 static __poll_t coda_psdev_poll(struct file *file, poll_table * wait)
 {
         struct venus_comm *vcp = (struct venus_comm *) file->private_data;
-       __poll_t mask = POLLOUT | POLLWRNORM;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM;
 
        poll_wait(file, &vcp->vc_waitq, wait);
        mutex_lock(&vcp->vc_mutex);
        if (!list_empty(&vcp->vc_pending))
-                mask |= POLLIN | POLLRDNORM;
+                mask |= EPOLLIN | EPOLLRDNORM;
        mutex_unlock(&vcp->vc_mutex);
 
        return mask;
 
        const struct file_operations *real_fops;
 
        if (debugfs_file_get(dentry))
-               return POLLHUP;
+               return EPOLLHUP;
 
        real_fops = debugfs_real_fops(filp);
        r = real_fops->poll(filp, wait);
 
 
        spin_lock(&ops_lock);
        if (!list_empty(&send_list))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&ops_lock);
 
        return mask;
 
        spin_lock(&proc->asts_spin);
        if (!list_empty(&proc->asts)) {
                spin_unlock(&proc->asts_spin);
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        }
        spin_unlock(&proc->asts_spin);
        return 0;
 
        poll_wait(file, &daemon->wait, pt);
        mutex_lock(&daemon->mux);
        if (!list_empty(&daemon->msg_ctx_out_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 out_unlock_daemon:
        daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
        mutex_unlock(&daemon->mux);
 
  *
  * This function is supposed to be called by the kernel in paths that do not
  * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a POLLERR
+ * value, and we signal this as overflow condition by returning an EPOLLERR
  * to poll(2).
  *
  * Returns the amount by which the counter was incremented.  This will be less
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
-               wake_up_locked_poll(&ctx->wqh, POLLIN);
+               wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
 {
        struct eventfd_ctx *ctx = file->private_data;
 
-       wake_up_poll(&ctx->wqh, POLLHUP);
+       wake_up_poll(&ctx->wqh, EPOLLHUP);
        eventfd_ctx_put(ctx);
        return 0;
 }
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= POLLIN;
+               events |= EPOLLIN;
        if (count == ULLONG_MAX)
-               events |= POLLERR;
+               events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= POLLOUT;
+               events |= EPOLLOUT;
 
        return events;
 }
        eventfd_ctx_do_read(ctx, cnt);
        __remove_wait_queue(&ctx->wqh, wait);
        if (*cnt != 0 && waitqueue_active(&ctx->wqh))
-               wake_up_locked_poll(&ctx->wqh, POLLOUT);
+               wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return *cnt != 0 ? 0 : -EAGAIN;
        if (likely(res > 0)) {
                eventfd_ctx_do_read(ctx, &ucnt);
                if (waitqueue_active(&ctx->wqh))
-                       wake_up_locked_poll(&ctx->wqh, POLLOUT);
+                       wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        }
        spin_unlock_irq(&ctx->wqh.lock);
 
        if (likely(res > 0)) {
                ctx->count += ucnt;
                if (waitqueue_active(&ctx->wqh))
-                       wake_up_locked_poll(&ctx->wqh, POLLIN);
+                       wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        }
        spin_unlock_irq(&ctx->wqh.lock);
 
 
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
-#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
 
-#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                                EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
 
 /* Maximum number of nesting allowed inside epoll sets */
        wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
 
        spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
-       wake_up_locked_poll(wqueue, POLLIN);
+       wake_up_locked_poll(wqueue, EPOLLIN);
        spin_unlock_irqrestore(&wqueue->lock, flags);
 
        return 0;
 
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
-       wake_up_poll(wq, POLLIN);
+       wake_up_poll(wq, EPOLLIN);
 }
 
 #endif
 
        list_for_each_entry_safe(epi, tmp, head, rdllink) {
                if (ep_item_poll(epi, &pt, depth)) {
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
                } else {
                        /*
                         * Item has been dropped into the ready list by the poll
                if ((epi->event.events & EPOLLEXCLUSIVE) &&
                                        !(pollflags & POLLFREE)) {
                        switch (pollflags & EPOLLINOUT_BITS) {
-                       case POLLIN:
-                               if (epi->event.events & POLLIN)
+                       case EPOLLIN:
+                               if (epi->event.events & EPOLLIN)
                                        ewake = 1;
                                break;
-                       case POLLOUT:
-                               if (epi->event.events & POLLOUT)
+                       case EPOLLOUT:
+                               if (epi->event.events & EPOLLOUT)
                                        ewake = 1;
                                break;
                        case 0:
        switch (op) {
        case EPOLL_CTL_ADD:
                if (!epi) {
-                       epds.events |= POLLERR | POLLHUP;
+                       epds.events |= EPOLLERR | EPOLLHUP;
                        error = ep_insert(ep, &epds, tf.file, fd, full_check);
                } else
                        error = -EEXIST;
        case EPOLL_CTL_MOD:
                if (epi) {
                        if (!(epi->event.events & EPOLLEXCLUSIVE)) {
-                               epds.events |= POLLERR | POLLHUP;
+                               epds.events |= EPOLLERR | EPOLLHUP;
                                error = ep_modify(ep, epi, &epds);
                        }
                } else
 
 /* Table to convert sigio signal codes into poll band bitmaps */
 
 static const __poll_t band_table[NSIGPOLL] = {
-       POLLIN | POLLRDNORM,                    /* POLL_IN */
-       POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
-       POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
-       POLLERR,                                /* POLL_ERR */
-       POLLPRI | POLLRDBAND,                   /* POLL_PRI */
-       POLLHUP | POLLERR                       /* POLL_HUP */
+       EPOLLIN | EPOLLRDNORM,                  /* POLL_IN */
+       EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,   /* POLL_OUT */
+       EPOLLIN | EPOLLRDNORM | EPOLLMSG,               /* POLL_MSG */
+       EPOLLERR,                               /* POLL_ERR */
+       EPOLLPRI | EPOLLRDBAND,                 /* POLL_PRI */
+       EPOLLHUP | EPOLLERR                     /* POLL_HUP */
 };
 
 static inline int sigio_perm(struct task_struct *p,
 
 
 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
 {
-       __poll_t mask = POLLOUT | POLLWRNORM;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM;
        struct fuse_iqueue *fiq;
        struct fuse_dev *fud = fuse_get_dev(file);
 
        if (!fud)
-               return POLLERR;
+               return EPOLLERR;
 
        fiq = &fud->fc->iq;
        poll_wait(file, &fiq->waitq, wait);
 
        spin_lock(&fiq->waitq.lock);
        if (!fiq->connected)
-               mask = POLLERR;
+               mask = EPOLLERR;
        else if (request_pending(fiq))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        spin_unlock(&fiq->waitq.lock);
 
        return mask;
 
                fc->no_poll = 1;
                return DEFAULT_POLLMASK;
        }
-       return POLLERR;
+       return EPOLLERR;
 }
 EXPORT_SYMBOL_GPL(fuse_file_poll);
 
 
  * the content and then you use 'poll' or 'select' to wait for
  * the content to change.  When the content changes (assuming the
  * manager for the kobject supports notification), poll will
- * return POLLERR|POLLPRI, and select will return the fd whether
+ * return EPOLLERR|EPOLLPRI, and select will return the fd whether
  * it is waiting for read, write, or exceptions.
  * Once poll/select indicates that the value has changed, you
  * need to close and re-open the file, or seek to 0 and read again.
        return DEFAULT_POLLMASK;
 
  trigger:
-       return DEFAULT_POLLMASK|POLLERR|POLLPRI;
+       return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
 }
 
 static void kernfs_notify_workfn(struct work_struct *work)
 
        poll_wait(file, &group->notification_waitq, wait);
        spin_lock(&group->notification_lock);
        if (!fsnotify_notify_queue_is_empty(group))
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&group->notification_lock);
 
        return ret;
 
        poll_wait(file, &group->notification_waitq, wait);
        spin_lock(&group->notification_lock);
        if (!fsnotify_notify_queue_is_empty(group))
-               ret = POLLIN | POLLRDNORM;
+               ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&group->notification_lock);
 
        return ret;
 
  * Over time, dlmfs has added some features that were not part of the
  * initial ABI.  Unfortunately, some of these features are not detectable
  * via standard usage.  For example, Linux's default poll always returns
- * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs
  * added poll support.  Instead, we provide this list of new capabilities.
  *
  * Capabilities is a read-only attribute.  We do it as a module parameter
  * interaction.
  *
  * Capabilities:
- * - bast      : POLLIN against the file descriptor of a held lock
+ * - bast      : EPOLLIN against the file descriptor of a held lock
  *               signifies a bast fired on the lock.
  */
 #define DLMFS_CAPABILITIES "bast stackglue"
 
        spin_lock(&ip->ip_lockres.l_lock);
        if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
-               event = POLLIN | POLLRDNORM;
+               event = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&ip->ip_lockres.l_lock);
 
        return event;
 
        poll_wait(file, &orangefs_request_list_waitq, poll_table);
 
        if (!list_empty(&orangefs_request_list))
-               poll_revent_mask |= POLLIN;
+               poll_revent_mask |= EPOLLIN;
        return poll_revent_mask;
 }
 
 
                        break;
                }
                if (do_wakeup) {
-                       wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
+                       wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
                pipe_wait(pipe);
 
        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
-               wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
+               wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
                        break;
                }
                if (do_wakeup) {
-                       wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+                       wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
 out:
        __pipe_unlock(pipe);
        if (do_wakeup) {
-               wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+               wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
        nrbufs = pipe->nrbufs;
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
-               mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+               mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
        }
 
        if (filp->f_mode & FMODE_WRITE) {
-               mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
+               mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
                /*
-                * Most Unices do not set POLLERR for FIFOs but on Linux they
+                * Most Unices do not set EPOLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
-                       mask |= POLLERR;
+                       mask |= EPOLLERR;
        }
 
        return mask;
                pipe->writers--;
 
        if (pipe->readers || pipe->writers) {
-               wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+               wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
 
                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
-                               /* suppress POLLHUP until we have
+                               /* suppress EPOLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
 
 {
        poll_wait(file, &log_wait, wait);
        if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        /* sysctl was unregistered */
        if (IS_ERR(head))
-               return POLLERR | POLLHUP;
+               return EPOLLERR | EPOLLHUP;
 
        if (!table->proc_handler)
                goto out;
 
        if (event != atomic_read(&table->poll->event)) {
                filp->private_data = proc_sys_poll_event(table->poll);
-               ret = POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+               ret = EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
        }
 
 out:
 
        struct seq_file *m = file->private_data;
        struct proc_mounts *p = m->private;
        struct mnt_namespace *ns = p->ns;
-       __poll_t res = POLLIN | POLLRDNORM;
+       __poll_t res = EPOLLIN | EPOLLRDNORM;
        int event;
 
        poll_wait(file, &p->ns->poll, wait);
        event = READ_ONCE(ns->event);
        if (m->poll_event != event) {
                m->poll_event = event;
-               res |= POLLERR | POLLPRI;
+               res |= EPOLLERR | EPOLLPRI;
        }
 
        return res;
 
        return max;
 }
 
-#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
-#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
-#define POLLEX_SET (POLLPRI)
+#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
+#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
+#define POLLEX_SET (EPOLLPRI)
 
 static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
        fd = pollfd->fd;
        if (fd >= 0) {
                struct fd f = fdget(fd);
-               mask = POLLNVAL;
+               mask = EPOLLNVAL;
                if (f.file) {
                        /* userland u16 ->events contains POLL... bitmap */
                        __poll_t filter = demangle_poll(pollfd->events) |
-                                               POLLERR | POLLHUP;
+                                               EPOLLERR | EPOLLHUP;
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op->poll) {
                                pwait->_key = filter;
 
                return;
 
        /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-       wake_up_poll(wqh, POLLHUP | POLLFREE);
+       wake_up_poll(wqh, EPOLLHUP | POLLFREE);
 }
 
 struct signalfd_ctx {
        if (next_signal(&current->pending, &ctx->sigmask) ||
            next_signal(&current->signal->shared_pending,
                        &ctx->sigmask))
-               events |= POLLIN;
+               events |= EPOLLIN;
        spin_unlock_irq(&current->sighand->siglock);
 
        return events;
 
 
        spin_lock_irqsave(&ctx->wqh.lock, flags);
        if (ctx->ticks)
-               events |= POLLIN;
+               events |= EPOLLIN;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return events;
 
        if (likely(must_wait && !READ_ONCE(ctx->released) &&
                   (return_to_userland ? !signal_pending(current) :
                    !fatal_signal_pending(current)))) {
-               wake_up_poll(&ctx->fd_wqh, POLLIN);
+               wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
 
 
                spin_unlock(&ctx->event_wqh.lock);
 
-               wake_up_poll(&ctx->fd_wqh, POLLIN);
+               wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
 
                spin_lock(&ctx->event_wqh.lock);
        /* Flush pending events that may still wait on event_wqh */
        wake_up_all(&ctx->event_wqh);
 
-       wake_up_poll(&ctx->fd_wqh, POLLHUP);
+       wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
        userfaultfd_ctx_put(ctx);
        return 0;
 }
 
        switch (ctx->state) {
        case UFFD_STATE_WAIT_API:
-               return POLLERR;
+               return EPOLLERR;
        case UFFD_STATE_RUNNING:
                /*
                 * poll() never guarantees that read won't block.
                 * userfaults can be waken before they're read().
                 */
                if (unlikely(!(file->f_flags & O_NONBLOCK)))
-                       return POLLERR;
+                       return EPOLLERR;
                /*
                 * lockless access to see if there are pending faults
                 * __pollwait last action is the add_wait_queue but
                ret = 0;
                smp_mb();
                if (waitqueue_active(&ctx->fault_pending_wqh))
-                       ret = POLLIN;
+                       ret = EPOLLIN;
                else if (waitqueue_active(&ctx->event_wqh))
-                       ret = POLLIN;
+                       ret = EPOLLIN;
 
                return ret;
        default:
                WARN_ON_ONCE(1);
-               return POLLERR;
+               return EPOLLERR;
        }
 }
 
 
  * events is a bitmask specifying the events which the application is
  * interested in. The field revents is an output parameter, filled by the
  * kernel with the events that actually occurred. The bits returned in revents
- * can include any of those specified in events, or one of the values POLLERR,
- * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events
+ * can include any of those specified in events, or one of the values EPOLLERR,
+ * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events
  * field, and will be set in the revents field whenever the corresponding
  * condition is true.)
  *
  * timeout means an infinite timeout.
  *
  * The following bits may be set in events and returned in revents.
- * POLLIN - Data may be received without blocking. For a connected
+ * EPOLLIN - Data may be received without blocking. For a connected
  * endpoint, this means that scif_recv() may be called without blocking. For a
  * listening endpoint, this means that scif_accept() may be called without
  * blocking.
- * POLLOUT - Data may be sent without blocking. For a connected endpoint, this
- * means that scif_send() may be called without blocking. POLLOUT may also be
+ * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this
+ * means that scif_send() may be called without blocking. EPOLLOUT may also be
  * used to block waiting for a non-blocking connect to complete. This bit value
  * has no meaning for a listening endpoint and is ignored if specified.
  *
  * The following bits are only returned in revents, and are ignored if set in
  * events.
- * POLLERR - An error occurred on the endpoint
- * POLLHUP - The connection to the peer endpoint was disconnected
- * POLLNVAL - The specified endpoint descriptor is invalid.
+ * EPOLLERR - An error occurred on the endpoint
+ * EPOLLHUP - The connection to the peer endpoint was disconnected
+ * EPOLLNVAL - The specified endpoint descriptor is invalid.
  *
  * Return:
  * Upon successful completion, scif_poll() returns a non-negative value. A
 
  * @fileio_read_once:          report EOF after reading the first buffer
  * @fileio_write_immediately:  queue buffer after each write() call
  * @allow_zero_bytesused:      allow bytesused == 0 to be passed to the driver
- * @quirk_poll_must_check_waiting_for_buffers: Return %POLLERR at poll when QBUF
+ * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF
  *              has not been called. This is a vb1 idiom that has been adopted
  *              also by vb2.
  * @lock:      pointer to a mutex that protects the &struct vb2_queue. The
  * @error:     a fatal error occurred on the queue
  * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
  *             buffers. Only set for capture queues if qbuf has not yet been
- *             called since poll() needs to return %POLLERR in that situation.
+ *             called since poll() needs to return %EPOLLERR in that situation.
  * @is_multiplanar: set if buffer type is multiplanar
  * @is_output: set if buffer type is output
  * @copy_timestamp: set if vb2-core should set timestamps
  * @q:         pointer to &struct vb2_queue with videobuf2 queue.
  *
  * Flag that a fatal unrecoverable error has occurred and wake up all processes
- * waiting on the queue. Polling will now set %POLLERR and queuing and dequeuing
+ * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing
  * buffers will return %-EIO.
  *
  * The error flag will be cleared when canceling the queue, either from
 
 static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
 {
        return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
-                       (POLLIN | POLLRDNORM) : 0;
+                       (EPOLLIN | EPOLLRDNORM) : 0;
 }
 
 int inet_csk_listen_start(struct sock *sk, int backlog);
 
 
        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
-               retval = POLLIN | POLLRDNORM;
+               retval = EPOLLIN | EPOLLRDNORM;
 
        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
-               retval |= POLLOUT | POLLWRNORM;
+               retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);
 
        return retval;
 
 {
        struct perf_event *event = file->private_data;
        struct ring_buffer *rb;
-       __poll_t events = POLLHUP;
+       __poll_t events = EPOLLHUP;
 
        poll_wait(file, &event->waitq, wait);
 
 
 
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
-       atomic_set(&handle->rb->poll, POLLIN);
+       atomic_set(&handle->rb->poll, EPOLLIN);
 
        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
 
        __poll_t ret = 0;
 
        if (!user)
-               return POLLERR|POLLNVAL;
+               return EPOLLERR|EPOLLNVAL;
 
        poll_wait(file, &log_wait, wait);
 
        if (user->seq < log_next_seq) {
                /* return error when data has vanished underneath us */
                if (user->seq < log_first_seq)
-                       ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
+                       ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
                else
-                       ret = POLLIN|POLLRDNORM;
+                       ret = EPOLLIN|EPOLLRDNORM;
        }
        logbuf_unlock_irq();
 
 
        struct rchan_buf *buf = filp->private_data;
 
        if (buf->finalized)
-               return POLLERR;
+               return EPOLLERR;
 
        if (filp->f_mode & FMODE_READ) {
                poll_wait(filp, &buf->read_wait, wait);
                if (!relay_buf_empty(buf))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        return mask;
 
        __poll_t result = 0;
 
        if (!clk)
-               return POLLERR;
+               return EPOLLERR;
 
        if (clk->ops.poll)
                result = clk->ops.poll(clk, fp, wait);
 
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  *
- * Returns POLLIN | POLLRDNORM if data exists in the buffers,
+ * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 
        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
 
 
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        if (tr->trace_flags & TRACE_ITER_BLOCK)
                /*
                 * Always select as readable when in blocking mode
                 */
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        else
                return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                             filp, poll_table);
 
 }
 
 /*
- * Gets called on POLLHUP on eventfd when user closes it.
+ * Gets called on EPOLLHUP on eventfd when user closes it.
  *
  * Called with wqh->lock held and interrupts disabled.
  */
        struct mem_cgroup *memcg = event->memcg;
        __poll_t flags = key_to_poll(key);
 
-       if (flags & POLLHUP) {
+       if (flags & EPOLLHUP) {
                /*
                 * If the event has been detached at cgroup removal, we
                 * can simply return knowing the other side will cleanup
 
 
        if (seq->poll_event != atomic_read(&proc_poll_event)) {
                seq->poll_event = atomic_read(&proc_poll_event);
-               return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+               return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
        }
 
-       return POLLIN | POLLRDNORM;
+       return EPOLLIN | EPOLLRDNORM;
 }
 
 /* iterator */
 
        if (!ts) {
                if (err)
                        *err = -EREMOTEIO;
-               return POLLERR;
+               return EPOLLERR;
        }
 
        if (!ts->rd->f_op->poll)
                        n = DEFAULT_POLLMASK;
                else
                        n = ts->wr->f_op->poll(ts->wr, pt);
-               ret = (ret & ~POLLOUT) | (n & ~POLLIN);
+               ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN);
        }
 
        return ret;
 
        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
-                       n = POLLIN;
+                       n = EPOLLIN;
                else
                        n = p9_fd_poll(m->client, NULL, NULL);
 
-               if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
+               if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                }
 
        if (m->wsize || !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
-                       n = POLLOUT;
+                       n = EPOLLOUT;
                else
                        n = p9_fd_poll(m->client, NULL, NULL);
 
-               if ((n & POLLOUT) &&
+               if ((n & EPOLLOUT) &&
                   !test_and_set_bit(Wworksched, &m->wsched)) {
                        p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
        init_poll_funcptr(&m->pt, p9_pollwait);
 
        n = p9_fd_poll(client, &m->pt, NULL);
-       if (n & POLLIN) {
+       if (n & EPOLLIN) {
                p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }
 
-       if (n & POLLOUT) {
+       if (n & EPOLLOUT) {
                p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }
                return;
 
        n = p9_fd_poll(m->client, NULL, &err);
-       if (n & (POLLERR | POLLHUP | POLLNVAL)) {
+       if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
                p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
                p9_conn_cancel(m, err);
        }
 
-       if (n & POLLIN) {
+       if (n & EPOLLIN) {
                set_bit(Rpending, &m->wsched);
                p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                }
        }
 
-       if (n & POLLOUT) {
+       if (n & EPOLLOUT) {
                set_bit(Wpending, &m->wsched);
                p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
        spin_unlock(&client->lock);
 
        if (test_and_clear_bit(Wpending, &m->wsched))
-               n = POLLOUT;
+               n = EPOLLOUT;
        else
                n = p9_fd_poll(m->client, NULL, NULL);
 
-       if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
+       if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                schedule_work(&m->wq);
 
        return 0;
 
 
        /* exceptional events */
        if (sk->sk_err)
-               mask = POLLERR;
+               mask = EPOLLERR;
 
        if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
            test_bit(ATM_VF_CLOSE, &vcc->flags))
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* writable? */
        if (sock->state == SS_CONNECTING &&
 
        if (vcc->qos.txtp.traffic_class != ATM_NONE &&
            vcc_writable(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
 }
 
        poll_wait(file, &socket_client->queue_wait, wait);
 
        if (socket_client->queue_len > 0)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
        poll_wait(file, &debug_log->queue_wait, wait);
 
        if (!batadv_log_empty(debug_log))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 
        return 0;
 }
 
                if (sk->sk_state == BT_CONNECTED ||
                    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
                     sk->sk_state == BT_CONNECT2))
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
        }
 
        return 0;
                return bt_accept_poll(sk);
 
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == BT_CLOSED)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (sk->sk_state == BT_CONNECT ||
                        sk->sk_state == BT_CONNECT2 ||
                return mask;
 
        if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 
 
        caif_disconnect_client(sock_net(sk), &cf_sk->layer);
        cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
-       wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+       wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP);
 
        sock_orphan(sk);
        sk_stream_kill_queues(&cf_sk->sk);
 
        /* exceptional events? */
        if (sk->sk_err)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP;
+               mask |= EPOLLRDHUP;
 
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /*
         * we set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
 }
 
        /*
         * Avoid a wakeup if event not interesting for us
         */
-       if (key && !(key_to_poll(key) & (POLLIN | POLLERR)))
+       if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
 }
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
 
        /* writable? */
        if (sock_writeable(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_poll(&wq->wait, POLLERR);
+               wake_up_interruptible_poll(&wq->wait, EPOLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
 }
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
-                                               POLLRDNORM | POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
+                                               EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
 }
        if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
-                       wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-                                               POLLWRNORM | POLLWRBAND);
+                       wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+                                               EPOLLWRNORM | EPOLLWRBAND);
 
                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
 
                rcu_read_lock();
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
-                       wake_up_interruptible_poll(&wq->wait, POLLOUT |
-                                               POLLWRNORM | POLLWRBAND);
+                       wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
+                                               EPOLLWRNORM | EPOLLWRBAND);
                if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
                rcu_read_unlock();
 
 
        mask = 0;
        if (sk->sk_err)
-               mask = POLLERR;
+               mask = EPOLLERR;
 
        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
 
                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
-                               mask |= POLLOUT | POLLWRNORM;
+                               mask |= EPOLLOUT | EPOLLWRNORM;
                        } else {  /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_is_writeable(sk))
-                                       mask |= POLLOUT | POLLWRNORM;
+                                       mask |= EPOLLOUT | EPOLLWRNORM;
                        }
                }
        }
 
        __poll_t mask = datagram_poll(file, sock, wait);
 
        if (!skb_queue_empty(&scp->other_receive_queue))
-               mask |= POLLRDBAND;
+               mask |= EPOLLRDBAND;
 
        return mask;
 }
 
        case TCP_CLOSE:
                err = -ENOTCONN;
                /* Hack to wake up other listeners, who can poll for
-                  POLLHUP, even on eg. unconnected UDP sockets -- RR */
+                  EPOLLHUP, even on e.g. unconnected UDP sockets -- RR */
                /* fall through */
        default:
                sk->sk_shutdown |= how;
 
        mask = 0;
 
        /*
-        * POLLHUP is certainly not done right. But poll() doesn't
+        * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
-        * Some poll() documentation says that POLLHUP is incompatible
-        * with the POLLOUT/POLLWR flags, so somebody should check this
+        * Some poll() documentation says that EPOLLHUP is incompatible
+        * with the EPOLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
-        * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
+        * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making impossible poll() on write()
-        * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
+        * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
         * if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look how Solaris and DUX
-        * solve this dilemma. I would prefer, if POLLHUP were maskable,
+        * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
         * then we could set it on SND_SHUTDOWN. BTW examples given
         * in Stevens' books assume exactly this behaviour, it explains
-        * why POLLHUP is incompatible with POLLOUT.    --ANK
+        * why EPOLLHUP is incompatible with EPOLLOUT.  --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
                        target++;
 
                if (tp->rcv_nxt - tp->copied_seq >= target)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
 
                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
-                               mask |= POLLOUT | POLLWRNORM;
+                               mask |= EPOLLOUT | EPOLLWRNORM;
                        } else {  /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                                 */
                                smp_mb__after_atomic();
                                if (sk_stream_is_writeable(sk))
-                                       mask |= POLLOUT | POLLWRNORM;
+                                       mask |= EPOLLOUT | EPOLLWRNORM;
                        }
                } else
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
 
                if (tp->urg_data & TCP_URG_VALID)
-                       mask |= POLLPRI;
+                       mask |= EPOLLPRI;
        } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
                /* Active TCP fastopen socket with defer_connect
-                * Return POLLOUT so application can call write()
+                * Return EPOLLOUT so application can call write()
                 * in order for kernel to generate SYN+data
                 */
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR;
+               mask |= EPOLLERR;
 
        return mask;
 }
 
 
        /* Fast Recovery (RFC 5681 3.2) :
         * Cubic needs 1.7 factor, rounded to 2 to include
-        * extra cushion (application might react slowly to POLLOUT)
+        * extra cushion (application might react slowly to EPOLLOUT)
         */
        sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
        sndmem *= nr_segs * per_mss;
 
        struct sock *sk = sock->sk;
 
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
-       if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+       if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
-               mask &= ~(POLLIN | POLLRDNORM);
+               mask &= ~(EPOLLIN | EPOLLRDNORM);
 
        return mask;
 
 
                sk = (struct sock *) isk;
 
                if (sk->sk_state == IUCV_CONNECTED)
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
        }
 
        return 0;
                return iucv_accept_poll(sk);
 
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP;
+               mask |= EPOLLRDHUP;
 
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == IUCV_CLOSED)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (sk->sk_state == IUCV_DISCONN)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        if (sock_writeable(sk) && iucv_below_msglim(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 
 
 static void psock_state_change(struct sock *sk)
 {
-       /* TCP only does a POLLIN for a half close. Do a POLLHUP here
-        * since application will normally not poll with POLLIN
+       /* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
+        * since application will normally not poll with EPOLLIN
         * on the TCP sockets.
         */
 
 
        /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
         * we set sk_state, otherwise epoll_wait always returns right away with
-        * POLLHUP
+        * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;
 
 
                sk = &llcp_sock->sk;
 
                if (sk->sk_state == LLCP_CONNECTED)
-                       return POLLIN | POLLRDNORM;
+                       return EPOLLIN | EPOLLRDNORM;
        }
 
        return 0;
                return llcp_accept_poll(sk);
 
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == LLCP_CLOSED)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 
        if (po->rx_ring.pg_vec) {
                if (!packet_previous_rx_frame(po, &po->rx_ring,
                        TP_STATUS_KERNEL))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
        if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
                po->pressure = 0;
        spin_lock_bh(&sk->sk_write_queue.lock);
        if (po->tx_ring.pg_vec) {
                if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);
        return mask;
 
        poll_wait(file, sk_sleep(sk), wait);
 
        if (sk->sk_state == TCP_CLOSE)
-               return POLLERR;
+               return EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (!skb_queue_empty(&pn->ctrlreq_queue))
-               mask |= POLLPRI;
+               mask |= EPOLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
-               return POLLHUP;
+               return EPOLLHUP;
 
        if (sk->sk_state == TCP_ESTABLISHED &&
                refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
                atomic_read(&pn->tx_credits))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
 }
 
 
 /*
  * RDS' poll is without a doubt the least intuitive part of the interface,
- * as POLLIN and POLLOUT do not behave entirely as you would expect from
+ * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from
  * a network protocol.
  *
- * POLLIN is asserted if
+ * EPOLLIN is asserted if
  *  -  there is data on the receive queue.
  *  -  to signal that a previously congested destination may have become
  *     uncongested
  *  -  A notification has been queued to the socket (this can be a congestion
  *     update, or a RDMA completion).
  *
- * POLLOUT is asserted if there is room on the send queue. This does not mean
+ * EPOLLOUT is asserted if there is room on the send queue. This does not mean
  * however, that the next sendmsg() call will succeed. If the application tries
  * to send to a congested destination, the system call may still fail (and
  * return ENOBUFS).
 
        read_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!rs->rs_cong_monitor) {
-               /* When a congestion map was updated, we signal POLLIN for
+               /* When a congestion map was updated, we signal EPOLLIN for
                 * "historical" reasons. Applications can also poll for
                 * WRBAND instead. */
                if (rds_cong_updated_since(&rs->rs_cong_track))
-                       mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
+                       mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
        } else {
                spin_lock(&rs->rs_lock);
                if (rs->rs_cong_notify)
-                       mask |= (POLLIN | POLLRDNORM);
+                       mask |= (EPOLLIN | EPOLLRDNORM);
                spin_unlock(&rs->rs_lock);
        }
        if (!list_empty(&rs->rs_recv_queue) ||
            !list_empty(&rs->rs_notify_queue))
-               mask |= (POLLIN | POLLRDNORM);
+               mask |= (EPOLLIN | EPOLLRDNORM);
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
-               mask |= (POLLOUT | POLLWRNORM);
+               mask |= (EPOLLOUT | EPOLLWRNORM);
        read_unlock_irqrestore(&rs->rs_recv_lock, flags);
 
        /* clear state any time we wake a seen-congested socket */
 
 static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
 {
        struct rfkill_data *data = file->private_data;
-       __poll_t res = POLLOUT | POLLWRNORM;
+       __poll_t res = EPOLLOUT | EPOLLWRNORM;
 
        poll_wait(file, &data->read_wait, wait);
 
        mutex_lock(&data->mtx);
        if (!list_empty(&data->events))
-               res = POLLIN | POLLRDNORM;
+               res = EPOLLIN | EPOLLRDNORM;
        mutex_unlock(&data->mtx);
 
        return res;
 
        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
        if (!list_empty(&rx->recvmsg_q))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* the socket is writable if there is space to add new data to the
         * socket; there is no guarantee that any particular call in progress
         * on the socket may have space in the Tx ACK window */
        if (rxrpc_writable(sk))
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
 
        return mask;
 }
 
         */
        if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
                return (!list_empty(&sp->ep->asocs)) ?
-                       (POLLIN | POLLRDNORM) : 0;
+                       (EPOLLIN | EPOLLRDNORM) : 0;
 
        mask = 0;
 
        /* Is there any exceptional events?  */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        /* Is it readable?  Reconsider this code with TCP-style support.  */
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* The association is either gone or not ready.  */
        if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
 
        /* Is it writable?  */
        if (sctp_writeable(sk)) {
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        } else {
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                /*
                 * in the following code to cover it as well.
                 */
                if (sctp_writeable(sk))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
        return mask;
 }
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-                                               POLLRDNORM | POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+                                               EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
 }
 
 
        spin_lock(&isk->accept_q_lock);
        if (!list_empty(&isk->accept_q))
-               mask = POLLIN | POLLRDNORM;
+               mask = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&isk->accept_q_lock);
 
        return mask;
        int rc;
 
        if (!sk)
-               return POLLNVAL;
+               return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
        sock_hold(sk);
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                /* if non-blocking connect finished ... */
                lock_sock(sk);
-               if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) {
+               if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
                        sk->sk_err = smc->clcsock->sk->sk_err;
                        if (sk->sk_err) {
-                               mask |= POLLERR;
+                               mask |= EPOLLERR;
                        } else {
                                rc = smc_connect_rdma(smc);
                                if (rc < 0)
-                                       mask |= POLLERR;
+                                       mask |= EPOLLERR;
                                /* success cases including fallback */
-                               mask |= POLLOUT | POLLWRNORM;
+                               mask |= EPOLLOUT | EPOLLWRNORM;
                        }
                }
        } else {
                        lock_sock(sk);
                }
                if (sk->sk_err)
-                       mask |= POLLERR;
+                       mask |= EPOLLERR;
                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
                    (sk->sk_state == SMC_CLOSED))
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                if (sk->sk_state == SMC_LISTEN) {
                        /* woken up by sk_data_ready in smc_listen_work() */
                        mask = smc_accept_poll(sk);
                } else {
                        if (atomic_read(&smc->conn.sndbuf_space) ||
                            sk->sk_shutdown & SEND_SHUTDOWN) {
-                               mask |= POLLOUT | POLLWRNORM;
+                               mask |= EPOLLOUT | EPOLLWRNORM;
                        } else {
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        }
                        if (atomic_read(&smc->conn.bytes_to_rcv))
-                               mask |= POLLIN | POLLRDNORM;
+                               mask |= EPOLLIN | EPOLLRDNORM;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
-                               mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+                               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
                        if (sk->sk_state == SMC_APPCLOSEWAIT1)
-                               mask |= POLLIN;
+                               mask |= EPOLLIN;
                }
 
        }
 
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
-                                               POLLRDNORM | POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
+                                               EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            (sk->sk_state == SMC_CLOSED))
 
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_poll(&wq->wait,
-                                                  POLLOUT | POLLWRNORM |
-                                                  POLLWRBAND);
+                                                  EPOLLOUT | EPOLLWRNORM |
+                                                  EPOLLWRBAND);
                if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
                rcu_read_unlock();
 
        poll_wait(filp, &queue_wait, wait);
 
        /* alway allow write */
-       mask = POLLOUT | POLLWRNORM;
+       mask = EPOLLOUT | EPOLLWRNORM;
 
        if (!rp)
                return mask;
        for (cq= &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
 
 {
        struct inode *inode = file_inode(filp);
        struct rpc_inode *rpci = RPC_I(inode);
-       __poll_t mask = POLLOUT | POLLWRNORM;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM;
 
        poll_wait(filp, &rpci->waitq, wait);
 
        inode_lock(inode);
        if (rpci->pipe == NULL)
-               mask |= POLLERR | POLLHUP;
+               mask |= EPOLLERR | EPOLLHUP;
        else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        inode_unlock(inode);
        return mask;
 }
 
        sock_poll_wait(file, sk_sleep(sk), wait);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               revents |= POLLRDHUP | POLLIN | POLLRDNORM;
+               revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               revents |= POLLHUP;
+               revents |= EPOLLHUP;
 
        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
        case TIPC_CONNECTING:
                if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
-                       revents |= POLLOUT;
+                       revents |= EPOLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
                if (!skb_queue_empty(&sk->sk_receive_queue))
-                       revents |= POLLIN | POLLRDNORM;
+                       revents |= EPOLLIN | EPOLLRDNORM;
                break;
        case TIPC_OPEN:
                if (tsk->group_is_open && !tsk->cong_link_cnt)
-                       revents |= POLLOUT;
+                       revents |= EPOLLOUT;
                if (!tipc_sk_type_connectionless(sk))
                        break;
                if (skb_queue_empty(&sk->sk_receive_queue))
                        break;
-               revents |= POLLIN | POLLRDNORM;
+               revents |= EPOLLIN | EPOLLRDNORM;
                break;
        case TIPC_DISCONNECTING:
-               revents = POLLIN | POLLRDNORM | POLLHUP;
+               revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
                break;
        }
        return revents;
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-                                               POLLWRNORM | POLLWRBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+                                               EPOLLWRNORM | EPOLLWRBAND);
        rcu_read_unlock();
 }
 
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
-               wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-                                               POLLRDNORM | POLLRDBAND);
+               wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+                                               EPOLLRDNORM | EPOLLRDBAND);
        rcu_read_unlock();
 }
 
 
 {
        unix_dgram_peer_wake_disconnect(sk, other);
        wake_up_interruptible_poll(sk_sleep(sk),
-                                  POLLOUT |
-                                  POLLWRNORM |
-                                  POLLWRBAND);
+                                  EPOLLOUT |
+                                  EPOLLWRNORM |
+                                  EPOLLWRBAND);
 }
 
 /* preconditions:
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
-                               POLLOUT | POLLWRNORM | POLLWRBAND);
+                               EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
 
        if (wq_has_sleeper(&u->peer_wait))
                wake_up_interruptible_sync_poll(&u->peer_wait,
-                                               POLLOUT | POLLWRNORM |
-                                               POLLWRBAND);
+                                               EPOLLOUT | EPOLLWRNORM |
+                                               EPOLLWRBAND);
 
        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);
 
        /* exceptional events? */
        if (sk->sk_err)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
        if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
            sk->sk_state == TCP_CLOSE)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        /*
         * we set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (unix_writable(sk))
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
 }
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-               mask |= POLLERR |
-                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+               mask |= EPOLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+               mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
 
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (sk->sk_state == TCP_CLOSE)
-                       mask |= POLLHUP;
+                       mask |= EPOLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
        writable = unix_writable(sk);
        }
 
        if (writable)
-               mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 
 
        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
-               mask |= POLLERR;
+               mask |= EPOLLERR;
 
        /* INET sockets treat local write shutdown and peer write shutdown as a
-        * case of POLLHUP set.
+        * case of EPOLLHUP set.
         */
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            ((sk->sk_shutdown & SEND_SHUTDOWN) &&
             (vsk->peer_shutdown & SEND_SHUTDOWN))) {
-               mask |= POLLHUP;
+               mask |= EPOLLHUP;
        }
 
        if (sk->sk_shutdown & RCV_SHUTDOWN ||
            vsk->peer_shutdown & SEND_SHUTDOWN) {
-               mask |= POLLRDHUP;
+               mask |= EPOLLRDHUP;
        }
 
        if (sock->type == SOCK_DGRAM) {
                 */
                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                }
 
                if (!(sk->sk_shutdown & SEND_SHUTDOWN))
-                       mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+                       mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        } else if (sock->type == SOCK_STREAM) {
                lock_sock(sk);
                 */
                if (sk->sk_state == TCP_LISTEN
                    && !vsock_is_accept_queue_empty(sk))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
 
                /* If there is something in the queue then we can read. */
                if (transport->stream_is_active(vsk) &&
                        int ret = transport->notify_poll_in(
                                        vsk, 1, &data_ready_now);
                        if (ret < 0) {
-                               mask |= POLLERR;
+                               mask |= EPOLLERR;
                        } else {
                                if (data_ready_now)
-                                       mask |= POLLIN | POLLRDNORM;
+                                       mask |= EPOLLIN | EPOLLRDNORM;
 
                        }
                }
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN ||
                    vsk->peer_shutdown & SEND_SHUTDOWN) {
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                }
 
                /* Connected sockets that can produce data can be written. */
                                int ret = transport->notify_poll_out(
                                                vsk, 1, &space_avail_now);
                                if (ret < 0) {
-                                       mask |= POLLERR;
+                                       mask |= EPOLLERR;
                                } else {
                                        if (space_avail_now)
-                                               /* Remove POLLWRBAND since INET
+                                               /* Remove EPOLLWRBAND since INET
                                                 * sockets are not setting it.
                                                 */
-                                               mask |= POLLOUT | POLLWRNORM;
+                                               mask |= EPOLLOUT | EPOLLWRNORM;
 
                                }
                        }
                }
 
                /* Simulate INET socket poll behaviors, which sets
-                * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
+                * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
                 * but local send is not shutdown.
                 */
                if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
-                               mask |= POLLOUT | POLLWRNORM;
+                               mask |= EPOLLOUT | EPOLLWRNORM;
 
                }
 
 
                mutex_lock_nested(&rev->ns->lock, rev->ns->level);
                poll_wait(file, &rev->ns->wait, pt);
                if (rev->last_read < rev->ns->revision)
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                mutex_unlock(&rev->ns->lock);
        }
 
 
  * @file: Pointer to "struct file".
  * @wait: Pointer to "poll_table". Maybe NULL.
  *
- * Returns POLLIN | POLLRDNORM when ready to read an audit log.
+ * Returns EPOLLIN | EPOLLRDNORM when ready to read an audit log.
  */
 __poll_t tomoyo_poll_log(struct file *file, poll_table *wait)
 {
        if (tomoyo_log_count)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        poll_wait(file, &tomoyo_log_wait, wait);
        if (tomoyo_log_count)
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
  * @file: Pointer to "struct file".
  * @wait: Pointer to "poll_table".
  *
- * Returns POLLIN | POLLRDNORM when ready to read, 0 otherwise.
+ * Returns EPOLLIN | EPOLLRDNORM when ready to read, 0 otherwise.
  *
  * Waits for access requests which violated policy in enforcing mode.
  */
 static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait)
 {
        if (!list_empty(&tomoyo_query_list))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        poll_wait(file, &tomoyo_query_wait, wait);
        if (!list_empty(&tomoyo_query_list))
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
        return 0;
 }
 
  * @file: Pointer to "struct file".
  * @wait: Pointer to "poll_table". Maybe NULL.
  *
- * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
- * POLLOUT | POLLWRNORM otherwise.
+ * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
+ * EPOLLOUT | EPOLLWRNORM otherwise.
  */
 __poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
 {
        struct tomoyo_io_buffer *head = file->private_data;
        if (head->poll)
-               return head->poll(file, wait) | POLLOUT | POLLWRNORM;
-       return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+               return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM;
+       return EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM;
 }
 
 /**
 
  * @file: Pointer to "struct file".
  * @wait: Pointer to "poll_table". Maybe NULL.
  *
- * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
- * POLLOUT | POLLWRNORM otherwise.
+ * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
+ * EPOLLOUT | EPOLLWRNORM otherwise.
  */
 static __poll_t tomoyo_poll(struct file *file, poll_table *wait)
 {
 
 static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
 {
        if (stream->direction == SND_COMPRESS_PLAYBACK)
-               return POLLOUT | POLLWRNORM;
+               return EPOLLOUT | EPOLLWRNORM;
        else
-               return POLLIN | POLLRDNORM;
+               return EPOLLIN | EPOLLRDNORM;
 }
 
 static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
        __poll_t retval = 0;
 
        if (snd_BUG_ON(!data))
-               return POLLERR;
+               return EPOLLERR;
 
        stream = &data->stream;
 
        switch (stream->runtime->state) {
        case SNDRV_PCM_STATE_OPEN:
        case SNDRV_PCM_STATE_XRUN:
-               retval = snd_compr_get_poll(stream) | POLLERR;
+               retval = snd_compr_get_poll(stream) | EPOLLERR;
                goto out;
        default:
                break;
                        retval = snd_compr_get_poll(stream);
                break;
        default:
-               retval = snd_compr_get_poll(stream) | POLLERR;
+               retval = snd_compr_get_poll(stream) | EPOLLERR;
                break;
        }
 out:
 
 
        mask = 0;
        if (!list_empty(&ctl->events))
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
 
                                          data->file_private_data,
                                          file, wait);
        if (entry->c.ops->read)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (entry->c.ops->write)
-               mask |= POLLOUT | POLLWRNORM;
+               mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 }
 
 
 
 static __poll_t snd_disconnect_poll(struct file * file, poll_table * wait)
 {
-       return POLLERR | POLLNVAL;
+       return EPOLLERR | EPOLLNVAL;
 }
 
 static long snd_disconnect_ioctl(struct file *file,
 
                if (runtime->status->state != SNDRV_PCM_STATE_DRAINING &&
                    (runtime->status->state != SNDRV_PCM_STATE_RUNNING ||
                     snd_pcm_oss_playback_ready(psubstream)))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
                snd_pcm_stream_unlock_irq(psubstream);
        }
        if (csubstream != NULL) {
                snd_pcm_stream_lock_irq(csubstream);
                if ((ostate = runtime->status->state) != SNDRV_PCM_STATE_RUNNING ||
                    snd_pcm_oss_capture_ready(csubstream))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
                snd_pcm_stream_unlock_irq(csubstream);
                if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) {
                        struct snd_pcm_oss_file ofile;
 
 
        substream = pcm_file->substream;
        if (PCM_RUNTIME_CHECK(substream))
-               return POLLOUT | POLLWRNORM | POLLERR;
+               return EPOLLOUT | EPOLLWRNORM | EPOLLERR;
        runtime = substream->runtime;
 
        poll_wait(file, &runtime->sleep, wait);
        case SNDRV_PCM_STATE_PREPARED:
        case SNDRV_PCM_STATE_PAUSED:
                if (avail >= runtime->control->avail_min) {
-                       mask = POLLOUT | POLLWRNORM;
+                       mask = EPOLLOUT | EPOLLWRNORM;
                        break;
                }
                /* Fall through */
                mask = 0;
                break;
        default:
-               mask = POLLOUT | POLLWRNORM | POLLERR;
+               mask = EPOLLOUT | EPOLLWRNORM | EPOLLERR;
                break;
        }
        snd_pcm_stream_unlock_irq(substream);
 
        substream = pcm_file->substream;
        if (PCM_RUNTIME_CHECK(substream))
-               return POLLIN | POLLRDNORM | POLLERR;
+               return EPOLLIN | EPOLLRDNORM | EPOLLERR;
        runtime = substream->runtime;
 
        poll_wait(file, &runtime->sleep, wait);
        case SNDRV_PCM_STATE_PREPARED:
        case SNDRV_PCM_STATE_PAUSED:
                if (avail >= runtime->control->avail_min) {
-                       mask = POLLIN | POLLRDNORM;
+                       mask = EPOLLIN | EPOLLRDNORM;
                        break;
                }
                mask = 0;
                break;
        case SNDRV_PCM_STATE_DRAINING:
                if (avail > 0) {
-                       mask = POLLIN | POLLRDNORM;
+                       mask = EPOLLIN | EPOLLRDNORM;
                        break;
                }
                /* Fall through */
        default:
-               mask = POLLIN | POLLRDNORM | POLLERR;
+               mask = EPOLLIN | EPOLLRDNORM | EPOLLERR;
                break;
        }
        snd_pcm_stream_unlock_irq(substream);
 
        mask = 0;
        if (rfile->input != NULL) {
                if (snd_rawmidi_ready(rfile->input))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
        if (rfile->output != NULL) {
                if (snd_rawmidi_ready(rfile->output))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
        return mask;
 }
 
        /* input */
        if (dp->readq && is_read_mode(dp->file_mode)) {
                if (snd_seq_oss_readq_poll(dp->readq, file, wait))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        /* output */
        if (dp->writeq && is_write_mode(dp->file_mode)) {
                if (snd_seq_kernel_client_write_poll(dp->cseq, file, wait))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
        return mask;
 }
 
 
                /* check if data is available in the outqueue */
                if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
-                       mask |= POLLIN | POLLRDNORM;
+                       mask |= EPOLLIN | EPOLLRDNORM;
        }
 
        if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
                /* check if data is available in the pool */
                if (!snd_seq_write_pool_allocated(client) ||
                    snd_seq_pool_poll_wait(client->pool, file, wait))
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        }
 
        return mask;
 
        mask = 0;
        spin_lock_irq(&tu->qlock);
        if (tu->qused)
-               mask |= POLLIN | POLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (tu->disconnected)
-               mask |= POLLERR;
+               mask |= EPOLLERR;
        spin_unlock_irq(&tu->qlock);
 
        return mask;
 
 
        spin_lock_irq(&bebob->lock);
        if (bebob->dev_lock_changed)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&bebob->lock);
 
 
        spin_lock_irq(&dice->lock);
        if (dice->dev_lock_changed || dice->notification_bits != 0)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&dice->lock);
 
 
        spin_lock_irq(&dg00x->lock);
        if (dg00x->dev_lock_changed || dg00x->msg)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&dg00x->lock);
 
 
        spin_lock_irq(&ff->lock);
        if (ff->dev_lock_changed)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&ff->lock);
 
 
        spin_lock_irq(&efw->lock);
        if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&efw->lock);
 
-       return events | POLLOUT;
+       return events | EPOLLOUT;
 }
 
 static int
 
 
        spin_lock_irq(&motu->lock);
        if (motu->dev_lock_changed || motu->msg)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&motu->lock);
 
-       return events | POLLOUT;
+       return events | EPOLLOUT;
 }
 
 static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
 
 
        spin_lock_irq(&oxfw->lock);
        if (oxfw->dev_lock_changed)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&oxfw->lock);
 
 
        spin_lock_irq(&tscm->lock);
        if (tscm->dev_lock_changed)
-               events = POLLIN | POLLRDNORM;
+               events = EPOLLIN | EPOLLRDNORM;
        else
                events = 0;
        spin_unlock_irq(&tscm->lock);
 
                poll_wait(file, &write_sq.action_queue, wait);
        if (file->f_mode & FMODE_WRITE)
                if (write_sq.count < write_sq.max_active || write_sq.block_size - write_sq.rear_size > 0)
-                       mask |= POLLOUT | POLLWRNORM;
+                       mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
 
 }
 
        struct usb_mixer_interface *mixer = hw->private_data;
 
        poll_wait(file, &mixer->rc_waitq, wait);
-       return mixer->rc_code ? POLLIN | POLLRDNORM : 0;
+       return mixer->rc_code ? EPOLLIN | EPOLLRDNORM : 0;
 }
 
 static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer)
 
 
        poll_wait(file, &us122l->sk.sleep, wait);
 
-       mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+       mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM | EPOLLERR;
        if (mutex_trylock(&us122l->mutex)) {
                struct usb_stream *s = us122l->sk.s;
                if (s && s->state == usb_stream_ready) {
                                polled = &us122l->second_periods_polled;
                        if (*polled != s->periods_done) {
                                *polled = s->periods_done;
-                               mask = POLLIN | POLLOUT | POLLWRNORM;
+                               mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM;
                        } else
                                mask = 0;
                }
 
        struct usX2Ydev *us428 = hw->private_data;
        struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
        if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
-               return POLLHUP;
+               return EPOLLHUP;
 
        poll_wait(file, &us428->us428ctls_wait_queue_head, wait);
 
        if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed)
-               mask |= POLLIN;
+               mask |= EPOLLIN;
 
        return mask;
 }
 
        unsigned seq;
        int idx;
 
-       if (flags & POLLIN) {
+       if (flags & EPOLLIN) {
                idx = srcu_read_lock(&kvm->irq_srcu);
                do {
                        seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }
 
-       if (flags & POLLHUP) {
+       if (flags & EPOLLHUP) {
                /* The eventfd is closing, detach from KVM */
                unsigned long flags;
 
         */
        events = f.file->f_op->poll(f.file, &irqfd->pt);
 
-       if (events & POLLIN)
+       if (events & EPOLLIN)
                schedule_work(&irqfd->inject);
 
        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
-        * we might race against the POLLHUP
+        * we might race against the EPOLLHUP
         */
        fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS