REQ_F_ISREG_BIT,
        REQ_F_POLL_NO_LAZY_BIT,
        REQ_F_CANCEL_SEQ_BIT,
+       REQ_F_CAN_POLL_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
        REQ_F_POLL_NO_LAZY      = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
        /* cancel sequence is set and valid */
        REQ_F_CANCEL_SEQ        = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
+       /* file is pollable */
+       REQ_F_CAN_POLL          = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
 
        if (req->flags & REQ_F_FORCE_ASYNC) {
                bool opcode_poll = def->pollin || def->pollout;
 
-               if (opcode_poll && file_can_poll(req->file)) {
+               if (opcode_poll && io_file_can_poll(req)) {
                        needs_poll = true;
                        issue_flags |= IO_URING_F_NONBLOCK;
                }
 
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
 #include <linux/kasan.h>
+#include <linux/poll.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
 }
+
+/*
+ * Returns true if req->file is pollable, caching a positive result in
+ * the request's REQ_F_CAN_POLL flag so later calls on the same request
+ * return early without re-invoking file_can_poll().
+ *
+ * NOTE(review): the flag is only ever set, never cleared — assumes
+ * req->file does not change for the lifetime of the request; confirm.
+ */
+static inline bool io_file_can_poll(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_CAN_POLL)
+               return true;
+       if (file_can_poll(req->file)) {
+               req->flags |= REQ_F_CAN_POLL;
+               return true;
+       }
+       return false;
+}
 #endif
 
        req->buf_list = bl;
        req->buf_index = buf->bid;
 
-       if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
+       if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
                /*
                 * If we came in unlocked, we have no choice but to consume the
                 * buffer here, otherwise nothing ensures that the buffer won't
 
 
        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
-       if (!file_can_poll(req->file))
+       if (!io_file_can_poll(req))
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;
 
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
-       if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+       if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;
 
        wait->wait.func = io_async_buf_func;
                 * If we can poll, just do that. For a vectored read, we'll
                 * need to copy state first.
                 */
-               if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
+               if (io_file_can_poll(req) && !io_issue_defs[req->opcode].vectored)
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
        /*
         * Multishot MUST be used on a pollable file
         */
-       if (!file_can_poll(req->file))
+       if (!io_file_can_poll(req))
                return -EBADFD;
 
        ret = __io_read(req, issue_flags);