struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
                              struct vm_fault *vmf);
 
+/* Return the swap device's SWP_* flags for the swap entry backing @page. */
+static inline unsigned int page_swap_flags(struct page *page)
+{
+       return page_swap_info(page)->flags;
+}
 #else /* CONFIG_SWAP */
 static inline int swap_readpage(struct page *page, bool do_poll)
 {
 {
 }
 
+/* !CONFIG_SWAP stub: no swap devices exist, so no SWP_* flags are set. */
+static inline unsigned int page_swap_flags(struct page *page)
+{
+       return 0;
+}
 #endif /* CONFIG_SWAP */
 #endif /* _MM_SWAP_H */
 
        return nr_succeeded;
 }
 
+/*
+ * Decide whether reclaim of @page under @gfp_mask is allowed to call
+ * into a filesystem.
+ */
+static bool may_enter_fs(struct page *page, gfp_t gfp_mask)
+{
+	if (!(gfp_mask & __GFP_FS)) {
+		/*
+		 * Without __GFP_FS, only a swap-cache page may proceed,
+		 * and only when __GFP_IO is allowed.
+		 */
+		if (!PageSwapCache(page) || !(gfp_mask & __GFP_IO))
+			return false;
+		/*
+		 * Even then the swap device must not be SWP_FS_OPS.
+		 * ->flags can be updated non-atomically
+		 * (scan_swap_map_slots), but that never touches
+		 * SWP_FS_OPS, so the data_race() is safe.
+		 */
+		if (data_race(page_swap_flags(page) & SWP_FS_OPS))
+			return false;
+	}
+	return true;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
                struct page *page;
                struct folio *folio;
                enum page_references references = PAGEREF_RECLAIM;
-               bool dirty, writeback, may_enter_fs;
+               bool dirty, writeback;
                unsigned int nr_pages;
 
                cond_resched();
                if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;
 
-               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
-                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
-
                /*
                 * The number of dirty pages determines if a node is marked
                 * reclaim_congested. kswapd will stall and start writing
                 *    not to fs). In this case mark the page for immediate
                 *    reclaim and continue scanning.
                 *
-                *    Require may_enter_fs because we would wait on fs, which
+                *    Require may_enter_fs() because we would wait on fs, which
                 *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
 
                        /* Case 2 above */
                        } else if (writeback_throttling_sane(sc) ||
-                           !PageReclaim(page) || !may_enter_fs) {
+                           !PageReclaim(page) || !may_enter_fs(page, sc->gfp_mask)) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
                                                goto activate_locked_split;
                                }
 
-                               may_enter_fs = true;
-
                                /* Adding to swap updated mapping */
                                mapping = page_mapping(page);
                        }
 
                        if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
-                       if (!may_enter_fs)
+                       if (!may_enter_fs(page, sc->gfp_mask))
                                goto keep_locked;
                        if (!sc->may_writepage)
                                goto keep_locked;