#ifdef CONFIG_DEV_PAGEMAP_OPS
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page(struct page *page);
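+/*
+ * Returns true if the page reference was dropped here, false if the
+ * caller should drop the reference itself.
+ */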
+static inline bool put_devmap_managed_page(struct page *page)
 {
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
-       switch (page->pgmap->type) {
-       case MEMORY_DEVICE_PRIVATE:
-       case MEMORY_DEVICE_FS_DAX:
-               return true;
-       default:
-               break;
-       }
-       return false;
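+       /*
+        * Only device private and fs/dax pages need special handling
+        * of the 2 -> 1 refcount transition.
+        */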
+       if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
+           page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+               return false;
+       return __put_devmap_managed_page(page);
 }
 
-void put_devmap_managed_page(struct page *page);
-
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
 {
        return false;
 }
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
        struct folio *folio = page_folio(page);
 
        /*
-        * For devmap managed pages we need to catch refcount transition from
-        * 2 to 1, when refcount reach one it means the page is free and we
-        * need to inform the device driver through callback. See
-        * include/linux/memremap.h and HMM for details.
+        * For some devmap managed pages we need to catch the refcount
+        * transition from 2 to 1: at refcount 1 the page is free and the
+        * device driver needs to be notified.
         */
-       if (page_is_devmap_managed(&folio->page)) {
-               put_devmap_managed_page(&folio->page);
+       if (put_devmap_managed_page(&folio->page))
                return;
-       }
-
        folio_put(folio);
 }
 
 
        page->pgmap->ops->page_free(page);
 }
 
-void put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page(struct page *page)
 {
-       int count;
-
-       if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
-               return;
-
-       count = page_ref_dec_return(page);
-
        /*
         * devmap page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
-       if (count == 1)
+       switch (page_ref_dec_return(page)) {
+       case 1:
                free_devmap_managed_page(page);
-       else if (!count)
+               break;
+       case 0:
                __put_page(page);
+               break;
+       }
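+       /* The reference was dropped above; callers must not put the page again. */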
+       return true;
 }
-EXPORT_SYMBOL(put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
-                       /*
-                        * ZONE_DEVICE pages that return 'false' from
-                        * page_is_devmap_managed() do not require special
-                        * processing, and instead, expect a call to
-                        * put_page_testzero().
-                        */
-                       if (page_is_devmap_managed(page)) {
-                               put_devmap_managed_page(page);
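+                       /*
+                        * ZONE_DEVICE pages that are not devmap managed only
+                        * need the put_page_testzero() handling below.
+                        */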
+                       if (put_devmap_managed_page(page))
                                continue;
-                       }
                        if (put_page_testzero(page))
                                put_dev_pagemap(page->pgmap);
                        continue;