return page;
 }
 
+/*
+ * Check if a potentially blocking operation needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+       return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
 {
                 */
                remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
                if (remap) {
-                       if (!gfpflags_allow_blocking(gfp) &&
-                           !is_swiotlb_for_alloc(dev))
+                       if (dma_direct_use_pool(dev, gfp))
                                return dma_direct_alloc_from_pool(dev, size,
                                                dma_handle, gfp);
                } else {
         * Decrypting memory may block, so allocate the memory from the atomic
         * pools if we can't block.
         */
-       if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-           !is_swiotlb_for_alloc(dev))
+       if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        /* we always manually zero the memory once we are done */
        struct page *page;
        void *ret;
 
-       if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-           !is_swiotlb_for_alloc(dev))
+       if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        page = __dma_direct_alloc_pages(dev, size, gfp);