gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
+int dma_direct_supported(struct device *dev, u64 mask);
 
 #endif /* _LINUX_DMA_DIRECT_H */
 
        return nents;
 }
 
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+               return 0;
+#else
+       /*
+        * Because 32-bit DMA masks are so common we expect every architecture
+        * to be able to satisfy them - either by not supporting more physical
+        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
+        * architecture needs to use an IOMMU instead of the direct mapping.
+        */
+       if (mask < DMA_BIT_MASK(32))
+               return 0;
+#endif
+       return 1;
+}
+
 static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == DIRECT_MAPPING_ERROR;
        .free                   = dma_direct_free,
        .map_page               = dma_direct_map_page,
        .map_sg                 = dma_direct_map_sg,
+       .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
 };
 EXPORT_SYMBOL(dma_direct_ops);
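
For context (not part of the diff above): dma_direct_supported() is not called by drivers directly; it is reached through the generic dma_supported()/dma_set_mask() helpers, which dispatch to the ->dma_supported callback of the device's dma_map_ops. The sketch below paraphrases the generic DMA-mapping code of this era (include/linux/dma-mapping.h) rather than quoting it; exact details vary between kernel versions.

	/* Paraphrased sketch of the generic dispatch path, not upstream source. */
	static inline int dma_supported(struct device *dev, u64 mask)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (!ops)
			return 0;
		if (!ops->dma_supported)
			return 1;	/* no callback: the mask is accepted as-is */
		return ops->dma_supported(dev, mask);	/* -> dma_direct_supported() */
	}

	int dma_set_mask(struct device *dev, u64 mask)
	{
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;

		*dev->dma_mask = mask;
		return 0;
	}

With dma_direct_ops now wiring up .dma_supported, a driver that requests a mask the direct mapping cannot cover - for example dma_set_mask(dev, DMA_BIT_MASK(24)) on a configuration without CONFIG_ZONE_DMA - gets -EIO instead of silently proceeding with a mask the direct mapping cannot honor.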