#include <linux/limits.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
 
        return ops;
 }
+
+/*
+ * Classify one iommu-addresses entry: a direct mapping when the IOVA range
+ * exactly matches the physical region, otherwise a plain reservation.
+ */
+static enum iommu_resv_type iommu_resv_region_get_type(struct device *dev, struct resource *phys,
+                                                      phys_addr_t start, size_t length)
+{
+       phys_addr_t end = start + length - 1;
+
+       /*
+        * IOMMU regions without an associated physical region cannot be
+        * mapped and are simply reservations.
+        */
+       if (phys->start >= phys->end)
+               return IOMMU_RESV_RESERVED;
+
+       /* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
+       if (start == phys->start && end == phys->end)
+               return IOMMU_RESV_DIRECT;
+
+       /* %pr takes a struct resource *; "phys" already is one, so don't pass &phys */
+       dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
+                &start, &end);
+       return IOMMU_RESV_RESERVED;
+}
+
+/**
+ * of_iommu_get_resv_regions - reserved region driver helper for device tree
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions() callback
+ * for memory regions attached to a device tree node. See the reserved-memory
+ * device tree bindings on how to use these:
+ *
+ *   Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+ */
+void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
+{
+#if IS_ENABLED(CONFIG_OF_ADDRESS)
+       struct of_phandle_iterator it;
+       int err;
+
+       of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
+               const __be32 *maps, *end;
+               struct resource phys;
+               int size;
+
+               memset(&phys, 0, sizeof(phys));
+
+               /*
+                * The "reg" property is optional and can be omitted by reserved-memory regions
+                * that represent reservations in the IOVA space, which are regions that should
+                * not be mapped.
+                */
+               if (of_find_property(it.node, "reg", NULL)) {
+                       err = of_address_to_resource(it.node, 0, &phys);
+                       if (err < 0) {
+                               dev_err(dev, "failed to parse memory region %pOF: %d\n",
+                                       it.node, err);
+                               continue;
+                       }
+               }
+
+               maps = of_get_property(it.node, "iommu-addresses", &size);
+               if (!maps)
+                       continue;
+
+               end = maps + size / sizeof(__be32);
+
+               while (maps < end) {
+                       struct device_node *np;
+                       u32 phandle;
+
+                       phandle = be32_to_cpup(maps++);
+                       np = of_find_node_by_phandle(phandle);
+
+                       if (np == dev->of_node) {
+                               int prot = IOMMU_READ | IOMMU_WRITE;
+                               struct iommu_resv_region *region;
+                               enum iommu_resv_type type;
+                               phys_addr_t iova;
+                               size_t length;
+
+                               maps = of_translate_dma_region(np, maps, &iova, &length);
+
+                               /*
+                                * A zero-length region would underflow in
+                                * iommu_resv_region_get_type() (start + length - 1)
+                                * and reserve a bogus range; skip it.
+                                */
+                               if (length == 0) {
+                                       dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
+                                       of_node_put(np);
+                                       continue;
+                               }
+
+                               type = iommu_resv_region_get_type(dev, &phys, iova, length);
+
+                               region = iommu_alloc_resv_region(iova, length, prot, type,
+                                                                GFP_KERNEL);
+                               if (region)
+                                       list_add_tail(&region->list, list);
+                       }
+
+                       /* drop the reference taken by of_find_node_by_phandle() */
+                       of_node_put(np);
+               }
+       }
+#endif
+}
+EXPORT_SYMBOL(of_iommu_get_resv_regions);