 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
 }
 
-static void wmb_blk(struct nfit_blk *nfit_blk)
-{
-
-       if (nfit_blk->nvdimm_flush) {
-               /*
-                * The first wmb() is needed to 'sfence' all previous writes
-                * such that they are architecturally visible for the platform
-                * buffer flush.  Note that we've already arranged for pmem
-                * writes to avoid the cache via arch_memcpy_to_pmem().  The
-                * final wmb() ensures ordering for the NVDIMM flush write.
-                */
-               wmb();
-               writeq(1, nfit_blk->nvdimm_flush);
-               wmb();
-       } else
-               wmb_pmem();
-}
-
 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
                offset = to_interleave_offset(offset, mmio);
 
        writeq(cmd, mmio->addr.base + offset);
-       wmb_blk(nfit_blk);
+       nvdimm_flush(nfit_blk->nd_region);
 
        if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
                readq(mmio->addr.base + offset);
        }
 
        if (rw)
-               wmb_blk(nfit_blk);
+               nvdimm_flush(nfit_blk->nd_region);
 
        rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
        return rc;
 {
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct nd_blk_region *ndbr = to_nd_blk_region(dev);
-       struct nfit_flush *nfit_flush;
        struct nfit_blk_mmio *mmio;
        struct nfit_blk *nfit_blk;
        struct nfit_mem *nfit_mem;
                return rc;
        }
 
-       nfit_flush = nfit_mem->nfit_flush;
-       if (nfit_flush && nfit_flush->flush->hint_count != 0) {
-               nfit_blk->nvdimm_flush = devm_nvdimm_ioremap(dev,
-                               nfit_flush->flush->hint_address[0], 8);
-               if (!nfit_blk->nvdimm_flush)
-                       return -ENOMEM;
-       }
-
-       if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
+       if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
                dev_warn(dev, "unable to guarantee persistence of writes\n");
 
        if (mmio->line_size == 0)
 
        u64 bdw_offset; /* post interleave offset */
        u64 stat_offset;
        u64 cmd_offset;
-       void __iomem *nvdimm_flush;
        u32 dimm_flags;
 };
 
 
 #include "pfn.h"
 #include "nd.h"
 
+static struct device *to_dev(struct pmem_device *pmem)
+{
+       /*
+        * nvdimm bus services need a 'dev' parameter, and we record the device
+        * at init in bb.dev.
+        */
+       return pmem->bb.dev;
+}
+
+static struct nd_region *to_region(struct pmem_device *pmem)
+{
+       return to_nd_region(to_dev(pmem)->parent);
+}
+
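The two helpers above lean on state the driver already maintains: devm_init_badblocks(dev, &pmem->bb) in the pmem_attach_disk() hunk further down records the namespace device in bb.dev, and that device's parent is the owning nd_region. A compressed, illustrative pairing of the probe-time setup with a later I/O-path use (not a literal excerpt from either function):

        /* probe: devm_init_badblocks() records 'dev' in pmem->bb.dev ... */
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;

        /* ... so an I/O path that only has 'pmem' can recover the region */
        nvdimm_flush(to_region(pmem));
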
 static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
                unsigned int len)
 {
-       struct device *dev = pmem->bb.dev;
+       struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
 
                nd_iostat_end(bio, start);
 
        if (bio_data_dir(bio))
-               wmb_pmem();
+               nvdimm_flush(to_region(pmem));
 
        bio_endio(bio);
        return BLK_QC_T_NONE;
 
        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
        if (rw & WRITE)
-               wmb_pmem();
+               nvdimm_flush(to_region(pmem));
 
        /*
         * The ->rw_page interface is subtle and tricky.  The core
                struct nd_namespace_common *ndns)
 {
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+       struct nd_region *nd_region = to_nd_region(dev->parent);
        struct vmem_altmap __altmap, *altmap = NULL;
        struct resource *res = &nsio->res;
        struct nd_pfn *nd_pfn = NULL;
        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
-       if (!arch_has_wmb_pmem())
+       if (nvdimm_has_flush(nd_region) < 0)
                dev_warn(dev, "unable to guarantee persistence of writes\n");
 
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
-       nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
+       nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
        disk->bb = &pmem->bb;
        add_disk(disk);
 
 
 static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
-       struct nd_region *nd_region = to_nd_region(dev->parent);
        struct pmem_device *pmem = dev_get_drvdata(dev);
+       struct nd_region *nd_region = to_region(pmem);
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
 
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/pmem.h>
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/nd.h>
 #include "nd-core.h"
 #include "nd.h"
 
+/*
+ * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
+ * irrelevant.
+ */
+#include <linux/io-64-nonatomic-hi-lo.h>
+
 static DEFINE_IDA(region_ida);
 
 static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 }
 EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
 
+/**
+ * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * @nd_region: blk or interleaved pmem region
+ */
+void nvdimm_flush(struct nd_region *nd_region)
+{
+       struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+       int i;
+
+       /*
+        * The first wmb() is needed to 'sfence' all previous writes
+        * such that they are architecturally visible for the platform
+        * buffer flush.  Note that we've already arranged for pmem
+        * writes to avoid the cache via arch_memcpy_to_pmem().  The
+        * final wmb() ensures ordering for the NVDIMM flush write.
+        */
+       wmb();
+       for (i = 0; i < nd_region->ndr_mappings; i++)
+               if (ndrd->flush_wpq[i][0])
+                       writeq(1, ndrd->flush_wpq[i][0]);
+       wmb();
+}
+EXPORT_SYMBOL_GPL(nvdimm_flush);
+
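The calling convention the rest of this patch converges on, in both the pmem and nfit block paths, is: perform the cache-bypassing copy first, then call nvdimm_flush() once per request on the owning region. A minimal caller-side sketch of that pattern (example_write_and_flush() is illustrative, and memcpy_to_pmem() stands in for whichever pmem copy routine the driver already uses):

#include <linux/pmem.h>
#include <linux/libnvdimm.h>

static void example_write_and_flush(struct nd_region *nd_region,
                void *pmem_dst, const void *src, size_t len)
{
        /* the copy already bypasses the CPU cache (arch_memcpy_to_pmem() et al.) */
        memcpy_to_pmem(pmem_dst, src, len);

        /* fence prior writes, then poke each DIMM's flush hint address */
        nvdimm_flush(nd_region);
}
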
+/**
+ * nvdimm_has_flush - determine write flushing requirements
+ * @nd_region: blk or interleaved pmem region
+ *
+ * Returns 1 if writes require flushing
+ * Returns 0 if writes do not require flushing
+ * Returns -ENXIO if flushing capability cannot be determined
+ */
+int nvdimm_has_flush(struct nd_region *nd_region)
+{
+       struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+       int i;
+
+       /* no nvdimm == flushing capability unknown */
+       if (nd_region->ndr_mappings == 0)
+               return -ENXIO;
+
+       for (i = 0; i < nd_region->ndr_mappings; i++)
+               /* flush hints present, flushing required */
+               if (ndrd->flush_wpq[i][0])
+                       return 1;
+
+       /*
+        * The platform defines dimm devices without hints; assume a
+        * platform persistence mechanism such as ADR.
+        */
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_flush);
+
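nvdimm_has_flush() is only consulted at bind time; the two callers converted above simply warn when the capability is unknown, since nvdimm_flush() is safe to call whether or not hints exist. A hedged sketch of how a driver might spell out all three return values (dev and nd_region are whatever the probe path already holds):

#include <linux/device.h>
#include <linux/libnvdimm.h>

static void example_check_flush(struct device *dev, struct nd_region *nd_region)
{
        switch (nvdimm_has_flush(nd_region)) {
        case 1:         /* flush hints present; nvdimm_flush() does real work */
        case 0:         /* no hints; rely on platform persistence (e.g. ADR) */
                break;
        default:        /* no DIMM mappings; capability unknown */
                dev_warn(dev, "unable to guarantee persistence of writes\n");
        }
}
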
 void __exit nd_region_devs_exit(void)
 {
        ida_destroy(&region_ida);
 
 unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
 u64 nd_fletcher64(void *addr, size_t len, bool le);
+void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_has_flush(struct nd_region *nd_region);
 #endif /* __LIBNVDIMM_H__ */