        if (mmio->num_lines)
                offset = to_interleave_offset(offset, mmio);
 
-       return readq(mmio->base + offset);
+       return readq(mmio->addr.base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
        if (mmio->num_lines)
                offset = to_interleave_offset(offset, mmio);
 
-       writeq(cmd, mmio->base + offset);
+       writeq(cmd, mmio->addr.base + offset);
        wmb_blk(nfit_blk);
 
        if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
-               readq(mmio->base + offset);
+               readq(mmio->addr.base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
                }
 
                if (rw)
-                       memcpy_to_pmem(mmio->aperture + offset,
+                       memcpy_to_pmem(mmio->addr.aperture + offset,
                                        iobuf + copied, c);
-               else
+               else {
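+                       /* flush the aperture range before reading when the DIMM requires it */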
+                       if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+                               mmio_flush_range((void __force *)
+                                       mmio->addr.aperture + offset, c);
+
                        memcpy_from_pmem(iobuf + copied,
-                                       mmio->aperture + offset, c);
+                                       mmio->addr.aperture + offset, c);
+               }
 
                copied += c;
                len -= c;
 
        WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
        dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
-       iounmap(spa_map->iomem);
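+       /* release the mapping with the same style of call that created it */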
+       if (spa_map->type == SPA_MAP_APERTURE)
+               memunmap((void __force *)spa_map->addr.aperture);
+       else
+               iounmap(spa_map->addr.base);
        release_mem_region(spa->address, spa->length);
        list_del(&spa_map->list);
        kfree(spa_map);
        spa_map = find_spa_mapping(acpi_desc, spa);
        if (spa_map) {
                kref_get(&spa_map->kref);
-               return spa_map->iomem;
+               return spa_map->addr.base;
        }
 
        spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
        if (!res)
                goto err_mem;
 
-       if (type == SPA_MAP_APERTURE) {
-               /*
-                * TODO: memremap_pmem() support, but that requires cache
-                * flushing when the aperture is moved.
-                */
-               spa_map->iomem = ioremap_wc(start, n);
-       } else
-               spa_map->iomem = ioremap_nocache(start, n);
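+       /* record how the range was mapped so nfit_spa_unmap() can pair the release */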
+       spa_map->type = type;
+       if (type == SPA_MAP_APERTURE)
+               spa_map->addr.aperture = (void __pmem *)memremap(start, n,
+                                                       ARCH_MEMREMAP_PMEM);
+       else
+               spa_map->addr.base = ioremap_nocache(start, n);
 
-       if (!spa_map->iomem)
+       if (!spa_map->addr.base)
                goto err_map;
 
        list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
-       return spa_map->iomem;
+       return spa_map->addr.base;
 
  err_map:
        release_mem_region(start, n);
                nfit_blk->dimm_flags = flags.flags;
        else if (rc == -ENOTTY) {
                /* fall back to a conservative default */
-               nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+               nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
                rc = 0;
        } else
                rc = -ENXIO;
        /* map block aperture memory */
        nfit_blk->bdw_offset = nfit_mem->bdw->offset;
        mmio = &nfit_blk->mmio[BDW];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+       mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
                        SPA_MAP_APERTURE);
-       if (!mmio->base) {
+       if (!mmio->addr.base) {
                dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
                                nvdimm_name(nvdimm));
                return -ENOMEM;
        nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
        nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
        mmio = &nfit_blk->mmio[DCR];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+       mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
                        SPA_MAP_CONTROL);
-       if (!mmio->base) {
+       if (!mmio->addr.base) {
                dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
                                nvdimm_name(nvdimm));
                return -ENOMEM;
        for (i = 0; i < 2; i++) {
                struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
 
-               if (mmio->base)
+               if (mmio->addr.base)
                        nfit_spa_unmap(acpi_desc, mmio->spa);
        }
        nd_blk_region_set_provider_data(ndbr, NULL);
 
        nfit_res = get_nfit_res(offset);
        rcu_read_unlock();
        if (nfit_res)
-               return (void __iomem *) nfit_res->buf + offset
-                       - nfit_res->res->start;
+               return nfit_res->buf + offset - nfit_res->res->start;
        return devm_memremap(dev, offset, size, flags);
 }
 EXPORT_SYMBOL(__wrap_devm_memremap);
 
+void *__wrap_memremap(resource_size_t offset, size_t size,
+               unsigned long flags)
+{
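+       /* offsets inside an emulated nfit resource resolve to the test's backing buffer */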
+       struct nfit_test_resource *nfit_res;
+
+       rcu_read_lock();
+       nfit_res = get_nfit_res(offset);
+       rcu_read_unlock();
+       if (nfit_res)
+               return nfit_res->buf + offset - nfit_res->res->start;
+       return memremap(offset, size, flags);
+}
+EXPORT_SYMBOL(__wrap_memremap);
+
 void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
 {
        return __nfit_test_ioremap(offset, size, ioremap_nocache);
 }
 EXPORT_SYMBOL(__wrap_iounmap);
 
+void __wrap_memunmap(void *addr)
+{
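+       /* addresses owned by the test harness have no real mapping to undo */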
+       struct nfit_test_resource *nfit_res;
+
+       rcu_read_lock();
+       nfit_res = get_nfit_res((unsigned long) addr);
+       rcu_read_unlock();
+       if (nfit_res)
+               return;
+       return memunmap(addr);
+}
+EXPORT_SYMBOL(__wrap_memunmap);
+
 static struct resource *nfit_test_request_region(struct device *dev,
                struct resource *parent, resource_size_t start,
                resource_size_t n, const char *name, int flags)