blk_cleanup_disk(pmem->disk);
 }
 
+/*
+ * dev_pagemap_ops->memory_failure() hook for fsdax-mode pmem.
+ *
+ * Translate the failing pfn range into a byte offset/length relative to
+ * the start of the namespace's user-data area and forward it to the dax
+ * holder (e.g. the filesystem) via dax_holder_notify_failure(), so it can
+ * notify affected processes and attempt recovery.  The caller falls back
+ * to mf_generic_kill_procs() if this returns -EOPNOTSUPP.
+ */
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+               unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+       struct pmem_device *pmem =
+                       container_of(pgmap, struct pmem_device, pgmap);
+       /*
+        * Byte offset into the data area.  NOTE(review): assumes the
+        * failing pfn is at or beyond phys_addr + data_offset; a failure
+        * inside the reserved pfn metadata would make this u64 underflow
+        * to a huge value — confirm callers never report such pfns.
+        */
+       u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+       u64 len = nr_pages << PAGE_SHIFT;
+
+       return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+/* pagemap ops installed for fsdax pmem: routes poison events to the holder */
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+       .memory_failure         = pmem_pagemap_memory_failure,
+};
+
 static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
 {
        pmem->pfn_flags = PFN_DEV;
        if (is_nd_pfn(dev)) {
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+               pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pgmap.range.end = res->end;
                pmem->pgmap.nr_range = 1;
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+               pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                bb_range = pmem->pgmap.range;
 
         * the page back to a CPU accessible page.
         */
        vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+       /*
+        * Handle the memory failure that occurs on a range of pfns.  Notify the
+        * processes who are using these pfns, and try to recover the data on
+        * them if necessary.  The mf_flags is finally passed to the recover
+        * function through the whole notify routine.
+        *
+        * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+        * will fall back to a common handler called mf_generic_kill_procs().
+        */
+       int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+                             unsigned long nr_pages, int mf_flags);
 };
 
 #define PGMAP_ALTMAP_VALID     (1 << 0)
 
        if (!pgmap_pfn_valid(pgmap, pfn))
                goto out;
 
+       /*
+        * Call the driver's implementation to handle the memory failure,
+        * otherwise fall back to the generic handler.  ->ops is optional
+        * for some pagemap types (e.g. MEMORY_DEVICE_GENERIC), so it must
+        * be checked before dereferencing to avoid a NULL pointer access
+        * in the poison-handling path.
+        */
+       if (pgmap->ops && pgmap->ops->memory_failure) {
+               rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
+               /*
+                * Fall back to generic handler too if operation is not
+                * supported inside the driver/device/filesystem.
+                */
+               if (rc != -EOPNOTSUPP)
+                       goto out;
+       }
+
        rc = mf_generic_kill_procs(pfn, flags, pgmap);
 out:
        /* drop pgmap ref acquired in caller */