}
 EXPORT_SYMBOL_GPL(alloc_dax_region);
 
+/*
+ * Device-model release callback for a "mappingX" child device: return
+ * the mapping's id to the parent dev_dax's ida and free the mapping.
+ * Runs from the final put_device() on &mapping->dev.
+ *
+ * NOTE(review): dereferences dev->parent here; assumes the parent
+ * dev_dax outlives the mapping device — verify a reference to the
+ * parent is held across the mapping's lifetime.
+ */
+static void dax_mapping_release(struct device *dev)
+{
+       struct dax_mapping *mapping = to_dax_mapping(dev);
+       struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+
+       ida_free(&dev_dax->ida, mapping->id);
+       kfree(mapping);
+}
+
+/*
+ * devm action callback: tear down one mapping device.  Severs the
+ * range's back pointer and marks the mapping dead (range_id = -1) so
+ * concurrent sysfs readers taking get_dax_range() see it as gone and
+ * return -ENXIO, then deletes the device and drops the final
+ * reference (which frees the mapping via dax_mapping_release()).
+ * The dax_region device lock must be held (asserted below).
+ */
+static void unregister_dax_mapping(void *data)
+{
+       struct device *dev = data;
+       struct dax_mapping *mapping = to_dax_mapping(dev);
+       struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+       struct dax_region *dax_region = dev_dax->region;
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       device_lock_assert(dax_region->dev);
+
+       dev_dax->ranges[mapping->range_id].mapping = NULL;
+       mapping->range_id = -1;
+
+       device_del(dev);
+       put_device(dev);
+}
+
+/*
+ * Resolve a mapping device to its backing dev_dax_range.
+ *
+ * Returns the range with the dax_region device lock HELD; the caller
+ * must release it with put_dax_range().  Returns NULL (lock dropped)
+ * if the mapping has already been unregistered (range_id < 0, set by
+ * unregister_dax_mapping()).
+ */
+static struct dev_dax_range *get_dax_range(struct device *dev)
+{
+       struct dax_mapping *mapping = to_dax_mapping(dev);
+       struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+       struct dax_region *dax_region = dev_dax->region;
+
+       device_lock(dax_region->dev);
+       if (mapping->range_id < 0) {
+               device_unlock(dax_region->dev);
+               return NULL;
+       }
+
+       return &dev_dax->ranges[mapping->range_id];
+}
+
+/*
+ * Counterpart to get_dax_range(): walk back from the range to its
+ * dax_region and drop the region device lock taken by a successful
+ * get_dax_range().
+ */
+static void put_dax_range(struct dev_dax_range *dax_range)
+{
+       struct dax_mapping *mapping = dax_range->mapping;
+       struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
+       struct dax_region *dax_region = dev_dax->region;
+
+       device_unlock(dax_region->dev);
+}
+
+/*
+ * sysfs "start" (read-only, 0400): physical start address of this
+ * mapping's range, in hex.  Returns -ENXIO if the mapping has been
+ * unregistered.
+ */
+static ssize_t start_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct dev_dax_range *dax_range;
+       ssize_t rc;
+
+       dax_range = get_dax_range(dev);
+       if (!dax_range)
+               return -ENXIO;
+       rc = sprintf(buf, "%#llx\n", dax_range->range.start);
+       put_dax_range(dax_range);
+
+       return rc;
+}
+static DEVICE_ATTR(start, 0400, start_show, NULL);
+
+/*
+ * sysfs "end" (read-only, 0400): physical end address of this
+ * mapping's range, in hex.  Returns -ENXIO if the mapping has been
+ * unregistered.
+ */
+static ssize_t end_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct dev_dax_range *dax_range;
+       ssize_t rc;
+
+       dax_range = get_dax_range(dev);
+       if (!dax_range)
+               return -ENXIO;
+       rc = sprintf(buf, "%#llx\n", dax_range->range.end);
+       put_dax_range(dax_range);
+
+       return rc;
+}
+static DEVICE_ATTR(end, 0400, end_show, NULL);
+
+/*
+ * sysfs "page_offset" (read-only, 0400): this range's page offset
+ * within the dev_dax instance, in hex.  Returns -ENXIO if the mapping
+ * has been unregistered.
+ */
+static ssize_t pgoff_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct dev_dax_range *dax_range;
+       ssize_t rc;
+
+       dax_range = get_dax_range(dev);
+       if (!dax_range)
+               return -ENXIO;
+       rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
+       put_dax_range(dax_range);
+
+       return rc;
+}
+static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);
+
+/* sysfs attributes exposed by every "mappingX" child device */
+static struct attribute *dax_mapping_attributes[] = {
+       &dev_attr_start.attr,
+       &dev_attr_end.attr,
+       &dev_attr_page_offset.attr,
+       NULL,
+};
+
+static const struct attribute_group dax_mapping_attribute_group = {
+       .attrs = dax_mapping_attributes,
+};
+
+static const struct attribute_group *dax_mapping_attribute_groups[] = {
+       &dax_mapping_attribute_group,
+       NULL,
+};
+
+/*
+ * Device type for mapping devices: attribute groups are created by the
+ * core at device_add() time, release frees the mapping on final put.
+ */
+static struct device_type dax_mapping_type = {
+       .release = dax_mapping_release,
+       .groups = dax_mapping_attribute_groups,
+};
+
+/*
+ * Create and register a "mappingX" child device for the range at
+ * @range_id of @dev_dax, and arrange (via devres on the dax_region
+ * device) for it to be unregistered when the region driver detaches.
+ *
+ * Caller must hold the dax_region device lock (asserted below).
+ * Returns 0 on success or a negative errno.
+ */
+static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+{
+       struct dax_region *dax_region = dev_dax->region;
+       struct dax_mapping *mapping;
+       struct device *dev;
+       int rc;
+
+       device_lock_assert(dax_region->dev);
+
+       /* a mapping device is only meaningful while the region is live */
+       if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
+                               "region disabled\n"))
+               return -ENXIO;
+
+       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+       if (!mapping)
+               return -ENOMEM;
+       mapping->range_id = range_id;
+       mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
+       if (mapping->id < 0) {
+               /* propagate the real error (-ENOMEM or -ENOSPC) */
+               rc = mapping->id;
+               kfree(mapping);
+               return rc;
+       }
+       dev_dax->ranges[range_id].mapping = mapping;
+       dev = &mapping->dev;
+       device_initialize(dev);
+       dev->parent = &dev_dax->dev;
+       dev->type = &dax_mapping_type;
+       dev_set_name(dev, "mapping%d", mapping->id);
+       rc = device_add(dev);
+       if (rc) {
+               /*
+                * put_device() frees @mapping via dax_mapping_release();
+                * sever the range's back pointer first so it does not
+                * dangle at freed memory.
+                */
+               dev_dax->ranges[range_id].mapping = NULL;
+               put_device(dev);
+               return rc;
+       }
+
+       return devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
+                       dev);
+}
+
 static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
                resource_size_t size)
 {
        struct dev_dax_range *ranges;
        unsigned long pgoff = 0;
        struct resource *alloc;
-       int i;
+       int i, rc;
 
        device_lock_assert(dax_region->dev);
 
 
        dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
                        &alloc->start, &alloc->end);
+       /*
+        * A dev_dax instance must be registered before mapping device
+        * children can be added. Defer to devm_create_dev_dax() to add
+        * the initial mapping device.
+        */
+       if (!device_is_registered(&dev_dax->dev))
+               return 0;
+
+       rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
+       if (rc) {
+               dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
+                               &alloc->start, &alloc->end);
+               dev_dax->nr_range--;
+               __release_region(res, alloc->start, resource_size(alloc));
+               return rc;
+       }
 
        return 0;
 }
 
        for (i = dev_dax->nr_range - 1; i >= 0; i--) {
                struct range *range = &dev_dax->ranges[i].range;
+               struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
                struct resource *adjust = NULL, *res;
                resource_size_t shrink;
 
                shrink = min_t(u64, to_shrink, range_len(range));
                if (shrink >= range_len(range)) {
+                       devm_release_action(dax_region->dev,
+                                       unregister_dax_mapping, &mapping->dev);
                        __release_region(&dax_region->res, range->start,
                                        range_len(range));
                        dev_dax->nr_range--;
        /* a device_dax instance is dead while the driver is not attached */
        kill_dax(dax_dev);
 
-       /* from here on we're committed to teardown via dev_dax_release() */
        dev_dax->dax_dev = dax_dev;
        dev_dax->target_node = dax_region->target_node;
+       ida_init(&dev_dax->ida);
        kref_get(&dax_region->kref);
 
        inode = dax_inode(dax_dev);
        if (rc)
                return ERR_PTR(rc);
 
+       /* register mapping device for the initial allocation range */
+       if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
+               rc = devm_register_dax_mapping(dev_dax, 0);
+               if (rc)
+                       return ERR_PTR(rc);
+       }
+
        return dev_dax;
 
 err_alloc_dax: