for (pfn = dax_to_pfn(entry); \
                        pfn < dax_end_pfn(entry); pfn++)
 
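+/*
+ * A page shared by reflink cannot be associated with a single
+ * address_space at a single linear_page_index() offset, so page->mapping
+ * is instead set to the PAGE_MAPPING_DAX_COW sentinel tested here.
+ */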
+static inline bool dax_mapping_is_cow(struct address_space *mapping)
+{
+       return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
+}
+
 /*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set page->mapping to the PAGE_MAPPING_DAX_COW flag and increase the
+ * refcount kept in page->index.
+ */
+static inline void dax_mapping_set_cow(struct page *page)
+{
+       if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
+               /*
+                * Reset the index if the page was already mapped
+                * regularly before.
+                */
+               if (page->mapping)
+                       page->index = 1;
+               page->mapping = (void *)PAGE_MAPPING_DAX_COW;
+       }
+       page->index++;
+}
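+
+/*
+ * Illustrative transitions of dax_mapping_set_cow() (derived from the
+ * code above, not normative):
+ *
+ *   mapping == NULL  (index == 0)      -> COW, index == 1
+ *   mapping == inode (index == offset) -> COW, index == 2
+ *   mapping == COW   (index == n)      -> COW, index == n + 1
+ */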
+
+/*
+ * When called from dax_insert_entry(), the cow flag indicates whether this
+ * entry is shared by multiple files.  If so, set page->mapping to
+ * PAGE_MAPPING_DAX_COW and use page->index as a refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-               struct vm_area_struct *vma, unsigned long address)
+               struct vm_area_struct *vma, unsigned long address, bool cow)
 {
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;
 
        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;
 
        index = linear_page_index(vma, address & ~(size - 1));
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
-               WARN_ON_ONCE(page->mapping);
-               page->mapping = mapping;
-               page->index = index + i++;
+               if (cow) {
+                       dax_mapping_set_cow(page);
+               } else {
+                       WARN_ON_ONCE(page->mapping);
+                       page->mapping = mapping;
+                       page->index = index + i++;
+               }
        }
 }
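+
+/*
+ * Note the dual use of page->index above: a regular mapping stores the
+ * linear page offset in it, while a CoW mapping lets dax_mapping_set_cow()
+ * use it as a share count.
+ */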
 
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-               WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+               if (dax_mapping_is_cow(page->mapping)) {
+                       /*
+                        * Drop one sharer; keep the CoW flag while the page
+                        * is still shared.
+                        */
+                       if (page->index-- > 0)
+                               continue;
+               } else
+                       WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
                page->index = 0;
        }
        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                void *old;
 
                dax_disassociate_entry(entry, mapping, false);
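+               /*
+                * @cow is hard-wired to false at this call site; wiring the
+                * real CoW decision for shared extents into this path is an
+                * assumption left to later changes, not shown in this patch.
+                */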
-               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+                               false);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or