}
 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 
-static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
-{
-       struct perf_event *event = vmf->vma->vm_file->private_data;
-       struct perf_buffer *rb;
-       vm_fault_t ret = VM_FAULT_SIGBUS;
-
-       if (vmf->flags & FAULT_FLAG_MKWRITE) {
-               if (vmf->pgoff == 0)
-                       ret = 0;
-               return ret;
-       }
-
-       rcu_read_lock();
-       rb = rcu_dereference(event->rb);
-       if (!rb)
-               goto unlock;
-
-       if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
-               goto unlock;
-
-       vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
-       if (!vmf->page)
-               goto unlock;
-
-       get_page(vmf->page);
-       vmf->page->mapping = vmf->vma->vm_file->f_mapping;
-       vmf->page->index   = vmf->pgoff;
-
-       ret = 0;
-unlock:
-       rcu_read_unlock();
-
-       return ret;
-}
-
 static void ring_buffer_attach(struct perf_event *event,
                               struct perf_buffer *rb)
 {
        ring_buffer_put(rb); /* could be last */
 }
 
+static vm_fault_t perf_mmap_pfn_mkwrite(struct vm_fault *vmf)
+{
+       /* The first page is the user control page, others are read-only. */
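+       /* Returning 0 lets the fault handler make the PTE writable. */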
+       return vmf->pgoff == 0 ? 0 : VM_FAULT_SIGBUS;
+}
+
 static const struct vm_operations_struct perf_mmap_vmops = {
        .open           = perf_mmap_open,
        .close          = perf_mmap_close, /* non mergeable */
-       .fault          = perf_mmap_fault,
-       .page_mkwrite   = perf_mmap_fault,
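+       /* No .fault handler is needed: map_range() maps every page up front. */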
+       .pfn_mkwrite    = perf_mmap_pfn_mkwrite,
 };
 
+static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
+{
+       unsigned long nr_pages = vma_pages(vma);
+       int err = 0;
+       unsigned long pagenum;
+
+       /*
+        * We map this as a VM_PFNMAP VMA.
+        *
+        * This is not ideal, as VM_PFNMAP is designed broadly for mappings of
+        * PFNs referencing memory-mapped I/O ranges or non-system RAM, i.e.
+        * those for which !pfn_valid(pfn).
+        *
+        * We are mapping kernel-allocated memory (memory we manage ourselves)
+        * which would more ideally be mapped using vm_insert_page() or a
+        * similar mechanism, that is as a VM_MIXEDMAP mapping.
+        *
+        * However this won't work here, because:
+        *
+        * 1. It uses vma->vm_page_prot, but this field has not been completely
+        *    set up at the point of the f_op->mmap() hook, so we are unable to
+        *    indicate that this should be mapped CoW in order that the
+        *    mkwrite() hook can be invoked to make the first page R/W and the
+        *    rest R/O as desired.
+        *
+        * 2. Anything other than a VM_PFNMAP of valid PFNs will result in
+        *    vm_normal_page() returning a struct page * pointer, which means
+        *    vm_ops->page_mkwrite() will be invoked rather than
+        *    vm_ops->pfn_mkwrite(), and this means we have to set page->mapping
+        *    to work around retry logic in the fault handler, however this
+        *    field is no longer allowed to be used within struct page.
+        *
+        * 3. Having a struct page * made available in the fault logic also
+        *    means that the page gets put on the rmap and becomes
+        *    inappropriately accessible and subject to map and ref counting.
+        *
+        * Ideally we would have a mechanism that could explicitly express our
+        * desires, but this is not currently the case, so we instead use
+        * VM_PFNMAP.
+        *
+        * We manage the lifetime of these mappings with internal refcounts (see
+        * perf_mmap_open() and perf_mmap_close()), which ensures the lifetime
+        * of this mapping is maintained correctly.
+        */
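+       /* Remap each buffer page into the VMA, one PAGE_SIZE chunk at a time. */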
+       for (pagenum = 0; pagenum < nr_pages; pagenum++) {
+               unsigned long va = vma->vm_start + PAGE_SIZE * pagenum;
+               struct page *page = perf_mmap_to_page(rb, vma->vm_pgoff + pagenum);
+
+               if (page == NULL) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               /* Map read-only; perf_mmap_pfn_mkwrite() is called on write faults. */
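+               /*
+                * Dropping VM_SHARED from the protection bits write-protects
+                * the PTE, so the first write to the user page faults into
+                * perf_mmap_pfn_mkwrite() above.
+                */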
+               err = remap_pfn_range(vma, va, page_to_pfn(page), PAGE_SIZE,
+                                     vm_get_page_prot(vma->vm_flags & ~VM_SHARED));
+               if (err)
+                       break;
+       }
+
+#ifdef CONFIG_MMU
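+       /* zap_page_range_single() is only available on CONFIG_MMU builds. */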
+       /* Clear any partial mappings on error. */
+       if (err)
+               zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
+#endif
+
+       return err;
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct perf_event *event = file->private_data;
                        goto again;
                }
 
+               /* We need the rb to map pages. */
+               rb = event->rb;
                goto unlock;
        }
 
        vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &perf_mmap_vmops;
 
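+       /* On success, eagerly map every buffer page into the VMA. */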
+       if (!ret)
+               ret = map_range(rb, vma);
+
        if (event->pmu->event_mapped)
                event->pmu->event_mapped(event, vma->vm_mm);