userfaultfd/shmem: support minor fault registration for shmem
author Axel Rasmussen <axelrasmussen@google.com>
Wed, 2 Jun 2021 03:52:50 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 2 Jun 2021 03:52:50 +0000 (13:52 +1000)
This patch allows shmem-backed VMAs to be registered for minor faults.
Minor faults are relayed to userspace in the fault path for VMAs
registered with the relevant flag.
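
As a rough userspace sketch (not part of this patch): once this change is
applied, a shmem-backed mapping can be registered in MINOR mode as below.
The memfd name and the trimmed error handling are illustrative, and the
uapi headers must come from a kernel carrying this series:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            size_t len = 16 * page;

            /* Any shmem-backed mapping qualifies: memfd, tmpfs, SysV shm. */
            int fd = memfd_create("uffd-minor-demo", 0);
            ftruncate(fd, len);
            char *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, 0);

            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
            struct uffdio_api api = { .api = UFFD_API };
            ioctl(uffd, UFFDIO_API, &api);

            /* MINOR mode: trap faults even when the page is already in
             * the page cache, so userspace can vet its contents first. */
            struct uffdio_register reg = {
                    .range = { .start = (unsigned long)addr, .len = len },
                    .mode = UFFDIO_REGISTER_MODE_MINOR,
            };
            if (ioctl(uffd, UFFDIO_REGISTER, &reg))
                    perror("UFFDIO_REGISTER");
            return 0;
    }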

This commit doesn't hook up the UFFDIO_CONTINUE ioctl for shmem-backed
minor faults, though, so userspace doesn't yet have a way to resolve such
faults.

Because of this, we also don't yet advertise it as a supported feature.
That will be done in a separate commit when the feature is fully
implemented.

Link: https://lkml.kernel.org/r/20210503180737.2487560-4-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Upton <oupton@google.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Wang Qing <wangqing@vivo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
fs/userfaultfd.c
mm/memory.c
mm/shmem.c

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 5dd78238cc156e3fa503ec436a0097beb9c1f418..82ef253d66b68356e3dd2ec27bac21ef56fc1420 100644
@@ -1267,8 +1267,7 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
        }
 
        if (vm_flags & VM_UFFD_MINOR) {
-               /* FIXME: Add minor fault interception for shmem. */
-               if (!is_vm_hugetlb_page(vma))
+               if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
                        return false;
        }
 
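
Seen from userspace, the relaxed vma_can_userfault() check means that
UFFDIO_REGISTER with UFFDIO_REGISTER_MODE_MINOR now succeeds for hugetlbfs-
and shmem-backed VMAs and still fails for anything else. A minimal sketch of
the expected behavior, reusing uffd and len from the sketch above (the
assert style is illustrative; <assert.h> and <errno.h> are also needed):

    /* Anonymous private memory is still rejected for MINOR mode. */
    void *anon = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct uffdio_register anon_reg = {
            .range = { .start = (unsigned long)anon, .len = len },
            .mode = UFFDIO_REGISTER_MODE_MINOR,
    };
    int ret = ioctl(uffd, UFFDIO_REGISTER, &anon_reg);
    assert(ret == -1 && errno == EINVAL);  /* fails vma_can_userfault() */
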
diff --git a/mm/memory.c b/mm/memory.c
index ae26cca411872a528c121293fafddad0a88a1968..39e247b2a059fffd6c05ae8255f7cb8a39795e3f 100644
@@ -4110,9 +4110,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
         * something).
         */
        if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
-               ret = do_fault_around(vmf);
-               if (ret)
-                       return ret;
+               if (likely(!userfaultfd_minor(vmf->vma))) {
+                       ret = do_fault_around(vmf);
+                       if (ret)
+                               return ret;
+               }
        }
 
        ret = __do_fault(vmf);
diff --git a/mm/shmem.c b/mm/shmem.c
index 10a97087267f18bd011a10cf28c19ef65037442e..e1d2e9c42a11e07989bd0e916f63d5fe89212512 100644
@@ -1797,7 +1797,7 @@ unlock:
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache.
  *
- * vmf and fault_type are only supplied by shmem_fault:
+ * vma, vmf, and fault_type are only supplied by shmem_fault:
  * otherwise they are NULL.
  */
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1832,6 +1832,16 @@ repeat:
 
        page = pagecache_get_page(mapping, index,
                                        FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
+
+       if (page && vma && userfaultfd_minor(vma)) {
+               if (!xa_is_value(page)) {
+                       unlock_page(page);
+                       put_page(page);
+               }
+               *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
+               return 0;
+       }
+
        if (xa_is_value(page)) {
                error = shmem_swapin_page(inode, index, &page,
                                          sgp, gfp, vma, fault_type);
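
The handle_userfault() call added above is what a registered minor fault
turns into on the kernel side. For context, a hedged sketch of the userspace
loop that would consume it, reusing uffd and page from the first sketch: the
event arrives as a UFFD_EVENT_PAGEFAULT message with UFFD_PAGEFAULT_FLAG_MINOR
set, and is resolved with UFFDIO_CONTINUE once a later commit in this series
hooks that ioctl up for shmem:

    /* Sketch of a minor-fault service loop. UFFDIO_CONTINUE on shmem
     * only works after the follow-up commit in this series lands. */
    struct uffd_msg msg;
    while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
            if (msg.event != UFFD_EVENT_PAGEFAULT ||
                !(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR))
                    continue;

            /* The page is already in the page cache; update it through a
             * second, unregistered mapping if needed, then install it. */
            struct uffdio_continue cont = {
                    .range = {
                            .start = msg.arg.pagefault.address & ~(page - 1),
                            .len = page,
                    },
            };
            ioctl(uffd, UFFDIO_CONTINUE, &cont);
    }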