binder: use per-vma lock in page installation
author Carlos Llamas <cmllamas@google.com>
Tue, 10 Dec 2024 14:31:03 +0000 (14:31 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 24 Dec 2024 08:35:23 +0000 (09:35 +0100)
Use per-vma locking for concurrent page installations; this minimizes
contention with unrelated vmas, improving performance. The mmap_lock
is still acquired when needed, e.g. before get_user_pages_remote().
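
For reference, the resulting locking pattern looks roughly like this
(a minimal sketch, not the patch itself; with_vma_read_locked() and
the do_work() callback are hypothetical names for illustration, while
lock_vma_under_rcu(), vma_end_read(), vma_lookup() and the mmap_lock
helpers are the kernel APIs the patch uses):

    #include <linux/mm.h>

    /* hypothetical wrapper for illustration, not a kernel API */
    static int with_vma_read_locked(struct mm_struct *mm, unsigned long addr,
                                    int (*do_work)(struct vm_area_struct *vma,
                                                   unsigned long addr))
    {
            struct vm_area_struct *vma;
            int ret = -ESRCH;

            /* fast path: lock only the vma covering addr, under RCU */
            vma = lock_vma_under_rcu(mm, addr);
            if (vma) {
                    ret = do_work(vma, addr);
                    vma_end_read(vma);
                    return ret;
            }

            /* slow path: per-vma lock unavailable, take the mmap_lock */
            mmap_read_lock(mm);
            vma = vma_lookup(mm, addr);
            if (vma)
                    ret = do_work(vma, addr);
            mmap_read_unlock(mm);

            return ret;
    }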

Many thanks to Barry Song, who posted a similar approach [1].

Link: https://lore.kernel.org/all/20240902225009.34576-1-21cnbao@gmail.com/ [1]
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-8-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder_alloc.c

index 9cb47e1bc6bec1de3d15463bfa8e7ab22af4ca50..f86bd6ded4f465a1fcc084b32cc58efa3fa456a6 100644
@@ -233,6 +233,53 @@ static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
        return smp_load_acquire(&alloc->mapped);
 }
 
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+                                      unsigned long addr)
+{
+       struct mm_struct *mm = alloc->mm;
+       struct page *page;
+       long npages = 0;
+
+       /*
+        * Find an existing page in the remote mm. If missing,
+        * don't attempt to fault-in; just propagate an error.
+        */
+       mmap_read_lock(mm);
+       if (binder_alloc_is_mapped(alloc))
+               npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+                                              &page, NULL);
+       mmap_read_unlock(mm);
+
+       return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+                             unsigned long addr,
+                             struct page *page)
+{
+       struct mm_struct *mm = alloc->mm;
+       struct vm_area_struct *vma;
+       int ret = -ESRCH;
+
+       /* attempt per-vma lock first */
+       vma = lock_vma_under_rcu(mm, addr);
+       if (vma) {
+               if (binder_alloc_is_mapped(alloc))
+                       ret = vm_insert_page(vma, addr, page);
+               vma_end_read(vma);
+               return ret;
+       }
+
+       /* fall back to mmap_lock */
+       mmap_read_lock(mm);
+       vma = vma_lookup(mm, addr);
+       if (vma && binder_alloc_is_mapped(alloc))
+               ret = vm_insert_page(vma, addr, page);
+       mmap_read_unlock(mm);
+
+       return ret;
+}
+
 static struct page *binder_page_alloc(struct binder_alloc *alloc,
                                      unsigned long index)
 {
@@ -268,9 +315,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
                                      unsigned long index,
                                      unsigned long addr)
 {
-       struct vm_area_struct *vma;
        struct page *page;
-       long npages;
        int ret;
 
        if (!mmget_not_zero(alloc->mm))
@@ -282,16 +327,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
                goto out;
        }
 
-       mmap_read_lock(alloc->mm);
-       vma = vma_lookup(alloc->mm, addr);
-       if (!vma || !binder_alloc_is_mapped(alloc)) {
-               binder_free_page(page);
-               pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
-               ret = -ESRCH;
-               goto unlock;
-       }
-
-       ret = vm_insert_page(vma, addr, page);
+       ret = binder_page_insert(alloc, addr, page);
        switch (ret) {
        case -EBUSY:
                /*
@@ -301,9 +337,8 @@ static int binder_install_single_page(struct binder_alloc *alloc,
                 */
                ret = 0;
                binder_free_page(page);
-               npages = get_user_pages_remote(alloc->mm, addr, 1,
-                                              FOLL_NOFAULT, &page, NULL);
-               if (npages <= 0) {
+               page = binder_page_lookup(alloc, addr);
+               if (!page) {
                        pr_err("%d: failed to find page at offset %lx\n",
                               alloc->pid, addr - alloc->vm_start);
                        ret = -ESRCH;
@@ -321,8 +356,6 @@ static int binder_install_single_page(struct binder_alloc *alloc,
                ret = -ENOMEM;
                break;
        }
-unlock:
-       mmap_read_unlock(alloc->mm);
 out:
        mmput_async(alloc->mm);
        return ret;