From: Carlos Llamas <cmllamas@google.com>
Date: Tue, 10 Dec 2024 14:31:05 +0000 (+0000)
Subject: binder: use per-vma lock in page reclaiming
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=95bc2d4a9020efcd7858c91e68e9f4e842e3e8c8;p=users%2Fdwmw2%2Flinux.git

binder: use per-vma lock in page reclaiming

Use per-vma locking in the shrinker's callback when reclaiming pages,
similar to the page installation logic. This minimizes contention with
unrelated vmas, improving performance. The mmap_lock is still acquired
if the per-vma lock cannot be obtained.

Cc: Suren Baghdasaryan <surenb@google.com>
Suggested-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-10-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b2b97ff19ba2d..fcfaf1b899c8f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1143,19 +1143,28 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	struct vm_area_struct *vma;
 	struct page *page_to_free;
 	unsigned long page_addr;
+	int mm_locked = 0;
 	size_t index;
 
 	if (!mmget_not_zero(mm))
 		goto err_mmget;
-	if (!mmap_read_trylock(mm))
-		goto err_mmap_read_lock_failed;
-	if (!mutex_trylock(&alloc->mutex))
-		goto err_get_alloc_mutex_failed;
 	index = mdata->page_index;
 	page_addr = alloc->vm_start + index * PAGE_SIZE;
 
-	vma = vma_lookup(mm, page_addr);
+	/* attempt per-vma lock first */
+	vma = lock_vma_under_rcu(mm, page_addr);
+	if (!vma) {
+		/* fall back to mmap_lock */
+		if (!mmap_read_trylock(mm))
+			goto err_mmap_read_lock_failed;
+		mm_locked = 1;
+		vma = vma_lookup(mm, page_addr);
+	}
+
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
 	/*
 	 * Since a binder_alloc can only be mapped once, we ensure
 	 * the vma corresponds to this mapping by checking whether
@@ -1183,7 +1192,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	}
 
 	mutex_unlock(&alloc->mutex);
-	mmap_read_unlock(mm);
+	if (mm_locked)
+		mmap_read_unlock(mm);
+	else
+		vma_end_read(vma);
 	mmput_async(mm);
 	binder_free_page(page_to_free);
 
@@ -1192,7 +1204,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 err_invalid_vma:
 	mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
-	mmap_read_unlock(mm);
+	if (mm_locked)
+		mmap_read_unlock(mm);
+	else
+		vma_end_read(vma);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
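
For readers tracing the new locking scheme: the pattern the patch
open-codes could be factored into a helper pair, sketched below with
hypothetical names (shrink_lock_vma()/shrink_unlock_vma() are
illustrative only, not part of the patch). The idea is to prefer the
per-vma read lock via lock_vma_under_rcu() and fall back to a
non-blocking mmap_lock attempt, recording which lock was taken so the
matching unlock can be issued later.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Hypothetical helper, for illustration only: find and lock the VMA
 * covering @addr without sleeping. On success, *mm_locked tells the
 * caller which lock is held: false means the VMA itself is read-locked
 * (release with vma_end_read()), true means mmap_lock is held for
 * reading (release with mmap_read_unlock()). Returns NULL with no
 * locks held if the VMA is absent or no lock could be taken.
 */
static struct vm_area_struct *shrink_lock_vma(struct mm_struct *mm,
					      unsigned long addr,
					      bool *mm_locked)
{
	struct vm_area_struct *vma;

	*mm_locked = false;

	/* Fast path: per-vma read lock, no mmap_lock contention. */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma)
		return vma;

	/* Fallback: trylock only, a shrinker callback must not block. */
	if (!mmap_read_trylock(mm))
		return NULL;

	vma = vma_lookup(mm, addr);
	if (!vma) {
		mmap_read_unlock(mm);
		return NULL;
	}

	*mm_locked = true;
	return vma;
}

/* Hypothetical counterpart: drop whichever lock shrink_lock_vma() took. */
static void shrink_unlock_vma(struct mm_struct *mm,
			      struct vm_area_struct *vma, bool mm_locked)
{
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
}

One detail worth noting: when CONFIG_PER_VMA_LOCK is disabled,
lock_vma_under_rcu() is a static inline stub that returns NULL, so this
sketch (like the patch itself) degrades transparently to the mmap_lock
path on such configurations.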