binder: replace alloc->vma with alloc->mapped
author     Carlos Llamas <cmllamas@google.com>
           Tue, 10 Dec 2024 14:31:01 +0000 (14:31 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 24 Dec 2024 08:35:23 +0000 (09:35 +0100)
It is unsafe to use alloc->vma outside of the mmap_lock. Instead, add a
new boolean alloc->mapped to track the vma state (mapped or unmapped) and
use it in place of alloc->vma to validate several paths.

Using alloc->vma has caused several performance and security issues in
the past. Now that every user has been converted to either vma_lookup()
or the alloc->mapped state, the field can finally be removed.
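
For context, the helpers added below pair an smp_store_release() with an
smp_load_acquire() so that a reader observing alloc->mapped == true also
observes the allocator state published before the flag was set. The
following is a minimal userspace C11 analogue of that pairing; the struct,
field names, and two-step scenario are illustrative stand-ins, not driver
code.

/*
 * Userspace C11 analogue of the release/acquire pairing used on
 * alloc->mapped (illustrative only; the kernel side uses
 * smp_store_release()/smp_load_acquire()).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct alloc_state {
	unsigned long buffer;	/* stand-in for fields set up during mmap */
	atomic_bool mapped;	/* stand-in for alloc->mapped */
};

/* Writer: publish the initialized state (mirrors the mmap handler). */
static void set_mapped(struct alloc_state *a, bool state)
{
	/* release: all prior initialization is visible before mapped == true */
	atomic_store_explicit(&a->mapped, state, memory_order_release);
}

/* Reader: only trust the other fields after observing mapped == true. */
static bool is_mapped(struct alloc_state *a)
{
	/* acquire: pairs with the release store above */
	return atomic_load_explicit(&a->mapped, memory_order_acquire);
}

int main(void)
{
	struct alloc_state a = { 0 };

	a.buffer = 0x1000;	/* initialization done before publishing */
	set_mapped(&a, true);

	if (is_mapped(&a))
		printf("mapped, buffer=%#lx\n", a.buffer);
	return 0;
}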

Cc: Minchan Kim <minchan@kernel.org>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-6-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c

drivers/android/binder_alloc.c
index 3e30ac5b4861d8fc3fa6dbe168ed2258aae0a9ba..ed79d7c146c8bb6fa4b4f335165295cbdf8235d4 100644
@@ -220,6 +220,19 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
        }
 }
 
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
+{
+       /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+       smp_store_release(&alloc->mapped, state);
+}
+
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+       /* pairs with smp_store_release in binder_alloc_set_mapped() */
+       return smp_load_acquire(&alloc->mapped);
+}
+
 static struct page *binder_page_alloc(struct binder_alloc *alloc,
                                      unsigned long index)
 {
@@ -271,7 +284,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 
        mmap_read_lock(alloc->mm);
        vma = vma_lookup(alloc->mm, addr);
-       if (!vma || vma != alloc->vma) {
+       if (!vma || !binder_alloc_is_mapped(alloc)) {
                binder_free_page(page);
                pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
                ret = -ESRCH;
@@ -379,20 +392,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
        }
 }
 
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
-               struct vm_area_struct *vma)
-{
-       /* pairs with smp_load_acquire in binder_alloc_get_vma() */
-       smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
-               struct binder_alloc *alloc)
-{
-       /* pairs with smp_store_release in binder_alloc_set_vma() */
-       return smp_load_acquire(&alloc->vma);
-}
-
 static void debug_no_space_locked(struct binder_alloc *alloc)
 {
        size_t largest_alloc_size = 0;
@@ -626,7 +625,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
        int ret;
 
        /* Check binder_alloc is fully initialized */
-       if (!binder_alloc_get_vma(alloc)) {
+       if (!binder_alloc_is_mapped(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
@@ -908,7 +907,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        alloc->free_async_space = alloc->buffer_size / 2;
 
        /* Signal binder_alloc is fully initialized */
-       binder_alloc_set_vma(alloc, vma);
+       binder_alloc_set_mapped(alloc, true);
 
        return 0;
 
@@ -938,7 +937,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
        buffers = 0;
        mutex_lock(&alloc->mutex);
-       BUG_ON(alloc->vma);
+       BUG_ON(alloc->mapped);
 
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -1044,7 +1043,7 @@ void binder_alloc_print_pages(struct seq_file *m,
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
         */
-       if (binder_alloc_get_vma(alloc) != NULL) {
+       if (binder_alloc_is_mapped(alloc)) {
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        page = binder_get_installed_page(alloc, i);
                        if (!page)
@@ -1084,12 +1083,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  * @alloc: binder_alloc for this proc
  *
  * Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
  * allocating more buffers.
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       binder_alloc_set_vma(alloc, NULL);
+       binder_alloc_set_mapped(alloc, false);
 }
 
 /**
@@ -1125,7 +1124,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        page_addr = alloc->buffer + index * PAGE_SIZE;
 
        vma = vma_lookup(mm, page_addr);
-       if (vma && vma != binder_alloc_get_vma(alloc))
+       /*
+        * Since a binder_alloc can only be mapped once, we ensure
+        * the vma corresponds to this mapping by checking whether
+        * the binder_alloc is still mapped.
+        */
+       if (vma && !binder_alloc_is_mapped(alloc))
                goto err_invalid_vma;
 
        trace_binder_unmap_kernel_start(alloc, index);
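
To summarize the pattern the binder_alloc.c hunks converge on: callers look
the VMA up fresh under mmap_read_lock() and consult the mapped flag, rather
than comparing against a cached vma pointer, to decide whether the mapping
is still theirs. Below is a standalone sketch of that check shape; the
types and helpers (vma_lookup_stub(), install_page_stub(), and so on) are
made up for illustration and are not binder code.

#include <stdbool.h>
#include <stdio.h>

struct vm_area_struct { int dummy; };	/* stub, not the kernel type */

struct binder_alloc_stub {
	bool mapped;		/* stand-in for alloc->mapped */
};

/* stand-in for vma_lookup(); returns NULL once the range is unmapped */
static struct vm_area_struct *vma_lookup_stub(unsigned long addr)
{
	static struct vm_area_struct vma;
	(void)addr;
	return &vma;
}

/* The shape of the new check in binder_install_single_page(): */
static int install_page_stub(struct binder_alloc_stub *alloc, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* the kernel code holds mmap_read_lock(alloc->mm) around this block */
	vma = vma_lookup_stub(addr);
	if (!vma || !alloc->mapped) {
		/* no vma, or the alloc was torn down: bail like the real code (-ESRCH) */
		return -1;
	}

	/* ... vm_insert_page() would follow here in the real driver ... */
	return 0;
}

int main(void)
{
	struct binder_alloc_stub alloc = { .mapped = true };

	printf("install: %d\n", install_page_stub(&alloc, 0x1000));
	alloc.mapped = false;
	printf("after vma_close: %d\n", install_page_stub(&alloc, 0x1000));
	return 0;
}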
drivers/android/binder_alloc.h
index d71f99189ef5c2dc0adc856b90c5abf98cf284f9..3ebb12afd4de43b833be7e3f775f3d18fe007f65 100644
@@ -82,8 +82,6 @@ static inline struct list_head *page_to_lru(struct page *p)
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma:                vm_area_struct passed to mmap_handler
- *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
  * @buffers:            list of all buffers for this proc
@@ -96,6 +94,8 @@ static inline struct list_head *page_to_lru(struct page *p)
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
+ * @mapped:             whether the vm area is mapped, each binder instance is
+ *                      allowed a single mapping throughout its lifetime
  * @oneway_spam_detected: %true if oneway spam detection fired, clear that
  * flag once the async buffer has returned to a healthy state
  *
@@ -106,7 +106,6 @@ static inline struct list_head *page_to_lru(struct page *p)
  */
 struct binder_alloc {
        struct mutex mutex;
-       struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long buffer;
        struct list_head buffers;
@@ -117,6 +116,7 @@ struct binder_alloc {
        size_t buffer_size;
        int pid;
        size_t pages_high;
+       bool mapped;
        bool oneway_spam_detected;
 };
 
drivers/android/binder_alloc_selftest.c
index a4c650843beecf9754f8ee10c46acb0b03a417e5..6a64847a8555f0ba74f46a8f2a3eb7076516d2fa 100644
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
        if (!binder_selftest_run)
                return;
        mutex_lock(&binder_selftest_lock);
-       if (!binder_selftest_run || !alloc->vma)
+       if (!binder_selftest_run || !alloc->mapped)
                goto done;
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);
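
Taken together, the three files give alloc->mapped a simple lifecycle:
binder_alloc_mmap_handler() sets it once the mapping is fully initialized,
binder_alloc_vma_close() clears it when the address space goes away, and
binder_alloc_deferred_release() expects it to already be clear (the selftest
above likewise only runs while it is set). A small standalone stub of that
lifecycle follows; the helper names merely mirror the driver functions and
are not driver code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct alloc_stub {
	bool mapped;
};

static void mmap_handler(struct alloc_stub *a) { a->mapped = true; }  /* binder_alloc_mmap_handler() */
static void vma_close(struct alloc_stub *a)    { a->mapped = false; } /* binder_alloc_vma_close() */

static void deferred_release(struct alloc_stub *a)
{
	/* mirrors BUG_ON(alloc->mapped): release must follow the unmap */
	assert(!a->mapped);
}

int main(void)
{
	struct alloc_stub a = { .mapped = false };

	mmap_handler(&a);	/* new transactions may now allocate buffers */
	vma_close(&a);		/* address space going away: stop new allocations */
	deferred_release(&a);	/* safe: the flag is already clear */
	printf("final mapped=%d\n", a.mapped);
	return 0;
}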