binder: rename alloc->buffer to vm_start
author Carlos Llamas <cmllamas@google.com>
Tue, 10 Dec 2024 14:31:02 +0000 (14:31 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 24 Dec 2024 08:35:23 +0000 (09:35 +0100)
The alloc->buffer field in struct binder_alloc stores the starting
address of the mapped vma. Rename this field to alloc->vm_start to
better reflect its purpose and to avoid confusion with the binder
buffer concept, e.g. transaction->buffer.

No functional changes in this patch.

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-7-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c
drivers/android/binder_trace.h
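
A minimal sketch (not the in-tree code) of what the rename covers: the field
holds the base user address of the per-proc region mapped via mmap, and page
indices and offsets within that mapping are computed relative to it, as the
hunks below show. The struct and helper names here are illustrative only.

	#include <linux/mm.h>	/* PAGE_SIZE */

	/* Simplified view of struct binder_alloc after the rename. */
	struct binder_alloc_sketch {
		unsigned long vm_start;	/* base of the mapped vma (was ->buffer) */
		size_t buffer_size;	/* total size of the mapping */
	};

	/* Page index of a user address inside the mapping, mirroring binder_alloc.c. */
	static inline size_t binder_page_index(const struct binder_alloc_sketch *alloc,
					       unsigned long page_addr)
	{
		return (page_addr - alloc->vm_start) / PAGE_SIZE;
	}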

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ef353ca13c356acac42b08e4e2fed41a360ccb4f..9962c606cabd6615b9bb4612e994a83154a12aad 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6374,7 +6374,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
                seq_printf(m, " node %d", buffer->target_node->debug_id);
        seq_printf(m, " size %zd:%zd offset %lx\n",
                   buffer->data_size, buffer->offsets_size,
-                  proc->alloc.buffer - buffer->user_data);
+                  proc->alloc.vm_start - buffer->user_data);
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index ed79d7c146c8bb6fa4b4f335165295cbdf8235d4..9cb47e1bc6bec1de3d15463bfa8e7ab22af4ca50 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
 {
        if (list_is_last(&buffer->entry, &alloc->buffers))
-               return alloc->buffer + alloc->buffer_size - buffer->user_data;
+               return alloc->vm_start + alloc->buffer_size - buffer->user_data;
        return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
 
@@ -203,7 +203,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
                size_t index;
                int ret;
 
-               index = (page_addr - alloc->buffer) / PAGE_SIZE;
+               index = (page_addr - alloc->vm_start) / PAGE_SIZE;
                page = binder_get_installed_page(alloc, index);
                if (!page)
                        continue;
@@ -305,7 +305,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
                                               FOLL_NOFAULT, &page, NULL);
                if (npages <= 0) {
                        pr_err("%d: failed to find page at offset %lx\n",
-                              alloc->pid, addr - alloc->buffer);
+                              alloc->pid, addr - alloc->vm_start);
                        ret = -ESRCH;
                        break;
                }
@@ -317,7 +317,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
        default:
                binder_free_page(page);
                pr_err("%d: %s failed to insert page at offset %lx with %d\n",
-                      alloc->pid, __func__, addr - alloc->buffer, ret);
+                      alloc->pid, __func__, addr - alloc->vm_start, ret);
                ret = -ENOMEM;
                break;
        }
@@ -342,7 +342,7 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
                unsigned long index;
                int ret;
 
-               index = (page_addr - alloc->buffer) / PAGE_SIZE;
+               index = (page_addr - alloc->vm_start) / PAGE_SIZE;
                if (binder_get_installed_page(alloc, index))
                        continue;
 
@@ -371,7 +371,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
                unsigned long index;
                bool on_lru;
 
-               index = (page_addr - alloc->buffer) / PAGE_SIZE;
+               index = (page_addr - alloc->vm_start) / PAGE_SIZE;
                page = binder_get_installed_page(alloc, index);
 
                if (page) {
@@ -723,8 +723,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
-       BUG_ON(buffer->user_data < alloc->buffer);
-       BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+       BUG_ON(buffer->user_data < alloc->vm_start);
+       BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
 
        if (buffer->async_transaction) {
                alloc->free_async_space += buffer_size;
@@ -783,7 +783,7 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                                          pgoff_t *pgoffp)
 {
        binder_size_t buffer_space_offset = buffer_offset +
-               (buffer->user_data - alloc->buffer);
+               (buffer->user_data - alloc->vm_start);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
 
@@ -882,7 +882,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                                   SZ_4M);
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->buffer = vma->vm_start;
+       alloc->vm_start = vma->vm_start;
 
        alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
                                sizeof(alloc->pages[0]),
@@ -900,7 +900,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                goto err_alloc_buf_struct_failed;
        }
 
-       buffer->user_data = alloc->buffer;
+       buffer->user_data = alloc->vm_start;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
@@ -915,7 +915,7 @@ err_alloc_buf_struct_failed:
        kvfree(alloc->pages);
        alloc->pages = NULL;
 err_alloc_pages_failed:
-       alloc->buffer = 0;
+       alloc->vm_start = 0;
        mutex_lock(&binder_alloc_mmap_lock);
        alloc->buffer_size = 0;
 err_already_mapped:
@@ -1016,7 +1016,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
                           buffer->debug_id,
-                          buffer->user_data - alloc->buffer,
+                          buffer->user_data - alloc->vm_start,
                           buffer->data_size, buffer->offsets_size,
                           buffer->extra_buffers_size,
                           buffer->transaction ? "active" : "delivered");
@@ -1121,7 +1121,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                goto err_get_alloc_mutex_failed;
 
        index = mdata->page_index;
-       page_addr = alloc->buffer + index * PAGE_SIZE;
+       page_addr = alloc->vm_start + index * PAGE_SIZE;
 
        vma = vma_lookup(mm, page_addr);
        /*
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 3ebb12afd4de43b833be7e3f775f3d18fe007f65..feecd741424101df3f8901d473c2c03146923a0a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -83,7 +83,7 @@ static inline struct list_head *page_to_lru(struct page *p)
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
  * @mm:                 copy of task->mm (invariant after open)
- * @buffer:             base of per-proc address space mapped via mmap
+ * @vm_start:           base of per-proc address space mapped via mmap
  * @buffers:            list of all buffers for this proc
  * @free_buffers:       rb tree of buffers available for allocation
  *                      sorted by size
@@ -107,7 +107,7 @@ static inline struct list_head *page_to_lru(struct page *p)
 struct binder_alloc {
        struct mutex mutex;
        struct mm_struct *mm;
-       unsigned long buffer;
+       unsigned long vm_start;
        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 6a64847a8555f0ba74f46a8f2a3eb7076516d2fa..c88735c548485466f485dd9d66501899a46a7731 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,7 +104,7 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
        end = PAGE_ALIGN(buffer->user_data + size);
        page_addr = buffer->user_data;
        for (; page_addr < end; page_addr += PAGE_SIZE) {
-               page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+               page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
                if (!alloc->pages[page_index] ||
                    !list_empty(page_to_lru(alloc->pages[page_index]))) {
                        pr_err("expect alloc but is %s at page index %d\n",
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0f89ab7ab6e709fd93e20114034cf..16de1b9e72f76167272e0989674b225ab488dc51 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
        TP_fast_assign(
                __entry->proc = alloc->pid;
                __entry->allocate = allocate;
-               __entry->offset = start - alloc->buffer;
+               __entry->offset = start - alloc->vm_start;
                __entry->size = end - start;
        ),
        TP_printk("proc=%d allocate=%d offset=%zu size=%zu",