 }
 
 static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+                         unsigned long index,
                          struct page *page)
 {
        /* Pairs with acquire in binder_get_installed_page() */
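+       /*
+        * The release also publishes the shrinker metadata attached by
+        * binder_page_alloc() before the page can be observed as
+        * installed.
+        */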
-       smp_store_release(&lru_page->page_ptr, page);
+       smp_store_release(&alloc->pages[index], page);
 }
 
 static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
 {
        /* Pairs with release in binder_set_installed_page() */
-       return smp_load_acquire(&lru_page->page_ptr);
+       return smp_load_acquire(&alloc->pages[index]);
 }
 
 static void binder_lru_freelist_add(struct binder_alloc *alloc,
                                    unsigned long start, unsigned long end)
 {
-       struct binder_lru_page *page;
        unsigned long page_addr;
+       struct page *page;
 
        trace_binder_update_page_range(alloc, false, start, end);
 
                int ret;
 
                index = (page_addr - alloc->buffer) / PAGE_SIZE;
-               page = &alloc->pages[index];
-
-               if (!binder_get_installed_page(page))
+               page = binder_get_installed_page(alloc, index);
+               if (!page)
                        continue;
 
                trace_binder_free_lru_start(alloc, index);
 
                ret = list_lru_add(&binder_freelist,
-                                  &page->lru,
-                                  page_to_nid(page->page_ptr),
+                                  page_to_lru(page),
+                                  page_to_nid(page),
                                   NULL);
                WARN_ON(!ret);
 
        }
 }
 
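+/**
+ * binder_page_alloc() - allocate a page and attach shrinker metadata
+ * @alloc: binder_alloc owning the page range
+ * @index: offset into @alloc->pages[] where the page will be installed
+ *
+ * The attached metadata carries the owning alloc, the page index and
+ * the lru node that used to live in struct binder_lru_page, stashed
+ * under page->private so the shrinker can recover them from a
+ * freelist item.
+ */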
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+                                     unsigned long index)
+{
+       struct binder_shrinker_mdata *mdata;
+       struct page *page;
+
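+       /* buffer pages end up mapped to userspace, so they must start zeroed */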
+       page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+       if (!page)
+               return NULL;
+
+       /* allocate and install shrinker metadata under page->private */
+       mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+       if (!mdata) {
+               __free_page(page);
+               return NULL;
+       }
+
+       mdata->alloc = alloc;
+       mdata->page_index = index;
+       INIT_LIST_HEAD(&mdata->lru);
+       set_page_private(page, (unsigned long)mdata);
+
+       return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+       kfree((struct binder_shrinker_mdata *)page_private(page));
+       __free_page(page);
+}
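+
+/*
+ * struct binder_shrinker_mdata and page_to_lru() are provided by the
+ * header side of this change (not shown here); a sketch of the assumed
+ * definitions:
+ *
+ *      struct binder_shrinker_mdata {
+ *              struct list_head lru;
+ *              struct binder_alloc *alloc;
+ *              unsigned long page_index;
+ *      };
+ *
+ *      static inline struct list_head *page_to_lru(struct page *p)
+ *      {
+ *              return &((struct binder_shrinker_mdata *)page_private(p))->lru;
+ *      }
+ */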
+
 static int binder_install_single_page(struct binder_alloc *alloc,
-                                     struct binder_lru_page *lru_page,
+                                     unsigned long index,
                                      unsigned long addr)
 {
        struct vm_area_struct *vma;
        if (!mmget_not_zero(alloc->mm))
                return -ESRCH;
 
-       page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+       page = binder_page_alloc(alloc, index);
        if (!page) {
-               pr_err("%d: failed to allocate page\n", alloc->pid);
                ret = -ENOMEM;
                goto out;
        }
        mmap_read_lock(alloc->mm);
        vma = vma_lookup(alloc->mm, addr);
        if (!vma || vma != alloc->vma) {
-               __free_page(page);
+               binder_free_page(page);
                pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
                ret = -ESRCH;
                goto unlock;
        case -EBUSY:
                /*
                 * EBUSY is ok. Someone installed the pte first but the
-                * lru_page->page_ptr has not been updated yet. Discard
+                * alloc->pages[index] has not been updated yet. Discard
                 * our page and look up the one already installed.
                 */
                ret = 0;
-               __free_page(page);
+               binder_free_page(page);
                npages = get_user_pages_remote(alloc->mm, addr, 1,
                                               FOLL_NOFAULT, &page, NULL);
                if (npages <= 0) {
                fallthrough;
        case 0:
                /* Mark page installation complete and safe to use */
-               binder_set_installed_page(lru_page, page);
+               binder_set_installed_page(alloc, index, page);
                break;
        default:
-               __free_page(page);
+               binder_free_page(page);
                pr_err("%d: %s failed to insert page at offset %lx with %d\n",
                       alloc->pid, __func__, addr - alloc->buffer, ret);
                ret = -ENOMEM;
                                       struct binder_buffer *buffer,
                                       size_t size)
 {
-       struct binder_lru_page *page;
        unsigned long start, final;
        unsigned long page_addr;
 
                int ret;
 
                index = (page_addr - alloc->buffer) / PAGE_SIZE;
-               page = &alloc->pages[index];
-
-               if (binder_get_installed_page(page))
+               if (binder_get_installed_page(alloc, index))
                        continue;
 
                trace_binder_alloc_page_start(alloc, index);
 
-               ret = binder_install_single_page(alloc, page, page_addr);
+               ret = binder_install_single_page(alloc, index, page_addr);
                if (ret)
                        return ret;
 
 static void binder_lru_freelist_del(struct binder_alloc *alloc,
                                    unsigned long start, unsigned long end)
 {
-       struct binder_lru_page *page;
        unsigned long page_addr;
+       struct page *page;
 
        trace_binder_update_page_range(alloc, true, start, end);
 
                bool on_lru;
 
                index = (page_addr - alloc->buffer) / PAGE_SIZE;
-               page = &alloc->pages[index];
+               page = binder_get_installed_page(alloc, index);
 
-               if (page->page_ptr) {
+               if (page) {
                        trace_binder_alloc_lru_start(alloc, index);
 
                        on_lru = list_lru_del(&binder_freelist,
-                                             &page->lru,
-                                             page_to_nid(page->page_ptr),
+                                             page_to_lru(page),
+                                             page_to_nid(page),
                                              NULL);
                        WARN_ON(!on_lru);
 
                (buffer->user_data - alloc->buffer);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
-       struct binder_lru_page *lru_page;
 
-       lru_page = &alloc->pages[index];
        *pgoffp = pgoff;
-       return lru_page->page_ptr;
+
+       return alloc->pages[index];
 }
 
 /**
 {
        struct binder_buffer *buffer;
        const char *failure_string;
-       int ret, i;
+       int ret;
 
        if (unlikely(vma->vm_mm != alloc->mm)) {
                ret = -EINVAL;
        alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
                                sizeof(alloc->pages[0]),
                                GFP_KERNEL);
-       if (alloc->pages == NULL) {
+       if (!alloc->pages) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
 
-       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-               alloc->pages[i].alloc = alloc;
-               INIT_LIST_HEAD(&alloc->pages[i].lru);
-       }
-
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                int i;
 
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+                       struct page *page;
                        bool on_lru;
 
-                       if (!alloc->pages[i].page_ptr)
+                       page = binder_get_installed_page(alloc, i);
+                       if (!page)
                                continue;
 
                        on_lru = list_lru_del(&binder_freelist,
-                                             &alloc->pages[i].lru,
-                                             page_to_nid(alloc->pages[i].page_ptr),
+                                             page_to_lru(page),
+                                             page_to_nid(page),
                                              NULL);
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                     "%s: %d: page %d %s\n",
                                     __func__, alloc->pid, i,
                                     on_lru ? "on lru" : "active");
-                       __free_page(alloc->pages[i].page_ptr);
+                       binder_free_page(page);
                        page_count++;
                }
        }
 void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
 {
-       struct binder_lru_page *page;
+       struct page *page;
        int i;
        int active = 0;
        int lru = 0;
         */
        if (binder_alloc_get_vma(alloc) != NULL) {
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                       page = &alloc->pages[i];
-                       if (!page->page_ptr)
+                       page = binder_get_installed_page(alloc, i);
+                       if (!page)
                                free++;
-                       else if (list_empty(&page->lru))
+                       else if (list_empty(page_to_lru(page)))
                                active++;
                        else
                                lru++;
                                       void *cb_arg)
        __must_hold(&lru->lock)
 {
-       struct binder_lru_page *page = container_of(item, typeof(*page), lru);
-       struct binder_alloc *alloc = page->alloc;
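+       /* the freelist item is the lru node embedded in the page's metadata */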
+       struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+       struct binder_alloc *alloc = mdata->alloc;
        struct mm_struct *mm = alloc->mm;
        struct vm_area_struct *vma;
        struct page *page_to_free;
                goto err_mmap_read_lock_failed;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;
-       if (!page->page_ptr)
-               goto err_page_already_freed;
 
-       index = page - alloc->pages;
+       index = mdata->page_index;
        page_addr = alloc->buffer + index * PAGE_SIZE;
 
        vma = vma_lookup(mm, page_addr);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
-       page_to_free = page->page_ptr;
-       page->page_ptr = NULL;
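+       /* take the page out of the array before it is freed below */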
+       page_to_free = alloc->pages[index];
+       binder_set_installed_page(alloc, index, NULL);
 
        trace_binder_unmap_kernel_end(alloc, index);
 
        mutex_unlock(&alloc->mutex);
        mmap_read_unlock(mm);
        mmput_async(mm);
-       __free_page(page_to_free);
+       binder_free_page(page_to_free);
 
        return LRU_REMOVED_RETRY;
 
 err_invalid_vma:
-err_page_already_freed:
        mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
        mmap_read_unlock(mm);