        area->nr_pages = npages;
        area->pages = pages;
 
-       if (map_vm_area(area, prot_rwx, &pages)) {
+       if (map_vm_area(area, prot_rwx, pages)) {
                vunmap(area->addr);
                goto error;
        }
 
 static __init int map_switcher(void)
 {
        int i, err;
-       struct page **pagep;
 
        /*
         * Map the Switcher in to high memory.
         * This code actually sets up the pages we've allocated to appear at
         * switcher_addr.  map_vm_area() takes the vma we allocated above, the
         * kind of pages we're mapping (kernel pages), and a pointer to our
-        * array of struct pages.  It increments that pointer, but we don't
-        * care.
+        * array of struct pages.
         */
-       pagep = lg_switcher_pages;
-       err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+       err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
        if (err) {
                printk("lguest: map_vm_area failed: %i\n", err);
                goto free_vma;
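
The sentence dropped from the comment and the pagep local dropped above go
together: the old map_vm_area() advanced the pointer it was handed (by the
number of pages it mapped), so a caller could not pass a long-lived array such
as lg_switcher_pages directly and had to set up a throwaway cursor first.  A
rough before/after illustration of that calling convention (illustration only,
not part of the patch):

	/* Old prototype: a scratch cursor is required; afterwards pagep
	 * points past the pages that were just mapped. */
	struct page **pagep = lg_switcher_pages;
	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);

	/* New prototype: the array decays to struct page ** and is passed
	 * as-is; nothing of the caller's is modified. */
	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);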
 
 
        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
-               struct page **page_array_ptr;
 
                page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
 
                }
                tmp_area.addr = page_addr;
                tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-               page_array_ptr = page;
-               ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+               ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                               proc->pid, page_addr);
 
 extern struct vm_struct *find_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-                       struct page ***pages);
+                       struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
                                    pgprot_t prot, struct page **pages);
 
 }
 EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
        unsigned long addr = (unsigned long)area->addr;
        unsigned long end = addr + get_vm_area_size(area);
        int err;
 
-       err = vmap_page_range(addr, end, prot, *pages);
-       if (err > 0) {
-               *pages += err;
-               err = 0;
-       }
+       err = vmap_page_range(addr, end, prot, pages);
 
-       return err;
+       return err > 0 ? 0 : err;
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
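
With the extra level of indirection gone, map_vm_area() simply walks the page
array it is given and leaves it untouched, and the positive "number of pages
mapped" return value of vmap_page_range() is collapsed to 0 for callers.  A
minimal sketch of the new calling convention (a hypothetical helper modeled on
the vmalloc.c callers shown below, not code from this patch):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Map two already-allocated pages contiguously into vmalloc space. */
	static void *map_two_pages(struct page *pages[2])
	{
		struct vm_struct *area;

		/* Reserve two pages of vmalloc address space (plus the
		 * implicit guard page). */
		area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
		if (!area)
			return NULL;

		/* The array itself is passed; no &pages, no cursor. */
		if (map_vm_area(area, PAGE_KERNEL, pages)) {
			vunmap(area->addr);	/* also frees the vm_struct */
			return NULL;
		}

		return area->addr;
	}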
 
        if (!area)
                return NULL;
 
-       if (map_vm_area(area, prot, &pages)) {
+       if (map_vm_area(area, prot, pages)) {
                vunmap(area->addr);
                return NULL;
        }
                        cond_resched();
        }
 
-       if (map_vm_area(area, prot, &pages))
+       if (map_vm_area(area, prot, pages))
                goto fail;
        return area->addr;
 
 
 static inline void *__zs_map_object(struct mapping_area *area,
                                struct page *pages[2], int off, int size)
 {
-       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
        area->vm_addr = area->vm->addr;
        return area->vm_addr + off;
 }