spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct spu_context *ctx = vma->vm_file->private_data;
-       unsigned long address = (unsigned long)vmf->virtual_address;
        unsigned long pfn, offset;
 
        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= LS_SIZE)
                return VM_FAULT_SIGBUS;
 
        pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
-                       address, offset);
+                       vmf->address, offset);
 
        if (spu_acquire(ctx))
                return VM_FAULT_NOPAGE;
 
        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
-       vm_insert_pfn(vma, address, pfn);
+       vm_insert_pfn(vma, vmf->address, pfn);
 
        spu_release(ctx);
 
                down_read(&current->mm->mmap_sem);
        } else {
                area = ctx->spu->problem_phys + ps_offs;
-               vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
-                                       (area + offset) >> PAGE_SHIFT);
+               vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
                spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
        }
 
 
                return VM_FAULT_SIGBUS;
 
        if (sym_offset == image->sym_vvar_page) {
-               ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+               ret = vm_insert_pfn(vma, vmf->address,
                                    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_pvti_cpu0_va();
                if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
                        ret = vm_insert_pfn(
                                vma,
-                               (unsigned long)vmf->virtual_address,
+                               vmf->address,
                                __pa(pvti) >> PAGE_SHIFT);
                }
        }
 
        unsigned long pa;
        struct page *page;
 
-       dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
-                                               + agp->aperture.bus_base;
+       dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
        pa = agp->ops->translate(agp, dma_addr);
 
        if (pa == (unsigned long)-EINVAL)
 
         * be because another thread has installed the pte first, so it
         * is no problem.
         */
-       vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       vm_insert_pfn(vma, vmf->address, pfn);
 
        return VM_FAULT_NOPAGE;
 }
 
 static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                struct vm_fault *vmf)
 {
-       unsigned long vaddr = (unsigned long) vmf->virtual_address;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
 
        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-       rc = vm_insert_mixed(vma, vaddr, pfn);
+       rc = vm_insert_mixed(vma, vmf->address, pfn);
 
        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
 
 static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
-       unsigned long addr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
        int ret;
 
-       pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
-       ret = vm_insert_pfn(vma, addr, pfn);
+       pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+       ret = vm_insert_pfn(vma, vmf->address, pfn);
 
        switch (ret) {
        case 0:
 
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
-               resource_size_t offset = (unsigned long)vmf->virtual_address -
-                       vma->vm_start;
+               resource_size_t offset = vmf->address - vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;
        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       offset = vmf->address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-       offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
+       offset = vmf->address - vma->vm_start;
+                                       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);
 
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       offset = vmf->address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
 
        }
 
        /* We don't use vmf->pgoff since that has the fake offset: */
-       pgoff = ((unsigned long)vmf->virtual_address -
-                       vma->vm_start) >> PAGE_SHIFT;
+       pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        page = pages[pgoff];
 
-       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+       VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 
-       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+       ret = vm_insert_page(vma, vmf->address, page);
 
 out:
        switch (ret) {
 
        pgoff_t page_offset;
        int ret;
 
-       page_offset = ((unsigned long)vmf->virtual_address -
-                       vma->vm_start) >> PAGE_SHIFT;
+       page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
        }
 
        pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
+       ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out:
        switch (ret) {
 
                                  psbfb->gtt->offset;
 
        page_num = vma_pages(vma);
-       address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+       address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 
 
        /* Page relative to the VMA start - we must calculate this ourselves
           because vmf->pgoff is the fake GEM offset */
-       page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
-                               >> PAGE_SHIFT;
+       page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        /* CPU view of the page, don't go via the GART for CPU writes */
        if (r->stolen)
                pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
        else
                pfn = page_to_pfn(r->pages[page_offset]);
-       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       ret = vm_insert_pfn(vma, vmf->address, pfn);
 
 fail:
        mutex_unlock(&dev_priv->mmap_mutex);
 
        int ret;
 
        /* We don't use vmf->pgoff since that has the fake offset */
-       page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
-               PAGE_SHIFT;
+       page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
        trace_i915_gem_object_fault(obj, page_offset, true, write);
 
 
        }
 
        /* We don't use vmf->pgoff since that has the fake offset: */
-       pgoff = ((unsigned long)vmf->virtual_address -
-                       vma->vm_start) >> PAGE_SHIFT;
+       pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        pfn = page_to_pfn(pages[pgoff]);
 
-       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+       VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
+       ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
 
        pgoff_t pgoff;
 
        /* We don't use vmf->pgoff since that has the fake offset: */
-       pgoff = ((unsigned long)vmf->virtual_address -
-                       vma->vm_start) >> PAGE_SHIFT;
+       pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        if (omap_obj->pages) {
                omap_gem_cpu_sync(obj, pgoff);
                pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
        }
 
-       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+       VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
+       return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
        struct page *pages[64];  /* XXX is this too much to have on stack? */
        unsigned long pfn;
        pgoff_t pgoff, base_pgoff;
-       void __user *vaddr;
+       unsigned long vaddr;
        int i, ret, slots;
 
        /*
        const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
 
        /* We don't use vmf->pgoff since that has the fake offset: */
-       pgoff = ((unsigned long)vmf->virtual_address -
-                       vma->vm_start) >> PAGE_SHIFT;
+       pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        /*
         * Actual address we start mapping at is rounded down to previous slot
        /* figure out buffer width in slots */
        slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 
-       vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+       vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 
        entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 
 
        pfn = entry->paddr >> PAGE_SHIFT;
 
-       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+       VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
        for (i = n; i > 0; i--) {
-               vm_insert_mixed(vma, (unsigned long)vaddr,
-                               __pfn_to_pfn_t(pfn, PFN_DEV));
+               vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
                pfn += priv->usergart[fmt].stride_pfn;
                vaddr += PAGE_SIZE * m;
        }
 
        if (!bo->pages)
                return VM_FAULT_SIGBUS;
 
-       offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+       offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];
 
-       err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+       err = vm_insert_page(vma, vmf->address, page);
        switch (err) {
        case -EAGAIN:
        case 0:
 
        struct page *page;
        int ret;
        int i;
-       unsigned long address = (unsigned long)vmf->virtual_address;
+       unsigned long address = vmf->address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
 
        unsigned int page_offset;
        int ret = 0;
 
-       page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-               PAGE_SHIFT;
+       page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
        if (!obj->pages)
                return VM_FAULT_SIGBUS;
 
        page = obj->pages[page_offset];
-       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+       ret = vm_insert_page(vma, vmf->address, page);
        switch (ret) {
        case -EAGAIN:
        case 0:
 
 {
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
-       unsigned long vaddr = (unsigned long)vmf->virtual_address;
+       unsigned long vaddr = vmf->address;
        struct page *page;
 
        page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
 
        struct page *page;
 
        dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
-               (unsigned long)vmf->virtual_address,
-               vma->vm_start, vma->vm_end);
+               vmf->address, vma->vm_start, vma->vm_end);
 
        page = alloc_page(GFP_USER | __GFP_DMA32);
        if (!page)
                return VM_FAULT_OOM;
-       clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+       clear_user_highpage(page, vmf->address);
        vmf->page = page;
 
        return 0;
 
 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct cxl_context *ctx = vma->vm_file->private_data;
-       unsigned long address = (unsigned long)vmf->virtual_address;
        u64 area, offset;
 
        offset = vmf->pgoff << PAGE_SHIFT;
 
        pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
-                       __func__, ctx->pe, address, offset);
+                       __func__, ctx->pe, vmf->address, offset);
 
        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                area = ctx->afu->psn_phys;
                return VM_FAULT_SIGBUS;
        }
 
-       vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+       vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 
        mutex_unlock(&ctx->status_mutex);
 
 
        unsigned long paddr, vaddr;
        unsigned long expires;
 
-       vaddr = (unsigned long)vmf->virtual_address;
+       vaddr = vmf->address;
        gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
                vma, vaddr, GSEG_BASE(vaddr));
        STAT(nopfn);
 
        BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
        pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       ret = vm_insert_pfn(vma, vmf->address, pfn);
        mutex_unlock(&buffer->lock);
        if (ret)
                return VM_FAULT_ERROR;
 
                       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
                       vmf->page, vmf->page->mapping, vmf->page->index,
                       (long)vmf->page->flags, page_count(vmf->page),
-                      page_private(vmf->page), vmf->virtual_address);
+                      page_private(vmf->page), (void *)vmf->address);
                if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
                        lock_page(vmf->page);
                        cfio->ft_flags |= VM_FAULT_LOCKED;
        }
 
        if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
-               CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+               CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
                return -EFAULT;
        }
 
        if (cfio->ft_flags & VM_FAULT_OOM) {
-               CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+               CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
                return -ENOMEM;
        }
 
 
 {
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
-              vmf->pgoff, vmf->virtual_address);
+              vmf->pgoff, (void *)vmf->address);
 
        return VM_FAULT_SIGBUS;
 }
 
                struct block_device *bdev, sector_t sector, size_t size,
                void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       unsigned long vaddr = (unsigned long)vmf->virtual_address;
+       unsigned long vaddr = vmf->address;
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
 {
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned long vaddr = (unsigned long)vmf->virtual_address;
+       unsigned long vaddr = vmf->address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        sector_t sector;
        struct iomap iomap = { 0 };
 
        gfp_t gfp_mask;                 /* gfp mask to be used for allocations */
        pgoff_t pgoff;                  /* Logical page offset based on vma */
        unsigned long address;          /* Faulting virtual address */
-       void __user *virtual_address;   /* Faulting virtual address masked by
-                                        * PAGE_MASK */
        pmd_t *pmd;                     /* Pointer to pmd entry matching
                                         * the 'address'
                                         */
 
        struct vm_fault vmf;
        int ret;
 
-       vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+       vmf.address = address & PAGE_MASK;
        vmf.pgoff = page->index;
        vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
        vmf.gfp_mask = __get_fault_gfp_mask(vma);
                struct vm_fault vmf2 = {
                        .page = NULL,
                        .pgoff = linear_page_index(vma, vmf->address),
-                       .virtual_address =
-                               (void __user *)(vmf->address & PAGE_MASK),
+                       .address = vmf->address,
                        .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
                };
                int ret;
        struct vm_fault vmf2;
        int ret;
 
-       vmf2.virtual_address = (void __user *)(vmf->address & PAGE_MASK);
+       vmf2.address = vmf->address;
        vmf2.pgoff = pgoff;
        vmf2.flags = vmf->flags;
        vmf2.page = NULL;
 {
        struct vm_fault vmf = {
                .vma = vma,
-               .address = address,
+               .address = address & PAGE_MASK,
                .flags = flags,
        };
        struct mm_struct *mm = vma->vm_mm;