        struct page *page;
        pte_t new_pte;
        spinlock_t *ptl;
+       unsigned long haddr = address & huge_page_mask(h);
 
        /*
         * Currently, we are forced to kill the process in the event the
                        u32 hash;
                        struct vm_fault vmf = {
                                .vma = vma,
-                               .address = address,
+                               .address = haddr,
                                .flags = flags,
                                /*
                                 * Hard to debug if it ends up being
                         * fault to make calling code simpler.
                         */
                        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-                                                       idx, address);
+                                                       idx, haddr);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        ret = handle_userfault(&vmf, VM_UFFD_MISSING);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }
 
-               page = alloc_huge_page(vma, address, 0);
+               page = alloc_huge_page(vma, haddr, 0);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        if (ret == -ENOMEM)
         * the spinlock.
         */
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-               if (vma_needs_reservation(h, vma, address) < 0) {
+               if (vma_needs_reservation(h, vma, haddr) < 0) {
                        ret = VM_FAULT_OOM;
                        goto backout_unlocked;
                }
                /* Just decrements count, does not deallocate */
-               vma_end_reservation(h, vma, address);
+               vma_end_reservation(h, vma, haddr);
        }
 
        ptl = huge_pte_lock(h, mm, ptep);
 
        if (anon_rmap) {
                ClearPagePrivate(page);
-               hugepage_add_new_anon_rmap(page, vma, address);
+               hugepage_add_new_anon_rmap(page, vma, haddr);
        } else
                page_dup_rmap(page, true);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
-       set_huge_pte_at(mm, address, ptep, new_pte);
+       set_huge_pte_at(mm, haddr, ptep, new_pte);
 
        hugetlb_count_add(pages_per_huge_page(h), mm);
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
-               ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
+               ret = hugetlb_cow(mm, vma, haddr, ptep, page, ptl);
        }
 
        spin_unlock(ptl);
        unlock_page(page);
 out:
        return ret;
 
 backout:
        spin_unlock(ptl);
 backout_unlocked:
        unlock_page(page);
-       restore_reserve_on_error(h, vma, address, page);
+       restore_reserve_on_error(h, vma, haddr, page);
        put_page(page);
        goto out;
 }
        struct hstate *h = hstate_vma(vma);
        struct address_space *mapping;
        int need_wait_lock = 0;
+       unsigned long haddr = address & huge_page_mask(h);
 
-       address &= huge_page_mask(h);
-
-       ptep = huge_pte_offset(mm, address, huge_page_size(h));
+       ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (ptep) {
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
                        return VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
        } else {
-               ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+               ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
                if (!ptep)
                        return VM_FAULT_OOM;
        }
 
        mapping = vma->vm_file->f_mapping;
-       idx = vma_hugecache_offset(h, vma, address);
+       idx = vma_hugecache_offset(h, vma, haddr);
 
        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
-       hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+       hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
        entry = huge_ptep_get(ptep);
         * consumed.
         */
        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
-               if (vma_needs_reservation(h, vma, address) < 0) {
+               if (vma_needs_reservation(h, vma, haddr) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
                }
                /* Just decrements count, does not deallocate */
-               vma_end_reservation(h, vma, address);
+               vma_end_reservation(h, vma, haddr);
 
                if (!(vma->vm_flags & VM_MAYSHARE))
                        pagecache_page = hugetlbfs_pagecache_page(h,
-                                                               vma, address);
+                                                               vma, haddr);
        }
 
        ptl = huge_pte_lock(h, mm, ptep);
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!huge_pte_write(entry)) {
-                       ret = hugetlb_cow(mm, vma, address, ptep,
+                       ret = hugetlb_cow(mm, vma, haddr, ptep,
                                          pagecache_page, ptl);
                        goto out_put_page;
                }
                entry = huge_pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+       if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
                                                flags & FAULT_FLAG_WRITE))
-               update_mmu_cache(vma, address, ptep);
+               update_mmu_cache(vma, haddr, ptep);
 out_put_page:
        if (page != pagecache_page)
                unlock_page(page);