fault->pfn &= ~mask;
 }
 
-void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
-                               kvm_pfn_t *pfnp, u8 *goal_levelp)
+void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
 {
-       int level = *goal_levelp;
-
-       if (cur_level == level && level > PG_LEVEL_4K &&
+       if (cur_level > PG_LEVEL_4K &&
+           cur_level == fault->goal_level &&
            is_shadow_present_pte(spte) &&
            !is_large_pte(spte)) {
                /*
                 * patching back for them into pfn the next 9 bits of
                 * the address.
                 */
-               u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
-                               KVM_PAGES_PER_HPAGE(level - 1);
-               *pfnp |= gfn & page_mask;
-               (*goal_levelp)--;
+               u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
+                               KVM_PAGES_PER_HPAGE(cur_level - 1);
+               fault->pfn |= fault->gfn & page_mask;
+               fault->goal_level--;
        }
 }
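
For reference, this is how the function reads once the change is applied, assembled from the two hunks above; the in-code comment is condensed here to roughly the tail that is visible in this excerpt, so its exact wording is not authoritative:

void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
{
	if (cur_level > PG_LEVEL_4K &&
	    cur_level == fault->goal_level &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE already exists for this pfn, so force the
		 * caller down one more level, patching back for them into
		 * pfn the next 9 bits of the address.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
				KVM_PAGES_PER_HPAGE(cur_level - 1);
		fault->pfn |= fault->gfn & page_mask;
		fault->goal_level--;
	}
}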
 
                 * large page, as the leaf could be executable.
                 */
                if (fault->nx_huge_page_workaround_enabled)
-                       disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
-                                                  &fault->pfn, &fault->goal_level);
+                       disallowed_hugepage_adjust(fault, *it.sptep, it.level);
 
                base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == fault->goal_level)
 
                              const struct kvm_memory_slot *slot, gfn_t gfn,
                              kvm_pfn_t pfn, int max_level);
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
-void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
-                               kvm_pfn_t *pfnp, u8 *goal_levelp);
+void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
 
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 
 
                 * large page, as the leaf could be executable.
                 */
                if (fault->nx_huge_page_workaround_enabled)
-                       disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
-                                                  &fault->pfn, &fault->goal_level);
+                       disallowed_hugepage_adjust(fault, *it.sptep, it.level);
 
                base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == fault->goal_level)
 
 
        tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
                if (fault->nx_huge_page_workaround_enabled)
-                       disallowed_hugepage_adjust(iter.old_spte, fault->gfn,
-                                                  iter.level, &fault->pfn, &fault->goal_level);
+                       disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
 
                if (iter.level == fault->goal_level)
                        break;