__invalidate_icache_guest_page(pfn, size);
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address,
-                                    struct vm_area_struct *vma)
+/*
+ * Deliver BUS_MCEERR_AR to current for the hwpoisoned page at @address.
+ * @lsb is the least-significant-bit position of the faulting mapping
+ * (PAGE_SHIFT, or huge_page_shift() for a hugetlb VMA) and is now
+ * computed by the caller, which still holds the VMA when it looks the
+ * shift up.  NOTE(review): presumably this removes the need to touch
+ * the vma on this path at signal time — confirm against the caller.
+ */
+static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
-       short lsb;
-
-       if (is_vm_hugetlb_page(vma))
-               lsb = huge_page_shift(hstate_vma(vma));
-       else
-               lsb = PAGE_SHIFT;
-
        send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        struct vm_area_struct *vma;
+       short vma_shift;
        kvm_pfn_t pfn;
        pgprot_t mem_type = PAGE_S2;
        bool logging_active = memslot_is_logging(memslot);
                return -EFAULT;
        }
 
-       vma_pagesize = vma_kernel_pagesize(vma);
+       if (is_vm_hugetlb_page(vma))
+               vma_shift = huge_page_shift(hstate_vma(vma));
+       else
+               vma_shift = PAGE_SHIFT;
+
+       vma_pagesize = 1ULL << vma_shift;
        if (logging_active ||
            (vma->vm_flags & VM_PFNMAP) ||
            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
 
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
-               kvm_send_hwpoison_signal(hva, vma);
+               kvm_send_hwpoison_signal(hva, vma_shift);
                return 0;
        }
        if (is_error_noslot_pfn(pfn))