work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
                return;
 
-       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
+       r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
+                                 true, NULL);
+
+       /*
+        * Account fixed page faults, otherwise they'll never be counted, but
+        * ignore stats for all other return types.  Page-ready "faults" aren't
+        * truly spurious and never trigger emulation.
+        */
+       if (r == RET_PF_FIXED)
+               vcpu->stat.pf_fixed++;
 }
 
 static inline u8 kvm_max_level_for_order(int order)
 
        if (r < 0)
                return r;
+
+       if (r == RET_PF_FIXED)
+               vcpu->stat.pf_fixed++;
+       else if (r == RET_PF_EMULATE)
+               vcpu->stat.pf_emulate++;
+       else if (r == RET_PF_SPURIOUS)
+               vcpu->stat.pf_spurious++;
+
        if (r != RET_PF_EMULATE)
                return 1;
 
 
        if (fault.write_fault_to_shadow_pgtable && emulation_type)
                *emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
 
-       /*
-        * Similar to above, prefetch faults aren't truly spurious, and the
-        * async #PF path doesn't do emulation.  Do count faults that are fixed
-        * by the async #PF handler though, otherwise they'll never be counted.
-        */
-       if (r == RET_PF_FIXED)
-               vcpu->stat.pf_fixed++;
-       else if (prefetch)
-               ;
-       else if (r == RET_PF_EMULATE)
-               vcpu->stat.pf_emulate++;
-       else if (r == RET_PF_SPURIOUS)
-               vcpu->stat.pf_spurious++;
        return r;
 }