#define PFERR_RSVD_BIT 3
 #define PFERR_FETCH_BIT 4
 #define PFERR_PK_BIT 5
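+/*
+ * The guest bits below mirror the extra information AMD hardware reports
+ * for nested page faults: bit 32 is set when the fault occurred while
+ * translating the guest's final physical address, bit 33 when it occurred
+ * while translating the guest page tables.  Carrying them is why the
+ * error code is widened to 64 bits.
+ */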
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
 
 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 #define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+
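+/*
+ * Error code signature of a write fault taken while the hardware was
+ * translating the guest page tables with nested paging enabled in both
+ * guests; kvm_mmu_page_fault() checks for it so the faulting page can
+ * be unprotected instead of emulating the access.
+ */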
+#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |       \
+                                PFERR_USER_MASK |              \
+                                PFERR_WRITE_MASK |             \
+                                PFERR_PRESENT_MASK)
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
                       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                       void *insn, int insn_len)
 {
        int r, emulation_type = EMULTYPE_RETRY;
                        return r;
        }
 
-       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
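+       /*
+        * The per-MMU ->page_fault() callbacks still take a u32 error
+        * code, so only the lower 32 bits (the architectural #PF bits)
+        * are passed down; the new 64-bit guest bits are handled below.
+        */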
+       r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+                                     false);
        if (r < 0)
                return r;
        if (!r)
                return 1;
 
+       /*
+        * Before emulating the instruction, check if the error code
+        * was due to a RO violation while translating the guest page.
+        * This can occur when using nested virtualization with nested
+        * paging in both guests. If true, we simply unprotect the page
+        * and resume the guest.
+        *
+        * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+        *       in PFERR_NESTED_GUEST_PAGE)
+        */
+       if (error_code == PFERR_NESTED_GUEST_PAGE) {
+               kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+               return 1;
+       }
+
        if (mmio_info_in_cache(vcpu, cr2, direct))
                emulation_type = 0;
 emulate: