Always return old for clear_flush_young() when using EPT: this
generation of EPT hardware has no accessed bit, so kvm_age_rmapp()
cannot distinguish young pages and must report every page as old.
Discard the fake accessed bit and dirty bit of EPT as well; they are
replaced by plain zero masks.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
        u64 *spte;
        int young = 0;
 
+       /* EPT has no hardware accessed bit, so always report pages as old */
+       if (!shadow_accessed_mask)
+               return 0;
+
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                int _young;
 
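For context, the loop truncated above is the part the new early return
short-circuits: it walks the rmap chain and test-and-clears the accessed
bit in each spte. A minimal sketch of that aging pattern, assuming the
era's rmap_next() and PT_ACCESSED_MASK/PT_ACCESSED_SHIFT helpers rather
than claiming the verbatim upstream body:

	while (spte) {
		int _young;
		u64 _spte = *spte;

		/* only meaningful when hardware maintains an accessed bit */
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			/* clear it so the next scan sees fresh access state */
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;

With shadow_accessed_mask == 0 under EPT, no spte ever carries real
access information, so returning 0 ("old") up front is the only honest
answer.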
                kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
                        VMX_EPT_WRITABLE_MASK |
                        VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-               kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-                               VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+               kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
                                VMX_EPT_EXECUTABLE_MASK);
                kvm_enable_tdp();
        } else
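The five arguments here land in module-level masks that the common MMU
code consults; a sketch of the setter, consistent with the mmu.c of this
period but illustrative rather than verbatim:

	void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
				   u64 dirty_mask, u64 nx_mask, u64 x_mask)
	{
		shadow_user_mask = user_mask;
		shadow_accessed_mask = accessed_mask;
		shadow_dirty_mask = dirty_mask;
		shadow_nx_mask = nx_mask;
		shadow_x_mask = x_mask;
	}

Passing 0ull for accessed_mask and dirty_mask is what arms the
!shadow_accessed_mask early return in kvm_age_rmapp() above: the MMU now
records that EPT sptes carry no accessed/dirty state at all, instead of
advertising fake bits the hardware would never set.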
 
 #define VMX_EPT_READABLE_MASK                  0x1ull
 #define VMX_EPT_WRITABLE_MASK                  0x2ull
 #define VMX_EPT_EXECUTABLE_MASK                        0x4ull
-#define VMX_EPT_FAKE_ACCESSED_MASK             (1ull << 62)
-#define VMX_EPT_FAKE_DIRTY_MASK                        (1ull << 63)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR                0xfffbc000ul
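For reference, a worked example of the base-pte value programmed by
kvm_mmu_set_base_ptes() above, assuming VMX_EPT_DEFAULT_MT == 6
(writeback) and VMX_EPT_MT_EPTE_SHIFT == 3 (the EPT memory-type field
sits in bits 5:3 of a leaf entry per the SDM; both values come from
parts of the header not shown here):

	/* read + write + writeback memory type in bits 5:3 */
	u64 base = VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK |
		   (VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
	/* = 0x1 | 0x2 | (0x6ull << 3) = 0x33 */

Bits 62 and 63, which the deleted fake masks occupied, are not
interpreted by the EPT hardware of this generation, which is why they
could host software-only flags in the first place and why dropping them
needs no other cleanup.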