 #define PT32_INDEX(address, level) \
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 
 
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
 #define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 #define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT64_LEVEL_BITS))) - 1))
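For reference, the two helpers this patch leans on come from the base SME series (include/linux/mem_encrypt.h) and collapse to no-ops when SME is inactive, since sme_me_mask is 0 there:

	#define __sme_set(x)		((x) | sme_me_mask)
	#define __sme_clr(x)		((x) & ~sme_me_mask)

Clearing the encryption bit in PT64_BASE_ADDR_MASK is what keeps PFN extraction correct once shadow_me_mask starts being OR'd into SPTEs below.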
 
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
-                       | shadow_x_mask | shadow_nx_mask)
+                       | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
 
 #define ACC_EXEC_MASK    1
 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
 static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_present_mask;
+static u64 __read_mostly shadow_me_mask;
 
 /*
  * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
  */
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
-               u64 acc_track_mask)
+               u64 acc_track_mask, u64 me_mask)
 {
        BUG_ON(!dirty_mask != !accessed_mask);
        BUG_ON(!accessed_mask && !acc_track_mask);
        shadow_x_mask = x_mask;
        shadow_present_mask = p_mask;
        shadow_acc_track_mask = acc_track_mask;
+       shadow_me_mask = me_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
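The new me_mask argument is supplied by the x86 init path, while the VMX caller keeps passing 0 (EPT entries never carry the C-bit). Elsewhere in this series the kvm_arch_init() call site becomes, roughly:

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0,
			PT_PRESENT_MASK, 0, sme_me_mask);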
 
        BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
 
        spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
-              shadow_user_mask | shadow_x_mask;
+              shadow_user_mask | shadow_x_mask | shadow_me_mask;
 
        if (sp_ad_disabled(sp))
                spte |= shadow_acc_track_value;
                pte_access &= ~ACC_WRITE_MASK;
 
        spte |= (u64)pfn << PAGE_SHIFT;
+       spte |= shadow_me_mask;
 
        if (pte_access & ACC_WRITE_MASK) {
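With PT64_BASE_ADDR_MASK now __sme_clr()'d, the C-bit OR'd into the leaf SPTE above round-trips cleanly; extraction still yields the real page frame, as in mmu.c's helper:

	static kvm_pfn_t spte_to_pfn(u64 pte)
	{
		return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	}

The remaining hunks are in arch/x86/kvm/svm.c, where physical addresses handed straight to hardware (the AVIC tables, the permission bitmaps, the VMCB itself) get the same treatment.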
 
 
 {
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
-       phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
-       phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
-       phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);
+       phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
+       phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
+       phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));
 
        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
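Note the ordering: __sme_set() is applied before the AVIC_HPA_MASK masking. That works because AVIC_HPA_MASK (~((0xFFFULL << 52) | 0xFFF) in this file) preserves physical-address bits 51:12, and the C-bit is an upper PA bit (bit 47 on first-generation EPYC), so it survives the mask.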
                set_intercept(svm, INTERCEPT_MWAIT);
        }
 
-       control->iopm_base_pa = iopm_base;
-       control->msrpm_base_pa = __pa(svm->msrpm);
+       control->iopm_base_pa = __sme_set(iopm_base);
+       control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;
 
        init_seg(&save->es);
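iopm_base itself is still computed as a plain physical address in svm_hardware_setup(), along the lines of:

	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

so the C-bit is applied exactly once, here, where the address is programmed into the VMCB. Both permission bitmaps live in ordinary (hence encrypted) kernel memory that the CPU consults on every potential intercept, which is why the hardware-visible addresses need the bit set.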
                return -EINVAL;
 
        new_entry = READ_ONCE(*entry);
-       new_entry = (page_to_phys(svm->avic_backing_page) &
-                    AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
-                    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
+       new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
+                             AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
+                             AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
        WRITE_ONCE(*entry, new_entry);
 
        svm->avic_physical_id_cache = entry;
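Wrapping the whole expression in __sme_set() rather than just the page_to_phys() term is harmless: the helper only ORs in sme_me_mask, a physical-address bit, which cannot collide with the high control bit AVIC_PHYSICAL_ID_ENTRY_VALID_MASK. The table entry is consumed directly by the AVIC hardware, so it needs the encrypted view of the backing-page address.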
 
        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
-       svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+       svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
        svm->asid_generation = 0;
        init_vmcb(svm);
 
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+       __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
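This is the counterpart of the svm_create_vcpu() hunk above: vmcb_pa is kept in its hardware form (C-bit set) because it is the value VMRUN consumes, so this software-only path must strip the bit before converting back to a pfn; pfn_to_page() on a C-bit-tainted pfn would index far outside the memmap.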
        u64 pdpte;
        int ret;
 
-       ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+       ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
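Here the bit comes off rather than going on: the PDPTE is read through KVM's guest-memory accessors, which expect a plain gfn, so any encryption bit carried in the nested CR3 value is cleared before gpa_to_gfn(). The offset_in_page(cr3) argument needs no such care, as the C-bit sits well above bit 11.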
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->control.nested_cr3 = root;
+       svm->vmcb->control.nested_cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_NPT);
        svm_flush_tlb(vcpu);
 }
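Here root is the host physical address of the NPT root allocated by the KVM MMU. Those page tables live in encrypted kernel memory, so the root pointer programmed here, like the non-leaf entries built via shadow_me_mask in the mmu.c hunks, must carry the C-bit for the hardware page walker to decrypt them.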
                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }
 
-       svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+       svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
 
        return true;
 }
        pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
                 irq.vector);
        *svm = to_svm(vcpu);
-       vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
+       vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
        vcpu_info->vector = irq.vector;
 
        return 0;
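pi_desc_addr, like pi.base in the next hunk, ends up in an IOMMU interrupt-remapping entry: the IOMMU writes the AVIC backing page when it delivers a guest interrupt, and like the CPU it must access that page through its encrypted alias, hence __sme_set() in both places.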
                        struct amd_iommu_pi_data pi;
 
                        /* Try to enable guest_mode in IRTE */
-                       pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
+                       pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
+                                           AVIC_HPA_MASK);
                        pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
                                                     svm->vcpu.vcpu_id);
                        pi.is_guest_mode = true;
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.cr3 = root;
+       svm->vmcb->save.cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_CR);
        svm_flush_tlb(vcpu);
 }
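The same applies to shadow paging: without NPT, the root handed to svm_set_cr3() is a host-allocated shadow page table, so even the guest-visible save.cr3 must reference it through the encrypted alias.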
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->control.nested_cr3 = root;
+       svm->vmcb->control.nested_cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_NPT);
 
        /* Also sync guest cr3 here in case we live migrate */
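(The guest CR3 synced on the following line is left untouched: under NPT it is a guest-owned value, not a host physical address, so no C-bit is applied there.)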