__mb();
 }
 
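+/*
+ * Prevent vCPUs from entering the guest, i.e. from invoking TDH.VP.ENTER,
+ * while a SEPT-zap-related SEAMCALL is retried: set wait_for_sept_zap and
+ * kick all vCPUs out of guest mode so that the vCPU entry path observes
+ * the flag before re-entering the guest.
+ */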
+static void tdx_no_vcpus_enter_start(struct kvm *kvm)
+{
+       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       WRITE_ONCE(kvm_tdx->wait_for_sept_zap, true);
+
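+       /* Kick vCPUs out of guest mode so they pick up wait_for_sept_zap. */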
+       kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
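+/* Allow vCPU entry again once the retried SEAMCALL has completed. */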
+static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
+{
+       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       WRITE_ONCE(kvm_tdx->wait_for_sept_zap, false);
+}
+
 /* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
 static int __tdx_reclaim_page(struct page *page)
 {
         */
        WARN_ON_ONCE(force_immediate_exit);
 
+       /*
+        * Wait until retry of SEPT-zap-related SEAMCALL completes before
+        * allowing vCPU entry to avoid contention with tdh_vp_enter() and
+        * TDCALLs.
+        */
+       if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
+               return EXIT_FASTPATH_EXIT_HANDLED;
+
        trace_kvm_entry(vcpu, force_immediate_exit);
 
        if (pi_test_on(&vt->pi_desc)) {
        if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
                return -EINVAL;
 
-       do {
+       /*
+        * When zapping a private page, the write mmu_lock is held, so there
+        * is no race with other vCPU SEPT operations. Races with TDH.VP.ENTER
+        * (due to 0-step mitigation) and guest TDCALLs are still possible.
+        */
+       err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+                                 &level_state);
+
+       if (unlikely(tdx_operand_busy(err))) {
                /*
-                * When zapping private page, write lock is held. So no race
-                * condition with other vcpu sept operation.  Race only with
-                * TDH.VP.ENTER.
+                * The retry is expected to succeed after all other vCPUs have
+                * been kicked off and prevented from invoking TDH.VP.ENTER.
                 */
+               tdx_no_vcpus_enter_start(kvm);
                err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
                                          &level_state);
-       } while (unlikely(tdx_operand_busy(err)));
+               tdx_no_vcpus_enter_stop(kvm);
+       }
 
        if (KVM_BUG_ON(err, kvm)) {
                pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
        WARN_ON_ONCE(level != PG_LEVEL_4K);
 
        err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
-       if (unlikely(tdx_operand_busy(err)))
-               return -EBUSY;
 
+       if (unlikely(tdx_operand_busy(err))) {
+               /* With vCPU entry blocked, the retry is expected to succeed. */
+               tdx_no_vcpus_enter_start(kvm);
+               err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
+               tdx_no_vcpus_enter_stop(kvm);
+       }
        if (tdx_is_sept_zap_err_due_to_premap(kvm_tdx, err, entry, level) &&
            !KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm)) {
                atomic64_dec(&kvm_tdx->nr_premapped);
 
        lockdep_assert_held_write(&kvm->mmu_lock);
 
-       do {
+       err = tdh_mem_track(&kvm_tdx->td);
+       if (unlikely(tdx_operand_busy(err))) {
+               /* With vCPU entry blocked, the retry is expected to succeed. */
+               tdx_no_vcpus_enter_start(kvm);
                err = tdh_mem_track(&kvm_tdx->td);
-       } while (unlikely(tdx_operand_busy(err)));
+               tdx_no_vcpus_enter_stop(kvm);
+       }
 
        if (KVM_BUG_ON(err, kvm))
                pr_tdx_error(TDH_MEM_TRACK, err);