 #define TLB_CONTROL_DO_NOTHING 0
 #define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 
 #define V_TPR_MASK 0x0f
 
 
 
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
 
@@ ... @@
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       to_svm(vcpu)->asid_generation--;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+               svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+       else
+               svm->asid_generation--;
 }
 
@@ ... @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 
        svm->next_rip = 0;
 
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
        /* if exit due to PF check for async PF */
        if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
                svm->apf_reason = kvm_read_and_reset_pf_reason();
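
The net effect of the svm.c hunks: tlb_ctl is no longer cleared in pre_svm_run() before every guest entry, but after VMRUN instead, so a flush requested while an exit is being handled survives until the next entry and is then consumed exactly once; on CPUs without Flush-By-Asid the code falls back to retiring the current ASID by decrementing the generation. Below is a minimal userspace sketch of that decision logic. It is illustrative only; names such as vcpu_model, flush_tlb(), vcpu_run() and the cpu_has_flushbyasid flag are invented for the example and are not kernel APIs.

/*
 * Standalone sketch (not kernel code) of the TLB-flush decision:
 * with Flush-By-Asid, request a targeted flush via tlb_ctl; without
 * it, bump the ASID generation so a fresh ASID is assigned before
 * the next guest entry.
 */
#include <stdbool.h>
#include <stdio.h>

#define TLB_CONTROL_DO_NOTHING      0
#define TLB_CONTROL_FLUSH_ALL_ASID  1
#define TLB_CONTROL_FLUSH_ASID      3

struct vcpu_model {
	int tlb_ctl;          /* models vmcb->control.tlb_ctl */
	int asid_generation;  /* models svm->asid_generation */
};

static void flush_tlb(struct vcpu_model *v, bool cpu_has_flushbyasid)
{
	if (cpu_has_flushbyasid)
		v->tlb_ctl = TLB_CONTROL_FLUSH_ASID;  /* hardware flushes only this ASID */
	else
		v->asid_generation--;                 /* forces a new ASID on the next run */
}

static void vcpu_run(struct vcpu_model *v)
{
	/* ... VMRUN would consume tlb_ctl here ... */
	v->tlb_ctl = TLB_CONTROL_DO_NOTHING;  /* reset afterwards, so the request is one-shot */
}

int main(void)
{
	struct vcpu_model v = { TLB_CONTROL_DO_NOTHING, 1 };

	flush_tlb(&v, true);
	printf("tlb_ctl before entry: %d\n", v.tlb_ctl);   /* 3: flush requested */
	vcpu_run(&v);
	printf("tlb_ctl after entry:  %d\n", v.tlb_ctl);   /* 0: request consumed */
	return 0;
}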