bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
        int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
+
+       void (*migrate_timers)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_x86_nested_ops {
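The new migrate_timers entry is deliberately optional: only VMX supplies an implementation, so the generic code NULL-checks the field before calling it (see the irq.c hunk below). A minimal sketch of why that is safe, assuming the post-5.7 by-value kvm_x86_ops tables (illustrative only, not part of this patch):

static struct kvm_x86_ops svm_x86_ops __initdata = {
        /* ... SVM callbacks ... */
        /*
         * No .migrate_timers entry: designated initializers zero every
         * omitted field, so the pointer stays NULL and the generic
         * __kvm_migrate_timers() simply skips the vendor hook on SVM.
         */
};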
 
 void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
        __kvm_migrate_apic_timer(vcpu);
        __kvm_migrate_pit_timer(vcpu);
+       if (kvm_x86_ops.migrate_timers)
+               kvm_x86_ops.migrate_timers(vcpu);
 }
 
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
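For context, __kvm_migrate_timers() is reached when a vCPU thread is scheduled onto a different physical CPU: kvm_arch_vcpu_load() raises KVM_REQ_MIGRATE_TIMER, and vcpu_enter_guest() services the request before reentering the guest. Paraphrased from the existing arch/x86/kvm/x86.c (unchanged by this patch; structure abbreviated):

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        /* ... */
        if (vcpu->cpu != cpu) {
                /* the vCPU moved, so its CPU-pinned timers must follow */
                kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
                vcpu->cpu = cpu;
        }
        /* ... */
}

static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
        /* ... */
        if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
                __kvm_migrate_timers(vcpu);
        /* ... */
}

With this patch, servicing the request re-pins the nested VMX-preemption timer as well, not just the APIC and PIT timers.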
 
        return to_vmx(vcpu)->nested.vmxon;
 }
 
+static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu)) {
+               struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
+
+               if (hrtimer_try_to_cancel(timer) == 1)
+                       hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+       }
+}
+
 static void hardware_unsetup(void)
 {
        if (nested)
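The is_guest_mode() guard matters because the emulated VMX-preemption timer only runs while L2 is active; outside guest mode there is no hrtimer to move. The return-value check is equally deliberate: hrtimer_try_to_cancel() returns 1 if the timer was queued and has been cancelled, 0 if it was not queued at all, and -1 if its callback is currently executing and cannot be stopped. Only the first case needs (and can safely take) a restart. hrtimer_start_expires() then re-arms the timer with the expiry it already carries, and HRTIMER_MODE_ABS_PINNED pins it to the current CPU, which is the vCPU's new CPU by the time this hook runs. An annotated restatement of the idiom (migrate_pinned_hrtimer() is a hypothetical name, not kernel API):

static void migrate_pinned_hrtimer(struct hrtimer *timer)
{
        switch (hrtimer_try_to_cancel(timer)) {
        case 1:         /* was queued on the old CPU and is now cancelled */
                /* re-arm with the stored expiry, pinned to this CPU */
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
                break;
        case 0:         /* was not queued: nothing to migrate */
                break;
        case -1:        /* callback already running: leave it to finish */
                break;
        }
}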
 
        .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
        .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+       .migrate_timers = vmx_migrate_timers,
 };
 
 static __init int hardware_setup(void)