return 1;
 }
 
+static int skinit_interception(struct vcpu_svm *svm)
+{
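+       /* For SKINIT, RAX holds the physical address of the Secure Loader Block (SLB). */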
+       trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
+
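+       /* KVM does not emulate SKINIT, so signal #UD to the guest. */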
+       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+       return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        [SVM_EXIT_VMSAVE]                       = vmsave_interception,
        [SVM_EXIT_STGI]                         = stgi_interception,
        [SVM_EXIT_CLGI]                         = clgi_interception,
-       [SVM_EXIT_SKINIT]                       = invalid_op_interception,
+       [SVM_EXIT_SKINIT]                       = skinit_interception,
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
        [SVM_EXIT_MONITOR]                      = invalid_op_interception,
        [SVM_EXIT_MWAIT]                        = invalid_op_interception,
 
        TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n",
                  __entry->rip, __entry->asid, __entry->address)
 );
+
+/*
+ * Tracepoint for the skinit instruction
+ */
+TRACE_EVENT(kvm_skinit,
+           TP_PROTO(__u64 rip, __u32 slb),
+           TP_ARGS(rip, slb),
+
+       TP_STRUCT__entry(
+               __field(        __u64,  rip     )
+               __field(        __u32,  slb     )
+       ),
+
+       TP_fast_assign(
+               __entry->rip            =       rip;
+               __entry->slb            =       slb;
+       ),
+
+       TP_printk("rip: 0x%016llx slb: 0x%08x\n",
+                 __entry->rip, __entry->slb)
+);
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);