return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-
-       svm->vmcb->control.event_inj =          SVM_EVTINJ_VALID |
-                                               SVM_EVTINJ_VALID_ERR |
-                                               SVM_EVTINJ_TYPE_EXEPT |
-                                               GP_VECTOR;
-       svm->vmcb->control.event_inj_err = error_code;
-}
-
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
        to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
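
[Note: the common replacement is not shown in this excerpt. Judging by the
kvm_queue_exception() context further down, kvm_inject_gp() is presumably a
thin wrapper over the generic exception queue, roughly:

	void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
	{
		/* queue a #GP with an error code; delivered at the next vmentry */
		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
	}

kvm_queue_exception_e() is an assumption here, an error-code-carrying variant
of the kvm_queue_exception() visible below; it is not part of this diff.]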
@@ ... @@
       u64 data;
 
        if (svm_get_msr(&svm->vcpu, ecx, &data))
-               svm_inject_gp(&svm->vcpu, 0);
+               kvm_inject_gp(&svm->vcpu, 0);
        else {
                svm->vmcb->save.rax = data & 0xffffffff;
                svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
@@ ... @@
               | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
        svm->next_rip = svm->vmcb->save.rip + 2;
        if (svm_set_msr(&svm->vcpu, ecx, data))
-               svm_inject_gp(&svm->vcpu, 0);
+               kvm_inject_gp(&svm->vcpu, 0);
        else
                skip_emulated_instruction(&svm->vcpu);
        return 1;
 
@@ ... @@
       .tlb_flush = svm_flush_tlb,
 
-       .inject_gp = svm_inject_gp,
-
        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
 
@@ ... @@
       return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-       printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
-              vmcs_readl(GUEST_RIP));
-       vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-                    GP_VECTOR |
-                    INTR_TYPE_EXCEPTION |
-                    INTR_INFO_DELIEVER_CODE_MASK |
-                    INTR_INFO_VALID_MASK);
-}
-
 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
 {
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
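
[Note: with vmx_inject_gp() gone, a queued exception still has to be written
into the VMCS at some point. A sketch of what that delivery path presumably
looks like, reusing the fields the removed helper wrote by hand (the
vmx_queue_exception() name and signature are assumptions, not part of this
excerpt):

	static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
					bool has_error_code, u32 error_code)
	{
		/* build the same VM-entry interruption info vmx_inject_gp() built */
		u32 intr_info = nr | INTR_TYPE_EXCEPTION | INTR_INFO_VALID_MASK;

		if (has_error_code) {
			vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
			intr_info |= INTR_INFO_DELIEVER_CODE_MASK;
		}
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
	}
]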
@@ ... @@
       u64 data;
 
        if (vmx_get_msr(vcpu, ecx, &data)) {
-               vmx_inject_gp(vcpu, 0);
+               kvm_inject_gp(vcpu, 0);
                return 1;
        }
 
@@ ... @@
               | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
        if (vmx_set_msr(vcpu, ecx, data) != 0) {
-               vmx_inject_gp(vcpu, 0);
+               kvm_inject_gp(vcpu, 0);
                return 1;
        }
 
 
@@ ... @@
       .tlb_flush = vmx_flush_tlb,
 
-       .inject_gp = vmx_inject_gp,
-
        .run = vmx_vcpu_run,
        .handle_exit = kvm_handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
 
@@ ... @@
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-       kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
        WARN_ON(vcpu->exception.pending);
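
[Note: the WARN_ON() above implies per-vcpu queue state. A minimal sketch of
that state, with field names inferred from the usage in this excerpt:

	struct kvm_queued_exception {
		bool pending;		/* set once an exception is queued */
		bool has_error_code;	/* false for kvm_queue_exception() */
		u8 nr;			/* exception vector, e.g. GP_VECTOR */
		u32 error_code;
	};
]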
@@ ... @@
       if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->cr0);
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
@@ ... @@
                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
-                               inject_gp(vcpu);
+                               kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
-                               inject_gp(vcpu);
+                               kvm_inject_gp(vcpu, 0);
                                return;
 
                        }
@@ ... @@
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
-                       inject_gp(vcpu);
+                       kvm_inject_gp(vcpu, 0);
                        return;
                }
 
@@ ... @@
 {
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
@@ ... @@
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
-                       inject_gp(vcpu);
+                       kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                   && !load_pdptrs(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
@@ ... @@
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-                       inject_gp(vcpu);
+                       kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
@@ ... @@
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
-                               inject_gp(vcpu);
+                               kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
-                               inject_gp(vcpu);
+                               kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
@@ ... @@
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
        else {
                vcpu->cr3 = cr3;
                vcpu->mmu.new_cr3(vcpu);
@@ ... @@
 {
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
@@ ... @@
        if (efer & EFER_RESERVED_BITS) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_paging(vcpu)
            && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return;
        }
 
@@ ... @@
                 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
                 */
                pr_unimpl(vcpu, "guest string pio down\n");
-               inject_gp(vcpu);
+               kvm_inject_gp(vcpu, 0);
                return 1;
        }
        vcpu->run->io.count = now;
@@ ... @@
               vcpu->pio.guest_pages[i] = page;
                mutex_unlock(&vcpu->kvm->lock);
                if (!page) {
-                       inject_gp(vcpu);
+                       kvm_inject_gp(vcpu, 0);
                        free_pio_guest_pages(vcpu);
                        return 1;
                }
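
[Note: not shown in this excerpt is how the queue is drained. Presumably the
generic run loop hands a pending exception to the backend right before
reentering the guest, along these lines (the queue_exception hook is an
assumption based on the ops-table style above; the diff only shows the
inject_gp hook being removed):

	/* hand any queued exception to the vendor backend for injection */
	if (vcpu->exception.pending)
		kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
					     vcpu->exception.has_error_code,
					     vcpu->exception.error_code);
]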