[7] = { 4, 4 },         /* FIQ, unused */
 };
 
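+/*
+ * If this vCPU's sysregs are currently loaded on the CPU, save them back
+ * to the in-memory copy and keep preemption disabled, so that the fault
+ * injection code below operates on up-to-date state. Returns true if the
+ * state was unloaded and must be restored afterwards.
+ */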
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       if (vcpu->arch.sysregs_loaded_on_cpu) {
+               kvm_arch_vcpu_put(vcpu);
+               return true;
+       }
+
+       preempt_enable();
+       return false;
+}
+
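+/*
+ * Undo pre_fault_synchronize(): reload the sysregs onto the CPU if they
+ * were unloaded there, and re-enable preemption.
+ */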
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+       if (loaded) {
+               kvm_arch_vcpu_load(vcpu, smp_processor_id());
+               preempt_enable();
+       }
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
+       bool loaded = pre_fault_synchronize(vcpu);
+
        prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+       post_fault_synchronize(vcpu, loaded);
 }
 
 /*
        u32 vect_offset;
        u32 *far, *fsr;
        bool is_lpae;
+       bool loaded;
+
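+       /* Pull the vCPU state into memory before touching the CP15 fault registers */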
+       loaded = pre_fault_synchronize(vcpu);
 
        if (is_pabt) {
                vect_offset = 12;
                /* no need to shuffle FS[4] into DFSR[10] as it's 0 */
                *fsr = DFSR_FSC_EXTABT_nLPAE;
        }
+
+       post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)