I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel);
     I/O interruption parameters in parm (subchannel) and parm64 (intparm,
     interruption subclass)
+KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
+                           machine check interrupt code in parm64 (note that
+                           machine checks needing further payload are not
+                           supported by this ioctl)
 
 Note that the vcpu ioctl is asynchronous to vcpu execution.
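
A minimal userspace sketch of injecting a machine check through the
KVM_S390_INTERRUPT vcpu ioctl; vcpu_fd, inject_mchk and the parm/parm64
values are illustrative placeholders, not values mandated by the API:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Inject a machine check into the guest vcpu behind vcpu_fd. */
    static int inject_mchk(int vcpu_fd)
    {
            struct kvm_s390_interrupt irq = {
                    .type   = KVM_S390_MCHK,
                    .parm   = 0x00002000,            /* example cr 14 bits */
                    .parm64 = 0x0000400f00000000ULL, /* example interruption code */
            };

            if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq) < 0) {
                    perror("KVM_S390_INTERRUPT");
                    return -1;
            }
            return 0;
    }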
 
 
        __u8    reserved40[4];          /* 0x0040 */
 #define LCTL_CR0       0x8000
 #define LCTL_CR6       0x0200
+#define LCTL_CR14      0x0002
        __u16   lctl;                   /* 0x0044 */
        __s16   icpua;                  /* 0x0046 */
+#define ICTL_LPSW 0x00400000
        __u32   ictl;                   /* 0x0048 */
        __u32   eca;                    /* 0x004c */
        __u8    icptcode;               /* 0x0050 */
        __u16 code;
 };
 
+struct kvm_s390_mchk_info {
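+	/* cr14: cr 14 bits (machine check subclass masks) at injection;
+	 * mcic: the 64 bit machine check interruption code. */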
+       __u64 cr14;
+       __u64 mcic;
+};
+
 struct kvm_s390_interrupt_info {
        struct list_head list;
        u64     type;
                struct kvm_s390_emerg_info emerg;
                struct kvm_s390_extcall_info extcall;
                struct kvm_s390_prefix_info prefix;
+               struct kvm_s390_mchk_info mchk;
        };
 };
 
 
 
 static const intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
+       [0x82] = kvm_s390_handle_lpsw,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb7] = handle_lctl,
+       [0xb9] = kvm_s390_handle_b9,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = handle_lctlg,
 };
 
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
 }
 
+static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
+{
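+	/* PSW machine check mask bit off: machine checks stay pending. */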
+       return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
+}
+
 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 {
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
+       case KVM_S390_MCHK:
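+		/* deliverable only if enabled in the PSW and the
+		 * subclass is enabled in cr 14 */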
+               if (psw_mchk_disabled(vcpu))
+                       return 0;
+               if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
+                       return 1;
+               return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
+       vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
+       case KVM_S390_MCHK:
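+		/* disabled: intercept LPSW/LPSWE so we see the guest
+		 * enabling machine checks; enabled but still pending:
+		 * intercept loads of cr 14 */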
+               if (psw_mchk_disabled(vcpu))
+                       vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+               else
+                       vcpu->arch.sie_block->lctl |= LCTL_CR14;
+               break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                        exception = 1;
                break;
 
+       case KVM_S390_MCHK:
+               VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+                          inti->mchk.mcic);
+               trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+                                                inti->mchk.cr14,
+                                                inti->mchk.mcic);
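+
+		/* deliver: store status into the prefix area, set the
+		 * interruption code and exchange the PSW via the
+		 * machine check old/new PSW lowcore fields */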
+               rc = kvm_s390_vcpu_store_status(vcpu,
+                                               KVM_S390_STORE_STATUS_PREFIXED);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+                                  &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                                    __LC_MCK_NEW_PSW, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        {
                __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
        }
 }
 
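+/*
+ * Deliver only pending machine checks: walk the local, then the
+ * floating interrupt list; KVM_S390_MCHK entries that are deliverable
+ * get delivered, everything else just refreshes the intercept
+ * indicators.
+ */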
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
+{
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+       struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+       struct kvm_s390_interrupt_info  *n, *inti = NULL;
+       int deliver;
+
+       __reset_intercept_indicators(vcpu);
+       if (atomic_read(&li->active)) {
+               do {
+                       deliver = 0;
+                       spin_lock_bh(&li->lock);
+                       list_for_each_entry_safe(inti, n, &li->list, list) {
+                               if ((inti->type == KVM_S390_MCHK) &&
+                                   __interrupt_is_deliverable(vcpu, inti)) {
+                                       list_del(&inti->list);
+                                       deliver = 1;
+                                       break;
+                               }
+                               __set_intercept_indicator(vcpu, inti);
+                       }
+                       if (list_empty(&li->list))
+                               atomic_set(&li->active, 0);
+                       spin_unlock_bh(&li->lock);
+                       if (deliver) {
+                               __do_deliver_interrupt(vcpu, inti);
+                               kfree(inti);
+                       }
+               } while (deliver);
+       }
+
+       if (atomic_read(&fi->active)) {
+               do {
+                       deliver = 0;
+                       spin_lock(&fi->lock);
+                       list_for_each_entry_safe(inti, n, &fi->list, list) {
+                               if ((inti->type == KVM_S390_MCHK) &&
+                                   __interrupt_is_deliverable(vcpu, inti)) {
+                                       list_del(&inti->list);
+                                       deliver = 1;
+                                       break;
+                               }
+                               __set_intercept_indicator(vcpu, inti);
+                       }
+                       if (list_empty(&fi->list))
+                               atomic_set(&fi->active, 0);
+                       spin_unlock(&fi->lock);
+                       if (deliver) {
+                               __do_deliver_interrupt(vcpu, inti);
+                               kfree(inti);
+                       }
+               } while (deliver);
+       }
+}
+
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        case KVM_S390_INT_EMERGENCY:
                kfree(inti);
                return -EINVAL;
+       case KVM_S390_MCHK:
+               VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+                        s390int->parm64);
+               inti->type = s390int->type;
+               inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+               inti->mchk.mcic = s390int->parm64;
+               break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (s390int->type & IOINT_AI_MASK)
                        VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
                inti->type = s390int->type;
                inti->emerg.code = s390int->parm;
                break;
+	case KVM_S390_MCHK:
+		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+			   s390int->parm64);
+		inti->type = s390int->type;
+		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+		inti->mchk.mcic = s390int->parm64;
+		break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
 
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_vm(struct kvm *kvm,
                struct kvm_s390_interrupt *s390int);
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
+#include <asm/ptrace.h>
+#include <asm/compat.h>
 #include "gaccess.h"
 #include "kvm-s390.h"
 #include "trace.h"
        return 0;
 }
 
+static void handle_new_psw(struct kvm_vcpu *vcpu)
+{
+       /* Check whether the new psw is enabled for machine checks. */
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
+               kvm_s390_deliver_pending_machine_checks(vcpu);
+}
+
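+/* PSW validity: unassigned mask bits and per-addressing-mode limits. */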
+#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
+#define PSW_ADDR_24 0x0000000000ffffffUL
+#define PSW_ADDR_31 0x000000007fffffffUL
+
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
+{
+       u64 addr;
+       psw_compat_t new_psw;
+
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu,
+                                                  PGM_PRIVILEGED_OPERATION);
+
+       addr = kvm_s390_get_base_disp_s(vcpu);
+
+       if (addr & 7) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       if (!(new_psw.mask & PSW32_MASK_BASE)) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
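+	/* Build the long PSW: drop the base bit, shift the mask word up
+	 * and move the addressing mode bit from the address word. */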
+	vcpu->arch.sie_block->gpsw.mask =
+		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
+	vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE;
+	vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE;
+
+       if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+           (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+            (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+           ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+            PSW_MASK_EA)) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       handle_new_psw(vcpu);
+out:
+       return 0;
+}
+
+static int handle_lpswe(struct kvm_vcpu *vcpu)
+{
+       u64 addr;
+       psw_t new_psw;
+
+       addr = kvm_s390_get_base_disp_s(vcpu);
+
+       if (addr & 7) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
+       vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+       if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+           (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+             PSW_MASK_BA) &&
+            (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
+           (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+            (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+           ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+            PSW_MASK_EA)) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       handle_new_psw(vcpu);
+out:
+       return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
        u64 operand2;
        [0x5f] = handle_chsc,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
+       [0xb2] = handle_lpswe,
 };
 
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
        return -EOPNOTSUPP;
 }
 
+static int handle_epsw(struct kvm_vcpu *vcpu)
+{
+       int reg1, reg2;
+
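+	/* EPSW is RRE formatted: r1 and r2 are encoded in the ipb. */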
+	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+
+       /* This basically extracts the mask half of the psw. */
+       vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
+       vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
+       if (reg2) {
+               vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
+               vcpu->run->s.regs.gprs[reg2] |=
+                       vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
+       }
+       return 0;
+}
+
+static const intercept_handler_t b9_handlers[256] = {
+       [0x8d] = handle_epsw,
+};
+
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
+{
+       intercept_handler_t handler;
+
+       /* This is handled just as for the B2 instructions. */
+       handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+       if (handler) {
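+		/* EPSW is allowed in the problem state. */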
+               if ((handler != handle_epsw) &&
+                   (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
+                       return kvm_s390_inject_program_int(vcpu,
+                                                  PGM_PRIVILEGED_OPERATION);
+               else
+                       return handler(vcpu);
+       }
+       return -EOPNOTSUPP;
+}
+
 static int handle_tprot(struct kvm_vcpu *vcpu)
 {
        u64 address1, address2;
 
  * Trace point for the actual delivery of interrupts.
  */
 TRACE_EVENT(kvm_s390_deliver_interrupt,
-           TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1),
+           TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1),
            TP_ARGS(id, type, data0, data1),
 
            TP_STRUCT__entry(
                    __field(int, id)
                    __field(__u32, inttype)
-                   __field(__u32, data0)
+                   __field(__u64, data0)
                    __field(__u64, data1)
                    ),
 
                    ),
 
            TP_printk("deliver interrupt (vcpu %d): type:%x (%s) "      \
-                     "data:%08x %016llx",
+                     "data:%08llx %016llx",
                      __entry->id, __entry->inttype,
                      __print_symbolic(__entry->inttype, kvm_s390_int_type),
                      __entry->data0, __entry->data1)
 
 #define KVM_S390_PROGRAM_INT           0xfffe0001u
 #define KVM_S390_SIGP_SET_PREFIX       0xfffe0002u
 #define KVM_S390_RESTART               0xfffe0003u
+#define KVM_S390_MCHK                  0xfffe1000u
 #define KVM_S390_INT_VIRTIO            0xffff2603u
 #define KVM_S390_INT_SERVICE           0xffff2401u
 #define KVM_S390_INT_EMERGENCY         0xffff1201u