unsigned long pfra : 52; /* Page-Frame Real Address */
 };
 
+/*
+ * Host-side bookkeeping for the IPTE interlock when the SIIF facility
+ * is not in use: number of host holders, serialized by ipte_mutex.
+ * NOTE(review): these are file-scope statics shared by ALL VMs, while
+ * the ipte_control word they shadow lives in the per-VM SCA — confirm
+ * this is intended when multiple guests run concurrently.
+ */
+static int ipte_lock_count;
+static DEFINE_MUTEX(ipte_mutex);
+
+/*
+ * Return non-zero if the IPTE interlock is currently held.
+ * With SIIF (sie_block->eca bit 0 set) the holder count (kh) kept in
+ * the SCA's ipte_control word is authoritative; otherwise the
+ * host-side ipte_lock_count is.
+ */
+int ipte_lock_held(struct kvm_vcpu *vcpu)
+{
+       union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;
+
+       if (vcpu->arch.sie_block->eca & 1)
+               return ic->kh != 0;
+       return ipte_lock_count != 0;
+}
+
+/*
+ * Acquire the IPTE lock for the host when the guest cannot take it
+ * itself (no SIIF).  Nested host users are counted in ipte_lock_count
+ * under ipte_mutex; only the first holder actually sets the "k" bit
+ * in the SCA's ipte_control word.  The cmpxchg loop waits (with
+ * cond_resched) while the bit is already set.
+ */
+static void ipte_lock_simple(struct kvm_vcpu *vcpu)
+{
+       union ipte_control old, new, *ic;
+
+       mutex_lock(&ipte_mutex);
+       ipte_lock_count++;
+       if (ipte_lock_count > 1)
+               goto out;       /* lock already held by another host user */
+       ic = &vcpu->kvm->arch.sca->ipte_control;
+       do {
+               /*
+                * Read the scalar ->val member: ACCESS_ONCE on a whole
+                * union is unreliable (gcc may drop the volatile
+                * qualifier for non-scalar types).
+                */
+               old.val = ACCESS_ONCE(ic->val);
+               while (old.k) {
+                       cond_resched();
+                       old.val = ACCESS_ONCE(ic->val);
+               }
+               new = old;
+               new.k = 1;
+       } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+out:
+       mutex_unlock(&ipte_mutex);
+}
+
+/*
+ * Drop one host reference to the IPTE lock (non-SIIF variant).  The
+ * "k" bit is cleared and waiters are woken only when the last host
+ * user (ipte_lock_count reaching 0) unlocks.
+ */
+static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
+{
+       union ipte_control old, new, *ic;
+
+       mutex_lock(&ipte_mutex);
+       ipte_lock_count--;
+       if (ipte_lock_count)
+               goto out;       /* still held by other host users */
+       ic = &vcpu->kvm->arch.sca->ipte_control;
+       do {
+               /* scalar access: ACCESS_ONCE on a union is unreliable */
+               old.val = ACCESS_ONCE(ic->val);
+               new = old;
+               new.k = 0;
+       } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+       /* count is known to be zero here, so unconditionally wake waiters */
+       wake_up(&vcpu->kvm->arch.ipte_wq);
+out:
+       mutex_unlock(&ipte_mutex);
+}
+
+/*
+ * Acquire the IPTE lock when the SIIF facility is in use: the lock
+ * bit (k) and holder count (kh) live in the SCA's ipte_control word,
+ * which is shared with the hardware, so every update goes through
+ * cmpxchg.  Waits (with cond_resched) while a guest holds it (kg).
+ */
+static void ipte_lock_siif(struct kvm_vcpu *vcpu)
+{
+       union ipte_control old, new, *ic;
+
+       ic = &vcpu->kvm->arch.sca->ipte_control;
+       do {
+               /* scalar access: ACCESS_ONCE on a union is unreliable */
+               old.val = ACCESS_ONCE(ic->val);
+               while (old.kg) {
+                       cond_resched();
+                       old.val = ACCESS_ONCE(ic->val);
+               }
+               new = old;
+               new.k = 1;
+               new.kh++;
+       } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+}
+
+/*
+ * Release one holder of the SIIF IPTE lock: decrement kh, clear the
+ * k bit when the last holder goes away, then wake anybody waiting in
+ * handle_ipte_interlock.
+ */
+static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
+{
+       union ipte_control old, new, *ic;
+
+       ic = &vcpu->kvm->arch.sca->ipte_control;
+       do {
+               /* scalar access: ACCESS_ONCE on a union is unreliable */
+               old.val = ACCESS_ONCE(ic->val);
+               new = old;
+               new.kh--;
+               if (!new.kh)
+                       new.k = 0;
+       } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+       if (!new.kh)
+               wake_up(&vcpu->kvm->arch.ipte_wq);
+}
+
+/* Take the ipte lock, picking the variant by the SIIF bit (eca bit 0). */
+static void ipte_lock(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.sie_block->eca & 1))
+               ipte_lock_simple(vcpu);
+       else
+               ipte_lock_siif(vcpu);
+}
+
+/* Release the ipte lock, picking the variant by the SIIF bit (eca bit 0). */
+static void ipte_unlock(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.sie_block->eca & 1))
+               ipte_unlock_simple(vcpu);
+       else
+               ipte_unlock_siif(vcpu);
+}
+
 static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
 {
        switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
        unsigned long _len, nr_pages, gpa, idx;
        unsigned long pages_array[2];
        unsigned long *pages;
+       int need_ipte_lock;
+       union asce asce;
        int rc;
 
        if (!len)
                pages = vmalloc(nr_pages * sizeof(unsigned long));
        if (!pages)
                return -ENOMEM;
+       asce.val = get_vcpu_asce(vcpu);
+       need_ipte_lock = psw_bits(*psw).t && !asce.r;
+       if (need_ipte_lock)
+               ipte_lock(vcpu);
        rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
        for (idx = 0; idx < nr_pages && !rc; idx++) {
                gpa = *(pages + idx) + (ga & ~PAGE_MASK);
                ga += _len;
                data += _len;
        }
+       if (need_ipte_lock)
+               ipte_unlock(vcpu);
        if (nr_pages > ARRAY_SIZE(pages_array))
                vfree(pages);
        return rc;
 
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+       { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
 
        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
+       init_waitqueue_head(&kvm->arch.ipte_wq);
 
        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");
 
        return 0;
 }
 
+/*
+ * Intercept handler for instructions that must not execute while the
+ * host holds the ipte lock (dispatched from the b2/b9 handler tables
+ * below).  Rejects problem-state callers, otherwise blocks until the
+ * lock is free and rewinds the PSW so the guest retries the
+ * instruction.
+ */
+static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
+{
+       psw_t *psw = &vcpu->arch.sie_block->gpsw;
+
+       vcpu->stat.instruction_ipte_interlock++;
+       /* privileged operation: reject when PSW problem-state bit is set */
+       if (psw_bits(*psw).p)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+       /* sleep until the host side has released the ipte lock ... */
+       wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
+       /* ... then back the PSW up 4 bytes so the guest re-executes it */
+       psw->addr = __rewind_psw(*psw, 4);
+       VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
+       return 0;
+}
+
 static int handle_test_block(struct kvm_vcpu *vcpu)
 {
        unsigned long hva;
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
+       [0x21] = handle_ipte_interlock,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
+       [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
 }
 
 static const intercept_handler_t b9_handlers[256] = {
+       [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
+       [0x8e] = handle_ipte_interlock,
+       [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
 };