return 0;
 }
 
+static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
+{
+       return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
+}
+
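+/*
+ * Translate the VA held in VNCR_EL2 through the guest's EL2&0 stage-1
+ * tables, fault in the backing page, and cache the result in the
+ * per-vCPU vncr_tlb. On success, KVM_REQ_MAP_L1_VNCR_EL2 is raised so
+ * that the new mapping is installed before the vCPU runs again.
+ * Returns 0 on success, or a negative error code that the caller turns
+ * into a retry or an injected exception.
+ */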
+static int kvm_translate_vncr(struct kvm_vcpu *vcpu)
+{
+       bool write_fault, writable;
+       unsigned long mmu_seq;
+       struct vncr_tlb *vt;
+       struct page *page;
+       u64 va, pfn, gfn;
+       int ret;
+
+       vt = vcpu->arch.vncr_tlb;
+
+       vt->wi = (struct s1_walk_info) {
+               .regime = TR_EL20,
+               .as_el0 = false,
+               .pan    = false,
+       };
+       vt->wr = (struct s1_walk_result){};
+       vt->valid = false;
+
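+       /*
+        * Hold SRCU while walking the guest's S1 tables and faulting
+        * the page in, as both depend on the memslots being stable.
+        */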
+       guard(srcu)(&vcpu->kvm->srcu);
+
+       va = read_vncr_el2(vcpu);
+
+       ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
+       if (ret)
+               return ret;
+
+       write_fault = kvm_is_write_fault(vcpu);
+
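+       /*
+        * Snapshot the invalidation sequence count before faulting the
+        * page in: mmu_invalidate_retry() below, called under mmu_lock,
+        * catches any MMU notifier invalidation that raced with us, and
+        * the smp_rmb() orders the sequence read against the pfn lookup.
+        */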
+       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+       smp_rmb();
+
+       gfn = vt->wr.pa >> PAGE_SHIFT;
+       pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writable, &page);
+       if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
+               return -EFAULT;
+
+       scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
+               if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
+                       return -EAGAIN;
+
+               vt->gva = va;
+               vt->hpa = pfn << PAGE_SHIFT;
+               vt->valid = true;
+               vt->cpu = -1;
+
+               kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
+       }
+
+       kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
+       if (vt->wr.pw)
+               mark_page_dirty(vcpu->kvm, gfn);
+
+       return 0;
+}
+
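+/*
+ * Inject the VNCR fault back into the guest's EL2 as a permission
+ * fault at the level reported by the S1 walk.
+ */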
+static void inject_vncr_perm(struct kvm_vcpu *vcpu)
+{
+       struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+       u64 esr = kvm_vcpu_get_esr(vcpu);
+
+       /* Adjust the fault level to reflect that of the guest's S1 walk */
+       esr &= ~ESR_ELx_FSC;
+       esr |= FIELD_PREP(ESR_ELx_FSC,
+                         ESR_ELx_FSC_PERM_L(vt->wr.level));
+
+       kvm_inject_nested_sync(vcpu, esr);
+}
+
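+/*
+ * Check whether the cached vncr_tlb entry covers the current VNCR_EL2
+ * value. A non-global (nG) entry additionally requires the current
+ * ASID (taken from TTBR0/1_EL2 as selected by TCR_EL2.A1, narrowed to
+ * 8 bits when 16-bit ASIDs are not in use) to match the one the entry
+ * was created with.
+ */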
+static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
+{
+       struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+
+       lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
+
+       if (!vt->valid)
+               return false;
+
+       if (read_vncr_el2(vcpu) != vt->gva)
+               return false;
+
+       if (vt->wr.nG) {
+               u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+               u64 ttbr = ((tcr & TCR_A1) ?
+                           vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+                           vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+               u16 asid;
+
+               asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+               if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+                   !(tcr & TCR_ASID16))
+                       asid &= GENMASK(7, 0);
+
+               return asid == vt->wr.asid;
+       }
+
+       return true;
+}
+
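+/*
+ * Handle an abort flagged as a VNCR_EL2 access: permission faults are
+ * reflected straight back to the guest's EL2, while translation faults
+ * either repopulate the vncr_tlb or lead to the corresponding S1 fault
+ * being injected. Always returns 1 so that the vCPU keeps running.
+ */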
+int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
+{
+       struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+       u64 esr = kvm_vcpu_get_esr(vcpu);
+
+       BUG_ON(!(esr & ESR_ELx_VNCR));
+
+       if (esr_fsc_is_permission_fault(esr)) {
+               inject_vncr_perm(vcpu);
+       } else if (esr_fsc_is_translation_fault(esr)) {
+               bool valid;
+               int ret;
+
+               scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
+                       valid = kvm_vncr_tlb_lookup(vcpu);
+
+               if (!valid)
+                       ret = kvm_translate_vncr(vcpu);
+               else
+                       ret = -EPERM;
+
+               switch (ret) {
+               case -EAGAIN:
+               case -ENOMEM:
+                       /* Let's try again... */
+                       break;
+               case -EFAULT:
+               case -EINVAL:
+               case -ENOENT:
+               case -EACCES:
+                       /*
+                        * Translation failed, inject the corresponding
+                        * exception back to EL2.
+                        */
+                       BUG_ON(!vt->wr.failed);
+
+                       esr &= ~ESR_ELx_FSC;
+                       esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
+
+                       kvm_inject_nested_sync(vcpu, esr);
+                       break;
+               case -EPERM:
+                       /* Hack to deal with POE until we get kernel support */
+                       inject_vncr_perm(vcpu);
+                       break;
+               case 0:
+                       break;
+               }
+       } else {
+               WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
+       }
+
+       return 1;
+}
+
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number