 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+#include <asm/fixmap.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
 {
        /*
-        * The vCPU kept its reference on the MMU after the last put, keep
-        * rolling with it.
+        * If the vCPU kept its reference on the MMU after the last put,
+        * keep rolling with it.
         */
-       if (vcpu->arch.hw_mmu)
-               return;
-
        if (is_hyp_ctxt(vcpu)) {
+               /* EL2 context always runs on the canonical S2 MMU */
                if (!vcpu->arch.hw_mmu)
+                       vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
        } else {
+               if (!vcpu->arch.hw_mmu) {
+                       scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
+                               vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
+               }
+
+               /*
+                * If the guest hypervisor has NV enabled, defer the
+                * (re)installation of the VNCR_EL2 fixmap mapping to
+                * request context (handled by kvm_map_l1_vncr()), where
+                * the race checks against the pseudo-TLB can be redone.
+                */
+               if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
+                       kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
        }
 }
 
 void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
 {
+       /* Unconditionally drop the VNCR mapping if we have one */
+       if (host_data_test_flag(L1_VNCR_MAPPED)) {
+               BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
+               BUG_ON(is_hyp_ctxt(vcpu));
+
+               clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
+               vcpu->arch.vncr_tlb->cpu = -1;
+               host_data_clear_flag(L1_VNCR_MAPPED);
+       }
+
        /*
         * Keep a reference on the associated stage-2 MMU if the vCPU is
         * scheduling out and not in WFI emulation, suggesting it is likely to
        return 1;
 }
 
+/*
+ * Handler for KVM_REQ_MAP_L1_VNCR_EL2: map the page backing the L1
+ * guest's VNCR_EL2 into this CPU's dedicated fixmap slot, so long as
+ * the cached pseudo-TLB entry is still valid for the current context.
+ * Bails out silently when the request has been obsoleted (vcpu now in
+ * hyp context, pseudo-TLB invalidated, VNCR_EL2 rewritten, or ASID
+ * mismatch); the trapped MSR/MRS path will then refill the pseudo-TLB.
+ */
+static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
+{
+       struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+       pgprot_t prot;
+
+       /* Pin the CPU (fixmap slot is per-CPU) and freeze the S2 state */
+       guard(preempt)();
+       guard(read_lock)(&vcpu->kvm->mmu_lock);
+
+       /*
+        * The request to map VNCR may have raced against some other
+        * event, such as an interrupt, and may not be valid anymore.
+        */
+       if (is_hyp_ctxt(vcpu))
+               return;
+
+       /*
+        * Check that the pseudo-TLB is valid and that VNCR_EL2 still
+        * contains the expected value. If it doesn't, we simply bail out
+        * without a mapping -- a transformed MSR/MRS will generate the
+        * fault and allows us to populate the pseudo-TLB.
+        */
+       if (!vt->valid)
+               return;
+
+       if (read_vncr_el2(vcpu) != vt->gva)
+               return;
+
+       /*
+        * For a non-global (nG) entry, the cached translation is only
+        * usable if the current ASID matches the one it was created
+        * under. Recompute the active ASID from TCR_EL2.A1 selecting
+        * TTBR0/TTBR1, truncated to 8 bits unless 16-bit ASIDs are both
+        * implemented and enabled (TCR_EL2.AS).
+        */
+       if (vt->wr.nG) {
+               u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+               u64 ttbr = ((tcr & TCR_A1) ?
+                           vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+                           vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+               u16 asid;
+
+               asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+               if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+                   !(tcr & TCR_ASID16))
+                       asid &= GENMASK(7, 0);
+
+               if (asid != vt->wr.asid)
+                       return;
+       }
+
+       /*
+        * Record the owning CPU; safe because preemption is disabled
+        * above. NOTE(review): this is written even when prot ends up
+        * PAGE_NONE and no fixmap entry is installed -- confirm the put
+        * path (which only acts on L1_VNCR_MAPPED) tolerates that.
+        */
+       vt->cpu = smp_processor_id();
+
+       if (vt->wr.pw && vt->wr.pr)
+               prot = PAGE_KERNEL;
+       else if (vt->wr.pr)
+               prot = PAGE_KERNEL_RO;
+       else
+               prot = PAGE_NONE;
+
+       /*
+        * We can't map write-only (or no permission at all) in the kernel,
+        * but the guest can do it if using POE, so we'll have to turn a
+        * translation fault into a permission fault at runtime.
+        * FIXME: WO doesn't work at all, need POE support in the kernel.
+        */
+       if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
+               __set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
+               host_data_set_flag(L1_VNCR_MAPPED);
+       }
+}
+
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number
                write_unlock(&vcpu->kvm->mmu_lock);
        }
 
+       if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
+               kvm_map_l1_vncr(vcpu);
+
        /* Must be last, as may switch context! */
        if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
                kvm_inject_nested_irq(vcpu);