www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: nv: Publish emulated timer interrupt state in the in-memory state
author Marc Zyngier <maz@kernel.org>
Tue, 17 Dec 2024 14:23:11 +0000 (14:23 +0000)
committer Marc Zyngier <maz@kernel.org>
Thu, 2 Jan 2025 19:19:09 +0000 (19:19 +0000)
With FEAT_NV2, the EL0 timer state is entirely stored in memory,
meaning that the hypervisor can only provide a very poor emulation.

The only thing we can really do is to publish the interrupt state
in the guest view of CNT{P,V}_CTL_EL0, and defer everything else
to the next exit.

Only FEAT_ECV will allow us to fix it, at the cost of extra trapping.

Suggested-by: Chase Conklin <chase.conklin@arm.com>
Suggested-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241217142321.763801-4-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c

index ee5f732fbbece96049683b385a7b867d2a9bd17d..8bff913ed1264d0a2614b51a8d5f7232c94cdcb4 100644 (file)
@@ -441,11 +441,30 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
 }
 
+static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
+{
+       /*
+        * Paper over NV2 brokenness by publishing the interrupt status
+        * bit. This still results in a poor quality of emulation (guest
+        * writes will have no effect until the next exit).
+        *
+        * But hey, it's fast, right?
+        */
+       if (is_hyp_ctxt(ctx->vcpu) &&
+           (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
+               unsigned long val = timer_get_ctl(ctx);
+               __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
+               timer_set_ctl(ctx, val);
+       }
+}
+
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
 {
        int ret;
 
+       kvm_timer_update_status(timer_ctx, new_level);
+
        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
                                   timer_ctx->irq.level);
@@ -471,6 +490,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
                return;
        }
 
+       kvm_timer_update_status(ctx, should_fire);
+
        /*
         * If the timer can fire now, we don't need to have a soft timer
         * scheduled for the future.  If the timer cannot fire at all,
index fa3089822f9f396ad60d2b533ccb28ec5e6656fd..bda905022df40a56aa5ff714619647e9e9c9a583 100644 (file)
@@ -1228,7 +1228,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                        kvm_timer_sync_user(vcpu);
 
-               if (vcpu_has_nv(vcpu))
+               if (is_hyp_ctxt(vcpu))
                        kvm_timer_sync_nested(vcpu);
 
                kvm_arch_vcpu_ctxsync_fp(vcpu);