www.infradead.org Git - users/hch/configfs.git/commitdiff
LoongArch: Fix AP booting issue in VM mode
author: Bibo Mao <maobibo@loongson.cn>
Tue, 12 Nov 2024 08:35:39 +0000 (16:35 +0800)
committer: Huacai Chen <chenhuacai@loongson.cn>
Tue, 12 Nov 2024 08:35:39 +0000 (16:35 +0800)
Native IPI is used for AP booting, because it is the booting interface
between OS and BIOS firmware. The paravirt IPI is only used inside OS,
and native IPI is necessary to boot AP.

When booting AP, we write the kernel entry address in the HW mailbox of
AP and send IPI interrupt to it. AP executes idle instruction and waits
for interrupts or SW events, then clears IPI interrupt and jumps to the
kernel entry from HW mailbox.

Between writing the HW mailbox and sending the IPI, the AP can be woken up
by SW events and jump to the kernel entry, so the ACTION_BOOT_CPU IPI
interrupt will remain pending during AP booting. The native IPI interrupt
handler needs to be registered so that it can clear the pending native IPI,
otherwise there will be endless interrupts during the AP booting stage.

Here native IPI interrupt is initialized even if paravirt IPI is used.

Cc: stable@vger.kernel.org
Fixes: 74c16b2e2b0c ("LoongArch: KVM: Add PV IPI support on guest side")
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/kernel/paravirt.c

index a5fc61f8b3482cfb93d12a44f65ab8a7c83b4f0f..e5a39bbad07801c0ea18aca63b81bc7e0485ceb7 100644 (file)
@@ -51,11 +51,18 @@ static u64 paravt_steal_clock(int cpu)
 }
 
 #ifdef CONFIG_SMP
+static struct smp_ops native_ops;
+
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
        int min, old;
        irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
 
+       if (unlikely(action == ACTION_BOOT_CPU)) {
+               native_ops.send_ipi_single(cpu, action);
+               return;
+       }
+
        old = atomic_fetch_or(BIT(action), &info->message);
        if (old)
                return;
@@ -75,6 +82,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
        if (cpumask_empty(mask))
                return;
 
+       if (unlikely(action == ACTION_BOOT_CPU)) {
+               native_ops.send_ipi_mask(mask, action);
+               return;
+       }
+
        action = BIT(action);
        for_each_cpu(i, mask) {
                info = &per_cpu(irq_stat, i);
@@ -147,6 +159,8 @@ static void pv_init_ipi(void)
 {
        int r, swi;
 
+       /* Init native ipi irq for ACTION_BOOT_CPU */
+       native_ops.init_ipi();
        swi = get_percpu_irq(INT_SWI0);
        if (swi < 0)
                panic("SWI0 IRQ mapping failed\n");
@@ -193,6 +207,7 @@ int __init pv_ipi_init(void)
                return 0;
 
 #ifdef CONFIG_SMP
+       native_ops              = mp_ops;
        mp_ops.init_ipi         = pv_init_ipi;
        mp_ops.send_ipi_single  = pv_send_ipi_single;
        mp_ops.send_ipi_mask    = pv_send_ipi_mask;