RISC-V: KVM: Mark the existing SBI implementation as v0.1
author Atish Patra <atish.patra@wdc.com>
Thu, 18 Nov 2021 08:39:08 +0000 (00:39 -0800)
committer Anup Patel <anup@brainfault.org>
Thu, 6 Jan 2022 09:08:52 +0000 (14:38 +0530)
The existing SBI implementation follows the v0.1
specification. The latest specification allows more scalability
and performance improvements.

Rename the existing implementation to v0.1 and provide a way
to add future extensions.

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
arch/riscv/include/asm/kvm_vcpu_sbi.h [new file with mode: 0644]
arch/riscv/kvm/vcpu_sbi.c

diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
new file mode 100644 (file)
index 0000000..1a4cb0d
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/**
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_VERSION_MAJOR 0
+#define KVM_SBI_VERSION_MINOR 2
+
+struct kvm_vcpu_sbi_extension {
+       unsigned long extid_start;
+       unsigned long extid_end;
+       /**
+        * SBI extension handler. It can be defined for a given extension or a
+        * group of extensions, but it should always return Linux error codes
+        * rather than SBI-specific error codes.
+        */
+       int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                      unsigned long *out_val, struct kvm_cpu_trap *utrap,
+                      bool *exit);
+};
+
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */
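The struct above is the whole extension interface: a new extension only
needs an extension ID range and a handler, plus an entry in the sbi_ext[]
array added to vcpu_sbi.c below. As a rough sketch of what a future
extension could look like (SBI_EXT_EXAMPLE and all names here are
hypothetical, not part of this patch):

	static int kvm_sbi_ext_example_handler(struct kvm_vcpu *vcpu,
					       struct kvm_run *run,
					       unsigned long *out_val,
					       struct kvm_cpu_trap *utrap,
					       bool *exit)
	{
		/*
		 * Do the extension-specific work here. Return Linux error
		 * codes; kvm_riscv_vcpu_sbi_ecall() maps them to SBI ones.
		 */
		*out_val = 0;
		return 0;
	}

	static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_example = {
		.extid_start = SBI_EXT_EXAMPLE,	/* hypothetical extension ID */
		.extid_end = SBI_EXT_EXAMPLE,
		.handler = kvm_sbi_ext_example_handler,
	};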
index d0d2bcab2f7b1e8c6aed8c9248646a2982457ae3..8c5b50d2b27e8a8a98d13279b2aa8996b48f27cb 100644 (file)
 #include <asm/csr.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
 
-#define SBI_VERSION_MAJOR                      0
-#define SBI_VERSION_MINOR                      1
+static int kvm_linux_err_map_sbi(int err)
+{
+       switch (err) {
+       case 0:
+               return SBI_SUCCESS;
+       case -EPERM:
+               return SBI_ERR_DENIED;
+       case -EINVAL:
+               return SBI_ERR_INVALID_PARAM;
+       case -EFAULT:
+               return SBI_ERR_INVALID_ADDRESS;
+       case -EOPNOTSUPP:
+               return SBI_ERR_NOT_SUPPORTED;
+       default:
+               return SBI_ERR_FAILURE;
+       }
+}
 
 static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
@@ -72,21 +88,19 @@ static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
 
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                     unsigned long *out_val,
+                                     struct kvm_cpu_trap *utrap,
+                                     bool *exit)
 {
        ulong hmask;
-       int i, ret = 1;
+       int i, ret = 0;
        u64 next_cycle;
        struct kvm_vcpu *rvcpu;
-       bool next_sepc = true;
        struct cpumask cm, hm;
        struct kvm *kvm = vcpu->kvm;
-       struct kvm_cpu_trap utrap = { 0 };
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 
-       if (!cp)
-               return -EINVAL;
-
        switch (cp->a7) {
        case SBI_EXT_0_1_CONSOLE_GETCHAR:
        case SBI_EXT_0_1_CONSOLE_PUTCHAR:
@@ -95,8 +109,7 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 * handled in kernel so we forward these to user-space
                 */
                kvm_riscv_vcpu_sbi_forward(vcpu, run);
-               next_sepc = false;
-               ret = 0;
+               *exit = true;
                break;
        case SBI_EXT_0_1_SET_TIMER:
 #if __riscv_xlen == 32
@@ -104,47 +117,42 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #else
                next_cycle = (u64)cp->a0;
 #endif
-               kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+               ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
                break;
        case SBI_EXT_0_1_CLEAR_IPI:
-               kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
+               ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
                break;
        case SBI_EXT_0_1_SEND_IPI:
                if (cp->a0)
                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                          &utrap);
+                                                          utrap);
                else
                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-               if (utrap.scause) {
-                       utrap.sepc = cp->sepc;
-                       kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-                       next_sepc = false;
+               if (utrap->scause)
                        break;
-               }
+
                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
-                       kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
+                       ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
+                       if (ret < 0)
+                               break;
                }
                break;
        case SBI_EXT_0_1_SHUTDOWN:
                kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
-               next_sepc = false;
-               ret = 0;
+               *exit = true;
                break;
        case SBI_EXT_0_1_REMOTE_FENCE_I:
        case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
        case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
                if (cp->a0)
                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                          &utrap);
+                                                          utrap);
                else
                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-               if (utrap.scause) {
-                       utrap.sepc = cp->sepc;
-                       kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-                       next_sepc = false;
+               if (utrap->scause)
                        break;
-               }
+
                cpumask_clear(&cm);
                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
@@ -154,22 +162,97 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
                riscv_cpuid_to_hartid_mask(&cm, &hm);
                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
-                       sbi_remote_fence_i(cpumask_bits(&hm));
+                       ret = sbi_remote_fence_i(cpumask_bits(&hm));
                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
-                       sbi_remote_hfence_vvma(cpumask_bits(&hm),
+                       ret = sbi_remote_hfence_vvma(cpumask_bits(&hm),
                                                cp->a1, cp->a2);
                else
-                       sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
+                       ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
                                                cp->a1, cp->a2, cp->a3);
                break;
        default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+       .extid_start = SBI_EXT_0_1_SET_TIMER,
+       .extid_end = SBI_EXT_0_1_SHUTDOWN,
+       .handler = kvm_sbi_ext_v01_handler,
+};
+
+static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
+       &vcpu_sbi_ext_v01,
+};
+
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
+{
+       int i = 0;
+
+       for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+               if (sbi_ext[i]->extid_start <= extid &&
+                   sbi_ext[i]->extid_end >= extid)
+                       return sbi_ext[i];
+       }
+
+       return NULL;
+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int ret = 1;
+       bool next_sepc = true;
+       bool userspace_exit = false;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       const struct kvm_vcpu_sbi_extension *sbi_ext;
+       struct kvm_cpu_trap utrap = { 0 };
+       unsigned long out_val = 0;
+       bool ext_is_v01 = false;
+
+       sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
+       if (sbi_ext && sbi_ext->handler) {
+               if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
+                   cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
+                       ext_is_v01 = true;
+               ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
+       } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
-               break;
+               goto ecall_done;
+       }
+
+       /* Handle special error cases, i.e. trap, exit, or userspace forward */
+       if (utrap.scause) {
+               /* No need to increment sepc or exit ioctl loop */
+               ret = 1;
+               utrap.sepc = cp->sepc;
+               kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+               next_sepc = false;
+               goto ecall_done;
        }
 
+       /* Exit the ioctl loop or propagate the error code to the guest */
+       if (userspace_exit) {
+               next_sepc = false;
+               ret = 0;
+       } else {
+               /**
+                * The SBI extension handler always returns a Linux error
+                * code. Convert it to the SBI-specific error code that can
+                * be propagated to the SBI caller.
+                */
+               ret = kvm_linux_err_map_sbi(ret);
+               cp->a0 = ret;
+               ret = 1;
+       }
+ecall_done:
        if (next_sepc)
                cp->sepc += 4;
+       if (!ext_is_v01)
+               cp->a1 = out_val;
 
        return ret;
 }
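
For reference, the guest-visible convention this dispatch implements: for
non-v0.1 extensions the mapped SBI error code is returned in a0 and
out_val in a1, while legacy v0.1 calls only get a0 (a1 is left untouched).
A guest-side sketch, modeled on the kernel's own sbi_ecall() helper (the
wrapper name is hypothetical):

	struct sbiret {
		long error;	/* from kvm_linux_err_map_sbi(), via cp->a0 */
		long value;	/* from out_val, via cp->a1 */
	};

	static struct sbiret sbi_ecall_example(unsigned long ext,
					       unsigned long fid,
					       unsigned long arg0)
	{
		register unsigned long a0 asm("a0") = arg0;
		register unsigned long a1 asm("a1");
		register unsigned long a6 asm("a6") = fid;
		register unsigned long a7 asm("a7") = ext;
		struct sbiret ret;

		asm volatile("ecall"
			     : "+r" (a0), "=r" (a1)
			     : "r" (a6), "r" (a7)
			     : "memory");
		ret.error = a0;	/* SBI error code */
		ret.value = a1;	/* extension-specific value */
		return ret;
	}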