const struct kvm_one_reg *reg);
 int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid);
 bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
 
        return copy_sbi_ext_reg_indices(vcpu, NULL);
 }
 
+static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       int n = num_sbi_regs(vcpu);
+
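+       /* A NULL uindices means the caller only wants the register count. */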
+       for (int i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                         KVM_REG_RISCV_SBI_STATE | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
 {
        if (!riscv_isa_extension_available(vcpu->arch.isa, v))
        res += num_vector_regs(vcpu);
        res += num_isa_ext_regs(vcpu);
        res += num_sbi_ext_regs(vcpu);
+       res += num_sbi_regs(vcpu);
 
        return res;
 }
        ret = copy_sbi_ext_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
+       uindices += ret;
+
+       ret = copy_sbi_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
 
        return 0;
 }
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
+       case KVM_REG_RISCV_SBI_STATE:
+               return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
        default:
                break;
        }
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
+       case KVM_REG_RISCV_SBI_STATE:
+               return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
        default:
                break;
        }
 
        return 0;
 }
 
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_SBI_STATE);
+       unsigned long reg_subtype, reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
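+       /* No KVM_REG_RISCV_SBI_STATE subtypes are handled yet. */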
+       switch (reg_subtype) {
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_SBI_STATE);
+       unsigned long reg_subtype, reg_val;
+       int ret;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       default:
+               return -EINVAL;
+       }
+
+       if (ret)
+               return ret;
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
 {