u32 cause);
        int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
                         u32 cause);
+       unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+       int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
        int (*get_one_reg)(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg, s64 *v);
        int (*set_one_reg)(struct kvm_vcpu *vcpu,
 
        KVM_REG_MIPS_COUNT_HZ,
 };
 
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+       unsigned long ret;
+
+       ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+       ret += kvm_mips_callbacks->num_regs(vcpu);
+
+       return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+       if (copy_to_user(indices, kvm_mips_get_one_regs,
+                        sizeof(kvm_mips_get_one_regs)))
+               return -EFAULT;
+       indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+       return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
 {
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
-               u64 __user *reg_dest;
                struct kvm_reg_list reg_list;
                unsigned n;
 
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
-               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+               reg_list.n = kvm_mips_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
-               reg_dest = user_list->reg;
-               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
-                                sizeof(kvm_mips_get_one_regs)))
-                       return -EFAULT;
-               return 0;
+               return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
        }
        case KVM_NMI:
                /* Treat the NMI as a CPU reset */
 
        return 0;
 }
 
+static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                         u64 __user *indices)
+{
+       return 0;
+}
+
 static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
+       .num_regs = kvm_trap_emul_num_regs,
+       .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,