   MIPS  | KVM_REG_MIPS_CP0_CONFIG5      | 32
   MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
   MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1    | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2    | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH3    | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4    | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5    | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6    | 64
   MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
   MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
   MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
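
These registers are accessed through the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal userspace sketch, not part of this patch: "vcpu_fd" is assumed to be an already-open vCPU file descriptor, and the register-ID constant is assumed to be a userspace copy of the 64-bit CP0 ID documented above. The ioctl fails with EINVAL if the vCPU does not implement the requested KScratch register.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read guest KScratch1 into *val; returns 0 on success, or -1 with
     * errno == EINVAL if this KScratch register is not implemented. */
    static int read_kscratch1(int vcpu_fd, uint64_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_MIPS_CP0_KSCRATCH1, /* assumed userspace definition */
                    .addr = (uintptr_t)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }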
 
 #define KVM_REG_MIPS_CP0_CONFIG7       MIPS_CP0_32(16, 7)
 #define KVM_REG_MIPS_CP0_XCONTEXT      MIPS_CP0_64(20, 0)
 #define KVM_REG_MIPS_CP0_ERROREPC      MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1     MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2     MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3     MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4     MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5     MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6     MIPS_CP0_64(31, 7)
 
 
 #define KVM_MAX_VCPUS          1
 
        u8 fpu_enabled;
        u8 msa_enabled;
+       u8 kscratch_enabled;
 };
 
 
 #define kvm_write_c0_guest_config7(cop0, val)  (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
 #define kvm_read_c0_guest_errorepc(cop0)       (cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+#define kvm_read_c0_guest_kscratch1(cop0)      (cop0->reg[MIPS_CP0_DESAVE][2])
+#define kvm_read_c0_guest_kscratch2(cop0)      (cop0->reg[MIPS_CP0_DESAVE][3])
+#define kvm_read_c0_guest_kscratch3(cop0)      (cop0->reg[MIPS_CP0_DESAVE][4])
+#define kvm_read_c0_guest_kscratch4(cop0)      (cop0->reg[MIPS_CP0_DESAVE][5])
+#define kvm_read_c0_guest_kscratch5(cop0)      (cop0->reg[MIPS_CP0_DESAVE][6])
+#define kvm_read_c0_guest_kscratch6(cop0)      (cop0->reg[MIPS_CP0_DESAVE][7])
+#define kvm_write_c0_guest_kscratch1(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
+#define kvm_write_c0_guest_kscratch2(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
+#define kvm_write_c0_guest_kscratch3(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
+#define kvm_write_c0_guest_kscratch4(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
+#define kvm_write_c0_guest_kscratch5(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
+#define kvm_write_c0_guest_kscratch6(cop0, val)        (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
 
 /*
  * Some of the guest registers may be modified asynchronously (e.g. from a
 
 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
 {
        /* Config5 is optional */
-       return MIPS_CONF_M;
+       unsigned int mask = MIPS_CONF_M;
+
+       /* KScrExist */
+       mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+
+       return mask;
 }
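
For illustration only (not part of the patch): the architectural KScrExist field occupies Config4 bits 23:16, one bit per CP0 Register 31 select, so shifting kscratch_enabled left by 16 lines each "present" bit up with its KScrExist bit. With the default kscratch_enabled of 0xfc set in kvm_trap_emul_vcpu_init() below, the resulting write mask is:

    unsigned int kscratch_enabled = 0xfc;   /* selects 2..7 present */
    unsigned int wrmask = MIPS_CONF_M | (kscratch_enabled << 16);
    /* wrmask == MIPS_CONF_M | 0x00fc0000: KScrExist bits 18..23 become writable */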
 
 /**
 
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kdebug.h>
        KVM_REG_MIPS_MSA_CSR,
 };
 
+static u64 kvm_mips_get_one_regs_kscratch[] = {
+       KVM_REG_MIPS_CP0_KSCRATCH1,
+       KVM_REG_MIPS_CP0_KSCRATCH2,
+       KVM_REG_MIPS_CP0_KSCRATCH3,
+       KVM_REG_MIPS_CP0_KSCRATCH4,
+       KVM_REG_MIPS_CP0_KSCRATCH5,
+       KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 {
        unsigned long ret;
        }
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+       ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
        ret += kvm_mips_callbacks->num_regs(vcpu);
 
        return ret;
                }
        }
 
+       for (i = 0; i < 6; ++i) {
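+               /* KScratch(i+1) lives at CP0 reg 31, select i+2; skip if absent */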
+               if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
+                       continue;
+
+               if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
+                                sizeof(kvm_mips_get_one_regs_kscratch[i])))
+                       return -EFAULT;
+               ++indices;
+       }
+
        return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
 }
 
        case KVM_REG_MIPS_CP0_ERROREPC:
                v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
+       case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+               idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+               if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+                       return -EINVAL;
+               switch (idx) {
+               case 2:
+                       v = (long)kvm_read_c0_guest_kscratch1(cop0);
+                       break;
+               case 3:
+                       v = (long)kvm_read_c0_guest_kscratch2(cop0);
+                       break;
+               case 4:
+                       v = (long)kvm_read_c0_guest_kscratch3(cop0);
+                       break;
+               case 5:
+                       v = (long)kvm_read_c0_guest_kscratch4(cop0);
+                       break;
+               case 6:
+                       v = (long)kvm_read_c0_guest_kscratch5(cop0);
+                       break;
+               case 7:
+                       v = (long)kvm_read_c0_guest_kscratch6(cop0);
+                       break;
+               }
+               break;
        /* registers to be handled specially */
        default:
                ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
+       case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+               idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+               if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+                       return -EINVAL;
+               switch (idx) {
+               case 2:
+                       kvm_write_c0_guest_kscratch1(cop0, v);
+                       break;
+               case 3:
+                       kvm_write_c0_guest_kscratch2(cop0, v);
+                       break;
+               case 4:
+                       kvm_write_c0_guest_kscratch3(cop0, v);
+                       break;
+               case 5:
+                       kvm_write_c0_guest_kscratch4(cop0, v);
+                       break;
+               case 6:
+                       kvm_write_c0_guest_kscratch5(cop0, v);
+                       break;
+               case 7:
+                       kvm_write_c0_guest_kscratch6(cop0, v);
+                       break;
+               }
+               break;
        /* registers to be handled specially */
        default:
                return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 
        { KVM_TRACE_COP0(16, 7),        "Config7" },            \
        { KVM_TRACE_COP0(26, 0),        "ECC" },                \
        { KVM_TRACE_COP0(30, 0),        "ErrorEPC" },           \
+       { KVM_TRACE_COP0(31, 2),        "KScratch1" },          \
+       { KVM_TRACE_COP0(31, 3),        "KScratch2" },          \
+       { KVM_TRACE_COP0(31, 4),        "KScratch3" },          \
+       { KVM_TRACE_COP0(31, 5),        "KScratch4" },          \
+       { KVM_TRACE_COP0(31, 6),        "KScratch5" },          \
+       { KVM_TRACE_COP0(31, 7),        "KScratch6" },          \
        { KVM_TRACE_HWR( 0, 0),         "CPUNum" },             \
        { KVM_TRACE_HWR( 1, 0),         "SYNCI_Step" },         \
        { KVM_TRACE_HWR( 2, 0),         "CC" },                 \
 
 
 static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
 {
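+       /* Expose KScratch1..KScratch6 (CP0 reg 31, selects 2..7) to the guest */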
+       vcpu->arch.kscratch_enabled = 0xfc;
+
        return 0;
 }