vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
+/*
+ * Helpers to update and extract a bit field within the GHCB MSR value
+ * shared with the guest (kept in vmcb->control.ghcb_gpa).
+ */
+static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
+                             unsigned int pos)
+{
+       svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
+       svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
+}
+
+static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
+{
+       return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
+}
+
 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
 {
        svm->vmcb->control.ghcb_gpa = value;
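
The two helpers added above are a plain read-modify-write of a bit field inside the GHCB MSR value that KVM keeps in vmcb->control.ghcb_gpa: clear the field at 'pos', then OR in the masked value. A minimal standalone sketch of the same packing, outside the kernel (the set_bits() name and field defines are illustrative, and the 12-bit info field mask of 0xfff is an assumption based on how GHCB_MSR_INFO_MASK is used elsewhere in this file):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout: 12-bit info code at bits 11:0, 32-bit value at bits 63:32. */
    #define MSR_INFO_POS        0
    #define MSR_INFO_MASK       0xfffULL
    #define MSR_VALUE_POS       32
    #define MSR_VALUE_MASK      0xffffffffULL

    /* Same read-modify-write as set_ghcb_msr_bits(), minus the vCPU plumbing. */
    static void set_bits(uint64_t *msr, uint64_t value, uint64_t mask, unsigned int pos)
    {
            *msr &= ~(mask << pos);
            *msr |= (value & mask) << pos;
    }

    int main(void)
    {
            uint64_t msr = 0;

            set_bits(&msr, 0x80000008, MSR_VALUE_MASK, MSR_VALUE_POS); /* arbitrary value */
            set_bits(&msr, 0x005, MSR_INFO_MASK, MSR_INFO_POS);        /* CPUID response code */

            printf("0x%016llx\n", (unsigned long long)msr);            /* 0x8000000800000005 */
            return 0;
    }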
 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
        u64 ghcb_info;
+       int ret = 1;
 
        ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
 
                                                    GHCB_VERSION_MIN,
                                                    sev_enc_bit));
                break;
+       case GHCB_MSR_CPUID_REQ: {
+               u64 cpuid_fn, cpuid_reg, cpuid_value;
+
+               cpuid_fn = get_ghcb_msr_bits(svm,
+                                            GHCB_MSR_CPUID_FUNC_MASK,
+                                            GHCB_MSR_CPUID_FUNC_POS);
+
+               /* Initialize the registers needed by the CPUID intercept */
+               vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
+               vcpu->arch.regs[VCPU_REGS_RCX] = 0;
+
+               ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
+               if (!ret) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               cpuid_reg = get_ghcb_msr_bits(svm,
+                                             GHCB_MSR_CPUID_REG_MASK,
+                                             GHCB_MSR_CPUID_REG_POS);
+               if (cpuid_reg == 0)
+                       cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
+               else if (cpuid_reg == 1)
+                       cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
+               else if (cpuid_reg == 2)
+                       cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
+               else
+                       cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
+
+               set_ghcb_msr_bits(svm, cpuid_value,
+                                 GHCB_MSR_CPUID_VALUE_MASK,
+                                 GHCB_MSR_CPUID_VALUE_POS);
+
+               set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
+                                 GHCB_MSR_INFO_MASK,
+                                 GHCB_MSR_INFO_POS);
+               break;
+       }
        default:
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       return 1;
+       return ret;
 }
 
 int sev_handle_vmgexit(struct vcpu_svm *svm)
 
         (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |        \
         GHCB_MSR_SEV_INFO_RESP)
 
+#define GHCB_MSR_CPUID_REQ             0x004
+#define GHCB_MSR_CPUID_RESP            0x005
+#define GHCB_MSR_CPUID_FUNC_POS                32
+#define GHCB_MSR_CPUID_FUNC_MASK       0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS       32
+#define GHCB_MSR_CPUID_VALUE_MASK      0xffffffff
+#define GHCB_MSR_CPUID_REG_POS         30
+#define GHCB_MSR_CPUID_REG_MASK                0x3
+
 extern unsigned int max_sev_asid;
 
 static inline bool svm_sev_enabled(void)
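
Putting the new definitions together, the MSR-protocol CPUID exchange looks like this from the guest's side: the CPUID function goes in bits 63:32, the register selector (0 = EAX, 1 = EBX, 2 = ECX, 3 = EDX) in bits 31:30, and the request code 0x004 in the low info bits; after VMGEXIT the guest reads the selected register value back from bits 63:32 of the 0x005 response. A hedged userspace sketch of composing and decoding these values (build_cpuid_req() and the 0xfff info mask are illustrative assumptions, not part of this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define GHCB_MSR_INFO_MASK          0xfffULL    /* assumed 12-bit info field */
    #define GHCB_MSR_CPUID_REQ          0x004
    #define GHCB_MSR_CPUID_RESP         0x005
    #define GHCB_MSR_CPUID_FUNC_POS     32
    #define GHCB_MSR_CPUID_REG_POS      30
    #define GHCB_MSR_CPUID_REG_MASK     0x3ULL
    #define GHCB_MSR_CPUID_VALUE_POS    32
    #define GHCB_MSR_CPUID_VALUE_MASK   0xffffffffULL

    /* Hypothetical helper: build the value a guest would write to the GHCB MSR. */
    static uint64_t build_cpuid_req(uint32_t fn, unsigned int reg)
    {
            return ((uint64_t)fn << GHCB_MSR_CPUID_FUNC_POS) |
                   (((uint64_t)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) |
                   GHCB_MSR_CPUID_REQ;
    }

    int main(void)
    {
            /* Request EBX (selector 1) of an arbitrary CPUID function. */
            uint64_t req = build_cpuid_req(0x8000001f, 1);
            printf("request:  0x%016llx\n", (unsigned long long)req);  /* 0x8000001f40000004 */

            /* A made-up response value, as the hypervisor would leave it in the MSR. */
            uint64_t resp = (0x12345678ULL << GHCB_MSR_CPUID_VALUE_POS) | GHCB_MSR_CPUID_RESP;
            if ((resp & GHCB_MSR_INFO_MASK) == GHCB_MSR_CPUID_RESP)
                    printf("value:    0x%08llx\n",
                           (unsigned long long)((resp >> GHCB_MSR_CPUID_VALUE_POS) &
                                                GHCB_MSR_CPUID_VALUE_MASK));
            return 0;
    }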