out:
	if (kick) {
		kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
-
-		if (target_vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
-			kvm_make_request(KVM_REQ_UNBLOCK, target_vcpu);
-
		kvm_vcpu_kick(target_vcpu);
	}
	return p;
}
-void sev_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-	if (!sev_snp_guest(vcpu->kvm))
-		return;
-
-	if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu) &&
-	    vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-}
-
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
{
	struct kvm_memory_slot *slot;
	return page_address(page);
}
-static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-	sev_vcpu_unblocking(vcpu);
-	avic_vcpu_unblocking(vcpu);
-}
-
static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = avic_vcpu_blocking,
-	.vcpu_unblocking = svm_vcpu_unblocking,
+	.vcpu_unblocking = avic_vcpu_unblocking,
	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
-void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
-static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{