From: David Woodhouse
Date: Thu, 21 Jan 2021 11:28:42 +0000 (+0000)
Subject: KVM: x86/xen: Fix initialisation of gfn caches for Xen shared pages
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=3eadbaba08f380d0b870928f19a40e9a1f6e826c;p=users%2Fdwmw2%2Flinux.git

KVM: x86/xen: Fix initialisation of gfn caches for Xen shared pages

When kvm_gfn_to_hva_cache_init() is used to cache the address of the
guest pages which KVM needs to access, it uses kvm_memslots(), which
requires the SRCU read lock to be held. Take that around the whole of
the kvm_xen_hvm_set_attr() function.

Signed-off-by: David Woodhouse
---

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4bc9da9fcfb80..46111de1df53e 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -219,11 +219,14 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
 	struct kvm_vcpu *v;
 	int r = -ENOENT;
+	int idx = srcu_read_lock(&kvm->srcu);
 
 	switch (data->type) {
 	case KVM_XEN_ATTR_TYPE_LONG_MODE:
-		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode)
-			return -EINVAL;
+		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
+			r = -EINVAL;
+			goto out;
+		}
 
 		kvm->arch.xen.long_mode = !!data->u.long_mode;
 		r = 0;
@@ -235,8 +238,11 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			goto out;
+		}
+
 		/* No compat necessary here. */
 		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
 			     sizeof(struct compat_vcpu_info));
@@ -247,7 +253,7 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct vcpu_info));
 		if (r)
-			return r;
+			goto out;
 
 		v->arch.xen.vcpu_info_set = true;
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -255,14 +261,16 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			goto out;
+		}
 
 		r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.vcpu_time_info_cache,
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct pvclock_vcpu_time_info));
 		if (r)
-			return r;
+			goto out;
 
 		v->arch.xen.vcpu_time_info_set = true;
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -270,14 +278,16 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_RUNSTATE:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			goto out;
+		}
 
 		r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.runstate_cache,
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct vcpu_runstate_info));
 		if (r)
-			return r;
+			goto out;
 
 		v->arch.xen.runstate_set = true;
 		v->arch.xen.current_runstate = RUNSTATE_blocked;
@@ -285,8 +295,10 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 
 	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
-		if (data->u.vector < 0x10)
-			return -EINVAL;
+		if (data->u.vector < 0x10) {
+			r = -EINVAL;
+			goto out;
+		}
 
 		kvm->arch.xen.upcall_vector = data->u.vector;
 		r = 0;
@@ -296,6 +308,8 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 	}
 
+out:
+	srcu_read_unlock(&kvm->srcu, idx);
 	return r;
 }
 
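
A minimal sketch of the locking rule the fix relies on (not part of the patch
above): kvm_gfn_to_hva_cache_init() walks the memslots, so the caller must be
inside an SRCU read-side critical section on kvm->srcu. The helper name and
its gpa/len parameters below are illustrative only.

#include <linux/kvm_host.h>

/*
 * Illustrative helper, not from the patch: initialise a gfn_to_hva_cache
 * with the SRCU read lock held around the memslot lookup, mirroring the
 * locking that the fix adds to kvm_xen_hvm_set_attr().
 */
static int xen_cache_init_locked(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 gpa_t gpa, unsigned long len)
{
	int idx, r;

	/* Enter the SRCU read-side critical section on kvm->srcu. */
	idx = srcu_read_lock(&kvm->srcu);

	/* Safe now: the memslot lookup cannot race with memslot updates. */
	r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);

	/* Always drop the lock, on both success and error. */
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

Note that the patch itself takes the lock once for the whole ioctl handler
rather than around each individual cache initialisation, which is why the
early returns are converted into goto out.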