www.infradead.org Git - users/dwmw2/linux.git/commitdiff
Catch dwmw2's deadlock kvm-srcu-lockdep
authorMichal Luczaj <mhal@rbox.co>
Sat, 31 Dec 2022 01:26:49 +0000 (02:26 +0100)
committerDavid Woodhouse <dwmw@amazon.co.uk>
Fri, 13 Jan 2023 10:00:16 +0000 (10:00 +0000)
On 12/30/22 14:18, Joel Fernandes wrote:
> I think the patch from Matthew Wilcox will address it because the
> read side section already acquires the dep_map. So lockdep subsystem
> should be able to nail the dependency. (...)

Perhaps it's something misconfigured on my side, but I still don't see any
lockdep splats, just the usual task hang warning after 120s.

If that's any help, here's a crude selftest (actually a severed version of
xen_shinfo_test). Under current mainline 6.2.0-rc1 it results in exactly
the type of deadlocks described by David.

Michal

tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/x86_64/deadlocks_test.c [new file with mode: 0644]

index 1750f91dd936291a524cc89859a09ea0c1aea46b..0f02a4fe93748518050c6b45045a7e9e0d595483 100644 (file)
@@ -61,6 +61,7 @@ TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
 # Compiled test targets
 TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
 TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/deadlocks_test
 TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
 TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
 TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
diff --git a/tools/testing/selftests/kvm/x86_64/deadlocks_test.c b/tools/testing/selftests/kvm/x86_64/deadlocks_test.c
new file mode 100644 (file)
index 0000000..e6150a6
--- /dev/null
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define RACE_ITERATIONS                        0x1000
+
+#define EVTCHN_PORT                    1
+#define XEN_HYPERCALL_MSR              0x40000000
+
+#define EVTCHNSTAT_interdomain         2
+#define __HYPERVISOR_event_channel_op  32
+#define EVTCHNOP_send                  4
+
+#define VCPU_INFO_REGION_GPA           0xc0000000ULL
+#define VCPU_INFO_REGION_SLOT          10
+
+/* Hypercall argument for EVTCHNOP_send: the event channel port to signal. */
+struct evtchn_send {
+       u32 port;
+};
+
+/*
+ * Bind EVTCHN_PORT to itself as an interdomain event channel delivered
+ * to vCPU 0 via the 2-level ABI.
+ */
+static void evtchn_assign(struct kvm_vm *vm)
+{
+       struct kvm_xen_hvm_attr attr = {
+               .type = KVM_XEN_ATTR_TYPE_EVTCHN,
+       };
+
+       attr.u.evtchn.flags = 0;
+       attr.u.evtchn.send_port = EVTCHN_PORT;
+       attr.u.evtchn.type = EVTCHNSTAT_interdomain;
+       attr.u.evtchn.deliver.port.port = EVTCHN_PORT;
+       attr.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+       attr.u.evtchn.deliver.port.vcpu = 0;
+
+       vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &attr);
+}
+
+/*
+ * Worker thread: repeatedly install an allow-everything MSR filter until
+ * cancelled.  pthread_testcancel() gives a cancellation point between
+ * ioctls so the racing ioctl itself is never interrupted mid-call.
+ */
+static void *set_msr_filter(void *arg)
+{
+       struct kvm_vm *vm = arg;
+       struct kvm_msr_filter filter = {
+               .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+       };
+
+       while (1) {
+               vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
+               pthread_testcancel();
+       }
+
+       return NULL;
+}
+
+/*
+ * Worker thread: repeatedly install an empty allow-list PMU event filter
+ * until cancelled, with a cancellation point between ioctls.
+ */
+static void *set_pmu_filter(void *arg)
+{
+       struct kvm_vm *vm = arg;
+       struct kvm_pmu_event_filter filter = {
+               .action = KVM_PMU_EVENT_ALLOW,
+       };
+
+       while (1) {
+               vm_ioctl(vm, KVM_SET_PMU_EVENT_FILTER, &filter);
+               pthread_testcancel();
+       }
+
+       return NULL;
+}
+
+/*
+ * Guest entry point: endlessly issue EVTCHNOP_send hypercalls for
+ * EVTCHN_PORT.  GUEST_SYNC(0) presumably bounces control back to the
+ * host runner after each hypercall (selftest ucall) — each iteration
+ * thus exercises the host-side event-channel delivery path once.
+ */
+static void guest_code(void)
+{
+       struct evtchn_send s = { .port = EVTCHN_PORT };
+
+       for (;;) {
+               /* Xen hypercall ABI: rax = hypercall nr, rdi = op, rsi = arg. */
+               asm volatile("vmcall"
+                            :
+                            : "a" (__HYPERVISOR_event_channel_op),
+                              "D" (EVTCHNOP_send),
+                              "S" (&s)
+                            : "memory");
+               GUEST_SYNC(0);
+       }
+}
+
+/*
+ * Run @vcpu for RACE_ITERATIONS exits while @func hammers a VM-wide ioctl
+ * from a second thread, then cancel and reap the worker.  A deadlock on
+ * either side shows up as this function never returning (the 120s hung
+ * task warning fires in the kernel).
+ */
+static void race_against(struct kvm_vcpu *vcpu, void *(*func)(void *))
+{
+       pthread_t thread;
+       void *retval;
+       int i, ret;
+
+       ret = pthread_create(&thread, NULL, func, (void *)vcpu->vm);
+       TEST_ASSERT(ret == 0, "pthread_create() failed: %s", strerror(ret));
+
+       for (i = 0; i < RACE_ITERATIONS; ++i) {
+               fprintf(stderr, ".");
+               vcpu_run(vcpu);
+       }
+       printf("\n");
+
+       ret = pthread_cancel(thread);
+       TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));
+
+       /*
+        * Reap the worker and confirm it actually exited because of our
+        * cancellation request rather than returning on its own.
+        */
+       ret = pthread_join(thread, &retval);
+       TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
+       TEST_ASSERT(retval == PTHREAD_CANCELED,
+                   "worker thread was not terminated by cancellation");
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       unsigned int xen_caps;
+
+       /* The test needs KVM to intercept Xen hypercalls itself. */
+       xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+       TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
+
+       vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+       vcpu_set_hv_cpuid(vcpu);
+
+       /* Enable Xen support without placing a real shared-info page. */
+       struct kvm_xen_hvm_attr shinfo = {
+               .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+               .u.shared_info.gfn = KVM_XEN_INVALID_GFN,
+       };
+       vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &shinfo);
+
+       /* Back the vcpu_info GPA with one page of anonymous memory. */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   VCPU_INFO_REGION_GPA, VCPU_INFO_REGION_SLOT, 1, 0);
+
+       struct kvm_xen_vcpu_attr vcpu_info = {
+               .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+               .u.gpa = VCPU_INFO_REGION_GPA,
+       };
+       vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vcpu_info);
+
+       /* Route the guest's vmcall hypercalls to KVM's in-kernel handler. */
+       struct kvm_xen_hvm_config hvm_config = {
+               .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
+               .msr = XEN_HYPERCALL_MSR
+       };
+       vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvm_config);
+
+       evtchn_assign(vm);
+
+       /* Race EVTCHNOP_send delivery against each filter-setting ioctl. */
+       race_against(vcpu, set_msr_filter);
+       race_against(vcpu, set_pmu_filter);
+
+       kvm_vm_free(vm);
+       return 0;
+}