}
 
 static void mark_vcpu_memory_idle(struct kvm_vm *vm,
-                                 struct perf_test_vcpu_args *vcpu_args)
+                                 struct memstress_vcpu_args *vcpu_args)
 {
        int vcpu_idx = vcpu_args->vcpu_idx;
        uint64_t base_gva = vcpu_args->gva;
        TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
 
        for (page = 0; page < pages; page++) {
-               uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+               uint64_t gva = base_gva + page * memstress_args.guest_page_size;
                uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
 
                if (!pfn) {
        return true;
 }
 
-static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
 {
        struct kvm_vcpu *vcpu = vcpu_args->vcpu;
-       struct kvm_vm *vm = perf_test_args.vm;
+       struct kvm_vm *vm = memstress_args.vm;
        int vcpu_idx = vcpu_args->vcpu_idx;
        int current_iteration = 0;
 
 /*
  * Run one iteration in which every vCPU touches its test memory with the
  * given access type; @description tags the iteration for logging.
  */
 static void access_memory(struct kvm_vm *vm, int nr_vcpus,
                          enum access_type access, const char *description)
 {
-       perf_test_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
+       /* ACCESS_READ => 0% writes; anything else => 100% writes. */
+       memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
        iteration_work = ITERATION_ACCESS_MEMORY;
        run_iteration(vm, nr_vcpus, description);
 }
        struct kvm_vm *vm;
        int nr_vcpus = params->nr_vcpus;
 
-       vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
+       vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
                                 params->backing_src, !overlap_memory_access);
 
-       perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
+       memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
 
        pr_info("\n");
        access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
        /* Set done to signal the vCPU threads to exit */
        done = true;
 
-       perf_test_join_vcpu_threads(nr_vcpus);
-       perf_test_destroy_vm(vm);
+       memstress_join_vcpu_threads(nr_vcpus);
+       memstress_destroy_vm(vm);
 }
 
 static void help(char *name)
 
 static size_t demand_paging_size;
 static char *guest_data_prototype;
 
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
        struct kvm_vcpu *vcpu = vcpu_args->vcpu;
        int vcpu_idx = vcpu_args->vcpu_idx;
        struct kvm_vm *vm;
        int r, i;
 
-       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
+       vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
                                 p->src_type, p->partition_vcpu_memory_access);
 
        demand_paging_size = get_backing_src_pagesz(p->src_type);
                TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
                for (i = 0; i < nr_vcpus; i++) {
-                       struct perf_test_vcpu_args *vcpu_args;
+                       struct memstress_vcpu_args *vcpu_args;
                        void *vcpu_hva;
                        void *vcpu_alias;
 
-                       vcpu_args = &perf_test_args.vcpu_args[i];
+                       vcpu_args = &memstress_args.vcpu_args[i];
 
                        /* Cache the host addresses of the region */
                        vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
                                            pipefds[i * 2], p->uffd_mode,
                                            p->uffd_delay, &uffd_args[i],
                                            vcpu_hva, vcpu_alias,
-                                           vcpu_args->pages * perf_test_args.guest_page_size);
+                                           vcpu_args->pages * memstress_args.guest_page_size);
                }
        }
 
        pr_info("Finished creating vCPUs and starting uffd threads\n");
 
        clock_gettime(CLOCK_MONOTONIC, &start);
-       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+       memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
        pr_info("Started all vCPUs\n");
 
-       perf_test_join_vcpu_threads(nr_vcpus);
+       memstress_join_vcpu_threads(nr_vcpus);
        ts_diff = timespec_elapsed(start);
        pr_info("All vCPU threads joined\n");
 
        pr_info("Total guest execution time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
        pr_info("Overall demand paging rate: %f pgs/sec\n",
-               perf_test_args.vcpu_args[0].pages * nr_vcpus /
+               memstress_args.vcpu_args[0].pages * nr_vcpus /
                ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
 
-       perf_test_destroy_vm(vm);
+       memstress_destroy_vm(vm);
 
        free(guest_data_prototype);
        if (p->uffd_mode) {
 
 static int iteration;
 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
        struct kvm_vcpu *vcpu = vcpu_args->vcpu;
        int vcpu_idx = vcpu_args->vcpu_idx;
        int i;
 
        for (i = 0; i < slots; i++) {
-               int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+               int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
                int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
 
                vm_mem_region_set_flags(vm, slot, flags);
        int i;
 
        for (i = 0; i < slots; i++) {
-               int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+               int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
 
                kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
        }
        int i;
 
        for (i = 0; i < slots; i++) {
-               int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+               int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
 
                kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
        }
        struct timespec clear_dirty_log_total = (struct timespec){0};
        int i;
 
-       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+       vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
                                 p->slots, p->backing_src,
                                 p->partition_vcpu_memory_access);
 
        pr_info("Random seed: %u\n", p->random_seed);
-       perf_test_set_random_seed(vm, p->random_seed);
-       perf_test_set_write_percent(vm, p->write_percent);
+       memstress_set_random_seed(vm, p->random_seed);
+       memstress_set_write_percent(vm, p->write_percent);
 
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
         * occurring during the dirty memory iterations below, which
         * would pollute the performance results.
         */
-       perf_test_set_write_percent(vm, 100);
-       perf_test_set_random_access(vm, false);
-       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+       memstress_set_write_percent(vm, 100);
+       memstress_set_random_access(vm, false);
+       memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
        /* Allow the vCPUs to populate memory */
        pr_debug("Starting iteration %d - Populating\n", iteration);
        pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
 
-       perf_test_set_write_percent(vm, p->write_percent);
-       perf_test_set_random_access(vm, p->random_access);
+       memstress_set_write_percent(vm, p->write_percent);
+       memstress_set_random_access(vm, p->random_access);
 
        while (iteration < p->iterations) {
                /*
         * wait for them to exit.
         */
        host_quit = true;
-       perf_test_join_vcpu_threads(nr_vcpus);
+       memstress_join_vcpu_threads(nr_vcpus);
 
        avg = timespec_div(get_dirty_log_total, p->iterations);
        pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
 
        free_bitmaps(bitmaps, p->slots);
        arch_cleanup_vm(vm);
-       perf_test_destroy_vm(vm);
+       memstress_destroy_vm(vm);
 }
 
 static void help(char *name)
                        guest_modes_cmdline(optarg);
                        break;
                case 'n':
-                       perf_test_args.nested = true;
+                       memstress_args.nested = true;
                        break;
                case 'o':
                        p.partition_vcpu_memory_access = false;
        }
 
        if (pcpu_list) {
-               kvm_parse_vcpu_pinning(pcpu_list, perf_test_args.vcpu_to_pcpu,
+               kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
                                       nr_vcpus);
-               perf_test_args.pin_vcpus = true;
+               memstress_args.pin_vcpus = true;
        }
 
        TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
 
 
 #define DEFAULT_PER_VCPU_MEM_SIZE      (1 << 30) /* 1G */
 
-#define PERF_TEST_MEM_SLOT_INDEX       1
+#define MEMSTRESS_MEM_SLOT_INDEX       1
 
-struct perf_test_vcpu_args {
+/* Per-vCPU slice of the memstress test memory region. */
+struct memstress_vcpu_args {
+       /* Guest physical base of this vCPU's chunk of test memory. */
        uint64_t gpa;
+       /* Guest virtual base of this vCPU's chunk of test memory. */
        uint64_t gva;
+       /* Number of test pages assigned to this vCPU. */
        uint64_t pages;
+       /* This vCPU's index into memstress_args.vcpu_args[]. */
        int vcpu_idx;
 };
 
-struct perf_test_args {
+/*
+ * Global memstress configuration, exported to the guest via
+ * sync_global_to_guest().  NOTE(review): some fields appear elided in
+ * this excerpt — confirm against the full header.
+ */
+struct memstress_args {
        struct kvm_vm *vm;
        /* The starting address and size of the guest test region. */
        uint64_t gpa;
        /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
        uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
 
-       struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
+       /* Per-vCPU parameters, indexed by vCPU index. */
+       struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
-extern struct perf_test_args perf_test_args;
+extern struct memstress_args memstress_args;
 
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
+struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
                                   uint64_t vcpu_memory_bytes, int slots,
                                   enum vm_mem_backing_src_type backing_src,
                                   bool partition_vcpu_memory_access);
-void perf_test_destroy_vm(struct kvm_vm *vm);
+void memstress_destroy_vm(struct kvm_vm *vm);
 
-void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
-void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
-void perf_test_set_random_access(struct kvm_vm *vm, bool random_access);
+void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
+void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
 
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
-void perf_test_join_vcpu_threads(int vcpus);
-void perf_test_guest_code(uint32_t vcpu_id);
+void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
+void memstress_join_vcpu_threads(int vcpus);
+void memstress_guest_code(uint32_t vcpu_id);
 
-uint64_t perf_test_nested_pages(int nr_vcpus);
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+uint64_t memstress_nested_pages(int nr_vcpus);
+void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
 
 #endif /* SELFTEST_KVM_MEMSTRESS_H */
 
 #include "memstress.h"
 #include "processor.h"
 
-struct perf_test_args perf_test_args;
+struct memstress_args memstress_args;
 
 /*
  * Guest virtual memory offset of the testing memory slot.
 static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
 
 /* The function run by each vCPU thread, as provided by the test. */
-static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
+static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);
 
 /* Set to true once all vCPU threads are up and running. */
 static bool all_vcpu_threads_running;
  * Continuously write to the first 8 bytes of each page in the
  * specified region.
  */
-void perf_test_guest_code(uint32_t vcpu_idx)
+void memstress_guest_code(uint32_t vcpu_idx)
 {
-       struct perf_test_args *args = &perf_test_args;
-       struct perf_test_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
+       struct memstress_args *args = &memstress_args;
+       struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
        struct guest_random_state rand_state;
        uint64_t gva;
        uint64_t pages;
        }
 }
 
-void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
+void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
                           struct kvm_vcpu *vcpus[],
                           uint64_t vcpu_memory_bytes,
                           bool partition_vcpu_memory_access)
 {
-       struct perf_test_args *args = &perf_test_args;
-       struct perf_test_vcpu_args *vcpu_args;
+       struct memstress_args *args = &memstress_args;
+       struct memstress_vcpu_args *vcpu_args;
        int i;
 
        for (i = 0; i < nr_vcpus; i++) {
        }
 }
 
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
+struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
                                   uint64_t vcpu_memory_bytes, int slots,
                                   enum vm_mem_backing_src_type backing_src,
                                   bool partition_vcpu_memory_access)
 {
-       struct perf_test_args *args = &perf_test_args;
+       struct memstress_args *args = &memstress_args;
        struct kvm_vm *vm;
        uint64_t guest_num_pages, slot0_pages = 0;
        uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
         * in-memory data structures.
         */
        if (args->nested)
-               slot0_pages += perf_test_nested_pages(nr_vcpus);
+               slot0_pages += memstress_nested_pages(nr_vcpus);
 
        /*
         * Pass guest_num_pages to populate the page tables for test memory.
         * effect as KVM allows aliasing HVAs in memslots.
         */
        vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
-                                   perf_test_guest_code, vcpus);
+                                   memstress_guest_code, vcpus);
 
        args->vm = vm;
 
                vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
 
                vm_userspace_mem_region_add(vm, backing_src, region_start,
-                                           PERF_TEST_MEM_SLOT_INDEX + i,
+                                           MEMSTRESS_MEM_SLOT_INDEX + i,
                                            region_pages, 0);
        }
 
        /* Do mapping for the demand paging memory slot */
        virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
 
-       perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
+       memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
                              partition_vcpu_memory_access);
 
        if (args->nested) {
                pr_info("Configuring vCPUs to run in L2 (nested).\n");
-               perf_test_setup_nested(vm, nr_vcpus, vcpus);
+               memstress_setup_nested(vm, nr_vcpus, vcpus);
        }
 
        ucall_init(vm, NULL);
 
        /* Export the shared variables to the guest. */
-       sync_global_to_guest(vm, perf_test_args);
+       sync_global_to_guest(vm, memstress_args);
 
        return vm;
 }
 
-void perf_test_destroy_vm(struct kvm_vm *vm)
+/* Tear down a memstress VM: uninit ucall support, then free the VM. */
+void memstress_destroy_vm(struct kvm_vm *vm)
 {
        ucall_uninit(vm);
        kvm_vm_free(vm);
 }
 
-void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
+/*
+ * Set the percentage of guest accesses that are writes, and propagate the
+ * value into the guest's copy of memstress_args.
+ */
+void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
 {
-       perf_test_args.write_percent = write_percent;
-       sync_global_to_guest(vm, perf_test_args.write_percent);
+       memstress_args.write_percent = write_percent;
+       sync_global_to_guest(vm, memstress_args.write_percent);
 }
 
-void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
+/*
+ * Set the seed for the guest's PRNG and propagate it into the guest's
+ * copy of memstress_args.
+ */
+void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
 {
-       perf_test_args.random_seed = random_seed;
-       sync_global_to_guest(vm, perf_test_args.random_seed);
+       memstress_args.random_seed = random_seed;
+       sync_global_to_guest(vm, memstress_args.random_seed);
 }
 
-void perf_test_set_random_access(struct kvm_vm *vm, bool random_access)
+/*
+ * Enable/disable random-access mode in the guest workload and propagate
+ * the flag into the guest's copy of memstress_args.
+ */
+void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
 {
-       perf_test_args.random_access = random_access;
-       sync_global_to_guest(vm, perf_test_args.random_access);
+       memstress_args.random_access = random_access;
+       sync_global_to_guest(vm, memstress_args.random_access);
 }
 
-uint64_t __weak perf_test_nested_pages(int nr_vcpus)
+/*
+ * Weak default: architectures without a nested implementation need no
+ * extra pages for nested paging structures.
+ */
+uint64_t __weak memstress_nested_pages(int nr_vcpus)
 {
        return 0;
 }
 
-void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
+void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
 {
        pr_info("%s() not support on this architecture, skipping.\n", __func__);
        exit(KSFT_SKIP);
        struct vcpu_thread *vcpu = data;
        int vcpu_idx = vcpu->vcpu_idx;
 
-       if (perf_test_args.pin_vcpus)
-               kvm_pin_this_task_to_pcpu(perf_test_args.vcpu_to_pcpu[vcpu_idx]);
+       if (memstress_args.pin_vcpus)
+               kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
 
        WRITE_ONCE(vcpu->running, true);
 
        while (!READ_ONCE(all_vcpu_threads_running))
                ;
 
-       vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu_idx]);
+       vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);
 
        return NULL;
 }
 
-void perf_test_start_vcpu_threads(int nr_vcpus,
-                                 void (*vcpu_fn)(struct perf_test_vcpu_args *))
+void memstress_start_vcpu_threads(int nr_vcpus,
+                                 void (*vcpu_fn)(struct memstress_vcpu_args *))
 {
        int i;
 
        WRITE_ONCE(all_vcpu_threads_running, true);
 }
 
-void perf_test_join_vcpu_threads(int nr_vcpus)
+void memstress_join_vcpu_threads(int nr_vcpus)
 {
        int i;
 
 
 #include "processor.h"
 #include "vmx.h"
 
-void perf_test_l2_guest_code(uint64_t vcpu_id)
+/*
+ * L2 (nested) guest entry: run the normal memstress guest workload, then
+ * VMCALL to exit back to L1.
+ */
+void memstress_l2_guest_code(uint64_t vcpu_id)
 {
-       perf_test_guest_code(vcpu_id);
+       memstress_guest_code(vcpu_id);
        vmcall();
 }
 
-extern char perf_test_l2_guest_entry[];
+extern char memstress_l2_guest_entry[];
+/*
+ * L2 entry trampoline: L1 stores vcpu_id at the top of the L2 stack, so
+ * load it into %rdi (first argument) and call the C entry point; ud2
+ * traps if it ever returns.
+ */
 __asm__(
-"perf_test_l2_guest_entry:"
+"memstress_l2_guest_entry:"
 "      mov (%rsp), %rdi;"
-"      call perf_test_l2_guest_code;"
+"      call memstress_l2_guest_code;"
 "      ud2;"
 );
 
-static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
 {
 #define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
        rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
        *rsp = vcpu_id;
-       prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
+       prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
 
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
        GUEST_DONE();
 }
 
-uint64_t perf_test_nested_pages(int nr_vcpus)
+uint64_t memstress_nested_pages(int nr_vcpus)
 {
        /*
         * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
        return 513 + 10 * nr_vcpus;
 }
 
-void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
+void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
 {
        uint64_t start, end;
 
         */
        nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
 
-       start = align_down(perf_test_args.gpa, PG_SIZE_1G);
-       end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
+       start = align_down(memstress_args.gpa, PG_SIZE_1G);
+       end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
        nested_identity_map_1g(vmx, vm, start, end - start);
 }
 
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
+void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
 {
        struct vmx_pages *vmx, *vmx0 = NULL;
        struct kvm_regs regs;
                vmx = vcpu_alloc_vmx(vm, &vmx_gva);
 
                if (vcpu_id == 0) {
-                       perf_test_setup_ept(vmx, vm);
+                       memstress_setup_ept(vmx, vm);
                        vmx0 = vmx;
                } else {
                        /* Share the same EPT table across all vCPUs. */
                }
 
                /*
-                * Override the vCPU to run perf_test_l1_guest_code() which will
-                * bounce it into L2 before calling perf_test_guest_code().
+                * Override the vCPU to run memstress_l1_guest_code() which will
+                * bounce it into L2 before calling memstress_guest_code().
                 */
                vcpu_regs_get(vcpus[vcpu_id], ®s);
-               regs.rip = (unsigned long) perf_test_l1_guest_code;
+               regs.rip = (unsigned long) memstress_l1_guest_code;
                vcpu_regs_set(vcpus[vcpu_id], ®s);
                vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
        }
 
 
 static bool run_vcpus = true;
 
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
        struct kvm_vcpu *vcpu = vcpu_args->vcpu;
        struct kvm_run *run;
         * Add the dummy memslot just below the memstress memslot, which is
         * at the top of the guest physical address space.
         */
-       gpa = perf_test_args.gpa - pages * vm->page_size;
+       gpa = memstress_args.gpa - pages * vm->page_size;
 
        for (i = 0; i < nr_modifications; i++) {
                usleep(delay);
        struct test_params *p = arg;
        struct kvm_vm *vm;
 
-       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
+       vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
                                 VM_MEM_SRC_ANONYMOUS,
                                 p->partition_vcpu_memory_access);
 
        pr_info("Finished creating vCPUs\n");
 
-       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+       memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
        pr_info("Started all vCPUs\n");
 
 
        run_vcpus = false;
 
-       perf_test_join_vcpu_threads(nr_vcpus);
+       memstress_join_vcpu_threads(nr_vcpus);
        pr_info("All vCPU threads joined\n");
 
-       perf_test_destroy_vm(vm);
+       memstress_destroy_vm(vm);
 }
 
 static void help(char *name)