print_test_banner(mode, p);
 
-       vm = ____vm_create(mode);
+       vm = ____vm_create(VM_SHAPE(mode));
        setup_memslots(vm, p);
        kvm_vm_elf_load(vm, program_invocation_name);
        setup_ucall(vm);
 
 
        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-       vm = __vm_create(mode, 1, extra_mem_pages);
+       vm = __vm_create(VM_SHAPE(mode), 1, extra_mem_pages);
 
        log_mode_create_vm_done(vm);
        *vcpu = vm_vcpu_add(vm, 0, guest_code);
 
        NUM_VM_MODES,
 };
 
+/*
+ * The "shape" of a VM: the guest mode plus the VM type that is passed to
+ * KVM_CREATE_VM.  Bundling the two lets tests create VMs with a non-default
+ * type without adding yet another parameter to every creation helper.
+ */
+struct vm_shape {
+       enum vm_guest_mode mode;
+       unsigned int type;
+};
+
+/* KVM's default VM type is '0' on all architectures. */
+#define VM_TYPE_DEFAULT                        0
+
+/* Build a vm_shape with the given guest mode and the default VM type. */
+#define VM_SHAPE(__mode)                       \
+({                                             \
+       struct vm_shape shape = {               \
+               .mode = (__mode),               \
+               .type = VM_TYPE_DEFAULT         \
+       };                                      \
+                                               \
+       shape;                                  \
+})
+
 #if defined(__aarch64__)
 
 extern enum vm_guest_mode vm_mode_default;
 
 #endif
 
+/* Default shape: default guest mode, default (0) VM type. */
+#define VM_SHAPE_DEFAULT       VM_SHAPE(VM_MODE_DEFAULT)
+
 #define MIN_PAGE_SIZE          (1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE      ptes_per_page(MIN_PAGE_SIZE)
 
  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
-struct kvm_vm *____vm_create(enum vm_guest_mode mode);
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *____vm_create(struct vm_shape shape);
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages);
 
+/* Create a barebones VM (no vCPUs, no memory) with the default shape. */
 static inline struct kvm_vm *vm_create_barebones(void)
 {
-       return ____vm_create(VM_MODE_DEFAULT);
+       return ____vm_create(VM_SHAPE_DEFAULT);
 }
 
+/*
+ * Create a default-shape VM sized for @nr_runnable_vcpus; vCPUs themselves
+ * are NOT created (see __vm_create()).
+ */
 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 {
-       return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
+       return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[]);
 
                                                  void *guest_code,
                                                  struct kvm_vcpu *vcpus[])
 {
-       return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
+       return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
                                      guest_code, vcpus);
 }
 
+
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                              struct kvm_vcpu **vcpu,
+                                              uint64_t extra_mem_pages,
+                                              void *guest_code);
+
 /*
  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
  * additional pages of guest memory.  Returns the VM and vCPU (via out param).
  */
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-                                        uint64_t extra_mem_pages,
-                                        void *guest_code);
+/*
+ * Default-shape wrapper; forwards to the shape-aware helper so existing
+ * callers are unaffected by the vm_shape conversion.
+ */
+static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+                                                      uint64_t extra_mem_pages,
+                                                      void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
+                                              extra_mem_pages, guest_code);
+}
 
 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                     void *guest_code)
        return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
 }
 
+/* Create a VM of the given shape with a single vCPU and no extra memory. */
+static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                                          struct kvm_vcpu **vcpu,
+                                                          void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
+}
+
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
 
 
        /* Create a VM with enough guest pages */
        guest_num_pages = test_mem_size / guest_page_size;
-       vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
+       vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages,
                                    guest_code, test_args.vcpus);
 
        /* Align down GPA of the testing memslot */
 
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 }
 
-struct kvm_vm *____vm_create(enum vm_guest_mode mode)
+struct kvm_vm *____vm_create(struct vm_shape shape)
 {
        struct kvm_vm *vm;
 
        vm->regions.hva_tree = RB_ROOT;
        hash_init(vm->regions.slot_hash);
 
-       vm->mode = mode;
-       vm->type = 0;
+       vm->mode = shape.mode;
+       vm->type = shape.type;
 
-       vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
-       vm->va_bits = vm_guest_mode_params[mode].va_bits;
-       vm->page_size = vm_guest_mode_params[mode].page_size;
-       vm->page_shift = vm_guest_mode_params[mode].page_shift;
+       vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
+       vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
+       vm->page_size = vm_guest_mode_params[vm->mode].page_size;
+       vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
 
        /* Setup mode specific traits. */
        switch (vm->mode) {
                /*
                 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
                 * it doesn't take effect unless a CR4.LA57 is set, which it
-                * isn't for this VM_MODE.
+                * isn't for this mode (48-bit virtual address space).
                 */
                TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
                            "Linear address width (%d bits) not supported",
                vm->pgtable_levels = 5;
                break;
        default:
-               TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
+               TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
        }
 
 #ifdef __aarch64__
+       TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
        if (vm->pa_bits != 40)
                vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
 #endif
        return vm_adjust_num_guest_pages(mode, nr_pages);
 }
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages)
 {
-       uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+       uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
                                                 nr_extra_pages);
        struct userspace_mem_region *slot0;
        struct kvm_vm *vm;
        int i;
 
-       pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
-                vm_guest_mode_string(mode), nr_pages);
+       pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
+                vm_guest_mode_string(shape.mode), shape.type, nr_pages);
 
-       vm = ____vm_create(mode);
+       vm = ____vm_create(shape);
 
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
        for (i = 0; i < NR_MEM_REGIONS; i++)
  * extra_mem_pages is only used to calculate the maximum page table size,
  * no real memory allocation for non-slot0 memory in this function.
  */
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[])
 {
 
        TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
 
-       vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
+       vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
 
        for (i = 0; i < nr_vcpus; ++i)
                vcpus[i] = vm_vcpu_add(vm, i, guest_code);
        return vm;
 }
 
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-                                        uint64_t extra_mem_pages,
-                                        void *guest_code)
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                              struct kvm_vcpu **vcpu,
+                                              uint64_t extra_mem_pages,
+                                              void *guest_code)
 {
        struct kvm_vcpu *vcpus[1];
        struct kvm_vm *vm;
 
-       vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
-                                   guest_code, vcpus);
+       vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);
 
        *vcpu = vcpus[0];
        return vm;
 
         * The memory is also added to memslot 0, but that's a benign side
         * effect as KVM allows aliasing HVAs in memslots.
         */
-       vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
+       vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
+                                   slot0_pages + guest_num_pages,
                                    memstress_guest_code, vcpus);
 
        args->vm = vm;
 
 
        kvm_check_cap(KVM_CAP_MCE);
 
-       vm = __vm_create(VM_MODE_DEFAULT, 3, 0);
+       vm = __vm_create(VM_SHAPE_DEFAULT, 3, 0);
 
        kvm_ioctl(vm->kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED,
                  &supported_mcg_caps);