/* Minimum allocated guest virtual and physical addresses */
 #define KVM_UTIL_MIN_VADDR             0x2000
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
 
 #define DEFAULT_GUEST_PHY_PAGES                512
 #define DEFAULT_GUEST_STACK_VADDR_MIN  0xab6000
                             uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                              vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
 /*
  * Create a VM with reasonable defaults
 
 #include "../kvm_util_internal.h"
 #include "processor.h"
 
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR         0x180000
 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN    0xac0000
 
 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
                paddr, vm->max_gfn, vm->page_size);
 
        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
-       if (!*ptep) {
-               *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-               *ptep |= 3;
-       }
+       if (!*ptep)
+               *ptep = vm_alloc_page_table(vm) | 3;
 
        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
-               if (!*ptep) {
-                       *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-                       *ptep |= 3;
-               }
+               if (!*ptep)
+                       *ptep = vm_alloc_page_table(vm) | 3;
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
-               if (!*ptep) {
-                       *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-                       *ptep |= 3;
-               }
+               if (!*ptep)
+                       *ptep = vm_alloc_page_table(vm) | 3;
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
 
        return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+/*
+ * Allocate a single physical page for use as a guest page table, via
+ * vm_phy_page_alloc() at or above KVM_GUEST_PAGE_TABLE_MIN_PADDR in
+ * memslot 0.  Common helper shared by all architectures' MMU code.
+ */
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+       return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
 /*
  * Address Guest Virtual to Host Virtual
  *
 
 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
 
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR         0x180000
-
 #define PAGES_PER_REGION 4
 
 void virt_pgd_alloc(struct kvm_vm *vm)
 
 #define DEFAULT_CODE_SELECTOR 0x8
 #define DEFAULT_DATA_SELECTOR 0x10
 
-/* Minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
 vm_vaddr_t exception_handlers;
 
 /* Virtual translation table structure declarations */
 
        /* If needed, create page map l4 table. */
        if (!vm->pgd_created) {
-               vm_paddr_t paddr = vm_phy_page_alloc(vm,
-                       KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-               vm->pgd = paddr;
+               vm->pgd = vm_alloc_page_table(vm);
                vm->pgd_created = true;
        }
 }
        /* Allocate page directory pointer table if not present. */
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present) {
-               pml4e[index[3]].address = vm_phy_page_alloc(vm,
-                       KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].present = true;
        }
        struct pageDirectoryPointerEntry *pdpe;
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present) {
-               pdpe[index[2]].address = vm_phy_page_alloc(vm,
-                       KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].present = true;
        }
        struct pageDirectoryEntry *pde;
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present) {
-               pde[index[1]].address = vm_phy_page_alloc(vm,
-                       KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].present = true;
        }
 
        /* Allocate page directory pointer table if not present. */
        pml4e = vmx->eptp_hva;
        if (!pml4e[index[3]].readable) {
-               pml4e[index[3]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].readable = true;
                pml4e[index[3]].executable = true;
        struct eptPageTableEntry *pdpe;
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].readable) {
-               pdpe[index[2]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].readable = true;
                pdpe[index[2]].executable = true;
        struct eptPageTableEntry *pde;
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].readable) {
-               pde[index[1]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-                       >> vm->page_shift;
+               pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].readable = true;
                pde[index[1]].executable = true;