        TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
        INIT_LIST_HEAD(&vm->vcpus);
-       INIT_LIST_HEAD(&vm->userspace_mem_regions);
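+       /*
+        * Track regions three ways: rb-trees ordered by GPA and by HVA
+        * for range lookups, plus a hash table keyed by slot number.
+        */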
+       vm->regions.gpa_tree = RB_ROOT;
+       vm->regions.hva_tree = RB_ROOT;
+       hash_init(vm->regions.slot_hash);
 
        vm->mode = mode;
        vm->type = 0;
  */
 void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 {
+       int ctr;
        struct userspace_mem_region *region;
 
        vm_open(vmp, perm);
        if (vmp->has_irqchip)
                vm_create_irqchip(vmp);
 
-       list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
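+       /* Every region is in the slot hash, so this restores them all. */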
+       hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
                int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
                TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                            "  rc: %i errno: %i\n"
 static struct userspace_mem_region *
 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
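+       /*
+        * Regions never overlap, so a binary-search descent of the GPA
+        * tree finds a region intersecting [start, end] if one exists:
+        * stop on overlap, otherwise descend toward the side that could
+        * still contain 'start'.
+        */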
+       for (node = vm->regions.gpa_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, gpa_node);
                uint64_t existing_start = region->region.guest_phys_addr;
                uint64_t existing_end = region->region.guest_phys_addr
                        + region->region.memory_size - 1;
                if (start <= existing_end && end >= existing_start)
                        return region;
+
+               if (start < existing_start)
+                       node = node->rb_left;
+               else
+                       node = node->rb_right;
        }
 
        return NULL;
 }
 
 static void __vm_mem_region_delete(struct kvm_vm *vm,
-                                  struct userspace_mem_region *region)
+                                  struct userspace_mem_region *region,
+                                  bool unlink)
 {
        int ret;
 
-       list_del(&region->list);
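+       /*
+        * Skipping the unlink is only safe when the tracking structures
+        * themselves are being torn down, as in kvm_vm_free().
+        */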
+       if (unlink) {
+               rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+               rb_erase(&region->hva_node, &vm->regions.hva_tree);
+               hash_del(&region->slot_node);
+       }
 
        region->region.memory_size = 0;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
  */
 void kvm_vm_free(struct kvm_vm *vmp)
 {
-       struct userspace_mem_region *region, *tmp;
+       int ctr;
+       struct hlist_node *node;
+       struct userspace_mem_region *region;
 
        if (vmp == NULL)
                return;
 
        /* Free userspace_mem_regions. */
-       list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
-               __vm_mem_region_delete(vmp, region);
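+       /*
+        * The _safe iterator is required because each region is destroyed
+        * during deletion; unlink is false since the containers go away
+        * with the VM anyway.
+        */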
+       hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
+               __vm_mem_region_delete(vmp, region, false);
 
        /* Free sparsebit arrays. */
        sparsebit_free(&vmp->vpages_valid);
        return 0;
 }
 
+static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
+                                              struct userspace_mem_region *region)
+{
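+       /*
+        * Standard rb-tree insertion keyed by guest_phys_addr: descend to
+        * the empty link, then attach the node and rebalance.  A duplicate
+        * base GPA would indicate a caller bug.
+        */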
+       struct rb_node **cur, *parent;
+
+       for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), gpa_node);
+               parent = *cur;
+               if (region->region.guest_phys_addr <
+                   cregion->region.guest_phys_addr)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->region.guest_phys_addr !=
+                                   cregion->region.guest_phys_addr,
+                                   "Duplicate GPA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->gpa_node, parent, cur);
+       rb_insert_color(&region->gpa_node, gpa_tree);
+}
+
+static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
+                                              struct userspace_mem_region *region)
+{
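+       /*
+        * Same walk as the GPA variant, but keyed by host_mem so that
+        * addr_hva2gpa() can translate host addresses back to GPAs.
+        */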
+       struct rb_node **cur, *parent;
+
+       for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), hva_node);
+               parent = *cur;
+               if (region->host_mem < cregion->host_mem)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->host_mem !=
+                                   cregion->host_mem,
+                                   "Duplicate HVA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->hva_node, parent, cur);
+       rb_insert_color(&region->hva_node, hva_tree);
+}
+
 /*
  * VM Userspace Memory Region Add
  *
                        (uint64_t) region->region.memory_size);
 
        /* Confirm no region with the requested slot already exists. */
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
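+       /*
+        * hash_for_each_possible() walks only the bucket 'slot' hashes
+        * to, so the slot number still has to be compared to filter out
+        * hash collisions.
+        */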
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              slot) {
                if (region->region.slot != slot)
                        continue;
 
                ret, errno, slot, flags,
                guest_paddr, (uint64_t) region->region.memory_size);
 
-       /* Add to linked-list of memory regions. */
-       list_add(®ion->list, &vm->userspace_mem_regions);
+       /* Add to the quick-lookup data structures. */
+       vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
+       vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
+       hash_add(vm->regions.slot_hash, &region->slot_node, slot);
 }
 
 /*
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              memslot)
                if (region->region.slot == memslot)
                        return region;
-       }
 
        fprintf(stderr, "No mem region with the requested slot found,\n"
                "  requested slot: %u\n", memslot);
  */
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
 {
-       __vm_mem_region_delete(vm, memslot2region(vm, slot));
+       __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
 }
 
 /*
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((gpa >= region->region.guest_phys_addr)
-                       && (gpa <= (region->region.guest_phys_addr
-                               + region->region.memory_size - 1)))
-                       return (void *) ((uintptr_t) region->host_mem
-                               + (gpa - region->region.guest_phys_addr));
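+       /* A degenerate [gpa, gpa] range finds the region containing gpa. */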
+       region = userspace_mem_region_find(vm, gpa, gpa);
+       if (!region) {
+               TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+               return NULL;
        }
 
-       TEST_FAIL("No vm physical memory at 0x%lx", gpa);
-       return NULL;
+       return (void *)((uintptr_t)region->host_mem
+               + (gpa - region->region.guest_phys_addr));
 }
 
 /*
  */
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
+
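+       /*
+        * Descend the HVA tree: an hva below a region's base lies to the
+        * left, one past its end to the right; anything in between maps
+        * directly onto the region's GPA range.
+        */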
+       for (node = vm->regions.hva_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, hva_node);
+
+               if (hva >= region->host_mem) {
+                       if (hva <= (region->host_mem
+                               + region->region.memory_size - 1))
+                               return (vm_paddr_t)((uintptr_t)
+                                       region->region.guest_phys_addr
+                                       + (hva - (uintptr_t)region->host_mem));
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((hva >= region->host_mem)
-                       && (hva <= (region->host_mem
-                               + region->region.memory_size - 1)))
-                       return (vm_paddr_t) ((uintptr_t)
-                               region->region.guest_phys_addr
-                               + (hva - (uintptr_t) region->host_mem));
+                       node = node->rb_right;
+               } else
+                       node = node->rb_left;
        }
 
        TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
  */
 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
+       int ctr;
        struct userspace_mem_region *region;
        struct vcpu *vcpu;
 
        fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
        fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
        fprintf(stream, "%*sMem Regions:\n", indent, "");
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
                fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
                        "host_virt: %p\n", indent + 2, "",
                        (uint64_t) region->region.guest_phys_addr,