return 0;
 }
 
-static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
-                                       unsigned long *haddr,
-                                       enum kvm_pgtable_prot prot)
+
+/**
+ * hyp_alloc_private_va_range - Allocates a private VA range.
+ * @size:      The size of the VA range to reserve.
+ * @haddr:     The hypervisor virtual start address of the allocation.
+ *
+ * The private virtual address (VA) range is allocated below io_map_base
+ * and aligned based on the order of @size.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
 {
        unsigned long base;
        int ret = 0;
 
-       if (!kvm_host_owns_hyp_mappings()) {
-               base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
-                                        phys_addr, size, prot);
-               if (IS_ERR_OR_NULL((void *)base))
-                       return PTR_ERR((void *)base);
-               *haddr = base;
-
-               return 0;
-       }
-
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        /*
         *
         * The allocated size is always a multiple of PAGE_SIZE.
         */
+       /* Carve the range out of the region immediately below io_map_base. */
+       base = io_map_base - PAGE_ALIGN(size);
-       size = PAGE_ALIGN(size + offset_in_page(phys_addr));
-       base = io_map_base - size;
+
+       /* Align the allocation based on the order of its size */
+       base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));
 
        /*
         * Verify that BIT(VA_BITS - 1) hasn't been flipped by
        if ((base ^ io_map_base) & BIT(VA_BITS - 1))
                ret = -ENOMEM;
        else
+               /* Commit the new watermark and report the start address. */
+               *haddr = io_map_base = base;
-               io_map_base = base;
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
 
+       return ret;
+}
+
+/*
+ * __create_hyp_private_mapping - Map @phys_addr into a freshly allocated
+ * private hyp VA range with protection @prot, returning the hyp VA of the
+ * mapping (adjusted for the sub-page offset of @phys_addr) via @haddr.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
+                                       unsigned long *haddr,
+                                       enum kvm_pgtable_prot prot)
+{
+       unsigned long addr;
+       int ret = 0;
+
+       /*
+        * When the host does not own the hyp mappings (protected KVM),
+        * delegate the whole allocate-and-map operation to the hypervisor.
+        */
+       if (!kvm_host_owns_hyp_mappings()) {
+               addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
+                                        phys_addr, size, prot);
+               if (IS_ERR_VALUE(addr))
+                       return addr;
+               *haddr = addr;
+
+               return 0;
+       }
+
+       /* Round up so the range covers @phys_addr's sub-page offset too. */
+       size = PAGE_ALIGN(size + offset_in_page(phys_addr));
+       ret = hyp_alloc_private_va_range(size, &addr);
        if (ret)
-               goto out;
+               return ret;
 
-       ret = __create_hyp_mappings(base, size, phys_addr, prot);
+       ret = __create_hyp_mappings(addr, size, phys_addr, prot);
        if (ret)
-               goto out;
+               return ret;
 
-       *haddr = base + offset_in_page(phys_addr);
-out:
+       *haddr = addr + offset_in_page(phys_addr);
        return ret;
 }