 uint32_t xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
 
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t *gfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned domid,
-                              struct page **pages)
-{
-       return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
-                                        prot, domid, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t gfn, int nr,
-                              pgprot_t prot, unsigned domid,
-                              struct page **pages)
-{
-       return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
 {
        return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t *mfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned int domid, struct page **pages)
-{
-       return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
 static void xen_read_wallclock(struct timespec64 *ts)
 {
        u32 version;
 
 CFLAGS_mmu_pv.o                        := $(nostackp)
 
 obj-y                          += enlighten.o
-obj-y                          += multicalls.o
 obj-y                          += mmu.o
 obj-y                          += time.o
 obj-y                          += grant-table.o
 obj-$(CONFIG_XEN_PV)           += enlighten_pv.o
 obj-$(CONFIG_XEN_PV)           += mmu_pv.o
 obj-$(CONFIG_XEN_PV)           += irq.o
+obj-$(CONFIG_XEN_PV)           += multicalls.o
 obj-$(CONFIG_XEN_PV)           += xen-asm.o
 obj-$(CONFIG_XEN_PV)           += xen-asm_$(BITS).o
 
 
 #include "multicalls.h"
 #include "mmu.h"
 
-/*
- * Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and balloon lists.
- */
-DEFINE_SPINLOCK(xen_reservation_lock);
-
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
 {
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
 
        return PFN_DOWN(maddr.maddr);
 }
 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
-static noinline void xen_flush_tlb_all(void)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-
-       preempt_disable();
-
-       mcs = xen_mc_entry(sizeof(*op));
-
-       op = mcs.args;
-       op->cmd = MMUEXT_TLB_FLUSH_ALL;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-       preempt_enable();
-}
-
-#define REMAP_BATCH_SIZE 16
-
-struct remap_data {
-       xen_pfn_t *pfn;
-       bool contiguous;
-       bool no_translate;
-       pgprot_t prot;
-       struct mmu_update *mmu_update;
-};
-
-static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
-                                unsigned long addr, void *data)
-{
-       struct remap_data *rmd = data;
-       pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
-
-       /*
-        * If we have a contiguous range, just update the pfn itself,
-        * else update pointer to be "next pfn".
-        */
-       if (rmd->contiguous)
-               (*rmd->pfn)++;
-       else
-               rmd->pfn++;
-
-       rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
-       rmd->mmu_update->ptr |= rmd->no_translate ?
-               MMU_PT_UPDATE_NO_TRANSLATE :
-               MMU_NORMAL_PT_UPDATE;
-       rmd->mmu_update->val = pte_val_ma(pte);
-       rmd->mmu_update++;
-
-       return 0;
-}
-
-static int do_remap_pfn(struct vm_area_struct *vma,
-                       unsigned long addr,
-                       xen_pfn_t *pfn, int nr,
-                       int *err_ptr, pgprot_t prot,
-                       unsigned int domid,
-                       bool no_translate,
-                       struct page **pages)
-{
-       int err = 0;
-       struct remap_data rmd;
-       struct mmu_update mmu_update[REMAP_BATCH_SIZE];
-       unsigned long range;
-       int mapped = 0;
-
-       BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
-
-       rmd.pfn = pfn;
-       rmd.prot = prot;
-       /*
-        * We use the err_ptr to indicate if there we are doing a contiguous
-        * mapping or a discontigious mapping.
-        */
-       rmd.contiguous = !err_ptr;
-       rmd.no_translate = no_translate;
-
-       while (nr) {
-               int index = 0;
-               int done = 0;
-               int batch = min(REMAP_BATCH_SIZE, nr);
-               int batch_left = batch;
-               range = (unsigned long)batch << PAGE_SHIFT;
-
-               rmd.mmu_update = mmu_update;
-               err = apply_to_page_range(vma->vm_mm, addr, range,
-                                         remap_area_pfn_pte_fn, &rmd);
-               if (err)
-                       goto out;
-
-               /* We record the error for each page that gives an error, but
-                * continue mapping until the whole set is done */
-               do {
-                       int i;
-
-                       err = HYPERVISOR_mmu_update(&mmu_update[index],
-                                                   batch_left, &done, domid);
-
-                       /*
-                        * @err_ptr may be the same buffer as @gfn, so
-                        * only clear it after each chunk of @gfn is
-                        * used.
-                        */
-                       if (err_ptr) {
-                               for (i = index; i < index + done; i++)
-                                       err_ptr[i] = 0;
-                       }
-                       if (err < 0) {
-                               if (!err_ptr)
-                                       goto out;
-                               err_ptr[i] = err;
-                               done++; /* Skip failed frame. */
-                       } else
-                               mapped += done;
-                       batch_left -= done;
-                       index += done;
-               } while (batch_left);
-
-               nr -= batch;
-               addr += range;
-               if (err_ptr)
-                       err_ptr += batch;
-               cond_resched();
-       }
-out:
-
-       xen_flush_tlb_all();
-
-       return err < 0 ? err : mapped;
-}
-
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t gfn, int nr,
-                              pgprot_t prot, unsigned domid,
-                              struct page **pages)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return -EOPNOTSUPP;
-
-       return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
-                           pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t *gfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned domid, struct page **pages)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
-                                                prot, domid, pages);
-
-       /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
-        * and the consequences later is quite hard to detect what the actual
-        * cause of "wrong memory was mapped in".
-        */
-       BUG_ON(err_ptr == NULL);
-       return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
-                           false, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t *mfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned int domid, struct page **pages)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return -EOPNOTSUPP;
-
-       return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
-                           true, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
 
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
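
For orientation, the reservation-changing paths here take this lock in the
usual spin_lock_irqsave() pattern; a minimal sketch, illustrative only and
not part of the patch:

    unsigned long flags;

    spin_lock_irqsave(&xen_reservation_lock, flags);
    /* ... decrease/exchange/increase the memory reservation ... */
    spin_unlock_irqrestore(&xen_reservation_lock, flags);
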
 /*
  * Note about cr3 (pagetable base) values:
  *
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
+static noinline void xen_flush_tlb_all(void)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
+
+       op = mcs.args;
+       op->cmd = MMUEXT_TLB_FLUSH_ALL;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
+}
+
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+       xen_pfn_t *pfn;
+       bool contiguous;
+       bool no_translate;
+       pgprot_t prot;
+       struct mmu_update *mmu_update;
+};
+
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
+                                unsigned long addr, void *data)
+{
+       struct remap_data *rmd = data;
+       pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
+
+       /*
+        * If we have a contiguous range, just update the pfn itself,
+        * else update pointer to be "next pfn".
+        */
+       if (rmd->contiguous)
+               (*rmd->pfn)++;
+       else
+               rmd->pfn++;
+
+       rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+       rmd->mmu_update->ptr |= rmd->no_translate ?
+               MMU_PT_UPDATE_NO_TRANSLATE :
+               MMU_NORMAL_PT_UPDATE;
+       rmd->mmu_update->val = pte_val_ma(pte);
+       rmd->mmu_update++;
+
+       return 0;
+}
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+                 unsigned int domid, bool no_translate, struct page **pages)
+{
+       int err = 0;
+       struct remap_data rmd;
+       struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+       unsigned long range;
+       int mapped = 0;
+
+       BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+       rmd.pfn = pfn;
+       rmd.prot = prot;
+       /*
+        * We use err_ptr to indicate whether we are doing a contiguous
+        * or a discontiguous mapping.
+        */
+       rmd.contiguous = !err_ptr;
+       rmd.no_translate = no_translate;
+
+       while (nr) {
+               int index = 0;
+               int done = 0;
+               int batch = min(REMAP_BATCH_SIZE, nr);
+               int batch_left = batch;
+
+               range = (unsigned long)batch << PAGE_SHIFT;
+
+               rmd.mmu_update = mmu_update;
+               err = apply_to_page_range(vma->vm_mm, addr, range,
+                                         remap_area_pfn_pte_fn, &rmd);
+               if (err)
+                       goto out;
+
+               /*
+                * We record the error for each frame that fails, but
+                * continue mapping until the whole set is done.
+                */
+               do {
+                       int i;
+
+                       err = HYPERVISOR_mmu_update(&mmu_update[index],
+                                                   batch_left, &done, domid);
+
+                       /*
+                        * @err_ptr may be the same buffer as @pfn, so
+                        * only clear it after each chunk of @pfn is
+                        * used.
+                        */
+                       if (err_ptr) {
+                               for (i = index; i < index + done; i++)
+                                       err_ptr[i] = 0;
+                       }
+                       if (err < 0) {
+                               if (!err_ptr)
+                                       goto out;
+                               err_ptr[i] = err;
+                               done++; /* Skip failed frame. */
+                       } else
+                               mapped += done;
+                       batch_left -= done;
+                       index += done;
+               } while (batch_left);
+
+               nr -= batch;
+               addr += range;
+               if (err_ptr)
+                       err_ptr += batch;
+               cond_resched();
+       }
+out:
+
+       xen_flush_tlb_all();
+
+       return err < 0 ? err : mapped;
+}
+EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
 #ifdef CONFIG_KEXEC_CORE
 phys_addr_t paddr_vmcoreinfo_note(void)
 {
 
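
For reference, a minimal sketch of how a caller might drive the consolidated
entry point; example_map_batch() and its error policy are invented for
illustration, and the real in-tree callers are the inline wrappers added to
include/xen/xen-ops.h below:

    /* Illustrative caller only; not part of this patch. */
    static int example_map_batch(struct vm_area_struct *vma, unsigned long addr,
                                 xen_pfn_t *gfns, int *errs, int nr,
                                 unsigned int domid)
    {
            /*
             * A non-NULL errs selects "array" mode: one frame per gfns[]
             * entry, with per-frame status written back into errs[].
             */
            int mapped = xen_remap_pfn(vma, addr, gfns, nr, errs,
                                       vma->vm_page_prot, domid,
                                       false /* no_translate */, NULL);

            return mapped < 0 ? mapped : (mapped == nr ? 0 : -EFAULT);
    }
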
 #define XENMEM_machine_memory_map   10
 
 
-/*
- * Prevent the balloon driver from changing the memory reservation
- * during a driver critical region.
- */
-extern spinlock_t xen_reservation_lock;
-
 /*
  * Unmaps the page appearing at a particular GPFN from the specified guest's
  * pseudophysical address space.
 
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/efi.h>
+#include <xen/features.h>
 #include <asm/xen/interface.h>
 #include <xen/interface/vcpu.h>
 
                                dma_addr_t *dma_handle);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+                 unsigned int domid, bool no_translate, struct page **pages);
 #else
 static inline int xen_create_contiguous_region(phys_addr_t pstart,
                                               unsigned int order,
 
 static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
                                                 unsigned int order) { }
+
+static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                               xen_pfn_t *pfn, int nr, int *err_ptr,
+                               pgprot_t prot, unsigned int domid,
+                               bool no_translate, struct page **pages)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 struct vm_area_struct;
 
+#ifdef CONFIG_XEN_AUTO_XLATE
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+                             unsigned long addr,
+                             xen_pfn_t *gfn, int nr,
+                             int *err_ptr, pgprot_t prot,
+                             unsigned int domid,
+                             struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+                             int nr, struct page **pages);
+#else
+/*
+ * These two functions are called from the static inline wrappers below and
+ * from arch code, so stubs are needed for a configuration not specifying
+ * CONFIG_XEN_AUTO_XLATE.
+ */
+static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+                                           unsigned long addr,
+                                           xen_pfn_t *gfn, int nr,
+                                           int *err_ptr, pgprot_t prot,
+                                           unsigned int domid,
+                                           struct page **pages)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+                                           int nr, struct page **pages)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 /*
  * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
  * @vma:     VMA to map the pages into
  * Returns the number of successfully mapped frames, or a -ve error
  * code.
  */
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t *gfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned domid,
-                              struct page **pages);
+static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
+                                            unsigned long addr,
+                                            xen_pfn_t *gfn, int nr,
+                                            int *err_ptr, pgprot_t prot,
+                                            unsigned int domid,
+                                            struct page **pages)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+                                                prot, domid, pages);
+
+       /*
+        * We BUG_ON() here because passing a NULL err_ptr is a programmer
+        * error: the consequence, the wrong memory being mapped in, is
+        * otherwise very hard to track down later.
+        */
+       BUG_ON(err_ptr == NULL);
+       return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+                            false, pages);
+}
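
A hedged usage sketch for the gfn-array wrapper, loosely modelled on
privcmd-style batch mapping; the helper name is hypothetical:

    /* Hypothetical helper: map a batch of foreign GFNs with per-frame status. */
    static int example_mmap_batch(struct vm_area_struct *vma, unsigned long addr,
                                  xen_pfn_t *gfns, int *errs, int nr,
                                  unsigned int domid, struct page **pages)
    {
            /*
             * errs must be non-NULL on PV; pages is consumed only on
             * auto-translated guests.
             */
            return xen_remap_domain_gfn_array(vma, addr, gfns, nr, errs,
                                              vma->vm_page_prot, domid, pages);
    }
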
 
 /*
  * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
  * Returns the number of successfully mapped frames, or a -ve error
  * code.
  */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-                              unsigned long addr, xen_pfn_t *mfn, int nr,
-                              int *err_ptr, pgprot_t prot,
-                              unsigned int domid, struct page **pages);
+static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+                                            unsigned long addr, xen_pfn_t *mfn,
+                                            int nr, int *err_ptr,
+                                            pgprot_t prot, unsigned int domid,
+                                            struct page **pages)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -EOPNOTSUPP;
+
+       return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+                            true, pages);
+}
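
The mfn variant requests an untranslated machine-frame mapping and is
therefore PV-only; a hypothetical caller looks the same apart from
no_translate being set internally:

    /* Hypothetical caller; returns -EOPNOTSUPP on auto-translated guests. */
    static int example_map_mfns(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *mfns, int *errs, int nr,
                                unsigned int domid)
    {
            return xen_remap_domain_mfn_array(vma, addr, mfns, nr, errs,
                                              vma->vm_page_prot, domid, NULL);
    }
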
 
 /* xen_remap_domain_gfn_range() - map a range of foreign frames
  * @vma:     VMA to map the pages into
  * Returns the number of successfully mapped frames, or a -ve error
  * code.
  */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t gfn, int nr,
-                              pgprot_t prot, unsigned domid,
-                              struct page **pages);
-int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
-                              int numpgs, struct page **pages);
-
-#ifdef CONFIG_XEN_AUTO_XLATE
-int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
-                             unsigned long addr,
-                             xen_pfn_t *gfn, int nr,
-                             int *err_ptr, pgprot_t prot,
-                             unsigned domid,
-                             struct page **pages);
-int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
-                             int nr, struct page **pages);
-#else
-/*
- * These two functions are called from arch/x86/xen/mmu.c and so stubs
- * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
- */
-static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
-                                           unsigned long addr,
-                                           xen_pfn_t *gfn, int nr,
-                                           int *err_ptr, pgprot_t prot,
-                                           unsigned int domid,
-                                           struct page **pages)
+static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+                                            unsigned long addr,
+                                            xen_pfn_t gfn, int nr,
+                                            pgprot_t prot, unsigned int domid,
+                                            struct page **pages)
 {
-       return -EOPNOTSUPP;
-}
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -EOPNOTSUPP;
 
-static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
-                                           int nr, struct page **pages)
-{
-       return -EOPNOTSUPP;
+       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+                            pages);
 }
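
Finally, the range variant maps nr consecutive frames starting from a single
gfn, so no per-frame error array is involved (hypothetical caller):

    /* Hypothetical: map a contiguous range of foreign frames. */
    static int example_map_range(struct vm_area_struct *vma, unsigned long addr,
                                 xen_pfn_t first_gfn, int nr,
                                 unsigned int domid)
    {
            /* Returns the number of frames mapped, or a -ve error. */
            return xen_remap_domain_gfn_range(vma, addr, first_gfn, nr,
                                              vma->vm_page_prot, domid, NULL);
    }
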
-#endif
+
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+                              int numpgs, struct page **pages);
 
 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
                                  unsigned long nr_grant_frames);