 #include <asm/segment.h>
 #include <asm/asm.h>
 #include <asm/boot.h>
+#include <asm/pgtable.h>
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/nospec-branch.h>
        btsl $_EFER_LME, %eax
        wrmsr
 
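+       /*
+        * %ebp is expected to hold the physical address pvh_start_xen
+        * is running from (set up by the earlier startup code).
+        * Subtracting the link-time address _pa(pvh_start_xen) gives
+        * the relocation offset; zero means the image runs where it
+        * was linked and no fixup is needed.
+        */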
+       mov %ebp, %ebx
+       subl $_pa(pvh_start_xen), %ebx /* offset */
+       jz .Lpagetable_done
+
+       /*
+        * Fixup page tables for relocation: add the load offset to
+        * each present L4 entry; non-present (zero) entries must stay
+        * zero.
+        */
+       leal rva(pvh_init_top_pgt)(%ebp), %edi
+       movl $PTRS_PER_PGD, %ecx
+2:
+       testl $_PAGE_PRESENT, 0x00(%edi)
+       jz 1f
+       addl %ebx, 0x00(%edi)
+1:
+       addl $8, %edi
+       decl %ecx
+       jnz 2b
+
+       /* L3 ident has a single entry. */
+       leal rva(pvh_level3_ident_pgt)(%ebp), %edi
+       addl %ebx, 0x00(%edi)
+
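+       /*
+        * L3 kernel: fix the last two slots, index 510
+        * (pvh_level2_kernel_pgt) and index 511 (the empty fixmap
+        * slot).  The offset is CONFIG_PHYSICAL_ALIGN aligned, so the
+        * zero slot stays non-present after the addition.
+        */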
+       leal rva(pvh_level3_kernel_pgt)(%ebp), %edi
+       addl %ebx, (PAGE_SIZE - 16)(%edi)
+       addl %ebx, (PAGE_SIZE - 8)(%edi)
+
+       /*
+        * pvh_level2_ident_pgt is fine - its large pages identity-map
+        * the first 1G regardless of the load address.
+        */
+
+       /*
+        * pvh_level2_kernel_pgt needs adjustment - its large pages
+        * must map the kernel's new physical location.
+        */
+       leal rva(pvh_level2_kernel_pgt)(%ebp), %edi
+       movl $PTRS_PER_PMD, %ecx
+2:
+       testl $_PAGE_PRESENT, 0x00(%edi)
+       jz 1f
+       addl %ebx, 0x00(%edi)
+1:
+       addl $8, %edi
+       decl %ecx
+       jnz 2b
+
+.Lpagetable_done:
        /* Enable pre-constructed page tables. */
-       leal rva(init_top_pgt)(%ebp), %eax
+       leal rva(pvh_init_top_pgt)(%ebp), %eax
        mov %eax, %cr3
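+       /* With EFER.LME already set, setting CR0.PG activates long mode. */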
        mov $(X86_CR0_PG | X86_CR0_PE), %eax
        mov %eax, %cr0
        .fill BOOT_STACK_SIZE, 1, 0
 SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
+#ifdef CONFIG_X86_64
+/*
+ * Xen PVH needs a set of identity-mapped and kernel high-mapping
+ * page tables.  pvh_start_xen starts running on the identity-mapped
+ * page tables, but xen_prepare_pvh calls into the high mapping.
+ * These page tables need to be relocatable and are only used until
+ * startup_64 transitions to init_top_pgt.
+ */
+SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
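+       /*
+        * Three slots are populated: 0 (identity map of low memory),
+        * L4_PAGE_OFFSET (the same ident tables aliased at the direct
+        * map) and L4_START_KERNEL (kernel high mapping).
+        */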
+       .quad   pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+       .org    pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0
+       .quad   pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+       .org    pvh_init_top_pgt + L4_START_KERNEL * 8, 0
+       /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+       .quad   pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+SYM_DATA_END(pvh_init_top_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt)
+       .quad   pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+       .fill   511, 8, 0
+SYM_DATA_END(pvh_level3_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt)
+       /*
+        * Since I easily can, map the first 1G.
+        * Don't set NX because code runs from these pages.
+        *
+        * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
+        * supports it or it is enabled.  But the CPU should ignore
+        * the bit.
+        */
+       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+SYM_DATA_END(pvh_level2_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt)
+       .fill   L3_START_KERNEL, 8, 0
+       /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
+       .quad   pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+       .quad   0 /* no fixmap */
+SYM_DATA_END(pvh_level3_kernel_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt)
+       /*
+        * Kernel high mapping.
+        *
+        * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+        * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+        * 512 MiB otherwise.
+        *
+        * (NOTE: after that starts the module area, see MODULES_VADDR.)
+        *
+        * This table is eventually used by the kernel during normal runtime.
+        * Care must be taken to clear out undesired bits later, like _PAGE_RW
+        * or _PAGE_GLOBAL in some cases.
+        */
+       PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE)
+SYM_DATA_END(pvh_level2_kernel_pgt)
+
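+       /*
+        * Tell Xen the image can be relocated.  The three values are
+        * the required start alignment, the minimum physical load
+        * address, and the maximum physical address of the image's
+        * last byte.
+        */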
+       ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_RELOC,
+                    .long CONFIG_PHYSICAL_ALIGN;
+                    .long LOAD_PHYSICAL_ADDR;
+                    .long KERNEL_IMAGE_SIZE - 1)
+#endif
+
        ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
                     _ASM_PTR (pvh_start_xen - __START_KERNEL_map))