extern unsigned char _compressed_start[];
 extern unsigned char _compressed_end[];
 extern struct vmlinux_info _vmlinux_info;
+
 #define vmlinux _vmlinux_info
 
+#define __lowcore_pa(x)                ((unsigned long)(x) % sizeof(struct lowcore))
 #define __abs_lowcore_pa(x)    (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
 #define __kernel_va(x)         ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
 #define __kernel_pa(x)         ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
 
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/pgtable.h>
+#include <asm/abs_lowcore.h>
 #include <asm/page-states.h>
 #include <asm/ebcdic.h>
 #include <asm/sclp.h>
 
 unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata_preserved(max_mappable);
+int __bootdata_preserved(relocate_lowcore);
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 struct oldmem_data __bootdata_preserved(oldmem_data);
 
 enum populate_mode {
        POPULATE_NONE,
        POPULATE_DIRECT,
+       POPULATE_LOWCORE,
        POPULATE_ABS_LOWCORE,
        POPULATE_IDENTITY,
        POPULATE_KERNEL,
                return -1;
        case POPULATE_DIRECT:
                return addr;
+       case POPULATE_LOWCORE:
+               return __lowcore_pa(addr);
        case POPULATE_ABS_LOWCORE:
                return __abs_lowcore_pa(addr);
        case POPULATE_KERNEL:
 
 void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
 {
+       unsigned long lowcore_address = 0;
        unsigned long start, end;
        unsigned long asce_type;
        unsigned long asce_bits;
        __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
        __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
 
+       if (relocate_lowcore)
+               lowcore_address = LOWCORE_ALT_ADDRESS;
+
        /*
         * To allow prefixing the lowcore must be mapped with 4KB pages.
         * To prevent creation of a large page at address 0 first map
         * the lowcore and create the identity mapping only afterwards.
         */
-       pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
+       pgtable_populate(lowcore_address,
+                        lowcore_address + sizeof(struct lowcore),
+                        POPULATE_LOWCORE);
        for_each_physmem_usable_range(i, &start, &end) {
                pgtable_populate((unsigned long)__identity_va(start),
                                 (unsigned long)__identity_va(end),
 
 #ifndef _ASM_S390_ABS_LOWCORE_H
 #define _ASM_S390_ABS_LOWCORE_H
 
+#include <asm/sections.h>
 #include <asm/lowcore.h>
 
 #define ABS_LOWCORE_MAP_SIZE   (NR_CPUS * sizeof(struct lowcore))
        put_cpu();
 }
 
+extern int __bootdata_preserved(relocate_lowcore);
+
+/*
+ * Return non-zero if the lowcore has been moved away from address zero.
+ * relocate_lowcore is a __bootdata_preserved flag handed over from the
+ * decompressor (see the relocate_lowcore kernel parameter handling).
+ */
+static inline int have_relocated_lowcore(void)
+{
+       return relocate_lowcore;
+}
+
 #endif /* _ASM_S390_ABS_LOWCORE_H */
 
 
 #define ALT_TYPE_FACILITY      0
 #define ALT_TYPE_SPEC          1
+#define ALT_TYPE_LOWCORE       2
 
 #define ALT_DATA_SHIFT         0
 #define ALT_TYPE_SHIFT         20
                                         ALT_TYPE_SPEC << ALT_TYPE_SHIFT        | \
                                         (facility) << ALT_DATA_SHIFT)
 
+#define ALT_LOWCORE                    (ALT_CTX_EARLY << ALT_CTX_SHIFT         | \
+                                        ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
 
 #include <asm/ctlreg.h>
 #include <asm/cpu.h>
 #include <asm/types.h>
+#include <asm/alternative.h>
 
 #define LC_ORDER 1
 #define LC_PAGES 2
 
+#define LOWCORE_ALT_ADDRESS    _AC(0x70000, UL)
+
+#ifndef __ASSEMBLY__
+
 struct pgm_tdb {
        u64 data[32];
 };
 
 static __always_inline struct lowcore *get_lowcore(void)
 {
-       return NULL;
+       struct lowcore *lc;
+
+       /* The decompressor keeps the old NULL-based lowcore access. */
+       if (__is_defined(__DECOMPRESSOR))
+               return NULL;
+       /*
+        * Default instruction loads lowcore base 0; when the ALT_LOWCORE
+        * alternative is applied (relocated lowcore, see ALT_TYPE_LOWCORE)
+        * it is patched to load LOWCORE_ALT_ADDRESS instead.  llilh loads
+        * the immediate shifted left by 16 and zeroes all other bits,
+        * hence the ">> 16" on the address.
+        */
+       asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE)
+           : [lc] "=d" (lc)
+           : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
+       return lc;
 }
 
 extern struct lowcore *lowcore_ptr[];
        asm volatile("spx %0" : : "Q" (address) : "memory");
 }
 
+#else /* __ASSEMBLY__ */
+
+/*
+ * Load the lowcore base address into \reg (assembler counterpart of
+ * get_lowcore()).  Default loads 0; the ALT_LOWCORE alternative patches
+ * in LOWCORE_ALT_ADDRESS (llilh takes the address shifted right by 16).
+ */
+.macro GET_LC reg
+       ALTERNATIVE "llilh      \reg,0",                                        \
+               __stringify(llilh       \reg, LOWCORE_ALT_ADDRESS >> 16),       \
+               ALT_LOWCORE
+.endm
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_LOWCORE_H */
 
 #include <asm/abs_lowcore.h>
 
 unsigned long __bootdata_preserved(__abs_lowcore);
+int __bootdata_preserved(relocate_lowcore);
 
 int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
 {
 
 
 #include <linux/uaccess.h>
 #include <asm/nospec-branch.h>
+#include <asm/abs_lowcore.h>
 #include <asm/alternative.h>
 #include <asm/facility.h>
 
                case ALT_TYPE_SPEC:
                        replace = nobp_enabled();
                        break;
+               case ALT_TYPE_LOWCORE:
+                       replace = have_relocated_lowcore();
+                       break;
                default:
                        replace = false;
                }
 
 decompressor_handled_param(facilities);
 decompressor_handled_param(nokaslr);
 decompressor_handled_param(cmma);
+decompressor_handled_param(relocate_lowcore);
 #if IS_ENABLED(CONFIG_KVM)
 decompressor_handled_param(prot_virt);
 #endif
 
        else
                pr_info("Linux is running as a guest in 64-bit mode\n");
 
+       if (have_relocated_lowcore())
+               pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
+
        log_component_list();
 
        /* Have one command line that is parsed and saved in /proc/cmdline */