__u64   save_area_async[8];             /* 0x0240 */
        __u64   save_area_restart[1];           /* 0x0280 */
 
-       /* CPU flags. */
-       __u64   cpu_flags;                      /* 0x0288 */
+       __u64   pcpu;                           /* 0x0288 */
 
        /* Return psws. */
        psw_t   return_psw;                     /* 0x0290 */
 
 #include <asm/irqflags.h>
 #include <asm/alternative.h>
 
+/*
+ * Per (logical) CPU control block. The lowcore "pcpu" field holds a
+ * pointer to the owning CPU's instance (see this_pcpu()), which makes
+ * the flags word reachable from assembly via __LC_PCPU/__PCPU_FLAGS.
+ */
+struct pcpu {
+       unsigned long ec_mask;          /* bit mask for ec_xxx functions */
+       unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
+       unsigned long flags;            /* per CPU flags */
+       signed char state;              /* physical cpu state */
+       signed char polarization;       /* physical polarization */
+       u16 address;                    /* physical cpu address */
+};
+
+DECLARE_PER_CPU(struct pcpu, pcpu_devices);
+
 typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
 
+/* Return the current CPU's struct pcpu, read from the lowcore pcpu field. */
+static __always_inline struct pcpu *this_pcpu(void)
+{
+       return (struct pcpu *)(get_lowcore()->pcpu);
+}
+
+/*
+ * Set bit 'flag' (a CIF_* bit number) in the current CPU's pcpu flags.
+ * NOTE(review): plain (non-atomic) read-modify-write — presumably only
+ * ever applied to the owning CPU's flags; confirm callers.
+ */
 static __always_inline void set_cpu_flag(int flag)
 {
-       get_lowcore()->cpu_flags |= (1UL << flag);
+       this_pcpu()->flags |= (1UL << flag);
 }
 
+/* Clear bit 'flag' in the current CPU's pcpu flags (plain, non-atomic RMW). */
 static __always_inline void clear_cpu_flag(int flag)
 {
-       get_lowcore()->cpu_flags &= ~(1UL << flag);
+       this_pcpu()->flags &= ~(1UL << flag);
 }
 
+/* Test bit 'flag' in the current CPU's pcpu flags. */
 static __always_inline bool test_cpu_flag(int flag)
 {
-       return get_lowcore()->cpu_flags & (1UL << flag);
+       return this_pcpu()->flags & (1UL << flag);
 }
 
 static __always_inline bool test_and_set_cpu_flag(int flag)
  */
 static __always_inline bool test_cpu_flag_of(int flag, int cpu)
 {
-       struct lowcore *lc = lowcore_ptr[cpu];
-
-       return lc->cpu_flags & (1UL << flag);
+       /* NOTE(review): plain, unsynchronized read of another CPU's flags —
+        * result may be stale by the time the caller acts on it; confirm
+        * callers tolerate that, as the old lowcore-based read also did. */
+       return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
 }
 
 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
 
        OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
        OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
        OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
-       OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
+       OFFSET(__LC_PCPU, lowcore, pcpu);
        OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
        OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
        OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
 #endif
        OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
        DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
+
+       OFFSET(__PCPU_FLAGS, pcpu, flags);
        return 0;
 }
 
        clgrjl  %r9,%r14, 4f
        larl    %r14,.Lsie_leave
        clgrjhe %r9,%r14, 4f
-       oi      __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+       lg      %r10,__LC_PCPU
+       oi      __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
 4:     BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
        SIEEXIT __SF_SIE_CONTROL(%r15)
 #endif
 
                panic("%s: Failed to allocate %zu bytes align=%zx\n",
                      __func__, sizeof(*lc), sizeof(*lc));
 
+       lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
        lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
        lc->restart_psw.addr = __pa(restart_int_handler);
        lc->external_new_psw.mask = PSW_KERNEL_BITS;
 
        CPU_STATE_CONFIGURED,
 };
 
-struct pcpu {
-       unsigned long ec_mask;          /* bit mask for ec_xxx functions */
-       unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
-       signed char state;              /* physical cpu state */
-       signed char polarization;       /* physical polarization */
-       u16 address;                    /* physical cpu address */
-};
-
 static u8 boot_core_type;
-static DEFINE_PER_CPU(struct pcpu, pcpu_devices);
+/* No longer static: other translation units reach it via DECLARE_PER_CPU. */
+DEFINE_PER_CPU(struct pcpu, pcpu_devices);
 /*
  * Pointer to the pcpu area of the boot CPU. This is required when a restart
  * interrupt is triggered on an offline CPU. For that case accessing percpu
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
+       lc->pcpu = (unsigned long)pcpu;
        lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        pcpu_free_lowcore(pcpu, cpu);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
+       pcpu->flags = 0;
 }
 
 void __noreturn cpu_die(void)
 
 void __init smp_prepare_boot_cpu(void)
 {
+       struct lowcore *lc = get_lowcore();
+
 	WARN_ON(!cpu_present(0) || !cpu_online(0));
+	/* Publish the boot CPU's per-cpu base and pcpu pointer in the lowcore. */
+	lc->percpu_offset = __per_cpu_offset[0];
 	ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
 	ipl_pcpu->state = CPU_STATE_CONFIGURED;
-	get_lowcore()->percpu_offset = __per_cpu_offset[0];
+	lc->pcpu = (unsigned long)ipl_pcpu;
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }