#include "decompressor.h"
 #include "boot.h"
 
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
                asce_type = _REGION3_ENTRY_EMPTY;
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
        }
-       s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+       s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 
        crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
        crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
 
        kasan_populate_shadow();
 
-       S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
+       S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
        S390_lowcore.user_asce = s390_invalid_asce;
 
        local_ctl_load(1, &S390_lowcore.kernel_asce);
        local_ctl_load(7, &S390_lowcore.user_asce);
        local_ctl_load(13, &S390_lowcore.kernel_asce);
 
-       init_mm.context.asce = S390_lowcore.kernel_asce;
+       init_mm.context.asce = S390_lowcore.kernel_asce.val;
 }
 
 
 #include <linux/bug.h>
 
+struct ctlreg {
+       unsigned long val;
+};
+
 #define __local_ctl_load(low, high, array) do {                                \
        struct addrtype {                                               \
                char _[sizeof(array)];                                  \
        int _low = low;                                                 \
        int _esize;                                                     \
                                                                        \
-       _esize = (_high - _low + 1) * sizeof(unsigned long);            \
+       _esize = (_high - _low + 1) * sizeof(struct ctlreg);            \
        BUILD_BUG_ON(sizeof(struct addrtype) != _esize);                \
-       typecheck(unsigned long, array[0]);                             \
+       typecheck(struct ctlreg, array[0]);                             \
        asm volatile(                                                   \
                "       lctlg   %[_low],%[_high],%[_arr]\n"             \
                :                                                       \
        int _low = low;                                                 \
        int _esize;                                                     \
                                                                        \
-       _esize = (_high - _low + 1) * sizeof(unsigned long);            \
+       _esize = (_high - _low + 1) * sizeof(struct ctlreg);            \
        BUILD_BUG_ON(sizeof(struct addrtype) != _esize);                \
-       typecheck(unsigned long, array[0]);                             \
+       typecheck(struct ctlreg, array[0]);                             \
        asm volatile(                                                   \
                "       stctg   %[_low],%[_high],%[_arr]\n"             \
                : [_arr] "=Q" (*(struct addrtype *)(&array))            \
                : [_low] "i" (low), [_high] "i" (high));                \
 } while (0)
 
-static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
+static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
 {
        asm volatile(
                "       lctlg   %[cr],%[cr],%[reg]\n"
                : "memory");
 }
 
-static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg)
+static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
 {
        asm volatile(
                "       stctg   %[cr],%[cr],%[reg]\n"
 
 static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
 {
-       unsigned long reg;
+       struct ctlreg reg;
 
        local_ctl_store(cr, &reg);
-       reg |= 1UL << bit;
+       reg.val |= 1UL << bit;
        local_ctl_load(cr, &reg);
 }
 
 static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
 {
-       unsigned long reg;
+       struct ctlreg reg;
 
        local_ctl_store(cr, &reg);
-       reg &= ~(1UL << bit);
+       reg.val &= ~(1UL << bit);
        local_ctl_load(cr, &reg);
 }
 
 
 union ctlreg0 {
        unsigned long val;
+       struct ctlreg reg;
        struct {
                unsigned long      : 8;
                unsigned long tcx  : 1; /* Transactional-Execution control */
 
 union ctlreg2 {
        unsigned long val;
+       struct ctlreg reg;
        struct {
                unsigned long       : 33;
                unsigned long ducto : 25;
 
 union ctlreg5 {
        unsigned long val;
+       struct ctlreg reg;
        struct {
                unsigned long       : 33;
                unsigned long pasteo: 25;
 
 union ctlreg15 {
        unsigned long val;
+       struct ctlreg reg;
        struct {
                unsigned long lsea  : 61;
                unsigned long       : 3;
 
  *             <grundym@us.ibm.com>
  */
 #include <linux/types.h>
+#include <asm/ctlreg.h>
 #include <asm-generic/kprobes.h>
 
 #define BREAKPOINT_INSTRUCTION 0x0002
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_saved_imask;
-       unsigned long kprobe_saved_ctl[3];
+       struct ctlreg kprobe_saved_ctl[3];
        struct prev_kprobe prev_kprobe;
 };
 
 
 
 #include <linux/types.h>
 #include <asm/ptrace.h>
+#include <asm/ctlreg.h>
 #include <asm/cpu.h>
 #include <asm/types.h>
 
        __u32   restart_flags;                  /* 0x0384 */
 
        /* Address space pointer. */
-       unsigned long kernel_asce;              /* 0x0388 */
-       unsigned long user_asce;                /* 0x0390 */
+       struct ctlreg kernel_asce;              /* 0x0388 */
+       struct ctlreg user_asce;                /* 0x0390 */
 
        /*
         * The lpp and current_pid fields form a
        __u32   clock_comp_save_area[2];        /* 0x1330 */
        __u64   last_break_save_area;           /* 0x1338 */
        __u32   access_regs_save_area[16];      /* 0x1340 */
-       unsigned long cregs_save_area[16];      /* 0x1380 */
+       struct ctlreg cregs_save_area[16];      /* 0x1380 */
        __u8    pad_0x1400[0x1500-0x1400];      /* 0x1400 */
        /* Cryptography-counter designation */
        __u64   ccd;                            /* 0x1500 */
 
        if (next == &init_mm)
                S390_lowcore.user_asce = s390_invalid_asce;
        else
-               S390_lowcore.user_asce = next->context.asce;
+               S390_lowcore.user_asce.val = next->context.asce;
        cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        /* Clear previous user-ASCE from CR7 */
        local_ctl_load(7, &s390_invalid_asce);
 
 #include <linux/radix-tree.h>
 #include <linux/atomic.h>
 #include <asm/sections.h>
+#include <asm/ctlreg.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 #include <asm/uv.h>
 extern pgd_t swapper_pg_dir[];
 extern pgd_t invalid_pg_dir[];
 extern void paging_init(void);
-extern unsigned long s390_invalid_asce;
+extern struct ctlreg s390_invalid_asce;
 
 enum {
        PG_DIRECT_MAP_4K = 0,
 
 static void ctl_bit_callback(void *info)
 {
        struct ctl_bit_parms *pp = info;
-       unsigned long regs[16];
+       struct ctlreg regs[16];
 
        __local_ctl_store(0, 15, regs);
-       regs[pp->cr] &= pp->andval;
-       regs[pp->cr] |= pp->orval;
+       regs[pp->cr].val &= pp->andval;
+       regs[pp->cr].val |= pp->orval;
        __local_ctl_load(0, 15, regs);
 }
 
        pp.andval = set ? -1UL : ~(1UL << bit);
        system_ctlreg_lock();
        abs_lc = get_abs_lowcore();
-       abs_lc->cregs_save_area[cr] &= pp.andval;
-       abs_lc->cregs_save_area[cr] |= pp.orval;
+       abs_lc->cregs_save_area[cr].val &= pp.andval;
+       abs_lc->cregs_save_area[cr].val |= pp.orval;
        put_abs_lowcore(abs_lc);
        on_each_cpu(ctl_bit_callback, &pp, 1);
        system_ctlreg_unlock();
 
                              unsigned long ip)
 {
        union {
-               unsigned long regs[3];
+               struct ctlreg regs[3];
                struct {
-                       unsigned long control;
-                       unsigned long start;
-                       unsigned long end;
+                       struct ctlreg control;
+                       struct ctlreg start;
+                       struct ctlreg end;
                };
        } per_kprobe;
 
        /* Set up the PER control registers %cr9-%cr11 */
-       per_kprobe.control = PER_EVENT_IFETCH;
-       per_kprobe.start = ip;
-       per_kprobe.end = ip;
+       per_kprobe.control.val = PER_EVENT_IFETCH;
+       per_kprobe.start.val = ip;
+       per_kprobe.end.val = ip;
 
        /* Save control regs and psw mask */
        __local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
 
        if (MACHINE_HAS_VX)
                save_vx_regs((__vector128 *) mcesa->vector_save_area);
        if (MACHINE_HAS_GS) {
-               local_ctl_store(2, &cr2_old.val);
+               local_ctl_store(2, &cr2_old.reg);
                cr2_new = cr2_old;
                cr2_new.gse = 1;
-               local_ctl_load(2, &cr2_new.val);
+               local_ctl_load(2, &cr2_new.reg);
                save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
-               local_ctl_load(2, &cr2_old.val);
+               local_ctl_load(2, &cr2_old.reg);
        }
        /*
         * To create a good backchain for this CPU in the dump store_status
 
         * Disable low address protection and make machine check new PSW a
         * disabled wait PSW. Any additional machine check cannot be handled.
         */
-       local_ctl_store(0, &cr0.val);
+       local_ctl_store(0, &cr0.reg);
        cr0_new = cr0;
        cr0_new.lap = 0;
-       local_ctl_load(0, &cr0_new.val);
+       local_ctl_load(0, &cr0_new.reg);
        psw_save = S390_lowcore.mcck_new_psw;
        psw_bits(S390_lowcore.mcck_new_psw).io = 0;
        psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
         * values. This makes possible system dump analysis easier.
         */
        S390_lowcore.mcck_new_psw = psw_save;
-       local_ctl_load(0, &cr0.val);
+       local_ctl_load(0, &cr0.reg);
        disabled_wait();
        while (1);
 }
                 */
                if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
                        kill_task = 1;
-               cr0.val = S390_lowcore.cregs_save_area[0];
+               cr0.reg = S390_lowcore.cregs_save_area[0];
                cr0.afp = cr0.vx = 1;
-               local_ctl_load(0, &cr0.val);
+               local_ctl_load(0, &cr0.reg);
                asm volatile(
                        "       la      1,%0\n"
                        "       VLM     0,15,0,1\n"
        if (!mci.ar)
                kill_task = 1;
        /* Validate guarded storage registers */
-       cr2.val = S390_lowcore.cregs_save_area[2];
+       cr2.reg = S390_lowcore.cregs_save_area[2];
        if (cr2.gse) {
                if (!mci.gs) {
                        /*
 
        union ctlreg2 cr2_old, cr2_new;
        int cr0_changed, cr2_changed;
        union {
-               unsigned long regs[3];
+               struct ctlreg regs[3];
                struct {
-                       unsigned long control;
-                       unsigned long start;
-                       unsigned long end;
+                       struct ctlreg control;
+                       struct ctlreg start;
+                       struct ctlreg end;
                };
        } old, new;
 
-       local_ctl_store(0, &cr0_old.val);
-       local_ctl_store(2, &cr2_old.val);
+       local_ctl_store(0, &cr0_old.reg);
+       local_ctl_store(2, &cr2_old.reg);
        cr0_new = cr0_old;
        cr2_new = cr2_old;
        /* Take care of the enable/disable of transactional execution. */
        cr0_changed = cr0_new.val != cr0_old.val;
        cr2_changed = cr2_new.val != cr2_old.val;
        if (cr0_changed)
-               local_ctl_load(0, &cr0_new.val);
+               local_ctl_load(0, &cr0_new.reg);
        if (cr2_changed)
-               local_ctl_load(2, &cr2_new.val);
+               local_ctl_load(2, &cr2_new.reg);
        /* Copy user specified PER registers */
-       new.control = thread->per_user.control;
-       new.start = thread->per_user.start;
-       new.end = thread->per_user.end;
+       new.control.val = thread->per_user.control;
+       new.start.val = thread->per_user.start;
+       new.end.val = thread->per_user.end;
 
        /* merge TIF_SINGLE_STEP into user specified PER registers. */
        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
            test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
                if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
-                       new.control |= PER_EVENT_BRANCH;
+                       new.control.val |= PER_EVENT_BRANCH;
                else
-                       new.control |= PER_EVENT_IFETCH;
-               new.control |= PER_CONTROL_SUSPENSION;
-               new.control |= PER_EVENT_TRANSACTION_END;
+                       new.control.val |= PER_EVENT_IFETCH;
+               new.control.val |= PER_CONTROL_SUSPENSION;
+               new.control.val |= PER_EVENT_TRANSACTION_END;
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
-                       new.control |= PER_EVENT_IFETCH;
-               new.start = 0;
-               new.end = -1UL;
+                       new.control.val |= PER_EVENT_IFETCH;
+               new.start.val = 0;
+               new.end.val = -1UL;
        }
 
        /* Take care of the PER enablement bit in the PSW. */
-       if (!(new.control & PER_EVENT_MASK)) {
+       if (!(new.control.val & PER_EVENT_MASK)) {
                regs->psw.mask &= ~PSW_MASK_PER;
                return;
        }
 
        __ctl_duct[4] = (unsigned long)__ctl_duald;
 
        /* Update control registers CR2, CR5 and CR15 */
-       local_ctl_store(2, &cr2.val);
-       local_ctl_store(5, &cr5.val);
-       local_ctl_store(15, &cr15.val);
+       local_ctl_store(2, &cr2.reg);
+       local_ctl_store(5, &cr5.reg);
+       local_ctl_store(15, &cr15.reg);
        cr2.ducto = (unsigned long)__ctl_duct >> 6;
        cr5.pasteo = (unsigned long)__ctl_duct >> 6;
        cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
-       local_ctl_load(2, &cr2.val);
-       local_ctl_load(5, &cr5.val);
-       local_ctl_load(15, &cr15.val);
+       local_ctl_load(2, &cr2.reg);
+       local_ctl_load(5, &cr5.reg);
+       local_ctl_load(15, &cr15.reg);
 }
 
 /*
 
 
 int __cpu_disable(void)
 {
-       unsigned long cregs[16];
+       struct ctlreg cregs[16];
        int cpu;
 
        /* Handle possible pending IPIs */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __local_ctl_store(0, 15, cregs);
-       cregs[0]  &= ~0x0000ee70UL;     /* disable all external interrupts */
-       cregs[6]  &= ~0xff000000UL;     /* disable all I/O interrupts */
-       cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
+       cregs[0].val  &= ~0x0000ee70UL; /* disable all external interrupts */
+       cregs[6].val  &= ~0xff000000UL; /* disable all I/O interrupts */
+       cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
        __local_ctl_load(0, 15, cregs);
        clear_cpu_flag(CIF_NOHZ_DELAY);
        return 0;
 
 #ifdef CONFIG_DEBUG_ENTRY
 void debug_user_asce(int exit)
 {
-       unsigned long cr1, cr7;
+       struct ctlreg cr1, cr7;
 
        local_ctl_store(1, &cr1);
        local_ctl_store(7, &cr7);
-       if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
+       if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
                return;
        panic("incorrect ASCE on kernel %s\n"
              "cr1:    %016lx cr7:  %016lx\n"
              "kernel: %016lx user: %016lx\n",
-             exit ? "exit" : "entry", cr1, cr7,
-             S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+             exit ? "exit" : "entry", cr1.val, cr7.val,
+             S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
 }
 #endif /*CONFIG_DEBUG_ENTRY */
 
 
         * kernel ASCE. We need this to keep the page table walker functions
         * from accessing non-existent entries.
         */
-       max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+       max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
        max_addr = 1UL << (max_addr * 11 + 31);
        address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
        address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
 
        pr_cont("mode while using ");
        switch (get_fault_type(regs)) {
        case USER_FAULT:
-               asce = S390_lowcore.user_asce;
+               asce = S390_lowcore.user_asce.val;
                pr_cont("user ");
                break;
        case GMAP_FAULT:
                pr_cont("gmap ");
                break;
        case KERNEL_FAULT:
-               asce = S390_lowcore.kernel_asce;
+               asce = S390_lowcore.kernel_asce.val;
                pr_cont("kernel ");
                break;
        default:
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
 
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
                        break;
                }
                table = (unsigned long *)((unsigned long)old & mask);
-               crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+               crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
        } else if (MACHINE_HAS_IDTE) {
                cspg(old, *old, new);
        } else {
 
 
        /* change all active ASCEs to avoid the creation of new TLBs */
        if (current->active_mm == mm) {
-               S390_lowcore.user_asce = mm->context.asce;
+               S390_lowcore.user_asce.val = mm->context.asce;
                local_ctl_load(7, &S390_lowcore.user_asce);
        }
        __tlb_flush_local();
 
 sclp_sync_wait(void)
 {
        unsigned long long old_tick;
+       struct ctlreg cr0, cr0_sync;
        unsigned long flags;
-       unsigned long cr0, cr0_sync;
        static u64 sync_count;
        u64 timeout;
        int irq_context;
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        local_ctl_store(0, &cr0);
-       cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
-       cr0_sync |= 1UL << (63 - 54);
+       cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+       cr0_sync.val |= 1UL << (63 - 54);
        local_ctl_load(0, &cr0_sync);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
 
        psw_t psw_ext_save, psw_wait;
        union ctlreg0 cr0, cr0_new;
 
-       local_ctl_store(0, &cr0.val);
+       local_ctl_store(0, &cr0.reg);
        cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
        cr0_new.lap = 0;
        cr0_new.sssm = 1;
-       local_ctl_load(0, &cr0_new.val);
+       local_ctl_load(0, &cr0_new.reg);
 
        psw_ext_save = S390_lowcore.external_new_psw;
        psw_mask = __extract_psw();
        } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
 
        S390_lowcore.external_new_psw = psw_ext_save;
-       local_ctl_load(0, &cr0.val);
+       local_ctl_load(0, &cr0.reg);
 }
 
 int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)