We use __read_cr4() and __read_cr4_safe() inconsistently.  On
CR4-less CPUs, all CR4 bits are effectively clear, so we can make
the code simpler and more robust by making __read_cr4() always fix
up faults on 32-bit kernels.
This may fix some bugs on old 486-like CPUs, but I don't have any
easy way to test that.
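
For reference, native_read_cr4() ends up looking roughly like the
sketch below once the patch is applied: on 32-bit, the exception
table entry makes a faulting "mov %cr4" resume at the following
label, leaving the preloaded 0 in the output register, so CR4-less
CPUs simply report CR4 == 0.

  static inline unsigned long native_read_cr4(void)
  {
          unsigned long val;
  #ifdef CONFIG_X86_32
          /* On a fault, execution skips from label 1 to label 2 and
           * val keeps the 0 preloaded via the "0" (0) constraint. */
          asm volatile("1: mov %%cr4, %0\n"
                       "2:\n"
                       _ASM_EXTABLE(1b, 2b)
                       : "=r" (val), "=m" (__force_order) : "0" (0));
  #else
          /* CR4 always exists on x86_64. */
          asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
  #endif
          return val;
  }

With that in place, __read_cr4_safe() has no remaining purpose, and
its callers (cr4_init_shadow(), the register dump and suspend paths,
etc.) can use plain __read_cr4().
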
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: david@saggiorato.net
Link: http://lkml.kernel.org/r/ea647033d357d9ce2ad2bbde5a631045f5052fb6.1475178370.git.luto@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-       return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
-}
 
 static inline void __write_cr4(unsigned long x)
 {
 
        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);
 
-       unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);
 
 
 static inline unsigned long native_read_cr4(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-       return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-       unsigned long val;
-       /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-        * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
+       /*
+        * This could fault if CR4 does not exist.  Non-existent CR4 is
+        * functionally equivalent to CR4 == 0, so keep it simple and
+        * pretend that CR4 == 0 on CPUs that don't have CR4.
+        */
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
-       val = native_read_cr4();
+       /* CR4 always exists on x86_64. */
+       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 #endif
        return val;
 }
        return native_read_cr4();
 }
 
-static inline unsigned long __read_cr4_safe(void)
-{
-       return native_read_cr4_safe();
-}
-
 static inline void __write_cr4(unsigned long x)
 {
        native_write_cr4(x);
 
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-       this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+       this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
 /* Set in this cpu's CR4. */
 
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
 
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
-       cr4 = __read_cr4_safe();
+       cr4 = __read_cr4();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);
 
 
         * auditing all the early-boot CR4 manipulation would be needed to
         * rule it out.
         */
-       mmu_cr4_features = __read_cr4_safe();
+       mmu_cr4_features = __read_cr4();
 
        memblock_set_current_limit(get_max_mapped());
 
 
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
-       ctxt->cr4 = __read_cr4_safe();
+       ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
 #endif
 
        .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,
 
 #ifdef CONFIG_X86_64