#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-extern long __put_user_bad(void);
-
 #define __put_user_size(x, ptr, size, retval)                  \
 do {                                                           \
        __label__ __pu_failed;                                  \
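
The hunk above and the ones below all lean on the same old trick: __put_user_bad() (and __get_user_bad() further down) were declared but deliberately never defined, so any call that survived dead-code elimination failed at link time with an undefined reference. A minimal standalone sketch of that mechanism, with invented names (bad_size, store), follows:

/* link_trap.c - sketch of the declared-but-never-defined trap.
 * Build with -O2: the constant-folded switch lets the compiler drop
 * the default branch, so bad_size() is never referenced and the link
 * succeeds. At -O0 the call survives and the link fails with
 * "undefined reference to `bad_size'" even for a valid size. */
extern long bad_size(void);             /* no definition anywhere */

static long store(unsigned long size)
{
        switch (size) {
        case 1: case 2: case 4: case 8:
                return 0;               /* real store asm would go here */
        default:
                return bad_size();      /* link-time trap */
        }
}

int main(void)
{
        return (int)store(sizeof(int)); /* constant size: trap folds away */
}
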
@@ ... @@
        case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;        \
        case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;        \
        case 8: __put_user_asm2_goto(x, __pus_addr, label); break;              \
-       default: __put_user_bad();                              \
+       default: BUILD_BUG();                                   \
        }                                                       \
 } while (0)
 
-extern long __get_user_bad(void);
-
 /*
  * This does an atomic 128 byte aligned load from userspace.
  * Up to caller to do enable_kernel_vmx() before calling!
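
BUILD_BUG() (include/linux/build_bug.h) moves that failure from link time to compile time: it funnels through __compiletime_assert(), which calls an extern function carrying GCC's error attribute, so a call that survives optimization aborts the build with a proper diagnostic instead of a late undefined-reference error. A rough standalone model, assuming GNU C and -O2 (my_build_bug and MY_PUT_USER_SIZE are invented for the sketch):

/* build_bug.c - standalone model of BUILD_BUG(); compile with -O2,
 * since the mechanism relies on dead-code elimination removing the
 * call in the good cases (the kernel always builds with -O2/-Os). */
#include <stdio.h>

/* Declared, never defined; the error attribute fires at compile time
 * if a call to it survives optimization. */
extern void my_build_bug(void) __attribute__((error("BUILD_BUG failed")));

#define MY_PUT_USER_SIZE(size) do {                             \
        switch (size) {                                         \
        case 1: case 2: case 4: case 8:                         \
                printf("store of %d bytes\n", (int)(size));     \
                break;                                          \
        default:                                                \
                my_build_bug();  /* compile-time trap */        \
        }                                                       \
} while (0)

int main(void)
{
        MY_PUT_USER_SIZE(sizeof(long)); /* ok: constant 4 or 8 */
        /* MY_PUT_USER_SIZE(3); */      /* uncomment -> compile error */
        return 0;
}
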
@@ ... @@
 #define __get_user_size_allowed(x, ptr, size, retval)          \
 do {                                                           \
        retval = 0;                                             \
-       if (size > sizeof(x))                                   \
-               (x) = __get_user_bad();                         \
+       BUILD_BUG_ON(size > sizeof(x));                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;      \
        case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;     \
        case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;     \
        case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;  \
-       default: (x) = __get_user_bad();                        \
+       default: BUILD_BUG();                                   \
        }                                                       \
 } while (0)
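
BUILD_BUG_ON(cond) is the conditional form used for the size > sizeof(x) check above: the build fails outright when a compile-time-constant condition is true, replacing the old scheme of assigning the result of an undefined __get_user_bad() call. A minimal sketch, modelling it with C11 _Static_assert rather than the kernel's error-attribute machinery (the MY_ names are invented):

/* build_bug_on.c - C11 model of the BUILD_BUG_ON(size > sizeof(x))
 * check in the hunk above. Build with -std=c11. */
#include <stdint.h>
#include <stdio.h>

#define MY_BUILD_BUG_ON(cond) \
        _Static_assert(!(cond), "MY_BUILD_BUG_ON: " #cond)

/* Reject at compile time any load wider than the destination lvalue. */
#define MY_GET_USER_SIZE(x, size) do {                          \
        MY_BUILD_BUG_ON((size) > sizeof(x));                    \
        printf("load %zu bytes into %zu-byte lvalue\n",         \
               (size_t)(size), sizeof(x));                      \
} while (0)

int main(void)
{
        uint64_t wide = 0;
        (void)wide;
        MY_GET_USER_SIZE(wide, 4);      /* ok: 4 <= 8 */
        /* uint16_t narrow = 0; MY_GET_USER_SIZE(narrow, 8); */
        /*   ^ uncomment -> fails to compile, not at link or run time */
        return 0;
}
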