        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+       select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select HAVE_ARCH_PFN_VALID
        select HAVE_ARCH_SECCOMP
        select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 
   UNWIND(.cantunwind   )
        disable_irq_notrace                     @ disable interrupts
        ldr     r2, [tsk, #TI_ADDR_LIMIT]
-       cmp     r2, #TASK_SIZE
+       ldr     r1, =TASK_SIZE
+       cmp     r2, r1
        blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       movs    r1, r1, lsl #16
        bne     fast_work_pending
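
Side note, not part of the patch: replacing "cmp r2, #TASK_SIZE" with a
literal-pool load is presumably needed because an ARM data-processing
immediate must be an 8-bit value rotated right by an even amount. A typical
TASK_SIZE such as 0xbf000000 encodes in that form, but with the KASAN shadow
region carved out of the address space TASK_SIZE may no longer fit, so the
value has to be loaded via "ldr r1, =TASK_SIZE" instead. A host-side C sketch
of the encoding rule (illustrative only, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* True if v is a valid ARM "modified immediate":
	 * an 8-bit value rotated right by an even amount. */
	static bool arm_modified_imm_ok(uint32_t v)
	{
		for (unsigned rot = 0; rot < 32; rot += 2) {
			/* Undo a rotate-right by 'rot' by rotating left. */
			uint32_t u = rot ? (v << rot) | (v >> (32 - rot)) : v;
			if (u <= 0xffU)
				return true;
		}
		return false;
	}

	int main(void)
	{
		printf("%d\n", arm_modified_imm_ok(0xbf000000)); /* 1: encodes */
		printf("%d\n", arm_modified_imm_ok(0xb6e00000)); /* 0: does not */
		return 0;
	}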
  
  
  #endif
        disable_irq_notrace                     @ disable interrupts
        ldr     r2, [tsk, #TI_ADDR_LIMIT]
-       cmp     r2, #TASK_SIZE
+       ldr     r1, =TASK_SIZE
+       cmp     r2, r1
        blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       movs    r1, r1, lsl #16
        beq     no_work_pending
   UNWIND(.fnend                )
  ENDPROC(ret_fast_syscall)
        disable_irq_notrace                     @ disable interrupts
  ENTRY(ret_to_user_from_irq)
        ldr     r2, [tsk, #TI_ADDR_LIMIT]
-       cmp     r2, #TASK_SIZE
+       ldr     r1, =TASK_SIZE
+       cmp     r2, r1
        blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]
-       tst     r1, #_TIF_WORK_MASK
+       movs    r1, r1, lsl #16
        bne     slow_work_pending
  no_work_pending:
        asm_trace_hardirqs_on save = 0
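
Side note, not part of the patch: "movs r1, r1, lsl #16" shifts the TI_FLAGS
word left by 16 and sets the condition codes on the result, so the following
bne/beq branches on whether any flag in bits 0-15 is set. This presumably
relies on every work-pending TIF flag being allocated in the low 16 bits, and
it avoids having to encode the flag mask as an immediate. A small C sketch of
what the new test computes (flag values here are hypothetical):

	#include <assert.h>
	#include <stdint.h>

	/* What "movs r1, r1, lsl #16" followed by bne/beq computes:
	 * non-zero iff any flag in bits 0..15 of TI_FLAGS is set. */
	static int low16_flags_set(uint32_t ti_flags)
	{
		return (uint32_t)(ti_flags << 16) != 0;
	}

	int main(void)
	{
		/* Equivalent to masking with 0xffff, i.e. it covers every
		 * low-half flag rather than a hand-built immediate mask. */
		for (uint32_t f = 0; f <= 0x1ffffU; f++)
			assert(low16_flags_set(f) == ((f & 0xffffU) != 0));
		return 0;
	}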