        select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
        select ARCH_KEEP_MEMBLOCK
        select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_USE_GNU_PROPERTY
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
+       select ARCH_USE_SYM_ANNOTATIONS
        select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
        select ARCH_SUPPORTS_NUMA_BALANCING
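
For context, ARCH_SUPPORTS_SHADOW_CALL_STACK is only selected when the
compiler can actually emit SCS instrumentation with x18 reserved. A
minimal sketch of the gating symbol, assuming the usual cc-option probe
(treat the exact shape as illustrative):

config CC_HAVE_SHADOW_CALL_STACK
        def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)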
 
  #include <linux/compiler.h>
  #include <linux/kvm_host.h>
  #include <asm/alternative.h>
-#include <asm/kvm_mmu.h>
  #include <asm/sysreg.h>
  
-#define __hyp_text __section(.hyp.text) notrace
+#define __hyp_text __section(.hyp.text) notrace __noscs
  
  #define read_sysreg_elx(r,nvh,vh)                                     \
        ({                                                              \
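
The new __noscs annotation is what keeps SCS instrumentation out of the
hyp text: this code runs at EL2 with its own mappings, so it cannot
safely use the kernel's shadow stack. A sketch of how the attribute can
be defined, modeled on the kernel's compiler headers (illustrative, not
a verbatim quote of the upstream definition):

/* Clang-only compiler header; only seen when building with Clang: */
#if __has_feature(shadow_call_stack)
# define __noscs        __attribute__((__no_sanitize__("shadow-call-stack")))
#endif

/* Generic fallback so __noscs is always defined: */
#ifndef __noscs
# define __noscs
#endif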
 
        ldp     x29, x30, [sp], #32
        b.ne    0f
        ret
-0:     b       efi_handle_corrupted_x18        // tail call
+0:
+       /*
+        * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
+        * shadow stack pointer, which we need to restore before returning to
+        * potentially instrumented code. This is safe because the wrapper is
+        * called with preemption disabled and a separate shadow stack is used
+        * for interrupts.
+        */
+       mov     x18, x2
+       b       efi_handle_corrupted_x18        // tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
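
The restore works because the wrapper stashed the original x18 on the
ordinary stack before handing control to the firmware, and x2 holds that
saved value by the time the mismatch check fires. Abridged sketch of the
relevant lines from the wrapper body (condensed, not a verbatim quote):

        stp     x1, x18, [sp, #16]      // preserve x1 and x18 across the call
        blr     x8                      // invoke the EFI runtime service
        ldp     x1, x2, [sp, #16]       // x2 := value of x18 on entry
        cmp     x2, x18                 // did the firmware clobber x18?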
 
  
        apply_ssbd 1, x22, x23
  
-       ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
+       ptrauth_keys_install_kernel tsk, x20, x22, x23
+
+       scs_load tsk, x20
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_current_task tsk
        ldr     lr, [x8]
        mov     sp, x9
        msr     sp_el0, x1
-       ptrauth_keys_install_kernel x1, 1, x8, x9, x10
+       ptrauth_keys_install_kernel x1, x8, x9, x10
+       scs_save x0, x8
+       scs_load x1, x8
        ret
  SYM_FUNC_END(cpu_switch_to)
  NOKPROBE(cpu_switch_to)
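
The scs_save/scs_load pair spills the outgoing task's shadow-stack
pointer (kept in x18) into its task state and reloads the incoming
task's pointer, so each task resumes on its own shadow stack. A sketch
of the macros' shape, assuming a thread_info field exposed to assembly
as TSK_TI_SCS_SP (the names follow the SCS series; treat the details as
illustrative):

#ifdef CONFIG_SHADOW_CALL_STACK
        .macro scs_load tsk, tmp
        ldr     x18, [\tsk, #TSK_TI_SCS_SP]     // resume this task's shadow stack
        .endm

        .macro scs_save tsk, tmp
        str     x18, [\tsk, #TSK_TI_SCS_SP]     // park x18 across the switch
        .endm
#else
        .macro scs_load tsk, tmp
        .endm

        .macro scs_save tsk, tmp
        .endm
#endif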
 
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
+       scs_load x2, x3
        mov     x29, #0
        mov     x30, #0
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+       ptrauth_keys_init_cpu x2, x3, x4, x5
+#endif
+
        b       secondary_start_kernel
  SYM_FUNC_END(__secondary_switched)
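
The pointer that scs_load picks up must be populated before a secondary
CPU's task ever executes C code. On the C side, every task is given a
small dedicated shadow stack at creation time; a simplified sketch,
loosely modeled on the generic SCS code (the task_scs()/task_scs_sp()
accessors and SCS_SIZE follow the series, the allocator detail is
illustrative):

#include <linux/sched.h>
#include <linux/slab.h>

#define SCS_SIZE        1024    /* only return addresses are stored here */

int scs_prepare(struct task_struct *tsk, int node)
{
        /* A real implementation uses a dedicated cache and poisons the area. */
        void *s = kmalloc_node(SCS_SIZE, GFP_KERNEL, node);

        if (!s)
                return -ENOMEM;

        /* x18 is (re)loaded from here on the next switch to this task. */
        task_scs(tsk) = task_scs_sp(tsk) = s;
        return 0;
}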
  
 
  ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
  ccflags-y += -DDISABLE_BRANCH_PROFILING
  
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
-VDSO_LDFLAGS := -Bsymbolic
-
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
  KBUILD_CFLAGS                 += $(DISABLE_LTO)
  KASAN_SANITIZE                        := n
  UBSAN_SANITIZE                        := n
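
Filtering $(CC_FLAGS_SCS) back out of vgettimeofday.o matters because
the vDSO executes in userspace, where x18 has no shadow stack to point
at. The flag is defined once at the top level so that architectures can
enable it globally and individual objects can opt out, along the lines
of this sketch of the top-level Makefile logic (following the generic
SCS patch):

ifdef CONFIG_SHADOW_CALL_STACK
CC_FLAGS_SCS    := -fsanitize=shadow-call-stack
KBUILD_CFLAGS   += $(CC_FLAGS_SCS)
export CC_FLAGS_SCS
endif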