leaq _text(%rip), %rdi
- /*
- * initial_gs points to initial fixed_percpu_data struct with storage for
- * the stack protector canary. Global pointer fixups are needed at this
- * stage, so apply them as is done in fixup_pointer(), and initialize %gs
- * such that the canary can be accessed at %gs:40 for subsequent C calls.
- */
+ /* Set up GSBASE to allow stack canary access for C code */
movl $MSR_GS_BASE, %ecx
- movq initial_gs(%rip), %rax
- movq $_text, %rdx
- subq %rdx, %rax
- addq %rdi, %rax
- movq %rax, %rdx
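+ /*
+  * The RIP-relative LEA already yields the runtime address, so the
+  * old fixup_pointer()-style adjustment via %rdi is not needed.
+  * WRMSR takes the value in EDX:EAX (high:low): copy the low 32
+  * bits into EAX and shift the high half down into EDX.
+  */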
+ leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+ movl %edx, %eax
shrq $32, %rdx
wrmsr
ANNOTATE_NOENDBR // above
#ifdef CONFIG_SMP
- /*
- * Is this the boot CPU coming up? If so everything is available
- * in initial_gs.
- */
+ /* Is this the boot CPU coming up? */
movl smpboot_control(%rip), %edx
testl $STARTUP_SECONDARY, %edx
jz .Linit_cpu0_data
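+ /*
+  * The boot CPU takes the .Linit_cpu0_data path; secondaries fall
+  * through with their CPU number already in ECX.
+  */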
.Linit_cpu_data:
/* Get the per cpu offset for the given CPU# which is in ECX */
- leaq __per_cpu_offset(%rip), %rbx
- movq (%rbx,%rcx,8), %rbx
- /* Save it for GS BASE setup */
- movq %rbx, initial_gs(%rip)
-
- movq %rbx, %rdx
+ movq __per_cpu_offset(,%rcx,8), %rdx
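+ /*
+  * RDX now holds this CPU's per-CPU offset, which is also its GS
+  * base; it is carried straight to the wrmsr below, so staging it
+  * in initial_gs is no longer needed.
+  */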
#else
xorl %edx, %edx
#endif /* CONFIG_SMP */
/* Set up %gs.
 *
 * The base of %gs always points to fixed_percpu_data. If the
 * stack protector canary is enabled, it is located at %gs:40.
 * Note that, on SMP, the boot cpu uses init data section until
 * the per cpu areas are set up.
 */
movl $MSR_GS_BASE,%ecx
- movl initial_gs(%rip),%eax
- movl initial_gs+4(%rip),%edx
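+ /*
+  * On SMP, RDX already holds the per-CPU offset, which is the GS
+  * base. On UP the offset is zero, so point GS base directly at
+  * the init per-CPU copy of fixed_percpu_data.
+  */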
+#ifndef CONFIG_SMP
+ leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+#endif
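+ /* Split the GS base across EDX:EAX (high:low) for wrmsr */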
+ movl %edx, %eax
+ shrq $32, %rdx
wrmsr
/* Drop the realmode protection. For the boot CPU the pointer is NULL! */
__REFDATA
.balign 8
SYM_DATA(initial_code, .quad x86_64_start_kernel)
-SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
#endif