www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/Kconfig: Introduce function padding
author Thomas Gleixner <tglx@linutronix.de>
Thu, 15 Sep 2022 11:11:18 +0000 (13:11 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 17 Oct 2022 14:41:10 +0000 (16:41 +0200)
Now that all functions are 16 byte aligned, add 16 bytes of NOP
padding in front of each function. This prepares things for software
call stack tracking and kCFI/FineIBT.

This significantly increases kernel .text size, around 5.1% on an
x86_64-defconfig-ish build.

However, per the random access argument used for alignment, these 16
extra bytes are code that wouldn't be used. Performance measurements
back this up by showing no significant performance regressions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111146.950884492@infradead.org
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/linkage.h
include/linux/bpf.h

index e18963e77cb1d6971230561784944b1432159699..e368fc0daa4acffd63af66e08287b8a0d7fca0e7 100644 (file)
@@ -2446,9 +2446,27 @@ config CC_HAS_SLS
 config CC_HAS_RETURN_THUNK
        def_bool $(cc-option,-mfunction-return=thunk-extern)
 
+config CC_HAS_ENTRY_PADDING
+       def_bool $(cc-option,-fpatchable-function-entry=16,16)
+
+config FUNCTION_PADDING_CFI
+       int
+       default 59 if FUNCTION_ALIGNMENT_64B
+       default 27 if FUNCTION_ALIGNMENT_32B
+       default 11 if FUNCTION_ALIGNMENT_16B
+       default  3 if FUNCTION_ALIGNMENT_8B
+       default  0
+
+# Basically: FUNCTION_ALIGNMENT - 5*CFI_CLANG
+# except Kconfig can't do arithmetic :/
+config FUNCTION_PADDING_BYTES
+       int
+       default FUNCTION_PADDING_CFI if CFI_CLANG
+       default FUNCTION_ALIGNMENT
+
 config HAVE_CALL_THUNKS
        def_bool y
-       depends on RETHUNK && OBJTOOL
+       depends on CC_HAS_ENTRY_PADDING && RETHUNK && OBJTOOL
 
 config CALL_THUNKS
        def_bool n
index 415a5d138de47c37c90890a71056775d023f5d7d..1640e005092b9d4c67dbf412e098d84dc8721a83 100644 (file)
@@ -208,6 +208,12 @@ ifdef CONFIG_SLS
   KBUILD_CFLAGS += -mharden-sls=all
 endif
 
+ifdef CONFIG_CALL_THUNKS
+PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
+KBUILD_CFLAGS += $(PADDING_CFLAGS)
+export PADDING_CFLAGS
+endif
+
 KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
 
 ifdef CONFIG_LTO_CLANG
index 3ef611044c8f60653882e49a360628b2e3209d69..838613ac15b8205ef7d982d2186b371c8b0fb1b6 100644 (file)
@@ -95,7 +95,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),)
 endif
 endif
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(PADDING_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 $(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
@@ -158,6 +158,7 @@ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_CFI),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(PADDING_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
index c2d6e2733b1111c4f6469bd8d564ffe89db3aced..45e0df850645c37499af8994e4aed42ed424de7f 100644 (file)
 #define __ALIGN                .balign CONFIG_FUNCTION_ALIGNMENT, 0x90;
 #define __ALIGN_STR    __stringify(__ALIGN)
 
-#define ASM_FUNC_ALIGN         __ALIGN_STR
-#define __FUNC_ALIGN           __ALIGN
+#if defined(CONFIG_CALL_THUNKS) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define FUNCTION_PADDING       .skip CONFIG_FUNCTION_ALIGNMENT, 0x90;
+#else
+#define FUNCTION_PADDING
+#endif
+
+#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+# define __FUNC_ALIGN          __ALIGN; FUNCTION_PADDING
+#else
+# define __FUNC_ALIGN          __ALIGN
+#endif
+
+#define ASM_FUNC_ALIGN         __stringify(__FUNC_ALIGN)
 #define SYM_F_ALIGN            __FUNC_ALIGN
 
 #ifdef __ASSEMBLY__
 
 #endif /* __ASSEMBLY__ */
 
+/*
+ * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_THUNKS) the
+ * CFI symbol layout changes.
+ *
+ * Without CALL_THUNKS:
+ *
+ *     .align  FUNCTION_ALIGNMENT
+ * __cfi_##name:
+ *     .skip   FUNCTION_PADDING, 0x90
+ *     .byte   0xb8
+ *     .long   __kcfi_typeid_##name
+ * name:
+ *
+ * With CALL_THUNKS:
+ *
+ *     .align FUNCTION_ALIGNMENT
+ * __cfi_##name:
+ *     .byte   0xb8
+ *     .long   __kcfi_typeid_##name
+ *     .skip   FUNCTION_PADDING, 0x90
+ * name:
+ *
+ * In both cases the whole thing is FUNCTION_ALIGNMENT aligned and sized.
+ */
+
+#ifdef CONFIG_CALL_THUNKS
+#define CFI_PRE_PADDING
+#define CFI_POST_PADDING       .skip   CONFIG_FUNCTION_PADDING_BYTES, 0x90;
+#else
+#define CFI_PRE_PADDING                .skip   CONFIG_FUNCTION_PADDING_BYTES, 0x90;
+#define CFI_POST_PADDING
+#endif
+
 #define __CFI_TYPE(name)                                       \
        SYM_START(__cfi_##name, SYM_L_LOCAL, SYM_A_NONE)        \
-       .fill 11, 1, 0x90 ASM_NL                                \
+       CFI_PRE_PADDING                                         \
        .byte 0xb8 ASM_NL                                       \
        .long __kcfi_typeid_##name ASM_NL                       \
+       CFI_POST_PADDING                                        \
        SYM_FUNC_END(__cfi_##name)
 
 /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
index 9e7d46d16032f98236dbf35d4390b956a0dbdaa3..5296aea9b5b40965c23d64882e61693e41a67832 100644 (file)
@@ -984,7 +984,11 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
 }
 
 #ifdef CONFIG_X86_64
+#ifdef CONFIG_CALL_THUNKS
+#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5+CONFIG_FUNCTION_PADDING_BYTES,CONFIG_FUNCTION_PADDING_BYTES)))
+#else
 #define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
+#endif
 #else
 #define BPF_DISPATCHER_ATTRIBUTES
 #endif