x86/static-call: provide a way to do very early static-call updates
author Juergen Gross <jgross@suse.com>
Fri, 29 Nov 2024 15:15:54 +0000 (16:15 +0100)
committer Juergen Gross <jgross@suse.com>
Fri, 13 Dec 2024 08:28:32 +0000 (09:28 +0100)
Add static_call_update_early() for updating static-call targets in
very early boot.

This will be needed to support Xen guest-type-specific hypercall
functions.

This is part of XSA-466 / CVE-2024-53241.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Co-developed-by: Peter Zijlstra <peterz@infradead.org>
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
arch/x86/include/asm/static_call.h
arch/x86/include/asm/sync_core.h
arch/x86/kernel/static_call.c
include/linux/compiler.h
include/linux/static_call.h
kernel/static_call_inline.c

diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index 125c407e2abe6da21a05f8a644ecce501ed1c910..41502bd2afd646cb6989901d173a8d6890e768d9 100644
 
 extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
 
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func)                          \
+({                                                                     \
+       typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);                 \
+       if (static_call_initialized) {                                  \
+               __static_call_update(&STATIC_CALL_KEY(name),            \
+                                    STATIC_CALL_TRAMP_ADDR(name), __F);\
+       } else {                                                        \
+               WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);          \
+               __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+                                          __F);                        \
+       }                                                               \
+})
+
 #endif /* _ASM_STATIC_CALL_H */
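
A minimal usage sketch of the new macro (default_op, xen_op and my_op are
hypothetical names, not part of this patch): before static_call_init() has
run, the else branch patches the trampoline in place via
__static_call_update_early(); afterwards the regular __static_call_update()
path is taken.

        static int default_op(int x) { return x; }
        static int xen_op(int x) { return x + 1; }

        DEFINE_STATIC_CALL(my_op, default_op);

        void __init my_early_setup(void)
        {
                /* Very early boot: takes the __static_call_update_early() branch. */
                static_call_update_early(my_op, xen_op);
        }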
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index ab7382f92aff27405127748074a11341cbda474d..96bda43538ee70a302a37bd110860964ac4cf8ec 100644
@@ -8,7 +8,7 @@
 #include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        asm volatile (
                "pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
                : ASM_CALL_CONSTRAINT : : "memory");
 }
 #else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        unsigned int tmp;
 
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
  * Like all of Linux's memory ordering operations, this is a
  * compiler barrier as well.
  */
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
 {
        /*
         * The SERIALIZE instruction is the most straightforward way to
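
The switch to __always_inline matters because the new
__static_call_update_early() (next file) is noinstr: helpers called from
noinstr code must be fully inlined so no call into instrumentable text is
emitted. A sketch of the constraint, with a hypothetical caller:

        noinstr void example_early_patch(void)
        {
                /* ... rewrite instruction bytes ... */
                sync_core();    /* must fully inline; an out-of-line call
                                   from noinstr code would be flagged by
                                   objtool's noinstr validation */
        }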
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 4eefaac64c6cbabbd1a73ac714993ce831647792..9eed0c144dad5142780fb7a9b710e25d997f33c0 100644
@@ -172,6 +172,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
 
+noinstr void __static_call_update_early(void *tramp, void *func)
+{
+       BUG_ON(system_state != SYSTEM_BOOTING);
+       BUG_ON(!early_boot_irqs_disabled);
+       BUG_ON(static_call_initialized);
+       __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+       sync_core();
+}
+
 #ifdef CONFIG_MITIGATION_RETHUNK
 /*
  * This is called by apply_returns() to fix up static call trampolines,
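
For reference, the __text_gen_insn() call above emits a 5-byte near jump:
opcode 0xE9 (JMP32_INSN_OPCODE) followed by a 32-bit displacement relative
to the end of the instruction. An illustrative sketch of the equivalent
encoding (sketch_gen_jmp32 is a hypothetical name; the real helper lives
in <asm/text-patching.h>):

        static void sketch_gen_jmp32(void *addr, const void *dest)
        {
                u8 *insn = addr;
                s32 rel = (s32)((unsigned long)dest -
                                ((unsigned long)addr + JMP32_INSN_SIZE));

                insn[0] = 0xe9;                         /* JMP32_INSN_OPCODE */
                memcpy(insn + 1, &rel, sizeof(rel));    /* rel32 displacement */
        }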
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 469a64dd6495fefab2c85ffc279568a657b72660..240c632c5b957c4db75998d7a708a7f7572bc6a6 100644
@@ -216,28 +216,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #endif /* __KERNEL__ */
 
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off:       the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+       return (void *)((unsigned long)off + *off);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define ARCH_SEL(a,b) a
+#else
+#define ARCH_SEL(a,b) b
+#endif
+
 /*
  * Force the compiler to emit 'sym' as a symbol, so that we can reference
  * it from inline assembler. Necessary in case 'sym' could be inlined
  * otherwise, or eliminated entirely due to lack of references that are
  * visible to the compiler.
  */
-#define ___ADDRESSABLE(sym, __attrs) \
-       static void * __used __attrs \
+#define ___ADDRESSABLE(sym, __attrs)                                           \
+       static void * __used __attrs                                            \
        __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
+
 #define __ADDRESSABLE(sym) \
        ___ADDRESSABLE(sym, __section(".discard.addressable"))
 
-/**
- * offset_to_ptr - convert a relative memory offset to an absolute pointer
- * @off:       the address of the 32-bit offset value
- */
-static inline void *offset_to_ptr(const int *off)
-{
-       return (void *)((unsigned long)off + *off);
-}
+#define __ADDRESSABLE_ASM(sym)                                         \
+       .pushsection .discard.addressable,"aw";                         \
+       .align ARCH_SEL(8,4);                                           \
+       ARCH_SEL(.quad, .long) __stringify(sym);                        \
+       .popsection;
 
-#endif /* __ASSEMBLY__ */
+#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
 
 #ifdef __CHECKER__
 #define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
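
__ADDRESSABLE_ASM_STR() is the inline-asm counterpart of __ADDRESSABLE():
pasted into an asm() template, it records the symbol in
.discard.addressable so that a reference existing only inside the assembly
string is not discarded. A hypothetical use (my_tramp is not part of this
patch):

        extern void my_tramp(void);

        static __always_inline void call_my_tramp(void)
        {
                asm volatile ("call my_tramp\n\t"
                              __ADDRESSABLE_ASM_STR(my_tramp)
                              : : : "memory");
        }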
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 141e6b176a1b308c89cedf3481ba7592c6d974a8..785980af89729cc70942f09a4510188933b0badd 100644
 #ifdef CONFIG_HAVE_STATIC_CALL
 #include <asm/static_call.h>
 
+extern int static_call_initialized;
 /*
  * Either @site or @tramp can be NULL.
  */
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
index 5259cda486d058f4bb2f75c2cef3be2f6d8f3b57..bb7d066a7c397926ad387094adea9a02dd626e20 100644
@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
 extern struct static_call_tramp_key __start_static_call_tramp_key[],
                                    __stop_static_call_tramp_key[];
 
-static int static_call_initialized;
+int static_call_initialized;
 
 /*
  * Must be called before early_initcall() to be effective.