powerpc/code-patching: Add generic memory patching
author    Benjamin Gray <bgray@linux.ibm.com>
          Wed, 15 May 2024 02:44:41 +0000 (12:44 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
          Wed, 21 Aug 2024 10:15:12 +0000 (20:15 +1000)
patch_instruction() is designed for patching instructions in otherwise
readonly memory. Other consumers also sometimes need to patch readonly
memory, so they have abused patch_instruction() for arbitrary data patches.

This is a problem on ppc64, where patch_instruction() decides the patch
width by checking the 'instruction' opcode to see whether it is a prefixed
instruction. Data whose value happens to match the prefix opcode triggers
a larger (64-bit) write, possibly crossing a page boundary and failing the
write altogether.
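
For illustration, a minimal sketch of the misdetection (the helper and the
example value below are hypothetical, not kernel code; it assumes the usual
OP_PREFIX == 1 primary opcode for prefixed instructions):

  /* Hypothetical sketch: a 32-bit *data* value whose top 6 bits equal
   * OP_PREFIX (1) is indistinguishable from a prefixed instruction word
   * when only the opcode is inspected.
   */
  static bool looks_prefixed(u32 data)
  {
          return (data >> 26) == 1;       /* primary opcode == OP_PREFIX */
  }

  /* e.g. patching the data word 0x04000020 would be treated as a prefixed
   * instruction, so the old path issues an 8-byte write at the target,
   * which can straddle a page boundary and fail.
   */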

Introduce patch_uint() and patch_ulong(), with aliases patch_u32() and
patch_u64() (on ppc64), designed for aligned data patches. The patch
size is now determined by the called function and passed as an
additional parameter to the generic internals.
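
As a usage sketch (call sites and variable names here are illustrative,
not taken from this patch):

  /* Before: a data patch forced through the instruction-patching path */
  patch_instruction(data_addr, ppc_inst(new_val));

  /* After: explicit data patches; the width is fixed by the function called */
  patch_u32(data_addr, new_val);                  /* 32-bit, naturally aligned */
  patch_ulong(ptr_addr, (unsigned long)new_ptr);  /* pointer-size, naturally aligned */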

While the instruction flushing is not required for data patches, it
remains unconditional in this patch. A follow-up series is possible if
benchmarking shows that fewer flushes give an improvement in some
data-patching workload.

ppc32 does not support prefixed instructions, so it is unaffected by the
original issue. Care is taken not to expose the size parameter in the
public (non-static) interface, so the compiler can const-propagate it
away.
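
Roughly, the intent is (a simplified sketch of the pattern, not the exact
kernel code): the size flag only ever appears as a literal constant inside
the public wrappers, so once the internals are inlined the compiler can drop
the untaken size branch entirely.

  /* Sketch: the public wrapper passes a literal, so after inlining,
   * the internal patch routine specialises to a single 4-byte store for
   * patch_uint() and a single 8-byte store for patch_ulong(); no runtime
   * size check survives in the generated code.
   */
  int patch_uint(void *addr, unsigned int val)
  {
          return patch_mem(addr, val, false);     /* 'false' const-propagated */
  }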

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Reviewed-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240515024445.236364-2-bgray@linux.ibm.com
arch/powerpc/include/asm/code-patching.h
arch/powerpc/lib/code-patching.c

arch/powerpc/include/asm/code-patching.h
index 0e29ccf903d093526f5f773c4c227aca5a0daccf..21a36e2c4e262446b95cc8f57189f985384aebf3 100644
@@ -76,6 +76,37 @@ int patch_instruction(u32 *addr, ppc_inst_t instr);
 int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
 int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
 
+/*
+ * The data patching functions patch_uint() and patch_ulong(), etc., must be
+ * called on aligned addresses.
+ *
+ * The instruction patching functions patch_instruction() and similar must be
+ * called on addresses satisfying instruction alignment requirements.
+ */
+
+#ifdef CONFIG_PPC64
+
+int patch_uint(void *addr, unsigned int val);
+int patch_ulong(void *addr, unsigned long val);
+
+#define patch_u64 patch_ulong
+
+#else
+
+static inline int patch_uint(void *addr, unsigned int val)
+{
+       return patch_instruction(addr, ppc_inst(val));
+}
+
+static inline int patch_ulong(void *addr, unsigned long val)
+{
+       return patch_instruction(addr, ppc_inst(val));
+}
+
+#endif
+
+#define patch_u32 patch_uint
+
 static inline unsigned long patch_site_addr(s32 *site)
 {
        return (unsigned long)site + *site;
arch/powerpc/lib/code-patching.c
index 0d1f3ee91115207500c7939aeddcee31a876a642..7f423fa3c51be8806ce6796e0d1fa38a46aaf7e2 100644
 #include <asm/code-patching.h>
 #include <asm/inst.h>
 
-static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
+static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
 {
-       if (!ppc_inst_prefixed(instr)) {
-               u32 val = ppc_inst_val(instr);
+       if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
+               /* For big endian correctness: plain address would use the wrong half */
+               u32 val32 = val;
 
-               __put_kernel_nofault(patch_addr, &val, u32, failed);
+               __put_kernel_nofault(patch_addr, &val32, u32, failed);
        } else {
-               u64 val = ppc_inst_as_ulong(instr);
-
                __put_kernel_nofault(patch_addr, &val, u64, failed);
        }
 
@@ -44,7 +43,10 @@ failed:
 
 int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
 {
-       return __patch_instruction(addr, instr, addr);
+       if (ppc_inst_prefixed(instr))
+               return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
+       else
+               return __patch_mem(addr, ppc_inst_val(instr), addr, false);
 }
 
 struct patch_context {
@@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr)
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 }
 
-static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
 {
        int err;
        u32 *patch_addr;
@@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
 
        orig_mm = start_using_temp_mm(patching_mm);
 
-       err = __patch_instruction(addr, instr, patch_addr);
+       err = __patch_mem(addr, val, patch_addr, is_dword);
 
        /* context synchronisation performed by __patch_instruction (isync or exception) */
        stop_using_temp_mm(patching_mm, orig_mm);
@@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
        return err;
 }
 
-static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
 {
        int err;
        u32 *patch_addr;
@@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");
 
-       err = __patch_instruction(addr, instr, patch_addr);
+       err = __patch_mem(addr, val, patch_addr, is_dword);
 
        pte_clear(&init_mm, text_poke_addr, pte);
        flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
@@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
        return err;
 }
 
-int patch_instruction(u32 *addr, ppc_inst_t instr)
+static int patch_mem(void *addr, unsigned long val, bool is_dword)
 {
        int err;
        unsigned long flags;
@@ -359,19 +361,51 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
         */
        if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
            !static_branch_likely(&poking_init_done))
-               return raw_patch_instruction(addr, instr);
+               return __patch_mem(addr, val, addr, is_dword);
 
        local_irq_save(flags);
        if (mm_patch_enabled())
-               err = __do_patch_instruction_mm(addr, instr);
+               err = __do_patch_mem_mm(addr, val, is_dword);
        else
-               err = __do_patch_instruction(addr, instr);
+               err = __do_patch_mem(addr, val, is_dword);
        local_irq_restore(flags);
 
        return err;
 }
+
+#ifdef CONFIG_PPC64
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+       if (ppc_inst_prefixed(instr))
+               return patch_mem(addr, ppc_inst_as_ulong(instr), true);
+       else
+               return patch_mem(addr, ppc_inst_val(instr), false);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 
+int patch_uint(void *addr, unsigned int val)
+{
+       return patch_mem(addr, val, false);
+}
+NOKPROBE_SYMBOL(patch_uint);
+
+int patch_ulong(void *addr, unsigned long val)
+{
+       return patch_mem(addr, val, true);
+}
+NOKPROBE_SYMBOL(patch_ulong);
+
+#else
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+       return patch_mem(addr, ppc_inst_val(instr), false);
+}
+NOKPROBE_SYMBOL(patch_instruction)
+
+#endif
+
 static int patch_memset64(u64 *addr, u64 val, size_t count)
 {
        for (u64 *end = addr + count; addr < end; addr++)