x86/alternatives: Rename 'poking_mm' to 'text_poke_mm'
author     Ingo Molnar <mingo@kernel.org>
           Fri, 11 Apr 2025 05:40:20 +0000 (07:40 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 11 Apr 2025 09:01:33 +0000 (11:01 +0200)
Put it into the text_poke_* namespace of <asm/text-patching.h>.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250411054105.2341982-9-mingo@kernel.org
arch/x86/include/asm/text-patching.h
arch/x86/kernel/alternative.c
arch/x86/mm/init.c
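
For orientation, a condensed sketch of the poking flow these hunks touch, using the renamed global. This is illustrative only: text_poke_sketch() is a made-up name; the real __text_poke() (in the second diff below) also handles writes that cross a page boundary, disables IRQs and KASAN around the copy, and takes a patching callback instead of calling memcpy() directly. temp_mm_state_t and the (un)use_temporary_mm() helpers are local to alternative.c, as that diff shows.

#include <linux/mm.h>
#include <linux/string.h>
#include <asm/text-patching.h>
#include <asm/tlbflush.h>

/* Hypothetical, simplified single-page variant of __text_poke(). */
static void text_poke_sketch(void *addr, const void *src, size_t len)
{
	temp_mm_state_t prev;
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;

	/* Map the page holding 'addr' at poking_addr inside text_poke_mm. */
	ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
	pte = mk_pte(virt_to_page(addr), PAGE_KERNEL); /* assumes a direct-map address */
	set_pte_at(text_poke_mm, poking_addr, ptep, pte);

	/* Switch to the temporary mm and write through the alias. */
	prev = use_temporary_mm(text_poke_mm);
	memcpy((u8 *)poking_addr + offset_in_page(addr), src, len);

	/* Drop the alias before leaving the temporary mm. */
	pte_clear(text_poke_mm, poking_addr, ptep);

	/* Switching mms is serializing; the new text is visible after this. */
	unuse_temporary_mm(prev);

	/* Flush the stale alias TLB entry; text_poke_mm is no longer in use. */
	flush_tlb_mm_range(text_poke_mm, poking_addr,
			   poking_addr + PAGE_SIZE, PAGE_SHIFT, false);
	pte_unmap_unlock(ptep, ptl);
}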

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 93a6b7bc78bd589836ab3ce04ddcc8c4b5f78504..7a95c0820b3e36504446cf0fd06d846fef2edcc1 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -128,7 +128,7 @@ void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
 }
 
 extern int after_bootmem;
-extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init struct mm_struct *text_poke_mm;
 extern __ro_after_init unsigned long poking_addr;
 
 #ifndef CONFIG_UML_X86
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index d2cd0d81513069ef3355c710e1c007ed607df77c..8ce0d469e32f2dc0a37fff2cd0349d6b76651293 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2191,7 +2191,7 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
        return temp_state;
 }
 
-__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init struct mm_struct *text_poke_mm;
 __ro_after_init unsigned long poking_addr;
 
 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
@@ -2201,7 +2201,7 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
        switch_mm_irqs_off(NULL, prev_state.mm, current);
 
        /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
-       cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+       cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(text_poke_mm));
 
        /*
         * Restore the breakpoints if they were disabled before the temporary mm
@@ -2266,7 +2266,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
        /*
         * The lock is not really needed, but this allows to avoid open-coding.
         */
-       ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+       ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
 
        /*
         * This must not fail; preallocated in poking_init().
@@ -2276,18 +2276,18 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
        local_irq_save(flags);
 
        pte = mk_pte(pages[0], pgprot);
-       set_pte_at(poking_mm, poking_addr, ptep, pte);
+       set_pte_at(text_poke_mm, poking_addr, ptep, pte);
 
        if (cross_page_boundary) {
                pte = mk_pte(pages[1], pgprot);
-               set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+               set_pte_at(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
        }
 
        /*
         * Loading the temporary mm behaves as a compiler barrier, which
         * guarantees that the PTE will be set at the time memcpy() is done.
         */
-       prev = use_temporary_mm(poking_mm);
+       prev = use_temporary_mm(text_poke_mm);
 
        kasan_disable_current();
        func((u8 *)poking_addr + offset_in_page(addr), src, len);
@@ -2299,9 +2299,9 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
         */
        barrier();
 
-       pte_clear(poking_mm, poking_addr, ptep);
+       pte_clear(text_poke_mm, poking_addr, ptep);
        if (cross_page_boundary)
-               pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+               pte_clear(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1);
 
        /*
         * Loading the previous page-table hierarchy requires a serializing
@@ -2314,7 +2314,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
         * Flushing the TLB might involve IPIs, which would require enabled
         * IRQs, but not if the mm is not used, as it is in this point.
         */
-       flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+       flush_tlb_mm_range(text_poke_mm, poking_addr, poking_addr +
                           (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
                           PAGE_SHIFT, false);
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bfa444a7dbb0487a90be6385c31361f01e273e8c..84b52a1ebd48608db0da96d467c062e34495342d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -824,11 +824,11 @@ void __init poking_init(void)
        spinlock_t *ptl;
        pte_t *ptep;
 
-       poking_mm = mm_alloc();
-       BUG_ON(!poking_mm);
+       text_poke_mm = mm_alloc();
+       BUG_ON(!text_poke_mm);
 
        /* Xen PV guests need the PGD to be pinned. */
-       paravirt_enter_mmap(poking_mm);
+       paravirt_enter_mmap(text_poke_mm);
 
        /*
         * Randomize the poking address, but make sure that the following page
@@ -848,7 +848,7 @@ void __init poking_init(void)
         * needed for poking now. Later, poking may be performed in an atomic
         * section, which might cause allocation to fail.
         */
-       ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+       ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
        BUG_ON(!ptep);
        pte_unmap_unlock(ptep, ptl);
 }