return temp_state;
}
-__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init struct mm_struct *text_poke_mm;
__ro_after_init unsigned long poking_addr;
static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
switch_mm_irqs_off(NULL, prev_state.mm, current);
/* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
- cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(text_poke_mm));
/*
 * Restore the breakpoints if they were disabled before the temporary mm
 * was loaded.
 */
/*
 * The lock is not really needed, but this allows us to avoid open-coding.
*/
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
/*
 * This must not fail; preallocated in poking_init().
 */
local_irq_save(flags);
pte = mk_pte(pages[0], pgprot);
- set_pte_at(poking_mm, poking_addr, ptep, pte);
+ set_pte_at(text_poke_mm, poking_addr, ptep, pte);
if (cross_page_boundary) {
pte = mk_pte(pages[1], pgprot);
- set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+ set_pte_at(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
}
/*
* Loading the temporary mm behaves as a compiler barrier, which
* guarantees that the PTE will be set at the time memcpy() is done.
*/
- prev = use_temporary_mm(poking_mm);
+ prev = use_temporary_mm(text_poke_mm);
kasan_disable_current();
func((u8 *)poking_addr + offset_in_page(addr), src, len);
/*
 * Ensure that the PTE is only cleared after the instructions of memcpy
 * were issued by using a compiler barrier.
 */
barrier();
- pte_clear(poking_mm, poking_addr, ptep);
+ pte_clear(text_poke_mm, poking_addr, ptep);
if (cross_page_boundary)
- pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+ pte_clear(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1);
/*
 * Loading the previous page-table hierarchy requires a serializing
 * instruction that already allows the core to see the updated version.
 * Xen-PV is assumed to serialize execution in a similar manner.
 */
unuse_temporary_mm(prev);

/*
 * Flushing the TLB might involve IPIs, which would require enabled
 * IRQs, but not if the mm is not used, as it is at this point.
 */
- flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+ flush_tlb_mm_range(text_poke_mm, poking_addr, poking_addr +
(cross_page_boundary ? 2 : 1) * PAGE_SIZE,
PAGE_SHIFT, false);
spinlock_t *ptl;
pte_t *ptep;
- poking_mm = mm_alloc();
- BUG_ON(!poking_mm);
+ text_poke_mm = mm_alloc();
+ BUG_ON(!text_poke_mm);
/* Xen PV guests need the PGD to be pinned. */
- paravirt_enter_mmap(poking_mm);
+ paravirt_enter_mmap(text_poke_mm);
/*
 * Randomize the poking address, but make sure that the following page
 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
 * and adjust the address if the PMD ends after the first one.
 */

/*
 * We need to trigger the allocation of the page-tables that will be
 * needed for poking now. Later, poking may be performed in an atomic
 * section, which might cause allocation to fail.
 */
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
BUG_ON(!ptep);
pte_unmap_unlock(ptep, ptl);
}
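
/*
 * For context only: a simplified, illustrative sketch of the temporary-mm
 * poking sequence that the renamed text_poke_mm takes part in, reduced to
 * the single-page case.  This is not part of the patch and not the actual
 * __text_poke() implementation: error handling, KASAN handling, the
 * cross-page path and post-write verification are omitted, the helper name
 * poke_one_page_sketch() is made up for illustration, and the kernel-internal
 * types and helpers used here are the ones shown in the hunks above.
 */
static void poke_one_page_sketch(void *addr, const void *src, size_t len,
				 struct page *page, pgprot_t pgprot)
{
	temp_mm_state_t prev;
	unsigned long flags;
	spinlock_t *ptl;
	pte_t *ptep, pte;

	/* Map the target page at the preallocated poking address. */
	ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
	local_irq_save(flags);
	pte = mk_pte(page, pgprot);
	set_pte_at(text_poke_mm, poking_addr, ptep, pte);

	/* Switch to the temporary mm; this also acts as a compiler barrier. */
	prev = use_temporary_mm(text_poke_mm);

	/* Write through the temporary mapping, not the kernel text mapping. */
	memcpy((u8 *)poking_addr + offset_in_page(addr), src, len);

	/* Make sure the write is issued before the PTE is torn down. */
	barrier();
	pte_clear(text_poke_mm, poking_addr, ptep);

	/* Restore the previous mm and flush the now-unused translations. */
	unuse_temporary_mm(prev);
	flush_tlb_mm_range(text_poke_mm, poking_addr, poking_addr + PAGE_SIZE,
			   PAGE_SHIFT, false);

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
}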