static inline lazy_mmu_state_t arch_enter_lazy_mmu_mode(void)
{
- /*
- * lazy_mmu_mode is not supposed to permit nesting. But in practice this
- * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation
- * inside a lazy_mmu_mode section (such as zap_pte_range()) will change
- * permissions on the linear map with apply_to_page_range(), which
- * re-enters lazy_mmu_mode. So we tolerate nesting in our
- * implementation. The first call to arch_leave_lazy_mmu_mode() will
- * flush and clear the flag such that the remainder of the work in the
- * outer nest behaves as if outside of lazy mmu mode. This is safe and
- * keeps tracking simple.
- */
+ int lazy_mmu_nested;
if (in_interrupt())
return LAZY_MMU_DEFAULT;
- set_thread_flag(TIF_LAZY_MMU);
+ lazy_mmu_nested = test_and_set_thread_flag(TIF_LAZY_MMU);
- return LAZY_MMU_DEFAULT;
+ return lazy_mmu_nested ? LAZY_MMU_NESTED : LAZY_MMU_DEFAULT;
}
/*
 * Leave lazy MMU mode.
 *
 * @state: the token returned by the matching arch_enter_lazy_mmu_mode().
 *
 * Any PTE barriers deferred while in lazy mode (TIF_LAZY_MMU_PENDING) are
 * emitted unconditionally. TIF_LAZY_MMU itself is only cleared by the
 * outermost leave: a nested section (state == LAZY_MMU_NESTED) must leave
 * the flag set so the enclosing section keeps batching.
 */
static inline void arch_leave_lazy_mmu_mode(lazy_mmu_state_t state)
{
	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
		emit_pte_barriers();

	if (state != LAZY_MMU_NESTED)
		clear_thread_flag(TIF_LAZY_MMU);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE