From: Linus Torvalds
Date: Thu, 13 Sep 2018 09:57:48 +0000 (-1000)
Subject: mm: get rid of vmacache_flush_all() entirely
X-Git-Tag: v4.1.12-124.31.3~513
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=79acee7e589ffc86958ad8877248ac96365dccca;p=users%2Fjedix%2Flinux-maple.git

mm: get rid of vmacache_flush_all() entirely

Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too.  It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit.  That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.

So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too.  Win-win.

[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
  also just goes away entirely with this ]

Reported-by: Jann Horn
Suggested-by: Will Deacon
Acked-by: Davidlohr Bueso
Cc: Oleg Nesterov
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds

(cherry picked from commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2)
Signed-off-by: Brian Maly

Conflicts:
	include/linux/mm_types.h
	include/linux/mm_types_task.h
	mm/debug.c

Orabug: 28701016
CVE: CVE-2018-17182

Reviewed-by: Khalid Aziz
Signed-off-by: Allen Pais
Signed-off-by: Brian Maly
---
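Note: a rough back-of-the-envelope illustration (not part of the patch)
of why 64 bits close the overflow case for good.  Assuming a deliberately
absurd rate of one cache invalidation per nanosecond, a u32 generation
counter wraps within seconds, while a u64 outlives the hardware:

/* Overflow-horizon sketch; illustration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed (hypothetical) invalidation rate: one per nanosecond. */
	const double rate = 1e9;

	printf("u32 wraps after %.1f seconds\n",
	       (double)UINT32_MAX / rate);
	printf("u64 wraps after %.0f years\n",
	       (double)UINT64_MAX / rate / (3600.0 * 24 * 365));
	return 0;
}

This prints roughly 4.3 seconds for the u32 counter versus about 585
years for the u64 one, which is why the wraparound handling below can
simply be deleted.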
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f5ace8c3f82c..69c3049b08fe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_MM_TYPES_H
 #define _LINUX_MM_TYPES_H
 
+#include <linux/mm_types_task.h>
+
 #include <linux/auxvec.h>
 #include <linux/types.h>
 #include <linux/threads.h>
@@ -373,7 +375,11 @@ struct kioctx_table;
 struct mm_struct {
 	struct vm_area_struct *mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
+#ifndef __GENKSYMS__
+	u64 vmacache_seqnum;
+#else
 	u32 vmacache_seqnum;                   /* per-thread vmacache */
+#endif
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
new file mode 100644
index 000000000000..47da6f934315
--- /dev/null
+++ b/include/linux/mm_types_task.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MM_TYPES_TASK_H
+#define _LINUX_MM_TYPES_TASK_H
+
+/*
+ * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
+ *
+ * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
+ */
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+
+#include <asm/page.h>
+
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#include <asm/tlbbatch.h>
+#endif
+
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
+		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
+
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+#ifndef __GENKSYMS__
+	u64 seqnum;
+#else
+	u32 seqnum;
+#endif
+	struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
+#endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7b94cb7734c1..6c0381f8a2cd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,7 @@ struct sched_param {
 #include
 #include
 #include
+#include <linux/mm_types_task.h>
 #include
 #include
 #include
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9246d32dc973..730334cdf037 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -90,7 +90,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
 		VMACACHE_FIND_CALLS,
 		VMACACHE_FIND_HITS,
-		VMACACHE_FULL_FLUSHES,
 #endif
 		NR_VM_EVENT_ITEMS
 };
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949..4f58ff2dacd6 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
 	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
 						    unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
 	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
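Note: with the overflow branch gone, vmacache_invalidate() above is a
bare increment; stale per-thread entries are instead caught by the
sequence number comparison that every lookup already performs.  The
sketch below is a simplified userspace model of that validation pattern
(illustration only; the struct and function names are invented, not the
kernel's vmacache API):

/* Generation-counter cache validation, modeled in plain C. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 4

struct mm { uint64_t seqnum; };		/* shared, one per address space */

struct cache {				/* private, one per thread */
	uint64_t seqnum;		/* snapshot of mm->seqnum */
	void *slots[CACHE_SIZE];
};

/* Writers only bump the generation; no wraparound handling needed. */
static void invalidate(struct mm *mm)
{
	mm->seqnum++;
}

/* Readers lazily flush their own cache when their snapshot is stale. */
static void *lookup(struct cache *c, const struct mm *mm, size_t idx)
{
	if (c->seqnum != mm->seqnum) {
		memset(c->slots, 0, sizeof(c->slots));
		c->seqnum = mm->seqnum;
		return NULL;		/* forced miss after invalidation */
	}
	return c->slots[idx & (CACHE_SIZE - 1)];
}

int main(void)
{
	struct mm mm = { .seqnum = 0 };
	struct cache c = { .seqnum = 0 };
	int page = 42;

	c.slots[0] = &page;
	assert(lookup(&c, &mm, 0) == &page);	/* hit: generations match */
	invalidate(&mm);
	assert(lookup(&c, &mm, 0) == NULL);	/* miss: cache self-flushed */
	puts("stale entries dropped on seqnum mismatch");
	return 0;
}

Because each thread revalidates on its next lookup, no thread ever has
to flush another thread's cache, which is what makes the removal of
vmacache_flush_all() below safe.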
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..335d697c7168 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %p\n"
 #endif
@@ -198,7 +198,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
 
 		"%s",	/* This is here to hold the comma */
-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
diff --git a/mm/vmacache.c b/mm/vmacache.c
index b6e3662fe339..e6e6e92d0d72 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -5,44 +5,6 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-	struct task_struct *g, *p;
-
-	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-	/*
-	 * Single threaded tasks need not iterate the entire
-	 * list of process.  We can avoid the flushing as well
-	 * since the mm's seqnum was increased and don't have
-	 * to worry about other threads' seqnum.  Current's
-	 * flush will occur upon the next lookup.
-	 */
-	if (atomic_read(&mm->mm_users) == 1)
-		return;
-
-	rcu_read_lock();
-	for_each_process_thread(g, p) {
-		/*
-		 * Only flush the vmacache pointers as the
-		 * mm seqnum is already set and curr's will
-		 * be set upon invalidation when the next
-		 * lookup is done.
-		 */
-		if (mm == p->mm)
-			vmacache_flush(p);
-	}
-	rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this
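Note on the dump_mm() hunk: besides switching %p to %px (so this debug
dump shows real, unhashed pointer values), the seqnum format becomes
%llu with an explicit cast, since the width of the kernel's u64 relative
to long has historically varied by architecture.  The same portability
concern in userspace C is usually handled via <inttypes.h>; a minimal
illustration (not kernel code):

/* Printing a fixed-width 64-bit counter portably. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t seqnum = UINT64_C(1) << 40;

	/* PRIu64 expands to the right specifier for this platform. */
	printf("seqnum %" PRIu64 "\n", seqnum);

	/* Equivalent to the explicit-cast style used in the patch. */
	printf("seqnum %llu\n", (unsigned long long)seqnum);
	return 0;
}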