From: Liam R. Howlett
Date: Tue, 21 Jun 2022 20:47:14 +0000 (+0000)
Subject: mm/mmap.c: pass in mapping to __vma_link_file()
X-Git-Tag: maple_v12_fixes~192
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=28f42dbc75cf6c5d2756736bb8c44b779fac21da;p=users%2Fjedix%2Flinux-maple.git

mm/mmap.c: pass in mapping to __vma_link_file()

__vma_link_file() resolves the mapping from the file, if there is one.
Pass the mapping in and check vm_file externally, since most callers
already have the required information and check vm_file themselves.

Link: https://lkml.kernel.org/r/20220504011345.662299-54-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220621204632.3370049-70-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett
Cc: Catalin Marinas
Cc: David Howells
Cc: "Matthew Wilcox (Oracle)"
Cc: SeongJae Park
Cc: Vlastimil Babka
Cc: Will Deacon
Cc: Davidlohr Bueso
Signed-off-by: Andrew Morton
---

diff --git a/mm/mmap.c b/mm/mmap.c
index a9ebe25c5b09..37990e66feeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -246,6 +246,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 	if (brk < min_brk)
 		goto out;
+
 	/*
 	 * Check against rlimit here. If this check is done later after the test
 	 * of oldbrk with newbrk then it can escape the test and let the data
@@ -322,7 +323,6 @@ success:
 	if (populate)
 		mm_populate(oldbrk, newbrk - oldbrk);
 	return brk;
-
 out:
 	mmap_write_unlock(mm);
 	return origbrk;
@@ -454,21 +454,15 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
 	return nr_pages;
 }
 
-static void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma,
+			    struct address_space *mapping)
 {
-	struct file *file;
-
-	file = vma->vm_file;
-	if (file) {
-		struct address_space *mapping = file->f_mapping;
-
-		if (vma->vm_flags & VM_SHARED)
-			mapping_allow_writable(mapping);
+	if (vma->vm_flags & VM_SHARED)
+		mapping_allow_writable(mapping);
 
-		flush_dcache_mmap_lock(mapping);
-		vma_interval_tree_insert(vma, &mapping->i_mmap);
-		flush_dcache_mmap_unlock(mapping);
-	}
+	flush_dcache_mmap_lock(mapping);
+	vma_interval_tree_insert(vma, &mapping->i_mmap);
+	flush_dcache_mmap_unlock(mapping);
 }
 
 /*
@@ -535,10 +529,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 	}
 
 	vma_mas_store(vma, &mas);
-	__vma_link_file(vma);
 
-	if (mapping)
+	if (mapping) {
+		__vma_link_file(vma, mapping);
 		i_mmap_unlock_write(mapping);
+	}
 
 	mm->map_count++;
 	validate_mm(mm);
@@ -777,14 +772,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			uprobe_munmap(next, next->vm_start, next->vm_end);
 
 		i_mmap_lock_write(mapping);
-		if (insert) {
+		if (insert && insert->vm_file) {
 			/*
 			 * Put into interval tree now, so instantiated pages
 			 * are visible to arm/parisc __flush_dcache_page
 			 * throughout; but we cannot insert into address
 			 * space until vma start or end is updated.
 			 */
-			__vma_link_file(insert);
+			__vma_link_file(insert, insert->vm_file->f_mapping);
 		}
 	}
 
@@ -2982,6 +2977,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	struct mm_struct *mm = current->mm;
 
 	validate_mm_mt(mm);
+
 	/*
 	 * Check against address space limits by the changed size
 	 * Note: This happens *after* clearing old mappings in some code paths.
@@ -3039,6 +3035,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 		goto mas_store_fail;
 
 	mm->map_count++;
+
 out:
 	perf_event_mmap(vma);
 	mm->total_vm += len >> PAGE_SHIFT;
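
To illustrate the calling convention this patch settles on, here is a
minimal, self-contained userspace sketch of the same refactoring
pattern. It is not kernel code: the fake_* types, the FAKE_VM_SHARED
flag, and the link_file()/link_vma() helpers are invented stand-ins for
vm_area_struct, VM_SHARED, __vma_link_file() and vma_link(). Only the
shape of the change matters here: the helper no longer resolves and
NULL-checks the file itself; the caller, which already has the mapping
in hand, does the check once and passes the mapping down.

#include <stdio.h>

struct fake_mapping { int writable; };
struct fake_file { struct fake_mapping f_mapping; };
struct fake_vma {
	struct fake_file *vm_file;
	unsigned long vm_flags;
};

#define FAKE_VM_SHARED 0x1UL

/* After the change: callers guarantee mapping is non-NULL. */
static void link_file(struct fake_vma *vma, struct fake_mapping *mapping)
{
	if (vma->vm_flags & FAKE_VM_SHARED)
		mapping->writable = 1;
	/* ... insert vma into the mapping's interval tree here ... */
}

static void link_vma(struct fake_vma *vma)
{
	struct fake_mapping *mapping = NULL;

	/*
	 * The caller already checks vm_file (it needs the mapping for
	 * its own locking), so the helper need not repeat the test.
	 */
	if (vma->vm_file)
		mapping = &vma->vm_file->f_mapping;

	if (mapping)
		link_file(vma, mapping);
}

int main(void)
{
	struct fake_file file = { .f_mapping = { .writable = 0 } };
	struct fake_vma vma = {
		.vm_file = &file,
		.vm_flags = FAKE_VM_SHARED,
	};

	link_vma(&vma);
	printf("writable: %d\n", file.f_mapping.writable);
	return 0;
}

Built with a plain "cc sketch.c", this prints "writable: 1". The
benefit mirrors the patch: the mapping is resolved exactly once in the
caller, which needs it for locking anyway, and the helper may assume a
non-NULL mapping, dropping a redundant branch.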