From: Liam R. Howlett <Liam.Howlett@Oracle.com>
Date: Wed, 20 Jul 2022 02:18:05 +0000 (+0000)
Subject: mm/mmap.c: pass in mapping to __vma_link_file()
X-Git-Tag: remove_vma_adjust~76
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=b85d28f7b2b7c72c43b9061acf402729f8cc22c8;p=users%2Fjedix%2Flinux-maple.git

mm/mmap.c: pass in mapping to __vma_link_file()

__vma_link_file() resolves the mapping from the file, if there is one.
Pass the mapping through and check vm_file externally instead, since
most callers already have the required information and an existing
check of vm_file.

Link: https://lkml.kernel.org/r/20220504011345.662299-54-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220621204632.3370049-70-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220720021727.17018-70-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/mm/mmap.c b/mm/mmap.c
index f00bc374a5df..6f7e672fe63d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -199,6 +199,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 	if (brk < min_brk)
 		goto out;
+
 	/*
 	 * Check against rlimit here. If this check is done later after the test
 	 * of oldbrk with newbrk then it can escape the test and let the data
@@ -275,7 +276,6 @@ success:
 	if (populate)
 		mm_populate(oldbrk, newbrk - oldbrk);
 	return brk;
-
 out:
 	mmap_write_unlock(mm);
 	return origbrk;
@@ -407,21 +407,15 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
 	return nr_pages;
 }
 
-static void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma,
+			    struct address_space *mapping)
 {
-	struct file *file;
-
-	file = vma->vm_file;
-	if (file) {
-		struct address_space *mapping = file->f_mapping;
-
-		if (vma->vm_flags & VM_SHARED)
-			mapping_allow_writable(mapping);
+	if (vma->vm_flags & VM_SHARED)
+		mapping_allow_writable(mapping);
 
-		flush_dcache_mmap_lock(mapping);
-		vma_interval_tree_insert(vma, &mapping->i_mmap);
-		flush_dcache_mmap_unlock(mapping);
-	}
+	flush_dcache_mmap_lock(mapping);
+	vma_interval_tree_insert(vma, &mapping->i_mmap);
+	flush_dcache_mmap_unlock(mapping);
 }
 
 /*
@@ -488,10 +482,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 	}
 
 	vma_mas_store(vma, &mas);
-	__vma_link_file(vma);
 
-	if (mapping)
+	if (mapping) {
+		__vma_link_file(vma, mapping);
 		i_mmap_unlock_write(mapping);
+	}
 
 	mm->map_count++;
 	validate_mm(mm);
@@ -730,14 +725,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			uprobe_munmap(next, next->vm_start, next->vm_end);
 
 		i_mmap_lock_write(mapping);
-		if (insert) {
+		if (insert && insert->vm_file) {
 			/*
 			 * Put into interval tree now, so instantiated pages
 			 * are visible to arm/parisc __flush_dcache_page
 			 * throughout; but we cannot insert into address
 			 * space until vma start or end is updated.
 			 */
-			__vma_link_file(insert);
+			__vma_link_file(insert, insert->vm_file->f_mapping);
 		}
 	}
 
@@ -2934,6 +2929,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	struct mm_struct *mm = current->mm;
 
 	validate_mm_mt(mm);
+
 	/*
 	 * Check against address space limits by the changed size
	 * Note: This happens *after* clearing old mappings in some code paths.
@@ -2991,6 +2987,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 		goto mas_store_fail;
 
 	mm->map_count++;
+
 out:
 	perf_event_mmap(vma);
 	mm->total_vm += len >> PAGE_SHIFT;
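
After this patch, __vma_link_file() assumes a valid mapping and no longer
tolerates an anonymous VMA; each caller resolves the mapping from vm_file
and guards the call itself. A minimal sketch of the resulting call-site
pattern (illustration only, not part of the patch; it mirrors the vma_link()
hunk above, where the i_mmap lock is taken once the mapping is known):

	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping) {
		i_mmap_lock_write(mapping);
		/* insert vma into mapping->i_mmap under the i_mmap lock */
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}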