kernel/fork: Convert dup_mmap to use maple tree
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 24 Jul 2020 17:32:30 +0000 (13:32 -0400)
committer Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 30 Oct 2020 19:07:26 +0000 (15:07 -0400)
Use the maple tree iterator to duplicate the mm_struct trees.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
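
The iteration pattern this commit adopts, collected into a minimal sketch for reference. Only calls visible in the diff below are used (MA_STATE(), mas_for_each(), xa_is_zero(), rcu_read_lock()/rcu_read_unlock()); walk_old_mm() is a hypothetical helper for illustration, not a function added by this commit:

    /* Hypothetical helper: the read-side maple tree walk, in isolation. */
    static void walk_old_mm(struct mm_struct *oldmm)
    {
            struct vm_area_struct *mpnt;
            MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);   /* walk oldmm's tree from index 0 */

            rcu_read_lock();                          /* protect the lockless read-side walk */
            mas_for_each(&old_mas, mpnt, ULONG_MAX) { /* visit each entry up to ULONG_MAX */
                    if (xa_is_zero(mpnt))             /* skip zero entries, as dup_mmap() does */
                            continue;
                    /* ... per-VMA duplication work goes here ... */
            }
            rcu_read_unlock();
    }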
kernel/fork.c

index 1174565a3b96fe478231346d3af6e3e134270364..543615b1b0174c291787ea948e990babc9e76db7 100644
@@ -473,5 +473,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
+       MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
        LIST_HEAD(uf);
        struct vm_area_struct *old_vma;
@@ -508,100 +509,13 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                goto out;
 
        prev = NULL;
-#if 0
-       mas_for_each(&old_mas, old_vma, ULONG_MAX) {
-               struct file *file;
 
-               if (mas_retry(&old_mas, old_vma))
-                       continue;
-
-               mas_pause(&old_mas);
-               mas_unlock(&old_mas);
+       rcu_read_lock();
+       mas_for_each(&old_mas, mpnt, ULONG_MAX) {
+               struct file *file;
 
-               if (old_vma->vm_flags & VM_DONTCOPY) {
-                       vm_stat_account(mm, old_vma->vm_flags,
-                                       -vma_pages(old_vma));
+               if (xa_is_zero(mpnt))
                        continue;
-               }
-               charge = 0;
-               /*
-                * Don't duplicate many vmas if we've been oom-killed (for
-                * example)
-                */
-               if (fatal_signal_pending(current)) {
-                       retval = -EINTR;
-                       goto out;
-               }
-               if (old_vma->vm_flags & VM_ACCOUNT) {
-                       unsigned long len = vma_pages(old_vma);
-
-                       if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
-                               goto fail_nomem;
-                       charge = len;
-               }
-               tmp = vm_area_dup(old_vma);
-               if (!tmp)
-                       goto fail_nomem;
-               retval = vma_dup_policy(old_vma, tmp);
-               if (retval)
-                       goto fail_nomem_policy;
-               tmp->vm_mm = mm;
-               retval = dup_userfaultfd(tmp, &uf);
-               if (retval)
-                       goto fail_nomem_anon_vma_fork;
-               if (tmp->vm_flags & VM_WIPEONFORK) {
-                       /* VM_WIPEONFORK gets a clean slate in the child. */
-                       tmp->anon_vma = NULL;
-                       if (anon_vma_prepare(tmp))
-                               goto fail_nomem_anon_vma_fork;
-               } else if (anon_vma_fork(tmp, old_vma))
-                       goto fail_nomem_anon_vma_fork;
-               tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
-               tmp->vm_next = tmp->vm_prev = NULL;
-               file = tmp->vm_file;
-               if (file) {
-                       struct inode *inode = file_inode(file);
-                       struct address_space *mapping = file->f_mapping;
-
-                       get_file(file);
-                       if (tmp->vm_flags & VM_DENYWRITE)
-                               atomic_dec(&inode->i_writecount);
-                       i_mmap_lock_write(mapping);
-                       if (tmp->vm_flags & VM_SHARED)
-                               atomic_inc(&mapping->i_mmap_writable);
-                       flush_dcache_mmap_lock(mapping);
-                       /* insert tmp into the share list, just after old_vma */
-                       vma_interval_tree_insert_after(tmp, old_vma,
-                                       &mapping->i_mmap);
-                       flush_dcache_mmap_unlock(mapping);
-                       i_mmap_unlock_write(mapping);
-               }
-
-               /*
-                * Clear hugetlb-related page reserves for children. This only
-                * affects MAP_PRIVATE mappings. Faults generated by the child
-                * are not guaranteed to succeed, even if read-only
-                */
-               if (is_vm_hugetlb_page(tmp))
-                       reset_vma_resv_huge_pages(tmp);
-
-               /* Actually insert the vma into the new mm. */
-               vma_store(mm, tmp);
-
-               mm->map_count++;
-               if (!(tmp->vm_flags & VM_WIPEONFORK))
-                       retval = copy_page_range(mm, oldmm, old_vma);
-
-               if (tmp->vm_ops && tmp->vm_ops->open)
-                       tmp->vm_ops->open(tmp);
-
-               if (retval)
-                       goto out;
-               mas_lock(&old_mas);
-       }
-#endif
-       for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
-               struct file *file;
 
                if (mpnt->vm_flags & VM_DONTCOPY) {
                        vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
@@ -694,10 +608,13 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 
                if (retval)
                        goto out;
+
        }
+
        /* a new mm has just been created */
        retval = arch_dup_mmap(oldmm, mm);
 out:
+       rcu_read_unlock();
        mmap_write_unlock(mm);
        flush_tlb_mm(oldmm);
        mmap_write_unlock(oldmm);
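
For contrast, the dropped #if 0 experiment held the tree lock and released it around blocking per-VMA work rather than running the whole walk under RCU. Reduced to the calls that appear in the removed hunk (mas_lock(), mas_unlock(), mas_pause()), the shape of that pattern is roughly:

    mas_lock(&old_mas);
    mas_for_each(&old_mas, old_vma, ULONG_MAX) {
            mas_pause(&old_mas);    /* record the position so the walk can resume */
            mas_unlock(&old_mas);   /* drop the lock across work that may sleep */
            /* ... duplicate old_vma ... */
            mas_lock(&old_mas);     /* re-take the lock before the next iteration */
    }
    mas_unlock(&old_mas);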