                si_code = SEGV_ACCERR;
        else {
                struct mm_struct *mm = current->mm;
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                if (find_vma(mm, (unsigned long)va))
                        si_code = SEGV_ACCERR;
                else
                        si_code = SEGV_MAPERR;
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        }
        send_sig_fault(SIGSEGV, si_code, va, 0, current);
        return;
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return;
 
        /* Something tried to access memory that isn't in our memory map.
           Fix it, but check if it's kernel or user first.  */
  bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (user_mode(regs))
                goto do_sigsegv;
        /* We ran out of memory, or some other thing happened to us that
           made us unable to handle the page fault gracefully.  */
  out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
  do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /* Send a sigbus, regardless of whether we were in kernel
           or user mode.  */
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
 
        if (unlikely(ret != -EFAULT))
                 goto fail;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
                               FAULT_FLAG_WRITE, NULL);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        if (likely(!ret))
                 goto again;
 
        /* can't use print_vma_addr() yet as it doesn't check for
         * non-inclusive vma
         */
-       down_read(&active_mm->mmap_sem);
+       mmap_read_lock(active_mm);
        vma = find_vma(active_mm, address);
 
        /* check against the find_vma( ) behaviour which returns the next VMA
        } else
                pr_info("    @No matching VMA found\n");
 
-       up_read(&active_mm->mmap_sem);
+       mmap_read_unlock(active_mm);
 }
 
 static void show_ecr_verbose(struct pt_regs *regs)
 
                flags |= FAULT_FLAG_WRITE;
 
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma(mm, address);
        if (!vma)
        }
 
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Major/minor page fault accounting
 
        npages = 1; /* for sigpage */
        npages += vdso_total_pages;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
        hint = sigpage_addr(mm, npages);
        addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
        arm_install_vdso(mm, addr + PAGE_SIZE);
 
  up_fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 #endif
 
 {
        int si_code;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        if (find_vma(current->mm, addr) == NULL)
                si_code = SEGV_MAPERR;
        else
                si_code = SEGV_ACCERR;
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        pr_debug("SWP{B} emulation: access caused memory abort!\n");
        arm_notify_die("Illegal memory access", regs,
 
        atomic = faulthandler_disabled();
 
        if (!atomic)
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
 
                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
-                               up_read(&current->mm->mmap_sem);
+                               mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
-                               down_read(&current->mm->mmap_sem);
+                               mmap_read_lock(current->mm);
                }
 
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                        spin_unlock(ptl);
        }
        if (!atomic)
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
 out:
        return n;
                return 0;
        }
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;
 
                while (!pin_page_for_write(addr, &pte, &ptl)) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
-                       down_read(&current->mm->mmap_sem);
+                       mmap_read_lock(current->mm);
                }
 
                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                else
                        spin_unlock(ptl);
        }
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
 out:
        return n;
 
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
 
 {
        int code;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        force_signal_inject(SIGSEGV, code, addr);
 }
 
        struct mm_struct *mm = current->mm;
        int ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        ret = aarch32_kuser_helpers_setup(mm);
 #endif /* CONFIG_COMPAT_VDSO */
 
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 #endif /* CONFIG_COMPAT */
        struct mm_struct *mm = current->mm;
        int ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        ret = __setup_additional_pages(VDSO_ABI_AA64,
                                       bprm,
                                       uses_interp);
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return ret;
 }
 
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs) && !search_exception_tables(regs->pc))
                        goto no_context;
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in which
                might_sleep();
 #ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        goto no_context;
                }
 #endif
                        goto retry;
                }
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Handle the "normal" (no error) case first.
 
        unsigned long addr;
        struct mm_struct *mm = current->mm;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
        mm->context.vdso = (void *)addr;
 
 up_fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
                              address);
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
        /*
         * Fix it, but check if it's kernel or user first..
         */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
 do_sigbus:
        tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
 
        unsigned long vdso_base;
        struct mm_struct *mm = current->mm;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        /* Try to get it loaded right near ld.so/glibc. */
        mm->context.vdso = (void *)vdso_base;
 
 up_fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
                        }
                }
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                return;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Handle copyin/out exception cases */
        if (!user_mode(regs))
        return;
 
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
         * now we atomically find some area in the address space and
         * remap the buffer in it.
         */
-       down_write(&task->mm->mmap_sem);
+       mmap_write_lock(task->mm);
 
        /* find some free area in address space, must have mmap sem held */
        vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
        if (IS_ERR_VALUE(vma->vm_start)) {
                DPRINT(("Cannot find unmapped area for size %ld\n", size));
-               up_write(&task->mm->mmap_sem);
+               mmap_write_unlock(task->mm);
                goto error;
        }
        vma->vm_end = vma->vm_start + size;
        /* can only be applied to current task, need to have the mm semaphore held when called */
        if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
                DPRINT(("Can't remap buffer\n"));
-               up_write(&task->mm->mmap_sem);
+               mmap_write_unlock(task->mm);
                goto error;
        }
 
        insert_vm_struct(mm, vma);
 
        vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
-       up_write(&task->mm->mmap_sem);
+       mmap_write_unlock(task->mm);
 
        /*
         * keep track of user level virtual address
 
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma )
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
   check_expansion:
        goto good_area;
 
   bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 #ifdef CONFIG_VIRTUAL_MEM_MAP
   bad_area_no_up:
 #endif
        return;
 
   out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
 
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-               down_write(&current->mm->mmap_sem);
+               mmap_write_lock(current->mm);
                if (insert_vm_struct(current->mm, vma)) {
-                       up_write(&current->mm->mmap_sem);
+                       mmap_write_unlock(current->mm);
                        vm_area_free(vma);
                        return;
                }
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
        }
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
-                       down_write(&current->mm->mmap_sem);
+                       mmap_write_lock(current->mm);
                        if (insert_vm_struct(current->mm, vma)) {
-                               up_write(&current->mm->mmap_sem);
+                               mmap_write_unlock(current->mm);
                                vm_area_free(vma);
                                return;
                        }
-                       up_write(&current->mm->mmap_sem);
+                       mmap_write_unlock(current->mm);
                }
        }
 }
 
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = find_vma(current->mm, addr);
                if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out_unlock;
            }
        }
 out_unlock:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 out:
        return ret;
 }
                spinlock_t *ptl;
                unsigned long mem_value;
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                pgd = pgd_offset(mm, (unsigned long)mem);
                if (!pgd_present(*pgd))
                        goto bad_access;
                        __put_user(newval, mem);
 
                pte_unmap_unlock(pte, ptl);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                return mem_value;
 
              bad_access:
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                /* This is not necessarily a bad access, we can get here if
                   a memory we're trying to write to should be copied-on-write.
                   Make the kernel do the necessary page stuff, then re-iterate.
        struct mm_struct *mm = current->mm;
        unsigned long mem_value;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        mem_value = *mem;
        if (mem_value == oldval)
                *mem = newval;
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return mem_value;
 }
 
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma(mm, address);
        if (!vma)
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return 0;
 
 /*
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        current->thread.faddr = address;
 
 send_sig:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return send_fault_sig(regs);
 }
 
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
-       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+       if (unlikely(!mmap_read_trylock(mm))) {
                if (kernel_mode(regs) && !search_exception_tables(regs->pc))
                        goto bad_area_nosemaphore;
 
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
 
        vma = find_vma(mm, address);
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * keep track of tlb+htab misses that are good addrs but
        return;
 
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        pte_errors++;
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (user_mode(regs)) {
                force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
                return;
 
                return 1;
 
        case SIGSEGV:
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)fault_addr);
                if (vma && (vma->vm_start <= (unsigned long)fault_addr))
                        si_code = SEGV_ACCERR;
                else
                        si_code = SEGV_MAPERR;
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                force_sig_fault(SIGSEGV, si_code, fault_addr);
                return 1;
 
 
        struct vm_area_struct *vma;
        int ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
        ret = 0;
 
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
        vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
 #endif
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        addr = vdso_random_addr(vdso_mapping_len);
                goto up_fail;
        }
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 
 up_fail:
        mm->context.vdso = NULL;
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
-       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+       if (unlikely(!mmap_read_trylock(mm))) {
                if (!user_mode(regs) &&
                    !search_exception_tables(instruction_pointer(regs)))
                        goto no_context;
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in which
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
        /*
         * Fix it, but check if it's kernel or user first..
         */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 
         */
 
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ea))
                        goto bad_area_nosemaphore;
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
 
        vma = find_vma(mm, address);
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
 /*
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
 
        struct mm_struct *mm = current->mm;
        int ret;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        /* Map kuser helpers to user space address */
        ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
                                      VM_READ | VM_EXEC | VM_MAYREAD |
                                      VM_MAYEXEC, kuser_page);
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return ret;
 }
 
                goto no_context;
 
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
 
        if (!vma)
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
        /*
         */
 
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 
        __asm__ __volatile__("l.nop 42");
        __asm__ __volatile__("l.nop 1");
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Send a sigbus, regardless of whether we were in kernel
 
                if (user_mode(regs)) {
                        struct vm_area_struct *vma;
 
-                       down_read(&current->mm->mmap_sem);
+                       mmap_read_lock(current->mm);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {
                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];
 
-                               up_read(&current->mm->mmap_sem);
+                               mmap_read_unlock(current->mm);
                                break; /* call do_page_fault() */
                        }
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                }
                /* Fall Through */
        case 27: 
 
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma || address < vma->vm_start)
                goto check_expansion;
                        goto retry;
                }
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
 check_expansion:
  * Something tried to access memory that isn't in our memory map..
  */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (user_mode(regs)) {
                int signo, si_code;
        parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
 
   out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
 
         * and end up putting it elsewhere.
         * Add enough to the size so that the result can be aligned.
         */
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, vdso_base,
                                      (vdso_pages << PAGE_SHIFT) +
                goto fail_mmapsem;
        }
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 
  fail_mmapsem:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return rc;
 }
 
 
 
        /* Look up the VMA for the start of this memory slot */
        hva = memslot->userspace_addr;
-       down_read(&kvm->mm->mmap_sem);
+       mmap_read_lock(kvm->mm);
        vma = find_vma(kvm->mm, hva);
        if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
                goto up_out;
 
        psize = vma_kernel_pagesize(vma);
 
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
 
        /* We can handle 4k, 64k or 16M pages in the VRMA */
        if (psize >= 0x1000000)
        return err;
 
  up_out:
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
        goto out_srcu;
 }
 
 
         */
        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                          MADV_UNMERGEABLE, &vma->vm_flags);
-       downgrade_write(&kvm->mm->mmap_sem);
+       mmap_write_downgrade(kvm->mm);
        *downgrade = true;
        if (ret)
                return ret;
 
        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       down_write(&kvm->mm->mmap_sem);
+       mmap_write_lock(kvm->mm);
 
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
        mutex_unlock(&kvm->arch.uvmem_lock);
 out:
        if (downgrade)
-               up_read(&kvm->mm->mmap_sem);
+               mmap_read_unlock(kvm->mm);
        else
-               up_write(&kvm->mm->mmap_sem);
+               mmap_write_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
 }
 
        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       down_read(&kvm->mm->mmap_sem);
+       mmap_read_lock(kvm->mm);
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;
        if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
                ret = H_SUCCESS;
 out:
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
 }
 
 
        if (tlbsel == 1) {
                struct vm_area_struct *vma;
-               down_read(&kvm->mm->mmap_sem);
+               mmap_read_lock(kvm->mm);
 
                vma = find_vma(kvm->mm, hva);
                if (vma && hva >= vma->vm_start &&
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }
 
-               up_read(&kvm->mm->mmap_sem);
+               mmap_read_unlock(kvm->mm);
        }
 
        if (likely(!pfnmap)) {
 
                goto unlock_exit;
        }
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
                        sizeof(struct vm_area_struct *);
        chunk = min(chunk, entries);
                        pinned += ret;
                break;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (pinned != entries) {
                if (!ret)
                        ret = -EFAULT;
 
        size_t nw;
        unsigned long next, limit;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        spt = mm_ctx_subpage_prot(&mm->context);
        if (!spt)
        }
 
 err_out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
                return -EFAULT;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        spt = mm_ctx_subpage_prot(&mm->context);
        if (!spt) {
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;
 
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
                if (__copy_from_user(spp, map, nw * sizeof(u32)))
                        return -EFAULT;
                map += nw;
-               down_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
 
                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
                spt->maxaddr = limit;
        err = 0;
  out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return err;
 }
 
        if (mm->pgd == NULL)
                return -EFAULT;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        ret = -EFAULT;
        vma = find_vma(mm, ea);
        if (!vma)
                current->min_flt++;
 
 out_unlock:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
 
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return __bad_area_nosemaphore(regs, address, si_code);
 }
         */
        pkey = vma_pkey(vma);
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * If we are in kernel mode, bail out with a SEGV, this will
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
-       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+       if (unlikely(!mmap_read_trylock(mm))) {
                if (!is_user && !search_exception_tables(regs->nip))
                        return bad_area_nosemaphore(regs, address);
 
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                if (!must_retry)
                        return bad_area(regs, address);
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (fault_in_pages_readable((const char __user *)regs->nip,
                                            sizeof(unsigned int)))
                        return bad_area_nosemaphore(regs, address);
                }
        }
 
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        if (unlikely(fault & VM_FAULT_ERROR))
                return mm_fault_error(regs, address, fault);
 
                fput(exe_file);
        }
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
                        continue;
        *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
        pr_debug("got dcookie for %pD\n", vma->vm_file);
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 out:
        return app_cookie;
 
 fail_no_image_cookie:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        printk(KERN_ERR "SPU_PROF: "
                "%s, line %d: Cannot find dcookie for SPU binary\n",
 
                goto refault;
 
        if (ctx->state == SPU_STATE_SAVED) {
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
                err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
                spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
        } else {
                area = ctx->spu->problem_phys + ps_offs;
                ret = vmf_insert_pfn(vmf->vma, vmf->address,
 
 
        vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                mm->context.vdso = NULL;
 
 end:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        if (unlikely(!vma))
                goto bad_area;
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
        /*
         * Fix it, but check if it's kernel or user first.
         */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
         * (which will retry the fault, or kill us if we got oom-killed).
         */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
 
 
 again:
        rc = -EFAULT;
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
 
        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
        pte_unmap_unlock(ptep, ptelock);
        unlock_page(page);
 out:
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
 
        if (rc == -EAGAIN) {
                wait_on_page_writeback(page);
 
         * it at vdso_base which is the "natural" base for it, but we might
         * fail and end up putting it elsewhere.
         */
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
        rc = 0;
 
 out_up:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return rc;
 }
 
 
        int dat_protection, fake;
        int rc;
 
-       down_read(&sg->mm->mmap_sem);
+       mmap_read_lock(sg->mm);
        /*
         * We don't want any guest-2 tables to change - so the parent
         * tables/pointers we read stay valid - unshadowing is however
        if (!rc)
                rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
        ipte_unlock(vcpu);
-       up_read(&sg->mm->mmap_sem);
+       mmap_read_unlock(sg->mm);
        return rc;
 }
 
 {
        struct page *page = NULL;
 
-       down_read(&kvm->mm->mmap_sem);
+       mmap_read_lock(kvm->mm);
        get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
                              &page, NULL, NULL);
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
        return page;
 }
 
 
                        r = -EINVAL;
                else {
                        r = 0;
-                       down_write(&kvm->mm->mmap_sem);
+                       mmap_write_lock(kvm->mm);
                        kvm->mm->context.allow_gmap_hpage_1m = 1;
-                       up_write(&kvm->mm->mmap_sem);
+                       mmap_write_unlock(kvm->mm);
                        /*
                         * We might have to create fake 4k page
                         * tables. To avoid that the hardware works on
        if (!keys)
                return -ENOMEM;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        srcu_idx = srcu_read_lock(&kvm->srcu);
        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                        break;
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        if (!r) {
                r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
                goto out;
 
        i = 0;
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        srcu_idx = srcu_read_lock(&kvm->srcu);
         while (i < args->count) {
                unlocked = false;
                        i++;
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 out:
        kvfree(keys);
        return r;
        if (!values)
                return -ENOMEM;
 
-       down_read(&kvm->mm->mmap_sem);
+       mmap_read_lock(kvm->mm);
        srcu_idx = srcu_read_lock(&kvm->srcu);
        if (peek)
                ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
        else
                ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
 
        if (kvm->arch.migration_mode)
                args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
                goto out;
        }
 
-       down_read(&kvm->mm->mmap_sem);
+       mmap_read_lock(kvm->mm);
        srcu_idx = srcu_read_lock(&kvm->srcu);
        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                set_pgste_bits(kvm->mm, hva, mask, pgstev);
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
-       up_read(&kvm->mm->mmap_sem);
+       mmap_read_unlock(kvm->mm);
 
        if (!kvm->mm->context.uses_cmm) {
-               down_write(&kvm->mm->mmap_sem);
+               mmap_write_lock(kvm->mm);
                kvm->mm->context.uses_cmm = 1;
-               up_write(&kvm->mm->mmap_sem);
+               mmap_write_unlock(kvm->mm);
        }
 out:
        vfree(bits);
                if (r)
                        break;
 
-               down_write(&current->mm->mmap_sem);
+               mmap_write_lock(current->mm);
                r = gmap_mark_unmergeable();
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
                if (r)
                        break;
 
 
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 retry:
        unlocked = false;
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        rc = get_guest_storage_key(current->mm, vmaddr, &key);
 
        if (rc) {
                rc = fixup_user_fault(current, current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        goto retry;
                }
        }
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        if (rc == -EFAULT)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (rc < 0)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 retry:
        unlocked = false;
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        rc = reset_guest_reference_bit(current->mm, vmaddr);
        if (rc < 0) {
                rc = fixup_user_fault(current, current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        goto retry;
                }
        }
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        if (rc == -EFAULT)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (rc < 0)
                if (kvm_is_error_hva(vmaddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
                                                m3 & SSKE_NQ, m3 & SSKE_MR,
                                                m3 & SSKE_MC);
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                if (rc < 0)
 
                        if (rc)
                                return rc;
-                       down_read(&current->mm->mmap_sem);
+                       mmap_read_lock(current->mm);
                        rc = cond_set_guest_storage_key(current->mm, vmaddr,
                                                        key, NULL, nq, mr, mc);
                        if (rc < 0) {
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        if (rc == -EAGAIN)
                 * already correct, we do nothing and avoid the lock.
                 */
                if (vcpu->kvm->mm->context.uses_cmm == 0) {
-                       down_write(&vcpu->kvm->mm->mmap_sem);
+                       mmap_write_lock(vcpu->kvm->mm);
                        vcpu->kvm->mm->context.uses_cmm = 1;
-                       up_write(&vcpu->kvm->mm->mmap_sem);
+                       mmap_write_unlock(vcpu->kvm->mm);
                }
                /*
                 * If we are here, we are supposed to have CMMA enabled in
        } else {
                int srcu_idx;
 
-               down_read(&vcpu->kvm->mm->mmap_sem);
+               mmap_read_lock(vcpu->kvm->mm);
                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                i = __do_essa(vcpu, orc);
                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
-               up_read(&vcpu->kvm->mm->mmap_sem);
+               mmap_read_unlock(vcpu->kvm->mm);
                if (i < 0)
                        return i;
                /* Account for the possible extra cbrl entry */
        }
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
        for (i = 0; i < entries; ++i)
                __gmap_zap(gmap, cbrlo[i]);
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
        return 0;
 }
 
 
                flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        gmap = NULL;
        if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
                        }
                        flags &= ~FAULT_FLAG_RETRY_NOWAIT;
                        flags |= FAULT_FLAG_TRIED;
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                        goto retry;
                }
        }
        }
        fault = 0;
 out_up:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 out:
        return fault;
 }
        switch (get_fault_type(regs)) {
        case USER_FAULT:
                mm = current->mm;
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = find_vma(mm, addr);
                if (!vma) {
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
                        break;
                }
                page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
                if (IS_ERR_OR_NULL(page)) {
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        break;
                }
                if (arch_make_page_accessible(page))
                        send_sig(SIGSEGV, current, 0);
                put_page(page);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                break;
        case KERNEL_FAULT:
                page = phys_to_page(addr);
 
                return -EINVAL;
 
        flush = 0;
-       down_write(&gmap->mm->mmap_sem);
+       mmap_write_lock(gmap->mm);
        for (off = 0; off < len; off += PMD_SIZE)
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-       up_write(&gmap->mm->mmap_sem);
+       mmap_write_unlock(gmap->mm);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
                return -EINVAL;
 
        flush = 0;
-       down_write(&gmap->mm->mmap_sem);
+       mmap_write_lock(gmap->mm);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Remove old translation */
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
                                      (void *) from + off))
                        break;
        }
-       up_write(&gmap->mm->mmap_sem);
+       mmap_write_unlock(gmap->mm);
        if (flush)
                gmap_flush_tlb(gmap);
        if (off >= len)
 {
        unsigned long rc;
 
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
        rc = __gmap_translate(gmap, gaddr);
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
        return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
        int rc;
        bool unlocked;
 
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
 
 retry:
        unlocked = false;
 
        rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
        return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
        unsigned long gaddr, vmaddr, size;
        struct vm_area_struct *vma;
 
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
        for (gaddr = from; gaddr < to;
             gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
                /* Find the vm address for the guest address */
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size);
        }
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
                return -EINVAL;
        if (!MACHINE_HAS_ESOP && prot == PROT_READ)
                return -EINVAL;
-       down_read(&gmap->mm->mmap_sem);
+       mmap_read_lock(gmap->mm);
        rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
-       up_read(&gmap->mm->mmap_sem);
+       mmap_read_unlock(gmap->mm);
        return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
        }
        spin_unlock(&parent->shadow_lock);
        /* protect after insertion, so it will get properly invalidated */
-       down_read(&parent->mm->mmap_sem);
+       mmap_read_lock(parent->mm);
        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
                                ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
                                PROT_READ, GMAP_NOTIFY_SHADOW);
-       up_read(&parent->mm->mmap_sem);
+       mmap_read_unlock(parent->mm);
        spin_lock(&parent->shadow_lock);
        new->initialized = true;
        if (rc) {
        /* Fail if the page tables are 2K */
        if (!mm_alloc_pgste(mm))
                return -EINVAL;
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
        struct mm_struct *mm = current->mm;
        int rc = 0;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        if (mm_uses_skeys(mm))
                goto out_up;
 
        walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
 
 out_up:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
 
 void s390_reset_cmma(struct mm_struct *mm)
 {
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
 
         */
        if (!mmget_not_zero(mm))
                return;
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_acc);
 
        struct vm_area_struct *vma;
        long ret;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        ret = -EINVAL;
        vma = find_vma(current->mm, user_addr);
        if (!vma)
                goto out;
        ret = follow_pfn(vma, user_addr, pfn);
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return ret;
 }
 
 
        if (addr + len < addr)
                return -EFAULT;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma (current->mm, addr);
        if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                return -EFAULT;
        }
 
        if (op & CACHEFLUSH_I)
                flush_icache_range(addr, addr+len);
 
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return 0;
 }
 
        unsigned long addr;
        int ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        current->mm->context.vdso = (void *)addr;
 
 up_fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        __bad_area_nosemaphore(regs, error_code, address, si_code);
 }
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs))
 
        /* Release mmap_sem first if necessary */
        if (!(fault & VM_FAULT_RETRY))
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
        if (!(fault & VM_FAULT_ERROR))
                return 0;
        }
 
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 }
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        if (!from_user && address >= PAGE_OFFSET)
                goto bad_area;
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
        /*
         * Fix it, but check if it's kernel or user first..
         */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        goto no_context;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;
 
        code = SEGV_MAPERR;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
 
 
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                }
 
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
 
        if (fault_code & FAULT_CODE_BAD_RA)
                        goto retry;
                }
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
         */
 bad_area:
        insn = get_fault_insn(regs, insn);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
  */
 out_of_memory:
        insn = get_fault_insn(regs, insn);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!(regs->tstate & TSTATE_PRIV)) {
                pagefault_out_of_memory();
                goto exit_exception;
 
 do_sigbus:
        insn = get_fault_insn(regs, insn);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Send a sigbus, regardless of whether we were in kernel
 
        unsigned long text_start, addr = 0;
        int ret = 0;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        /*
         * First, get an unmapped region: then randomize it, and make sure that
        if (ret)
                current->mm->context.vdso = NULL;
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
 
        __switch_mm(&new->context.id);
        down_write_nested(&new->mmap_sem, 1);
        uml_setup_stubs(new);
-       up_write(&new->mmap_sem);
+       mmap_write_unlock(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
 
                printk(KERN_ERR "fix_range_common: failed, killing current "
                       "process: %d\n", task_tgid_vnr(current));
                /* We are under mmap_sem, release it such that current can terminate */
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
                force_sig(SIGKILL);
        do_signal(&current->thread.regs);
        }
 
        if (is_user)
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
 #endif
        flush_tlb_page(vma, address);
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 out_nosemaphore:
        return err;
 
         * We ran out of memory, call the OOM killer, and return the userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!is_user)
                goto out_nosemaphore;
        pagefault_out_of_memory();
 
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs)
                    && !search_exception_tables(regs->UCreg_pc))
                        goto no_context;
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
 
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        zap_page_range(vma, vma->vm_start, size);
        }
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 }
 #else
        unsigned long text_start;
        int ret = 0;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        addr = get_unmapped_area(NULL, addr,
        }
 
 up_fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        /*
         * Check if we have already mapped vdso blob - fail to prevent
         * abusing from userspace install_speciall_mapping, which may
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                                vma_is_special_mapping(vma, &vvar_mapping)) {
-                       up_write(&mm->mmap_sem);
+                       mmap_write_unlock(mm);
                        return -EEXIST;
                }
        }
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return map_vdso(image, addr);
 }
 
        pte_t *pte;
        int i;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        }
        pte_unmap_unlock(pte, ptl);
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
 }
 
 
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        __bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 }
         * 1. Failed to acquire mmap_sem, and
         * 2. The access did not originate in userspace.
         */
-       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+       if (unlikely(!mmap_read_trylock(mm))) {
                if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
                        /*
                         * Fault from code in kernel from
                        return;
                }
 retry:
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                goto retry;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, hw_error_code, address, fault);
                return;
 
        if (!vdso_enabled)
                return 0;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
                VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                vdsop);
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return err;
 }
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
 
        if (!vma)
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (flags & VM_FAULT_MAJOR)
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
         * Fix it, but check if it's kernel or user first..
         */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (user_mode(regs)) {
                current->thread.bad_vaddr = address;
                current->thread.error_code = is_write;
         * us unable to handle the page fault gracefully.
         */
 out_of_memory:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Send a sigbus, regardless of whether we were in kernel
         * or user mode.
 
                mm = alloc->vma_vm_mm;
 
        if (mm) {
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = alloc->vma;
        }
 
                trace_binder_alloc_page_end(alloc, index);
        }
        if (mm) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmput(mm);
        }
        return 0;
        }
 err_no_vma:
        if (mm) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
        mm = alloc->vma_vm_mm;
        if (!mmget_not_zero(mm))
                goto err_mmget;
-       if (!down_read_trylock(&mm->mmap_sem))
+       if (!mmap_read_trylock(mm))
                goto err_down_read_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
 
 
                trace_binder_unmap_user_end(alloc, index);
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
         * concurrently and the queues are actually stopped
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
-               down_write(&current->mm->mmap_sem);
+               mmap_write_lock(current->mm);
                is_invalid_userptr = atomic_read(&mem->invalid);
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
        }
 
        mutex_lock(&mem->lock);
 
                goto out_free_ranges;
        }
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, start);
        if (unlikely(!vma || start < vma->vm_start)) {
                r = -EFAULT;
                r = -EPERM;
                goto out_unlock;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 
 retry:
        range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        r = hmm_range_fault(range);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (unlikely(r)) {
                /*
                 * FIXME: This timeout should encompass the retry from
        return 0;
 
 out_unlock:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 out_free_pfns:
        kvfree(range->hmm_pfns);
 out_free_ranges:
 
 
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
 
        memory_exception_data.gpu_id = dev->id;
                        memory_exception_data.failure.NoExecute = 0;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        pr_debug("notpresent %d, noexecute %d, readonly %d\n",
 
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
 
-               if (down_write_killable(&mm->mmap_sem)) {
+               if (mmap_write_lock_killable(mm)) {
                        addr = -EINTR;
                        goto err;
                }
                                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                else
                        addr = -ENOMEM;
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
                if (IS_ERR_VALUE(addr))
                        goto err;
        }
 
        if (IS_ERR(mn))
                err = PTR_ERR(mn);
 
-       down_write(&mm->mm->mmap_sem);
+       mmap_write_lock(mm->mm);
        mutex_lock(&mm->i915->mm_lock);
        if (mm->mn == NULL && !err) {
                /* Protected by mmap_sem (write-lock) */
                err = 0;
        }
        mutex_unlock(&mm->i915->mm_lock);
-       up_write(&mm->mm->mmap_sem);
+       mmap_write_unlock(mm->mm);
 
        if (mn && !IS_ERR(mn))
                kfree(mn);
                if (mmget_not_zero(mm)) {
                        while (pinned < npages) {
                                if (!locked) {
-                                       down_read(&mm->mmap_sem);
+                                       mmap_read_lock(mm);
                                        locked = 1;
                                }
                                ret = pin_user_pages_remote
                                pinned += ret;
                        }
                        if (locked)
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                        mmput(mm);
                }
        }
 
         */
 
        mm = get_task_mm(current);
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        if (!cli->svm.svmm) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                return -EINVAL;
        }
 
         */
        args->result = 0;
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        return 0;
        if (ret)
                goto out_free;
 
-       down_write(&current->mm->mmap_sem);
+       mmap_write_lock(current->mm);
        svmm->notifier.ops = &nouveau_mn_ops;
        ret = __mmu_notifier_register(&svmm->notifier, current->mm);
        if (ret)
 
        cli->svm.svmm = svmm;
        cli->svm.cli = cli;
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        mutex_unlock(&cli->mutex);
        return 0;
 
 out_mm_unlock:
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
 out_free:
        mutex_unlock(&cli->mutex);
        kfree(svmm);
                        return -EBUSY;
 
                range.notifier_seq = mmu_interval_read_begin(range.notifier);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(&range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        /*
                         * FIXME: the input PFN_REQ flags are destroyed on
                /* Intersect fault window with the CPU VMA, cancelling
                 * the fault if the address is invalid.
                 */
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = find_vma_intersection(mm, start, limit);
                if (!vma) {
                        SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        mmput(mm);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                start = max_t(u64, start, vma->vm_start);
                limit = min_t(u64, limit, vma->vm_end);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
 
                if (buffer->fault[fi]->addr != start) {
 
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);
        if (need_mmap_lock)
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
 
        r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 
        if (need_mmap_lock)
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
        return r;
 }
 
        }
 
        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                r = radeon_bo_reserve(bo, true);
                if (r) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        goto release_object;
                }
 
                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                radeon_bo_unreserve(bo);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                if (r)
                        goto release_object;
        }
 
                        goto out_unlock;
 
                ttm_bo_get(bo);
-               up_read(&vmf->vma->vm_mm->mmap_sem);
+               mmap_read_unlock(vmf->vma->vm_mm);
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                if (fault_flag_allow_retry_first(vmf->flags)) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
-                               up_read(&vmf->vma->vm_mm->mmap_sem);
+                               mmap_read_unlock(vmf->vma->vm_mm);
                                if (!dma_resv_lock_interruptible(bo->base.resv,
                                                                 NULL))
                                        dma_resv_unlock(bo->base.resv);
 
                                ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
                                PAGE_SIZE / sizeof(struct page *));
 
-               down_read(&owning_mm->mmap_sem);
+               mmap_read_lock(owning_mm);
                /*
                 * Note: this might result in redundent page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
-               up_read(&owning_mm->mmap_sem);
+               mmap_read_unlock(owning_mm);
 
                if (npages < 0) {
                        if (npages != -EAGAIN)
 
                 * at a time to get the lock ordering right. Typically there
                 * will only be one mm, so no big deal.
                 */
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                if (!mmget_still_valid(mm))
                        goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                }
                mutex_unlock(&ufile->umap_lock);
        skip_mm:
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmput(mm);
        }
 }
 
                unsigned long untagged_start = untagged_addr(start);
                struct vm_area_struct *vma;
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                /*
                 * FIXME: Ideally this would iterate over all the vmas that
                 * cover the memory, but for now it requires a single vma to
                        access_flags |= IB_ACCESS_LOCAL_WRITE;
                }
 
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        }
 
        return ib_umem_get(device, start, length, access_flags);
 
                goto bail;
        }
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        for (got = 0; got < num_pages; got += ret) {
                ret = pin_user_pages(start_page + got * PAGE_SIZE,
                                     num_pages - got,
                                     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
                                     p + got, NULL);
                if (ret < 0) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        goto bail_release;
                }
        }
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        return 0;
 bail_release:
 
        npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
 
        uiomr->owning_mm = mm = current->mm;
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        locked = atomic64_add_return(npages, &current->mm->pinned_vm);
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        } else
                mmgrab(uiomr->owning_mm);
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        free_page((unsigned long) page_list);
        return ret;
 }
 
        if (!writable)
                foll_flags |= FOLL_FORCE;
 
-       down_read(&mm_s->mmap_sem);
+       mmap_read_lock(mm_s);
 
        mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
                num_pages -= got;
        }
 out_sem_up:
-       up_read(&mm_s->mmap_sem);
+       mmap_read_unlock(mm_s);
 
        if (rv > 0)
                return umem;
 
                flags |= FAULT_FLAG_WRITE;
        flags |= FAULT_FLAG_REMOTE;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                /* failed to get a vma in the right range */
 
        ret = handle_mm_fault(vma, address, flags);
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (ret & VM_FAULT_ERROR)
                /* failed to service fault */
 
                if (!mmget_not_zero(svm->mm))
                        goto bad_req;
 
-               down_read(&svm->mm->mmap_sem);
+               mmap_read_lock(svm->mm);
                vma = find_extend_vma(svm->mm, address);
                if (!vma || address < vma->vm_start)
                        goto invalid;
 
                result = QI_RESP_SUCCESS;
        invalid:
-               up_read(&svm->mm->mmap_sem);
+               mmap_read_unlock(svm->mm);
                mmput(svm->mm);
        bad_req:
                /* Accounting for major/minor faults? */
 
        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
 
        if (b->memory == V4L2_MEMORY_MMAP)
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
 
        videobuf_queue_lock(q);
        retval = -EBUSY;
        videobuf_queue_unlock(q);
 
        if (b->memory == V4L2_MEMORY_MMAP)
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
        return retval;
 }
 
        mem->size = PAGE_ALIGN(vb->size + offset);
        ret = -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma(mm, untagged_baddr);
        if (!vma)
        }
 
 out_up:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        return ret;
 }
 
 {
        int ret;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        ret = videobuf_dma_init_user_locked(dma, direction, data, size);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        return ret;
 }
 
        struct vm_area_struct *vma = NULL;
        int rc = 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        vma = find_vma(mm, addr);
        if (!vma) {
        *vma_start = vma->vm_start;
        *vma_end = vma->vm_end;
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return rc;
 }
 
 
                return;
        }
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        last_esid = slb.esid;
                }
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        mmput(mm);
 }
 
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        return gts;
 }
 
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = ERR_PTR(-EINVAL);
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        vma = gru_find_vma(vaddr);
        if (!vma)
                goto err;
        if (IS_ERR(gts))
                goto err;
        mutex_lock(&gts->ts_ctxlock);
-       downgrade_write(&mm->mmap_sem);
+       mmap_write_downgrade(mm);
        return gts;
 
 err:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return gts;
 }
 
 static void gru_unlock_gts(struct gru_thread_state *gts)
 {
        mutex_unlock(&gts->ts_ctxlock);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 }
 
 /*
                 */
                gts->ustats.fmm_tlbmiss++;
                if (!gts->ts_force_cch_reload &&
-                                       down_read_trylock(&gts->ts_mm->mmap_sem)) {
+                                       mmap_read_trylock(gts->ts_mm)) {
                        gru_try_dropin(gru, gts, tfh, NULL);
-                       up_read(&gts->ts_mm->mmap_sem);
+                       mmap_read_unlock(gts->ts_mm);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
 
        if (!(req.options & GRU_OPT_MISS_MASK))
                req.options |= GRU_OPT_MISS_FMM_INTR;
 
-       down_write(&current->mm->mmap_sem);
+       mmap_write_lock(current->mm);
        vma = gru_find_vma(req.gseg);
        if (vma) {
                vdata = vma->vm_private_data;
                vdata->vd_tlb_preload_count = req.tlb_preload_count;
                ret = 0;
        }
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
 
        return ret;
 }
 
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *mpnt;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                return 0;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return 0;
 }
 
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
 
                if (addr < vma->vm_start || addr >= vma->vm_end)
 
        if (!vma)
                cookie = INVALID_COOKIE;
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return cookie;
 }
 
        }
 
        // Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
-       down_read(&current->mm->mmap_sem);      /*  get memory map semaphore */
+       mmap_read_lock(current->mm);      /*  get memory map semaphore */
        rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
-       up_read(&current->mm->mmap_sem);        /*  release the semaphore */
+       mmap_read_unlock(current->mm);        /*  release the semaphore */
        if (rv != acd->page_count) {
                dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
                goto err_get_user_pages;
 
        if (virt_addr_valid(start))
                return 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        rc = __check_mem_type(find_vma(mm, start),
                              start + num_pages * PAGE_SIZE);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return rc;
 }
 
        if (!mm)
                return -ESRCH; /* process exited */
 
-       ret = down_write_killable(&mm->mmap_sem);
+       ret = mmap_write_lock_killable(mm);
        if (!ret) {
                ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
                                          dma->lock_cap);
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
        }
 
        if (async)
        if (prot & IOMMU_WRITE)
                flags |= FOLL_WRITE;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
                                    page, NULL, NULL);
        if (ret == 1) {
                        ret = -EFAULT;
        }
 done:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return ret;
 }
 
 
        if (!npages)
                return -EINVAL;
 
-       down_read(&dev->mm->mmap_sem);
+       mmap_read_lock(dev->mm);
 
        locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                vhost_vdpa_unmap(v, msg->iova, msg->size);
                atomic64_sub(npages, &dev->mm->pinned_vm);
        }
-       up_read(&dev->mm->mmap_sem);
+       mmap_read_unlock(dev->mm);
        free_page((unsigned long)page_list);
        return ret;
 }
 
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
                goto out_unlock;
        rv = 0;
 
  out_unlock:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
 
        if (rc || list_empty(&pagelist))
                goto out;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        {
                struct page *page = list_first_entry(&pagelist,
 
 
 out_up:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
 out:
        free_page_list(&pagelist);
                }
        }
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        vma = find_vma(mm, m.addr);
        if (!vma ||
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        if (state.global_error) {
                /* Write back errors in second pass. */
        return ret;
 
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        goto out;
 }
 
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
 
        vma = find_vma(mm, kdata.addr);
        if (!vma || vma->vm_ops != &privcmd_vm_ops) {
        }
 
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        kfree(pfns);
 
        return rc;
 
        ctx->mmap_size = nr_pages * PAGE_SIZE;
        pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
 
-       if (down_write_killable(&mm->mmap_sem)) {
+       if (mmap_write_lock_killable(mm)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
                return -EINTR;
        ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_SHARED, 0, &unused, NULL);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        if (IS_ERR((void *)ctx->mmap_base)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
 
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        if (!mm->core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        if (core_waiters > 0) {
                struct core_thread *ptr;
 
                return -ENOMEM;
        vma_set_anonymous(vma);
 
-       if (down_write_killable(&mm->mmap_sem)) {
+       if (mmap_write_lock_killable(mm)) {
                err = -EINTR;
                goto err_free;
        }
                goto err;
 
        mm->stack_vm = mm->total_vm = 1;
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
 err:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 err_free:
        bprm->vma = NULL;
        vm_area_free(vma);
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        vm_flags = VM_STACK_FLAGS;
                ret = -EFAULT;
 
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 EXPORT_SYMBOL(setup_arg_pages);
                 * through with the exec.  We must hold mmap_sem around
                 * checking core_state and changing tsk->mm.
                 */
-               down_read(&old_mm->mmap_sem);
+               mmap_read_lock(old_mm);
                if (unlikely(old_mm->core_state)) {
-                       up_read(&old_mm->mmap_sem);
+                       mmap_read_unlock(old_mm);
                        mutex_unlock(&tsk->signal->exec_update_mutex);
                        return -EINTR;
                }
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
-               up_read(&old_mm->mmap_sem);
+               mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
 
                }
 
                ret = 0;
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                pret = pin_user_pages(ubuf, nr_pages,
                                      FOLL_WRITE | FOLL_LONGTERM,
                                      pages, vmas);
                } else {
                        ret = pret < 0 ? pret : -EFAULT;
                }
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                if (ret) {
                        /*
                         * if we did partial map, or found file backed vmas,
 
                goto out;
 
        if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
-               status = down_read_killable(&mm->mmap_sem);
+               status = mmap_read_lock_killable(mm);
                if (!status) {
                        exact_vma_exists = !!find_exact_vma(mm, vm_start,
                                                            vm_end);
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                }
        }
 
        if (rc)
                goto out_mmput;
 
-       rc = down_read_killable(&mm->mmap_sem);
+       rc = mmap_read_lock_killable(mm);
        if (rc)
                goto out_mmput;
 
                path_get(path);
                rc = 0;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 out_mmput:
        mmput(mm);
                goto out_put_task;
 
        result = ERR_PTR(-EINTR);
-       if (down_read_killable(&mm->mmap_sem))
+       if (mmap_read_lock_killable(mm))
                goto out_put_mm;
 
        result = ERR_PTR(-ENOENT);
                                (void *)(unsigned long)vma->vm_file->f_mode);
 
 out_no_vma:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 out_put_mm:
        mmput(mm);
 out_put_task:
 
                return NULL;
        }
 
-       if (down_read_killable(&mm->mmap_sem)) {
+       if (mmap_read_lock_killable(mm)) {
                mmput(mm);
                put_task_struct(priv->task);
                priv->task = NULL;
                return;
 
        release_task_mempolicy(priv);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
        put_task_struct(priv->task);
        priv->task = NULL;
 
        memset(&mss, 0, sizeof(mss));
 
-       ret = down_read_killable(&mm->mmap_sem);
+       ret = mmap_read_lock_killable(mm);
        if (ret)
                goto out_put_mm;
 
        __show_smap(m, &mss, true);
 
        release_task_mempolicy(priv);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 out_put_mm:
        mmput(mm);
                };
 
                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
-                       if (down_write_killable(&mm->mmap_sem)) {
+                       if (mmap_write_lock_killable(mm)) {
                                count = -EINTR;
                                goto out_mm;
                        }
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
-                       up_write(&mm->mmap_sem);
+                       mmap_write_unlock(mm);
                        goto out_mm;
                }
 
-               if (down_read_killable(&mm->mmap_sem)) {
+               if (mmap_read_lock_killable(mm)) {
                        count = -EINTR;
                        goto out_mm;
                }
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               up_read(&mm->mmap_sem);
-                               if (down_write_killable(&mm->mmap_sem)) {
+                               mmap_read_unlock(mm);
+                               if (mmap_write_lock_killable(mm)) {
                                        count = -EINTR;
                                        goto out_mm;
                                }
                                         * failed like if
                                         * get_proc_task() fails?
                                         */
-                                       up_write(&mm->mmap_sem);
+                                       mmap_write_unlock(mm);
                                        goto out_mm;
                                }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
                                }
-                               downgrade_write(&mm->mmap_sem);
+                               mmap_write_downgrade(mm);
                                break;
                        }
 
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 out_mm:
                mmput(mm);
        }
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
-               ret = down_read_killable(&mm->mmap_sem);
+               ret = mmap_read_lock_killable(mm);
                if (ret)
                        goto out_free;
                ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                start_vaddr = end;
 
                len = min(count, PM_ENTRY_BYTES * pm.pos);
 
        struct rb_node *p;
        unsigned long bytes = 0, sbytes = 0, slack = 0, size;
         
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
 
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
        struct rb_node *p;
        unsigned long vsize = 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                vsize += vma->vm_end - vma->vm_start;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return vsize;
 }
 
        struct rb_node *p;
        unsigned long size = kobjsize(mm);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                size += kobjsize(vma);
                >> PAGE_SHIFT;
        *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
                >> PAGE_SHIFT;
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        size >>= PAGE_SHIFT;
        size += *text + *data;
        *resident = size;
        if (!mm || !mmget_not_zero(mm))
                return NULL;
 
-       if (down_read_killable(&mm->mmap_sem)) {
+       if (mmap_read_lock_killable(mm)) {
                mmput(mm);
                return ERR_PTR(-EINTR);
        }
                if (n-- == 0)
                        return p;
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
        return NULL;
 }
        struct proc_maps_private *priv = m->private;
 
        if (!IS_ERR_OR_NULL(_vml)) {
-               up_read(&priv->mm->mmap_sem);
+               mmap_read_unlock(priv->mm);
                mmput(priv->mm);
        }
        if (priv->task) {
 
                must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
                                                       vmf->address,
                                                       vmf->flags, reason);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        if (likely(must_wait && !READ_ONCE(ctx->released) &&
                   !userfaultfd_signal_pending(vmf->flags))) {
                struct mm_struct *mm = release_new_ctx->mm;
 
                /* the various vma->vm_userfaultfd_ctx still points to it */
-               down_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
                /* no task can run (and in turn coredump) yet */
                VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                                vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
                        }
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
 
                userfaultfd_ctx_put(release_new_ctx);
        }
 
        userfaultfd_ctx_get(ctx);
        WRITE_ONCE(ctx->mmap_changing, true);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        msg_init(&ewq.msg);
 
         * it's critical that released is set to true (above), before
         * taking the mmap_sem for writing.
         */
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        still_valid = mmget_still_valid(mm);
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
 wakeup:
        /*
        if (!mmget_not_zero(mm))
                goto out;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        if (!mmget_still_valid(mm))
                goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
        if (!ret) {
                __u64 ioctls_out;
        if (!mmget_not_zero(mm))
                goto out;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        if (!mmget_still_valid(mm))
                goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
 out_unlock:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        mmput(mm);
 out:
        return ret;
 
        if (err)
                goto out_fput;
 
-       if (down_write_killable(&current->mm->mmap_sem)) {
+       if (mmap_write_lock_killable(current->mm)) {
                err = -EINTR;
                goto out_fput;
        }
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
 invalid:
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        if (populate)
                mm_populate(addr, populate);
 
        if (addr & ~PAGE_MASK)
                return retval;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        /*
 
 #endif
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return retval;
 }
 
 
        if (group_dead && current->mm) {
                struct vm_area_struct *vma;
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = current->mm->mmap;
                while (vma) {
                        vsize += vma->vm_end - vma->vm_start;
                        vma = vma->vm_next;
                }
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        }
 
        spin_lock_irq(&current->sighand->siglock);
 
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
-           down_read_trylock(&current->mm->mmap_sem) == 0) {
+           mmap_read_trylock(current->mm) == 0) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
        }
 
        if (!work) {
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        } else {
                work->sem = &current->mm->mmap_sem;
                irq_work_queue(&work->irq_work);
 
                if (!mm)
                        goto restart;
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
        if (ifh->nr_file_filters) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 
                mmput(mm);
        }
 
                if (err && is_register)
                        goto free;
 
-               down_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
                vma = find_vma(mm, info->vaddr);
                if (!vma || !valid_vma(vma, is_register) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                }
 
  unlock:
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
  free:
                mmput(mm);
                info = free_map_info(info);
        struct vm_area_struct *vma;
        int err = 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long vaddr;
                loff_t offset;
                vaddr = offset_to_vaddr(vma, uprobe->offset);
                err |= remove_breakpoint(uprobe, mm, vaddr);
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return err;
 }
        struct vm_area_struct *vma;
        int ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        if (mm->uprobes_state.xol_area) {
        /* pairs with get_xol_area() */
        smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
  fail:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return ret;
 }
        struct uprobe *uprobe = NULL;
        struct vm_area_struct *vma;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, bp_vaddr);
        if (vma && vma->vm_start <= bp_vaddr) {
                if (valid_vma(vma, false)) {
 
        if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
                mmf_recalc_uprobes(mm);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return uprobe;
 }
 
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 
                self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
                        freezable_schedule();
                }
                __set_current_state(TASK_RUNNING);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
        current->mm = NULL;
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
 
        LIST_HEAD(uf);
 
        uprobe_start_dup_mmap();
-       if (down_write_killable(&oldmm->mmap_sem)) {
+       if (mmap_write_lock_killable(oldmm)) {
                retval = -EINTR;
                goto fail_uprobe_end;
        }
        /* a new mm has just been created */
        retval = arch_dup_mmap(oldmm, mm);
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        flush_tlb_mm(oldmm);
-       up_write(&oldmm->mmap_sem);
+       mmap_write_unlock(oldmm);
        dup_userfaultfd_complete(&uf);
 fail_uprobe_end:
        uprobe_end_dup_mmap();
 #else
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       down_write(&oldmm->mmap_sem);
+       mmap_write_lock(oldmm);
        RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
-       up_write(&oldmm->mmap_sem);
+       mmap_write_unlock(oldmm);
        return 0;
 }
 #define mm_alloc_pgd(mm)       (0)
        mm->vmacache_seqnum = 0;
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
-       init_rwsem(&mm->mmap_sem);
+       mmap_init_lock(mm);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_state = NULL;
        mm_pgtables_bytes_init(mm);
 
        struct mm_struct *mm = current->mm;
        int ret;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
                               FAULT_FLAG_WRITE, NULL);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return ret < 0 ? ret : 0;
 }
 
                return;
 
 
-       if (!down_read_trylock(&mm->mmap_sem))
+       if (!mmap_read_trylock(mm))
                return;
        vma = find_vma(mm, start);
        if (!vma) {
                mm->numa_scan_offset = start;
        else
                reset_ptenuma_scan(p);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /*
         * Make sure tasks use at least 32x as much time to run other code
 
        if (exe_file) {
                struct vm_area_struct *vma;
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (!vma->vm_file)
                                continue;
                                goto exit_err;
                }
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                fput(exe_file);
        }
 
        fdput(exe);
        return err;
 exit_err:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        fput(exe_file);
        goto exit;
 }
         * arg_lock protects concurent updates but we still need mmap_sem for
         * read to exclude races with sys_brk.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        /*
         * We don't validate if these members are pointing to
        if (prctl_map.auxv_size)
                memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return 0;
 }
 #endif /* CONFIG_CHECKPOINT_RESTORE */
         * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
         * validation.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, addr);
 
        spin_lock(&mm->arg_lock);
        error = 0;
 out:
        spin_unlock(&mm->arg_lock);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return error;
 }
 
        case PR_SET_THP_DISABLE:
                if (arg3 || arg4 || arg5)
                        return -EINVAL;
-               if (down_write_killable(&me->mm->mmap_sem))
+               if (mmap_write_lock_killable(me->mm))
                        return -EINTR;
                if (arg2)
                        set_bit(MMF_DISABLE_THP, &me->mm->flags);
                else
                        clear_bit(MMF_DISABLE_THP, &me->mm->flags);
-               up_write(&me->mm->mmap_sem);
+               mmap_write_unlock(me->mm);
                break;
        case PR_MPX_ENABLE_MANAGEMENT:
        case PR_MPX_DISABLE_MANAGEMENT:
 
        if (mm) {
                const struct vm_area_struct *vma;
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = find_vma(mm, ip);
                if (vma) {
                        file = vma->vm_file;
                                trace_seq_printf(s, "[+0x%lx]",
                                                 ip - vmstart);
                }
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        }
        if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
                trace_seq_printf(s, " <" IP_FMT ">", ip);
 
                if (verbose)
                        pr_notice("lock mmap_sem pid=%d\n", main_task->pid);
                if (lock_read)
-                       down_read(&main_task->mm->mmap_sem);
+                       mmap_read_lock(main_task->mm);
                else
-                       down_write(&main_task->mm->mmap_sem);
+                       mmap_write_lock(main_task->mm);
        }
 
        if (test_disable_irq)
 
        if (lock_mmap_sem && master) {
                if (lock_read)
-                       up_read(&main_task->mm->mmap_sem);
+                       mmap_read_unlock(main_task->mm);
                else
-                       up_write(&main_task->mm->mmap_sem);
+                       mmap_write_unlock(main_task->mm);
                if (verbose)
                        pr_notice("unlock mmap_sem pid=%d\n", main_task->pid);
        }
 
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        return 0;
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (flags & FAULT_FLAG_KILLABLE)
                        wait_on_page_locked_killable(page);
                else
 
                        ret = __lock_page_killable(page);
                        if (ret) {
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                                return 0;
                        }
                } else
                         * mmap_sem here and return 0 if we don't have a fpin.
                         */
                        if (*fpin == NULL)
-                               up_read(&vmf->vma->vm_mm->mmap_sem);
+                               mmap_read_unlock(vmf->vma->vm_mm);
                        return 0;
                }
        } else
 
 
        start = untagged_addr(start);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        locked = 1;
        vma = find_vma_intersection(mm, start, start + 1);
        if (!vma) {
        } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
 out:
        if (locked)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        if (!ret)
                ret = -EFAULT;
        if (ret > 0)
 
        }
 
        if (ret & VM_FAULT_RETRY) {
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                *unlocked = true;
                fault_flags |= FAULT_FLAG_TRIED;
                goto retry;
                        break;
                }
 
-               ret = down_read_killable(&mm->mmap_sem);
+               ret = mmap_read_lock_killable(mm);
                if (ret) {
                        BUG_ON(ret > 0);
                        if (!pages_done)
                 * We must let the caller know we temporarily dropped the lock
                 * and so the critical section protected by it was lost.
                 */
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                *locked = 0;
        }
        return pages_done;
                 */
                if (!locked) {
                        locked = 1;
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                        vma = find_vma(mm, nstart);
                } else if (nstart >= vma->vm_end)
                        vma = vma->vm_next;
                ret = 0;
        }
        if (locked)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        return ret;     /* 0 or negative error code */
 }
 
        if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
                return -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
                                      &locked, gup_flags | FOLL_TOUCH);
        if (locked)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        return ret;
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
         * get_user_pages_unlocked() (see comments in that function)
         */
        if (gup_flags & FOLL_LONGTERM) {
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                ret = __gup_longterm_locked(current, current->mm,
                                            start, nr_pages,
                                            pages, NULL, gup_flags);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        } else {
                ret = get_user_pages_unlocked(start, nr_pages,
                                              pages, gup_flags);
 
        if (fault_flag_allow_retry_first(flags) &&
            !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
                fpin = get_file(vmf->vma->vm_file);
-               up_read(&vmf->vma->vm_mm->mmap_sem);
+               mmap_read_unlock(vmf->vma->vm_mm);
        }
        return fpin;
 }
 
                 * khugepaged has finished working on the pagetables
                 * under the mmap_sem.
                 */
-               down_write(&mm->mmap_sem);
-               up_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
+               mmap_write_unlock(mm);
        }
 }
 
 
                /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
                if (ret & VM_FAULT_RETRY) {
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                        if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
         * sync compaction, and we do not need to hold the mmap_sem during
         * that. We will recheck the vma after taking it again in write mode.
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
        }
        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        result = hugepage_vma_revalidate(mm, address, &vma);
        if (result) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
        pmd = mm_find_pmd(mm, address);
        if (!pmd) {
                result = SCAN_PMD_NULL;
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
         */
        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
                                                     pmd, referenced)) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /*
         * Prevent all access to pagetables with the exception of
         * gup_fast later handled by the ptep_clear_flush and the VM
         * handled by the anon_vma lock + PG_lock.
         */
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        result = SCAN_ANY_PROCESS;
        if (!mmget_still_valid(mm))
                goto out;
        khugepaged_pages_collapsed++;
        result = SCAN_SUCCEED;
 out_up_write:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
                mem_cgroup_uncharge(*hpage);
        if (likely(mm_slot->nr_pte_mapped_thp == 0))
                return 0;
 
-       if (!down_write_trylock(&mm->mmap_sem))
+       if (!mmap_write_trylock(mm))
                return -EBUSY;
 
        if (unlikely(khugepaged_test_exit(mm)))
 
 out:
        mm_slot->nr_pte_mapped_thp = 0;
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 }
 
                 * mmap_sem while holding page lock. Fault path does it in
                 * reverse order. Trylock is a way to avoid deadlock.
                 */
-               if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+               if (mmap_write_trylock(vma->vm_mm)) {
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
                        /* assume page table is clear */
                        _pmd = pmdp_collapse_flush(vma, addr, pmd);
                        spin_unlock(ptl);
-                       up_write(&vma->vm_mm->mmap_sem);
+                       mmap_write_unlock(vma->vm_mm);
                        mm_dec_nr_ptes(vma->vm_mm);
                        pte_free(vma->vm_mm, pmd_pgtable(_pmd));
                } else {
         * the next mm on the list.
         */
        vma = NULL;
-       if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+       if (unlikely(!mmap_read_trylock(mm)))
                goto breakouterloop_mmap_sem;
        if (likely(!khugepaged_test_exit(mm)))
                vma = find_vma(mm, khugepaged_scan.address);
                                pgoff_t pgoff = linear_page_index(vma,
                                                khugepaged_scan.address);
 
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                                ret = 1;
                                khugepaged_scan_file(mm, file, pgoff, hpage);
                                fput(file);
                }
        }
 breakouterloop:
-       up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
+       mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
 breakouterloop_mmap_sem:
 
        spin_lock(&khugepaged_mm_lock);
 
         */
        put_anon_vma(rmap_item->anon_vma);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_mergeable_vma(mm, addr);
        if (vma)
                break_ksm(vma, addr);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 }
 
 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
        struct vm_area_struct *vma;
        struct page *page;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_mergeable_vma(mm, addr);
        if (!vma)
                goto out;
 out:
                page = NULL;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return page;
 }
 
        for (mm_slot = ksm_scan.mm_slot;
                        mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
                mm = mm_slot->mm;
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (ksm_test_exit(mm))
                                break;
                }
 
                remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 
                spin_lock(&ksm_mmlist_lock);
                ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
        return 0;
 
 error:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = &ksm_mm_head;
        spin_unlock(&ksm_mmlist_lock);
        struct vm_area_struct *vma;
        int err = -EFAULT;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_mergeable_vma(mm, rmap_item->address);
        if (!vma)
                goto out;
        rmap_item->anon_vma = vma->anon_vma;
        get_anon_vma(vma->anon_vma);
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return err;
 }
 
        if (ksm_use_zero_pages && (checksum == zero_checksum)) {
                struct vm_area_struct *vma;
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = find_mergeable_vma(mm, rmap_item->address);
                if (vma) {
                        err = try_to_merge_one_page(vma, page,
                         */
                        err = 0;
                }
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                /*
                 * In case of failure, the page was not really empty, so we
                 * need to continue. Otherwise we're done.
        }
 
        mm = slot->mm;
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        if (ksm_test_exit(mm))
                vma = NULL;
        else
                                        ksm_scan.address += PAGE_SIZE;
                                } else
                                        put_page(*page);
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                                return rmap_item;
                        }
                        put_page(*page);
 
                free_mm_slot(slot);
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmdrop(mm);
        } else {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                /*
                 * up_read(&mm->mmap_sem) first because after
                 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                mmdrop(mm);
        } else if (mm_slot) {
-               down_write(&mm->mmap_sem);
-               up_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
+               mmap_write_unlock(mm);
        }
 }
 
 
         */
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
        get_file(file);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
        fput(file);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        return 0;
 }
 
        if (!userfaultfd_remove(vma, start, end)) {
                *prev = NULL; /* mmap_sem has been dropped, prev is stale */
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = find_vma(current->mm, start);
                if (!vma)
                        return -ENOMEM;
        get_file(f);
        if (userfaultfd_remove(vma, start, end)) {
                /* mmap_sem was not released by userfaultfd_remove() */
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        }
        error = vfs_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
        fput(f);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        return error;
 }
 
 
        write = madvise_need_mmap_write(behavior);
        if (write) {
-               if (down_write_killable(&current->mm->mmap_sem))
+               if (mmap_write_lock_killable(current->mm))
                        return -EINTR;
 
                /*
                 * model.
                 */
                if (!mmget_still_valid(current->mm)) {
-                       up_write(&current->mm->mmap_sem);
+                       mmap_write_unlock(current->mm);
                        return -EINTR;
                }
        } else {
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
        }
 
        /*
 out:
        blk_finish_plug(&plug);
        if (write)
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
        else
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
        return error;
 }
 
 {
        unsigned long precharge;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        precharge = mc.precharge;
        mc.precharge = 0;
        atomic_inc(&mc.from->moving_account);
        synchronize_rcu();
 retry:
-       if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+       if (unlikely(!mmap_read_trylock(mc.mm))) {
                /*
                 * Someone who is holding the mmap_sem might be waiting in
                 * waitq. So we cancel all extra charges, wake up all waiters,
        walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
                        NULL);
 
-       up_read(&mc.mm->mmap_sem);
+       mmap_read_unlock(mc.mm);
        atomic_dec(&mc.from->moving_account);
 }
 
 
        if (addr < vma->vm_start || end_addr >= vma->vm_end)
                return -EFAULT;
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
-               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
                vma->vm_flags |= VM_MIXEDMAP;
        }
        if (!page_count(page))
                return -EINVAL;
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
-               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
                vma->vm_flags |= VM_MIXEDMAP;
        }
        void *old_buf = buf;
        int write = gup_flags & FOLL_WRITE;
 
-       if (down_read_killable(&mm->mmap_sem))
+       if (mmap_read_lock_killable(mm))
                return 0;
 
        /* ignore errors, just check how much was successfully transferred */
                buf += bytes;
                addr += bytes;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return buf - old_buf;
 }
        /*
         * we might be running from an atomic context so we cannot sleep
         */
-       if (!down_read_trylock(&mm->mmap_sem))
+       if (!mmap_read_trylock(mm))
                return;
 
        vma = find_vma(mm, ip);
                        free_page((unsigned long)buf);
                }
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 }
 
 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 
 {
        struct vm_area_struct *vma;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 }
 
 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
                put_page(p);
        }
        if (locked)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        return err;
 }
 
                 * vma/shared policy at addr is NULL.  We
                 * want to return MPOL_DEFAULT in this case.
                 */
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
  out:
        mpol_cond_put(pol);
        if (vma)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        if (pol_refcount)
                mpol_put(pol_refcount);
        return err;
        if (err)
                return err;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
                if (err < 0)
                        break;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (err < 0)
                return err;
        return busy;
        {
                NODEMASK_SCRATCH(scratch);
                if (scratch) {
-                       down_write(&mm->mmap_sem);
+                       mmap_write_lock(mm);
                        task_lock(current);
                        err = mpol_set_nodemask(new, nmask, scratch);
                        task_unlock(current);
                        if (err)
-                               up_write(&mm->mmap_sem);
+                               mmap_write_unlock(mm);
                } else
                        err = -ENOMEM;
                NODEMASK_SCRATCH_FREE(scratch);
                        putback_movable_pages(&pagelist);
        }
 
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 mpol_out:
        mpol_put(new);
        return err;
 
        unsigned int follflags;
        int err;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        err = -EFAULT;
        vma = find_vma(mm, addr);
        if (!vma || addr < vma->vm_start || !vma_migratable(vma))
         */
        put_page(page);
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return err;
 }
 
 {
        unsigned long i;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
 
        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                status++;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 }
 
 /*
 
                 * Do at most PAGE_SIZE entries per iteration, due to
                 * the temporary buffer size.
                 */
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
 
                if (retval <= 0)
                        break;
 
        lock_limit >>= PAGE_SHIFT;
        locked = len >> PAGE_SHIFT;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
 
        locked += current->mm->locked_vm;
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = apply_vma_lock_flags(start, len, flags);
 
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        if (error)
                return error;
 
        len = PAGE_ALIGN(len + (offset_in_page(start)));
        start &= PAGE_MASK;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
        ret = apply_vma_lock_flags(start, len, 0);
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
 
        return ret;
 }
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
 
        ret = -ENOMEM;
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = apply_mlockall_flags(flags);
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        if (!ret && (flags & MCL_CURRENT))
                mm_populate(0, TASK_SIZE);
 
 {
        int ret;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
        ret = apply_mlockall_flags(0);
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        return ret;
 }
 
 
        bool downgraded = false;
        LIST_HEAD(uf);
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        origbrk = mm->brk;
 success:
        populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
        if (downgraded)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        else
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
        userfaultfd_unmap_complete(mm, &uf);
        if (populate)
                mm_populate(oldbrk, newbrk - oldbrk);
 
 out:
        retval = origbrk;
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return retval;
 }
 
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
 
        if (downgrade)
-               downgrade_write(&mm->mmap_sem);
+               mmap_write_downgrade(mm);
 
        unmap_region(mm, vma, prev, start, end);
 
        struct mm_struct *mm = current->mm;
        LIST_HEAD(uf);
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        ret = __do_munmap(mm, start, len, &uf, downgrade);
         * it to 0 before return.
         */
        if (ret == 1) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                ret = 0;
        } else
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
 
        userfaultfd_unmap_complete(mm, &uf);
        return ret;
        if (pgoff + (size >> PAGE_SHIFT) < pgoff)
                return ret;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        vma = find_vma(mm, start);
                        prot, flags, pgoff, &populate, NULL);
        fput(file);
 out:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        if (populate)
                mm_populate(ret, populate);
        if (!IS_ERR_VALUE(ret))
        if (!len)
                return 0;
 
-       if (down_write_killable(&mm->mmap_sem))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
 
        ret = do_brk_flags(addr, len, flags, &uf);
        populate = ((mm->def_flags & VM_LOCKED) != 0);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        userfaultfd_unmap_complete(mm, &uf);
        if (populate && !ret)
                mm_populate(addr, len);
                (void)__oom_reap_task_mm(mm);
 
                set_bit(MMF_OOM_SKIP, &mm->flags);
-               down_write(&mm->mmap_sem);
-               up_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
+               mmap_write_unlock(mm);
        }
 
        if (mm->locked_vm) {
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
 
-       BUG_ON(down_read_trylock(&mm->mmap_sem));
+       BUG_ON(mmap_read_trylock(mm));
 
        mutex_lock(&mm_all_locks_mutex);
 
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
 
-       BUG_ON(down_read_trylock(&mm->mmap_sem));
+       BUG_ON(mmap_read_trylock(mm));
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 
 {
        int ret;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        ret = __mmu_notifier_register(subscription, mm);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_register);
 
 
        reqprot = prot;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
 
        /*
                prot = reqprot;
        }
 out:
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        return error;
 }
 
        if (init_val & ~PKEY_ACCESS_MASK)
                return -EINVAL;
 
-       down_write(&current->mm->mmap_sem);
+       mmap_write_lock(current->mm);
        pkey = mm_pkey_alloc(current->mm);
 
        ret = -ENOSPC;
        }
        ret = pkey;
 out:
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        return ret;
 }
 
 {
        int ret;
 
-       down_write(&current->mm->mmap_sem);
+       mmap_write_lock(current->mm);
        ret = mm_pkey_free(current->mm, pkey);
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
 
        /*
         * We could provide warnings or errors if any VMA still
 
        if (!new_len)
                return ret;
 
-       if (down_write_killable(&current->mm->mmap_sem))
+       if (mmap_write_lock_killable(current->mm))
                return -EINTR;
 
        if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
                locked = false;
        }
        if (downgraded)
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        else
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap_early);
 
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, start);
        for (;;) {
                struct file *file;
                if ((flags & MS_SYNC) && file &&
                                (vma->vm_flags & VM_SHARED)) {
                        get_file(file);
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        error = vfs_fsync_range(file, fstart, fend, 1);
                        fput(file);
                        if (error || start >= end)
                                goto out;
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                        vma = find_vma(mm, start);
                } else {
                        if (start >= end) {
                }
        }
 out_unlock:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 out:
        return error ? : unmapped_error;
 }
 
        if (ret) {
                struct vm_area_struct *vma;
 
-               down_write(&current->mm->mmap_sem);
+               mmap_write_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vma->vm_flags |= VM_USERMAP;
-               up_write(&current->mm->mmap_sem);
+               mmap_write_unlock(current->mm);
        }
 
        return ret;
        struct mm_struct *mm = current->mm;
        int ret;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        ret = do_munmap(mm, addr, len, NULL);
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return ret;
 }
 EXPORT_SYMBOL(vm_munmap);
 {
        unsigned long ret;
 
-       down_write(&current->mm->mmap_sem);
+       mmap_write_lock(current->mm);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-       up_write(&current->mm->mmap_sem);
+       mmap_write_unlock(current->mm);
        return ret;
 }
 
        struct vm_area_struct *vma;
        int write = gup_flags & FOLL_WRITE;
 
-       if (down_read_killable(&mm->mmap_sem))
+       if (mmap_read_lock_killable(mm))
                return 0;
 
        /* the access must start within one of the target process's mappings */
                len = 0;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return len;
 }
 
 {
        bool ret = true;
 
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (!mmap_read_trylock(mm)) {
                trace_skip_task_reaping(tsk->pid);
                return false;
        }
 out_finish:
        trace_finish_task_reaping(tsk->pid);
 out_unlock:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        return ret;
 }
 
                 * access remotely because task/mm might not be
                 * current/current->mm
                 */
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
                                                     flags, process_pages,
                                                     NULL, &locked);
                if (locked)
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                if (pinned_pages <= 0)
                        return -EFAULT;
 
 
 {
        const struct ptdump_range *range = st->range;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        while (range->start != range->end) {
                walk_page_range_novma(mm, range->start, range->end,
                                      &ptdump_ops, pgd, st);
                range++;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Flush out the last page */
        st->note_page(st, 0, -1, 0);
 
        struct vm_area_struct *vma;
        int ret = 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->anon_vma) {
                        ret = unuse_vma(vma, type, frontswap,
                }
                cond_resched();
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return ret;
 }
 
 
         * feature is not supported.
         */
        if (zeropage) {
-               up_read(&dst_mm->mmap_sem);
+               mmap_read_unlock(dst_mm);
                return -EINVAL;
        }
 
                cond_resched();
 
                if (unlikely(err == -ENOENT)) {
-                       up_read(&dst_mm->mmap_sem);
+                       mmap_read_unlock(dst_mm);
                        BUG_ON(!page);
 
                        err = copy_huge_page_from_user(page,
                                err = -EFAULT;
                                goto out;
                        }
-                       down_read(&dst_mm->mmap_sem);
+                       mmap_read_lock(dst_mm);
 
                        dst_vma = NULL;
                        goto retry;
        }
 
 out_unlock:
-       up_read(&dst_mm->mmap_sem);
+       mmap_read_unlock(dst_mm);
 out:
        if (page) {
                /*
        copied = 0;
        page = NULL;
 retry:
-       down_read(&dst_mm->mmap_sem);
+       mmap_read_lock(dst_mm);
 
        /*
         * If memory mappings are changing because of non-cooperative
                if (unlikely(err == -ENOENT)) {
                        void *page_kaddr;
 
-                       up_read(&dst_mm->mmap_sem);
+                       mmap_read_unlock(dst_mm);
                        BUG_ON(!page);
 
                        page_kaddr = kmap(page);
        }
 
 out_unlock:
-       up_read(&dst_mm->mmap_sem);
+       mmap_read_unlock(dst_mm);
 out:
        if (page)
                put_page(page);
        /* Does the address range wrap, or is the span zero-sized? */
        BUG_ON(start + len <= start);
 
-       down_read(&dst_mm->mmap_sem);
+       mmap_read_lock(dst_mm);
 
        /*
         * If memory mappings are changing because of non-cooperative
 
        err = 0;
 out_unlock:
-       up_read(&dst_mm->mmap_sem);
+       mmap_read_unlock(dst_mm);
        return err;
 }
 
        if (pages == 0 || !mm)
                return 0;
 
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 
        return ret;
 }
 
        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
-               if (down_write_killable(&mm->mmap_sem))
+               if (mmap_write_lock_killable(mm))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
 
 
        sock_rps_record_flow(sk);
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
 
        vma = find_vma(current->mm, address);
        if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                return -EINVAL;
        }
        zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
                frags++;
        }
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        if (length) {
                WRITE_ONCE(tp->copied_seq, seq);
                tcp_rcv_space_adjust(sk);
 
        if (!umem->pgs)
                return -ENOMEM;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        npgs = pin_user_pages(address, umem->npgs,
                              gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        if (npgs != umem->npgs) {
                if (npgs >= 0) {
 
         * mm and might be done in another context, so we must
         * access remotely.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
                        &locked);
        if (locked)
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 
        if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                kvm_arch_async_page_present(vcpu, apf);
 
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma(current->mm, addr);
        if (!vma)
                goto out;
        size = vma_kernel_pagesize(vma);
 
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        return size;
 }
        if (npages == 1)
                return pfn;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        if (npages == -EHWPOISON ||
              (!async && check_user_page_hwpoison(addr))) {
                pfn = KVM_PFN_ERR_HWPOISON;
                pfn = KVM_PFN_ERR_FAULT;
        }
 exit:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return pfn;
 }
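
For orientation, the new mmap locking calls substituted throughout these hunks are assumed to be thin inline wrappers over the same mm->mmap_sem rwsem, in the style of include/linux/mmap_lock.h; a minimal sketch of that assumed mapping (not the authoritative header) follows:

/* Sketch only: assumed one-to-one wrappers over mm->mmap_sem, mirroring the calls converted above. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
        down_read(&mm->mmap_sem);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
        return down_read_killable(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
        return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
        up_read(&mm->mmap_sem);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
        down_write(&mm->mmap_sem);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
        return down_write_killable(&mm->mmap_sem);
}

static inline bool mmap_write_trylock(struct mm_struct *mm)
{
        return down_write_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
        downgrade_write(&mm->mmap_sem);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
        up_write(&mm->mmap_sem);
}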