int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        spin_lock(&kvm->mmu_lock);
 
        slots = kvm_memslots(kvm);
                stage2_unmap_memslot(kvm, memslot);
 
        spin_unlock(&kvm->mmu_lock);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
        }
 
        /* Let's check if we will get back a huge page backed by hugetlbfs */
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                return -EFAULT;
        }
 
        if (vma_pagesize == PMD_SIZE ||
            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
            (kvm_phys_size(kvm) >> PAGE_SHIFT))
                return -EFAULT;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return ret;
 }
 
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
 /*
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
         * We ran out of memory, call the OOM killer, and return the userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
 
        if (!numpages)
                return 0;
 
-       down_read(&init_mm.mmap_sem);
+       mmap_read_lock(&init_mm);
        ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                     &masks);
-       up_read(&init_mm.mmap_sem);
+       mmap_read_unlock(&init_mm);
 
        flush_tlb_kernel_range(start, end);
 
 
                unsigned long pfn;
                unsigned long paddr;
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
                if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                paddr = pfn << PAGE_SHIFT;
                table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
                if (!table) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                ret = CMPXCHG(&table[index], orig_pte, new_pte);
                memunmap(table);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        }
 
        return (ret != orig_pte);
 
        }
 
        mutex_unlock(&bo->mutex);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma(current->mm, (unsigned long)userptr);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        if (!vma) {
                dev_err(atomisp_dev, "find_vma failed\n");
                kfree(bo->page_obj);
 
                mutex_unlock(&vdev->vma_lock);
 
                if (try) {
-                       if (!down_read_trylock(&mm->mmap_sem)) {
+                       if (!mmap_read_trylock(mm)) {
                                mmput(mm);
                                return 0;
                        }
                } else {
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                }
                if (mmget_still_valid(mm)) {
                        if (try) {
                                if (!mutex_trylock(&vdev->vma_lock)) {
-                                       up_read(&mm->mmap_sem);
+                                       mmap_read_unlock(mm);
                                        mmput(mm);
                                        return 0;
                                }
                        }
                        mutex_unlock(&vdev->vma_lock);
                }
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmput(mm);
        }
 }
 
        if (!mm)
                goto out_put_task;
 
-       ret = down_read_killable(&mm->mmap_sem);
+       ret = mmap_read_lock_killable(mm);
        if (ret) {
                mmput(mm);
                goto out_put_task;
                p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
                if (!p) {
                        ret = -ENOMEM;
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        mmput(mm);
                        goto out_put_task;
                }
                p->end = vma->vm_end;
                p->mode = vma->vm_file->f_mode;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        for (i = 0; i < nr_files; i++) {
 
                }
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
        if (!mmget_not_zero(mm))
                return -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (addr = start; addr < end; addr = next) {
                vma = find_vma(mm, addr);
                if (!vma || addr < vma->vm_start ||
                dmirror_migrate_finalize_and_map(&args, dmirror);
                migrate_vma_finalize(&args);
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        /* Return the migrated data for verification. */
        return ret;
 
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
        return ret;
 }
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;