* @prot:      Permissions and attributes for the mapping.
  * @mc:                Cache of pre-allocated and zeroed memory from which to allocate
  *             page-table pages.
+ * @flags:     Flags to control the page-table walk (e.g. a shared walk)
  *
  * The offset of @addr within a page is ignored, @size is rounded-up to
  * the next page boundary and @phys is rounded-down to the previous page
  * boundary.
  */
 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                           u64 phys, enum kvm_pgtable_prot prot,
-                          void *mc);
+                          void *mc, enum kvm_pgtable_walk_flags flags);
 
 /**
  * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
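
For orientation only (not part of the patch), a minimal sketch of how the two calling conventions pair with the mmu_lock after this change; pgt, ipa, pa, prot, cache and ret are assumed to have been set up by the caller, as in the hunks below:

        /* Exclusive walk: hold kvm->mmu_lock for write and pass no flags. */
        write_lock(&kvm->mmu_lock);
        ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa, prot, &cache, 0);
        write_unlock(&kvm->mmu_lock);

        /* Shared walk: hold kvm->mmu_lock for read and opt in via the new flag. */
        read_lock(&kvm->mmu_lock);
        ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa, prot, &cache,
                                     KVM_PGTABLE_WALK_SHARED);
        read_unlock(&kvm->mmu_lock);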
 
 
                write_lock(&kvm->mmu_lock);
                ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
-                                            &cache);
+                                            &cache, 0);
                write_unlock(&kvm->mmu_lock);
                if (ret)
                        break;
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       bool use_read_lock = false;
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
-               use_read_lock = (fault_status == FSC_PERM && write_fault &&
-                                fault_granule == PAGE_SIZE);
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }
        if (exec_fault && device)
                return -ENOEXEC;
 
-       /*
-        * To reduce MMU contentions and enhance concurrency during dirty
-        * logging, only acquire read lock for permission relaxation.
-        */
-       if (use_read_lock)
-               read_lock(&kvm->mmu_lock);
-       else
-               write_lock(&kvm->mmu_lock);
+       read_lock(&kvm->mmu_lock);
        pgt = vcpu->arch.hw_mmu->pgt;
        if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
+       if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
-       } else {
-               WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
-
+       else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
-                                            memcache);
-       }
+                                            memcache, KVM_PGTABLE_WALK_SHARED);
 
        /* Mark the page dirty only if the fault is handled successfully */
        if (writable && !ret) {
        }
 
 out_unlock:
-       if (use_read_lock)
-               read_unlock(&kvm->mmu_lock);
-       else
-               write_unlock(&kvm->mmu_lock);
+       read_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
        return ret != -EAGAIN ? ret : 0;
         */
        kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
                               PAGE_SIZE, __pfn_to_phys(pfn),
-                              KVM_PGTABLE_PROT_R, NULL);
+                              KVM_PGTABLE_PROT_R, NULL, 0);
 
        return false;
 }
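
The locking contract implied by the hunks above (mmu_lock held for read when passing KVM_PGTABLE_WALK_SHARED, held for write otherwise) could be made explicit with a lockdep-style assertion along the following lines. stage2_assert_lock_held() is a hypothetical name used purely for illustration; the patch itself does not add such a helper:

/* Hypothetical sketch, not part of the patch: assert the lock mode a walk expects. */
static void stage2_assert_lock_held(struct kvm *kvm,
                                    enum kvm_pgtable_walk_flags flags)
{
        if (flags & KVM_PGTABLE_WALK_SHARED)
                lockdep_assert_held_read(&kvm->mmu_lock);       /* read-lock fault path above */
        else
                lockdep_assert_held_write(&kvm->mmu_lock);      /* write-lock callers above */
}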