 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/cpu_has_feature.h>
 #include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
 
 #ifdef CONFIG_PPC_PSERIES
 static inline bool kvmhv_on_pseries(void)
                                unsigned long gpa, unsigned long hpa,
                                unsigned long nbytes);
 
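+/*
+ * Walk the partition-scoped (secondary) page table for a guest address.
+ * The caller must hold kvm->mmu_lock so the page table and the returned
+ * PTE pointer stay stable for the duration of the lookup.
+ */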
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+                                           unsigned *hshift)
+{
+       pte_t *pte;
+
+       VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+               "%s called with kvm mmu_lock not held\n", __func__);
+       pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+       return pte;
+}
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
 
                return 0;
        }
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep))
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                 kvm->arch.lpid);
-       return 0;                               
+       return 0;
 }
 
 /* Called with kvm->mmu_lock held */
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ref;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                              gpa, shift);
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ref;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ret;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
        gpa = memslot->base_gfn << PAGE_SHIFT;
        spin_lock(&kvm->mmu_lock);
        for (n = memslot->npages; n; --n) {
-               ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+               ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
                if (ptep && pte_present(*ptep))
                        kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                         kvm->arch.lpid);