                return ret;
 
        /* Check if we've been invalidated */
-       raw_spin_lock(&kvm->mmu_lock.rlock);
+       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                ret = H_TOO_HARD;
                goto out_unlock;
        }
        kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-       raw_spin_unlock(&kvm->mmu_lock.rlock);
+       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
        return ret;
 }
 
                return ret;
 
        /* Check if we've been invalidated */
-       raw_spin_lock(&kvm->mmu_lock.rlock);
+       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                ret = H_TOO_HARD;
                goto out_unlock;
        }
        kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-       raw_spin_unlock(&kvm->mmu_lock.rlock);
+       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
        return ret;
 }
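
For context on why the change works, here is a minimal sketch, assuming a mainline kernel without CONFIG_PREEMPT_RT (the wrapper names below are hypothetical, added only for illustration). These handlers run in real mode with the MMU off, where only a limited set of addresses is safe to touch; raw_spin_lock() still goes through the lockdep/preemption/tracing wrappers, whose bookkeeping can dereference data that is not accessible there. arch_spin_lock() updates only the architecture-level lock word, so it is usable from real mode.

#include <linux/spinlock.h>
#include <linux/kvm_host.h>

/*
 * Lock nesting from include/linux/spinlock_types.h (non-PREEMPT_RT):
 *   spinlock_t     contains struct raw_spinlock rlock;
 *   raw_spinlock_t contains arch_spinlock_t raw_lock (plus lockdep state);
 * so &kvm->mmu_lock.rlock.raw_lock names the bare arch-level lock that
 * the hunks above acquire directly.
 *
 * Hypothetical wrappers mirroring the diff. Callers are in real mode
 * with interrupts hard-disabled, so skipping the preemption and lockdep
 * accounting that raw_spin_lock() would perform is acceptable here.
 */
static inline void kvmppc_rm_mmu_lock(struct kvm *kvm)
{
	/* Touch only the lock word; no lockdep, tracing, or preempt count. */
	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
}

static inline void kvmppc_rm_mmu_unlock(struct kvm *kvm)
{
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
}

One trade-off worth noting: arch_spin_lock() bypasses lockdep entirely, so these acquisitions become invisible to lock debugging. That is the point in real mode, but it also means ordering violations against mmu_lock taken here will not be reported.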