* change the value
         */
 
+       spin_lock(&kvm->mmu_lock);
+
        if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
                while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                        !list_empty(&kvm->arch.active_mmu_pages)) {
                        struct kvm_mmu_page *page;

                        /* Zap the oldest shadow page (tail of the list). */
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
                }
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
        }
 
        kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+       spin_unlock(&kvm->mmu_lock);
 }
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
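With this hunk, kvm_mmu_change_mmu_pages() takes and releases mmu_lock by itself, so callers must no longer hold the lock around the call. A minimal sketch of the new calling convention (the caller name is hypothetical, not part of the patch):

        /* Hypothetical caller: no explicit mmu_lock handling needed. */
        static void resize_shadow_mmu(struct kvm *kvm, unsigned int goal)
        {
                /* Must NOT hold kvm->mmu_lock here; the callee takes it. */
                kvm_mmu_change_mmu_pages(kvm, goal);
        }

The next hunk applies exactly this simplification to kvm_vm_ioctl_set_nr_mmu_pages() in arch/x86/kvm/x86.c: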
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                           u32 kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
 
        mutex_lock(&kvm->slots_lock);
-       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-       spin_unlock(&kvm->mmu_lock);
        mutex_unlock(&kvm->slots_lock);
        return 0;
 }
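This path is reachable from userspace through the KVM_SET_NR_MMU_PAGES VM ioctl, so the locking change can be exercised directly. A minimal sketch, with error handling omitted (512 is an arbitrary goal; values below KVM_MIN_ALLOC_MMU_PAGES are rejected with -EINVAL above):

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        int main(void)
        {
                int kvm_fd = open("/dev/kvm", O_RDWR);
                int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

                /* Resizes the shadow-page cache; mmu_lock is now taken
                 * inside kvm_mmu_change_mmu_pages() itself. */
                return ioctl(vm_fd, KVM_SET_NR_MMU_PAGES, 512) ? 1 : 0;
        }

The final hunk is in kvm_arch_commit_memory_region(), also in arch/x86/kvm/x86.c, where the lock can now be narrowed to just the write-protection pass: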
        if (!kvm->arch.n_requested_mmu_pages)
                nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
-       spin_lock(&kvm->mmu_lock);
        if (nr_mmu_pages)
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        /*
         * Existing largepage mappings are destroyed here and new ones will
         * not be created until the end of the logging.
         */
-       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-       spin_unlock(&kvm->mmu_lock);
+               spin_unlock(&kvm->mmu_lock);
+       }
        /*
         * If memory slot is created, or moved, we need to clear all
         * mmio sptes.
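The excerpt is cut off here; in trees of this vintage the comment is followed by a guarded full zap roughly like the following (a sketch reconstructed from the era's code, not part of this patch's changes):

        if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
                kvm_mmu_zap_all(kvm);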