KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
Author:     Marcelo Tosatti <mtosatti@redhat.com>
AuthorDate: Tue, 12 May 2009 21:55:43 +0000 (18:55 -0300)
Commit:     Avi Kivity <avi@redhat.com>
CommitDate: Wed, 13 May 2009 07:39:37 +0000 (10:39 +0300)
kvm_handle_hva, called by MMU notifiers, manipulates mmu data only with
the protection of mmu_lock.

Update kvm_mmu_change_mmu_pages callers to take mmu_lock, thus protecting
against concurrent access from kvm_handle_hva.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
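
The caller-side rule this patch establishes, as a minimal C sketch (the
wrapper name is illustrative; the lock ordering mirrors the
kvm_vm_ioctl_set_nr_mmu_pages hunk below, and is not copied verbatim from
the tree):

    /*
     * Illustrative sketch only, not kernel source: callers of
     * kvm_mmu_change_mmu_pages() must hold kvm->mmu_lock, because
     * kvm_handle_hva() manipulates the same mmu data under that lock.
     */
    static void change_mmu_pages_locked(struct kvm *kvm, unsigned int nr_pages)
    {
            down_write(&kvm->slots_lock);   /* sleepable outer lock */
            spin_lock(&kvm->mmu_lock);      /* serializes with kvm_handle_hva() */

            kvm_mmu_change_mmu_pages(kvm, nr_pages);

            spin_unlock(&kvm->mmu_lock);
            up_write(&kvm->slots_lock);
    }

Note the ordering: slots_lock is a sleeping lock and must be taken outside
the mmu_lock spinlock, as the x86.c hunks below do.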
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8fcdae9660f48e5dbd7477a6fc9dc89e16aa441a..80c76f44d886f00f5851912ea17def09576c96f9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2723,7 +2723,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2738,7 +2737,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf97ea076fe254e2bd90612237c295b6251b9e0d..ecc35c658dee7894fd92f25feef4ef7c496050ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1621,10 +1621,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                return -EINVAL;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+       spin_unlock(&kvm->mmu_lock);
        up_write(&kvm->slots_lock);
        return 0;
 }
@@ -1800,7 +1802,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4544,12 +4548,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                }
        }
 
+       spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+       spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
 
        return 0;
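
For context, the other side of the race this patch closes: the MMU
notifier path already runs kvm_handle_hva under mmu_lock. A simplified
sketch (the callback name and exact call chain here are assumptions based
on the commit message, not copied from the tree):

    /*
     * Assumed shape of the notifier path, for illustration only:
     * kvm_unmap_hva() reaches kvm_handle_hva(), which walks and
     * modifies mmu data; holding mmu_lock here is what makes that
     * walk safe against kvm_mmu_change_mmu_pages() freeing shadow
     * pages concurrently.
     */
    static void mmu_notifier_side_sketch(struct kvm *kvm, unsigned long hva)
    {
            spin_lock(&kvm->mmu_lock);
            kvm_unmap_hva(kvm, hva);
            spin_unlock(&kvm->mmu_lock);
    }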