struct kvm {
        spinlock_t mmu_lock;
-       spinlock_t requests_lock;
+       raw_spinlock_t requests_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
 
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-       spin_lock(&kvm->requests_lock);
+       raw_spin_lock(&kvm->requests_lock);
        me = smp_processor_id();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpus != NULL && cpu != -1 && cpu != me)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (cpumask_empty(cpus))
                called = false;
        else
                smp_call_function_many(cpus, ack_flush, NULL, 1);
-       spin_unlock(&kvm->requests_lock);
+       raw_spin_unlock(&kvm->requests_lock);
        free_cpumask_var(cpus);
        return called;
 }

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
-       spin_lock_init(&kvm->requests_lock);
+       raw_spin_lock_init(&kvm->requests_lock);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
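
Background note (not part of the patch): requests_lock is held around smp_call_function_many(), i.e. while IPIs are being sent, so the critical section must never sleep. On PREEMPT_RT a plain spinlock_t becomes a sleeping lock, while raw_spinlock_t remains a true spinning lock with preemption disabled, hence the conversion. Below is a minimal sketch of that pattern; the demo_* names are hypothetical, only the locking, bitops and SMP calls are real kernel APIs.

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

/* Hypothetical no-op IPI handler, in the spirit of ack_flush(). */
static void demo_ack(void *info)
{
}

struct demo {
        raw_spinlock_t lock;    /* stays a spinning lock even on PREEMPT_RT */
        unsigned long pending;
};

static void demo_kick_all(struct demo *d, int req)
{
        /*
         * raw_spin_lock() keeps preemption disabled for the whole section,
         * so it cannot be preempted or put to sleep while the IPI broadcast
         * is in flight.  A plain spinlock_t would turn into a sleeping lock
         * on PREEMPT_RT, which is not permitted here.
         */
        raw_spin_lock(&d->lock);
        if (!test_and_set_bit(req, &d->pending))
                smp_call_function_many(cpu_online_mask, demo_ack, NULL, 1);
        raw_spin_unlock(&d->lock);
}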