        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
+       struct srcu_struct irq_srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
 #endif
 
 #include <linux/list.h>
 #include <linux/eventfd.h>
 #include <linux/kernel.h>
+#include <linux/srcu.h>
 #include <linux/slab.h>
 
 #include "iodev.h"
 irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 {
        struct _irqfd_resampler *resampler;
+       struct kvm *kvm;
        struct _irqfd *irqfd;
+       int idx;
 
        resampler = container_of(kian, struct _irqfd_resampler, notifier);
+       kvm = resampler->kvm;
 
-       kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+       kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);
 
-       rcu_read_lock();
+       idx = srcu_read_lock(&kvm->irq_srcu);
 
        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);
 
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 static void
        mutex_lock(&kvm->irqfds.resampler_lock);
 
        list_del_rcu(&irqfd->resampler_link);
-       synchronize_rcu();
+       synchronize_srcu(&kvm->irq_srcu);
 
        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
        unsigned long flags = (unsigned long)key;
        struct kvm_kernel_irq_routing_entry *irq;
        struct kvm *kvm = irqfd->kvm;
+       int idx;
 
        if (flags & POLLIN) {
-               rcu_read_lock();
-               irq = rcu_dereference(irqfd->irq_entry);
+               idx = srcu_read_lock(&kvm->irq_srcu);
+               irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu);
                /* An event has been signaled, inject an interrupt */
                if (irq)
                        kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                        false);
                else
                        schedule_work(&irqfd->inject);
-               rcu_read_unlock();
+               srcu_read_unlock(&kvm->irq_srcu, idx);
        }
 
        if (flags & POLLHUP) {
                }
 
                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
-               synchronize_rcu();
+               synchronize_srcu(&kvm->irq_srcu);
 
                mutex_unlock(&kvm->irqfds.resampler_lock);
        }
                         * another thread calls kvm_irq_routing_update before
                         * we flush workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).
-                        * It is paired with synchronize_rcu done by caller
+                        * It is paired with synchronize_srcu done by caller
                         * of that function.
                         */
                        rcu_assign_pointer(irqfd->irq_entry, NULL);
 
 /*
  * Change irq_routing and irqfd.
- * Caller must invoke synchronize_rcu afterwards.
+ * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
  */
 void kvm_irq_routing_update(struct kvm *kvm,
                            struct kvm_irq_routing_table *irq_rt)
 
        struct kvm_kernel_irq_routing_entry *e;
        int ret = -EINVAL;
        struct kvm_irq_routing_table *irq_rt;
+       int idx;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
 
         * Since there's no easy way to do this, we only support injecting MSI
         * which is limited to 1:1 GSI mapping.
         */
-       rcu_read_lock();
-       irq_rt = rcu_dereference(kvm->irq_routing);
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, &irq_rt->map[irq], link) {
                        if (likely(e->type == KVM_IRQ_ROUTING_MSI))
                                ret = kvm_set_msi_inatomic(e, kvm);
                        else
                                ret = -EWOULDBLOCK;
                        break;
                }
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
 }
 
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
-       synchronize_rcu();
+       synchronize_srcu(&kvm->irq_srcu);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
 {
        struct kvm_irq_mask_notifier *kimn;
-       int gsi;
+       int idx, gsi;
 
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
 
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/export.h>
 #include <trace/events/kvm.h>
 #include "irq.h"
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
        struct kvm_irq_ack_notifier *kian;
-       int gsi;
+       int gsi, idx;
 
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
-                               rcu_read_unlock();
+                               srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }
 
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        return false;
 }
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
        struct kvm_irq_ack_notifier *kian;
-       int gsi;
+       int gsi, idx;
 
        trace_kvm_ack_irq(irqchip, pin);
 
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
-       synchronize_rcu();
+       synchronize_srcu(&kvm->irq_srcu);
 #ifdef __KVM_HAVE_IOAPIC
        kvm_vcpu_request_scan_ioapic(kvm);
 #endif
                bool line_status)
 {
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
-       int ret = -1, i = 0;
+       int ret = -1, i = 0, idx;
        struct kvm_irq_routing_table *irq_rt;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
         * IOAPIC.  So set the bit in both. The guest will ignore
         * writes to the unused one.
         */
-       rcu_read_lock();
-       irq_rt = rcu_dereference(kvm->irq_routing);
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, &irq_rt->map[irq], link)
                        irq_set[i++] = *e;
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        while(i--) {
                int r;
        kvm_irq_routing_update(kvm, new);
        mutex_unlock(&kvm->irq_lock);
 
-       synchronize_rcu();
+       synchronize_srcu_expedited(&kvm->irq_srcu);
 
        new = old;
        r = 0;
 
 
        r = kvm_arch_init_vm(kvm, type);
        if (r)
-               goto out_err_nodisable;
+               goto out_err_no_disable;
 
        r = hardware_enable_all();
        if (r)
-               goto out_err_nodisable;
+               goto out_err_no_disable;
 
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
-               goto out_err_nosrcu;
+               goto out_err_no_srcu;
        kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
-               goto out_err_nosrcu;
+               goto out_err_no_srcu;
+       if (init_srcu_struct(&kvm->irq_srcu))
+               goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
        return kvm;
 
 out_err:
+       cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
-out_err_nosrcu:
+out_err_no_srcu:
        hardware_disable_all();
-out_err_nodisable:
+out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);