#include "irq_remapping.h"
 
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
+/*
+ * Assumptions:
+ * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
+ *    before the IO devices managed by that unit.
+ * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
+ *    after the IO devices managed by that unit.
+ * 3) Hotplug events are rare.
+ *
+ * Locking rules for DMA and interrupt remapping related global data
+ * structures:
+ * 1) Use dmar_global_lock in process context
+ * 2) Use RCU in interrupt context
  */
+DECLARE_RWSEM(dmar_global_lock);
 LIST_HEAD(dmar_drhd_units);
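
[Editor's note: a minimal sketch, not part of this patch, of how the two
locking rules above play out. The walker functions are hypothetical, and the
RCU variant assumes the list is updated with the _rcu list helpers (as the
rest of this series arranges) and that linux/rculist.h is available.]

/* Rule 1: process context may sleep, so take the rwsem. */
static void example_process_ctx_walk(void)
{
	struct dmar_drhd_unit *dmaru;

	down_read(&dmar_global_lock);
	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		/* sleeping is allowed while the rwsem is held */
	}
	up_read(&dmar_global_lock);
}

/* Rule 2: interrupt context must not sleep, so use RCU instead. */
static void example_irq_ctx_walk(void)
{
	struct dmar_drhd_unit *dmaru;

	rcu_read_lock();
	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) {
		/* no sleeping inside an RCU read-side critical section */
	}
	rcu_read_unlock();
}
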
 
 struct acpi_table_header * __initdata dmar_tbl;
 {
        int ret;
 
+       down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();
        }
        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
+       up_write(&dmar_global_lock);
 
        return ret ? 1 : -ENODEV;
 }
        if (irq_remapping_enabled || intel_iommu_enabled)
                return 0;
 
+       down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
+       up_write(&dmar_global_lock);
 
        return 0;
 }
 
        if (!domain)
                return 0;
 
+       down_read(&dmar_global_lock);
        domain_remove_one_dev_info(domain, pdev);
        if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
            !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
            list_empty(&domain->devices))
                domain_exit(domain);
+       up_read(&dmar_global_lock);
 
        return 0;
 }
        /* VT-d is required for a TXT/tboot launch, so enforce that */
        force_on = tboot_force_iommu();
 
+       if (iommu_init_mempool()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize iommu memory\n");
+               return -ENOMEM;
+       }
+
+       down_write(&dmar_global_lock);
        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
        if (no_iommu || dmar_disabled)
                goto out_free_dmar;
 
-       if (iommu_init_mempool()) {
-               if (force_on)
-                       panic("tboot: Failed to initialize iommu memory\n");
-               goto out_free_dmar;
-       }
-
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO "DMAR: No RMRR found\n");
 
        if (dmar_init_reserved_ranges()) {
                if (force_on)
                        panic("tboot: Failed to reserve iommu ranges\n");
-               goto out_free_mempool;
+               goto out_free_reserved_range;
        }
 
        init_no_remapping_devices();
                printk(KERN_ERR "IOMMU: dmar init failed\n");
                goto out_free_reserved_range;
        }
+       up_write(&dmar_global_lock);
        printk(KERN_INFO
        "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
 
 out_free_reserved_range:
        put_iova_domain(&reserved_iova_list);
-out_free_mempool:
-       iommu_exit_mempool();
 out_free_dmar:
        intel_iommu_free_dmars();
+       up_write(&dmar_global_lock);
+       iommu_exit_mempool();
        return ret;
 }
 
 
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 static int ir_ioapic_num, ir_hpet_num;
 
+/*
+ * Lock ordering:
+ * ->dmar_global_lock
+ *     ->irq_2_ir_lock
+ *             ->qi->q_lock
+ *     ->iommu->register_lock
+ * Note:
+ * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
+ * in a single-threaded environment with interrupts disabled, so there is no
+ * need to take the dmar_global_lock.
+ */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
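
[Editor's note: a hedged sketch, not part of the patch, of what the ordering
above means in practice: any path that needs both locks must acquire
dmar_global_lock before irq_2_ir_lock, never the reverse. The function name
is made up for illustration.]

static void example_lock_ordering(void)
{
	unsigned long flags;

	down_read(&dmar_global_lock);			/* outermost lock */
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);	/* nested inside */
	/* ... touch IRTE state here ... */
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	up_read(&dmar_global_lock);
}

[Acquiring them in the opposite order on another path would set up a classic
AB-BA deadlock, which is why the ordering is documented here.]
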
 
 static int __init parse_ioapics_under_ir(void);
        if (!irte)
                return -1;
 
+       down_read(&dmar_global_lock);
        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }
+       up_read(&dmar_global_lock);
 
        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
        if (!irte)
                return -1;
 
+       down_read(&dmar_global_lock);
        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }
+       up_read(&dmar_global_lock);
 
        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
 
 static int __init ir_dev_scope_init(void)
 {
+       int ret;
+
        if (!irq_remapping_enabled)
                return 0;
 
-       return dmar_dev_scope_init();
+       down_write(&dmar_global_lock);
+       ret = dmar_dev_scope_init();
+       up_write(&dmar_global_lock);
+
+       return ret;
 }
 rootfs_initcall(ir_dev_scope_init);
 
                                    struct io_apic_irq_attr *attr)
 {
        int ioapic_id = mpc_ioapic_id(attr->ioapic);
-       struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+       struct intel_iommu *iommu;
        struct IR_IO_APIC_route_entry *entry;
        struct irte irte;
        int index;
 
+       down_read(&dmar_global_lock);
+       iommu = map_ioapic_to_ir(ioapic_id);
        if (!iommu) {
                pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-               return -ENODEV;
-       }
-
-       entry = (struct IR_IO_APIC_route_entry *)route_entry;
-
-       index = alloc_irte(iommu, irq, 1);
-       if (index < 0) {
-               pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
-               return -ENOMEM;
+               index = -ENODEV;
+       } else {
+               index = alloc_irte(iommu, irq, 1);
+               if (index < 0) {
+                       pr_warn("Failed to allocate IRTE for ioapic %d\n",
+                               ioapic_id);
+                       index = -ENOMEM;
+               }
        }
+       up_read(&dmar_global_lock);
+       if (index < 0)
+               return index;
 
        prepare_irte(&irte, vector, destination);
 
                irte.avail, irte.vector, irte.dest_id,
                irte.sid, irte.sq, irte.svt);
 
+       entry = (struct IR_IO_APIC_route_entry *)route_entry;
        memset(entry, 0, sizeof(*entry));
 
        entry->index2   = (index >> 15) & 0x1;
        struct intel_iommu *iommu;
        int index;
 
+       down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(dev);
        if (!iommu) {
                printk(KERN_ERR
                       "Unable to map PCI %s to iommu\n", pci_name(dev));
-               return -ENOENT;
+               index = -ENOENT;
+       } else {
+               index = alloc_irte(iommu, irq, nvec);
+               if (index < 0) {
+                       printk(KERN_ERR
+                              "Unable to allocate %d IRTE for PCI %s\n",
+                              nvec, pci_name(dev));
+                       index = -ENOSPC;
+               }
        }
+       up_read(&dmar_global_lock);
 
-       index = alloc_irte(iommu, irq, nvec);
-       if (index < 0) {
-               printk(KERN_ERR
-                      "Unable to allocate %d IRTE for PCI %s\n", nvec,
-                      pci_name(dev));
-               return -ENOSPC;
-       }
        return index;
 }
 
                               int index, int sub_handle)
 {
        struct intel_iommu *iommu;
+       int ret = -ENOENT;
 
+       down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(pdev);
-       if (!iommu)
-               return -ENOENT;
-       /*
-        * setup the mapping between the irq and the IRTE
-        * base index, the sub_handle pointing to the
-        * appropriate interrupt remap table entry.
-        */
-       set_irte_irq(irq, iommu, index, sub_handle);
+       if (iommu) {
+               /*
+                * setup the mapping between the irq and the IRTE
+                * base index, the sub_handle pointing to the
+                * appropriate interrupt remap table entry.
+                */
+               set_irte_irq(irq, iommu, index, sub_handle);
+               ret = 0;
+       }
+       up_read(&dmar_global_lock);
 
-       return 0;
+       return ret;
 }
 
 static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-       struct intel_iommu *iommu = map_hpet_to_ir(id);
+       int ret = -1;
+       struct intel_iommu *iommu;
        int index;
 
-       if (!iommu)
-               return -1;
-
-       index = alloc_irte(iommu, irq, 1);
-       if (index < 0)
-               return -1;
+       down_read(&dmar_global_lock);
+       iommu = map_hpet_to_ir(id);
+       if (iommu) {
+               index = alloc_irte(iommu, irq, 1);
+               if (index >= 0)
+                       ret = 0;
+       }
+       up_read(&dmar_global_lock);
 
-       return 0;
+       return ret;
 }
 
 struct irq_remap_ops intel_irq_remap_ops = {