return dev_data;
 }
 
-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
 {
        struct iommu_dev_data *dev_data;
        struct llist_node *node;
                                        bool enable)
 {
        struct protection_domain *pdomain = to_pdomain(domain);
-       struct dev_table_entry *dev_table;
+       struct dev_table_entry *dte;
        struct iommu_dev_data *dev_data;
        bool domain_flush = false;
        struct amd_iommu *iommu;
        unsigned long flags;
-       u64 pte_root;
+       u64 new;
 
        spin_lock_irqsave(&pdomain->lock, flags);
        if (!(pdomain->dirty_tracking ^ enable)) {
        }
 
        list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+               spin_lock(&dev_data->dte_lock);
                iommu = get_amd_iommu_from_dev_data(dev_data);
-
-               dev_table = get_dev_table(iommu);
-               pte_root = dev_table[dev_data->devid].data[0];
-
-               pte_root = (enable ? pte_root | DTE_FLAG_HAD :
-                                    pte_root & ~DTE_FLAG_HAD);
+               dte = &get_dev_table(iommu)[dev_data->devid];
+               new = dte->data[0];
+               new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+               dte->data[0] = new;
+               spin_unlock(&dev_data->dte_lock);
 
                /* Flush device DTE */
-               dev_table[dev_data->devid].data[0] = pte_root;
                device_flush_dte(dev_data);
                domain_flush = true;
        }
 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
                              struct irq_remap_table *table)
 {
-       u64 dte;
-       struct dev_table_entry *dev_table = get_dev_table(iommu);
+       u64 new;
+       struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+       /*
+        * search_dev_data() can return NULL (no iommu_dev_data allocated
+        * for this devid yet), so the per-device DTE lock is taken only
+        * when it exists.  NOTE(review): in the NULL case the update below
+        * proceeds unlocked — presumably no concurrent DTE writer can
+        * exist for such a devid; confirm against the callers.
+        */
+       struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
+
+       if (dev_data)
+               spin_lock(&dev_data->dte_lock);
 
-       dte     = dev_table[devid].data[2];
-       dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
-       dte     |= iommu_virt_to_phys(table->table);
-       dte     |= DTE_IRQ_REMAP_INTCTL;
-       dte     |= DTE_INTTABLEN;
-       dte     |= DTE_IRQ_REMAP_ENABLE;
+       /*
+        * Build the new data[2] value in a local, then publish it with a
+        * single WRITE_ONCE() so lockless readers never observe a torn or
+        * half-updated field: IRQ table physical address plus the remap
+        * control/enable bits.
+        */
+       new = READ_ONCE(dte->data[2]);
+       new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+       new |= iommu_virt_to_phys(table->table);
+       new |= DTE_IRQ_REMAP_INTCTL;
+       new |= DTE_INTTABLEN;
+       new |= DTE_IRQ_REMAP_ENABLE;
+       WRITE_ONCE(dte->data[2], new);
 
-       dev_table[devid].data[2] = dte;
+       if (dev_data)
+               spin_unlock(&dev_data->dte_lock);
 }
 
 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)