struct arm_smmu_domain {
        struct arm_smmu_device          *smmu;
        struct io_pgtable_ops           *pgtbl_ops;
-       spinlock_t                      pgtbl_lock;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        struct mutex                    init_mutex; /* Protects smmu pointer */
+       spinlock_t                      cb_lock; /* Serialises ATS1* ops */
        struct iommu_domain             domain;
 };
 
        }
 
        mutex_init(&smmu_domain->init_mutex);
-       spin_lock_init(&smmu_domain->pgtbl_lock);
+       spin_lock_init(&smmu_domain->cb_lock);
 
        return &smmu_domain->domain;
 }
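
The map/unmap paths below can drop the per-domain lock entirely because the io-pgtable backend is expected to update live page-table entries atomically, so concurrent mappers race on individual PTEs rather than on a driver spinlock. A minimal sketch of that lock-free install pattern, assuming 64-bit PTEs and a hypothetical install_table() helper (illustrative only, not the actual io-pgtable-arm code):

#include <linux/atomic.h>
#include <linux/types.h>

typedef u64 arm_lpae_iopte;

/*
 * Sketch only: two CPUs mapping into the same region may both allocate a
 * next-level table, but only the cmpxchg winner plants its pointer; the
 * loser sees the winner's entry returned and frees its own copy.
 */
static arm_lpae_iopte install_table(arm_lpae_iopte *ptep, arm_lpae_iopte new)
{
        arm_lpae_iopte old;

        old = cmpxchg64_relaxed(ptep, 0, new);
        return old ? old : new;
}
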
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
 {
-       int ret;
-       unsigned long flags;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-       struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+       struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
        if (!ops)
                return -ENODEV;
 
-       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-       ret = ops->map(ops, iova, paddr, size, prot);
-       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-       return ret;
+       return ops->map(ops, iova, paddr, size, prot);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                             size_t size)
 {
-       size_t ret;
-       unsigned long flags;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-       struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+       struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
        if (!ops)
                return 0;
 
-       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-       ret = ops->unmap(ops, iova, size);
-       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-       return ret;
+       return ops->unmap(ops, iova, size);
 }
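
With the lock removed, map and unmap calls on the same domain no longer serialise inside the driver, while the external iommu_map()/iommu_unmap() API is unchanged. A hedged caller-side sketch (example_map_unmap() and the addresses below are made up for illustration):

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical caller: map one 4K page read/write, then tear it down. */
static int example_map_unmap(struct iommu_domain *domain)
{
        unsigned long iova = 0x10000000;        /* made-up IOVA */
        phys_addr_t paddr = 0x80000000;         /* made-up physical address */
        int ret;

        ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        if (iommu_unmap(domain, iova, SZ_4K) != SZ_4K)
                return -EINVAL;

        return 0;
}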
 
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
        void __iomem *cb_base;
        u32 tmp;
        u64 phys;
-       unsigned long va;
+       unsigned long va, flags;
 
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
 
+       spin_lock_irqsave(&smmu_domain->cb_lock, flags);
        /* ATS1 registers can only be written atomically */
        va = iova & ~0xfffUL;
        if (smmu->version == ARM_SMMU_V2)
 
        if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
                                      !(tmp & ATSR_ACTIVE), 5, 50)) {
+               spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
                        &iova);
                return ops->iova_to_phys(ops, iova);
        }
 
        phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+       spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
        if (phys & CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
                dev_err(dev, "PAR = 0x%llx\n", phys);
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
                                        dma_addr_t iova)
 {
-       phys_addr_t ret;
-       unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-       struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+       struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                return iova;
        if (!ops)
                return 0;
 
-       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
-                       smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               ret = arm_smmu_iova_to_phys_hard(domain, iova);
-       } else {
-               ret = ops->iova_to_phys(ops, iova);
-       }
-
-       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+                       smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+               return arm_smmu_iova_to_phys_hard(domain, iova);
 
-       return ret;
+       return ops->iova_to_phys(ops, iova);
 }
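
The choice above is transparent to callers: iommu_iova_to_phys() uses the hardware ATS1 walk (now serialised by cb_lock) only for stage-1 domains on SMMUs advertising ARM_SMMU_FEAT_TRANS_OPS, and otherwise falls back to the software io-pgtable walk. A small hedged usage sketch (example_check_translation() is a hypothetical helper):

#include <linux/iommu.h>

/* Hypothetical helper: confirm an IOVA resolves to the expected address,
 * whichever walk the driver picks. */
static bool example_check_translation(struct iommu_domain *domain,
                                      dma_addr_t iova, phys_addr_t expected)
{
        return iommu_iova_to_phys(domain, iova) == expected;
}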
 
 static bool arm_smmu_capable(enum iommu_cap cap)