struct arm_smmu_master *master = master_domain->master;
                struct arm_smmu_cd *cdptr;
 
-               /* S1 domains only support RID attachment right now */
-               cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+               cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
                if (WARN_ON(!cdptr))
                        continue;
 
                arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
-               arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+               arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
                                        &target_cd);
        }
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
                                                    smmu_domain);
        }
 
-       arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
-                               size);
+       arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), start,
+                                   size);
 }
 
 static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
        arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
-       arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
+       arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
 
        smmu_mn->cleared = true;
        mutex_unlock(&sva_lock);
         */
        if (!smmu_mn->cleared) {
                arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
-               arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
-                                       0);
+               arm_smmu_atc_inv_domain_sva(smmu_domain,
+                                           mm_get_enqcmd_pasid(mm), 0, 0);
        }
 
        /* Frees smmu_mn */
 
        return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
 }
 
-int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
-                           unsigned long iova, size_t size)
+/*
+ * Core ATC invalidation walk over a domain's attached masters.
+ * @ssid == IOMMU_NO_PASID means "invalidate each attachment under its own
+ * master_domain->ssid"; any other value is an SVA PASID used verbatim for
+ * every master (see the comment in the loop below).
+ */
+static int __arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+                                    ioasid_t ssid, unsigned long iova, size_t size)
 {
        struct arm_smmu_master_domain *master_domain;
        int i;
        if (!atomic_read(&smmu_domain->nr_ats_masters))
                return 0;
 
-       arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
-
        cmds.num = 0;
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
                if (!master->ats_enabled)
                        continue;
 
+               /*
+                * Non-zero ssid means SVA is co-opting the S1 domain to issue
+                * invalidations for SVA PASIDs.
+                */
+               if (ssid != IOMMU_NO_PASID)
+                       arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+               else
+                       arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size,
+                                               &cmd);
+
                for (i = 0; i < master->num_streams; i++) {
                        cmd.atc.sid = master->streams[i].id;
                        arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
        return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
 }
 
+/*
+ * Invalidate ATC entries for the domain's RID (IOMMU_NO_PASID)
+ * attachments; wrapper so non-SVA callers need not pass an SSID.
+ */
+static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+                                  unsigned long iova, size_t size)
+{
+       return __arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova,
+                                        size);
+}
+
+/*
+ * SVA variant: @ssid is the SVA PASID to invalidate on every attached
+ * master, overriding each attachment's own ssid.
+ */
+int arm_smmu_atc_inv_domain_sva(struct arm_smmu_domain *smmu_domain,
+                               ioasid_t ssid, unsigned long iova, size_t size)
+{
+       return __arm_smmu_atc_inv_domain(smmu_domain, ssid, iova, size);
+}
+
 /* IO_PGTABLE API */
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
                arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
        }
-       arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+       arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
 
 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
         * Unfortunately, this can't be leaf-only since we may have
         * zapped an entire table.
         */
-       arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
+       arm_smmu_atc_inv_domain(smmu_domain, iova, size);
 }
 
 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
 
 static struct arm_smmu_master_domain *
 arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
-                           struct arm_smmu_master *master)
+                           struct arm_smmu_master *master,
+                           ioasid_t ssid)
 {
        struct arm_smmu_master_domain *master_domain;
 
 
+       /* Attachments are now keyed by (master, ssid); both must match. */
        list_for_each_entry(master_domain, &smmu_domain->devices,
                            devices_elm) {
-               if (master_domain->master == master)
+               if (master_domain->master == master &&
+                   master_domain->ssid == ssid)
                        return master_domain;
        }
        return NULL;
                return;
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-       master_domain = arm_smmu_find_master_domain(smmu_domain, master);
+       master_domain = arm_smmu_find_master_domain(smmu_domain, master,
+                                                   IOMMU_NO_PASID);
        if (master_domain) {
                list_del(&master_domain->devices_elm);
                kfree(master_domain);
 
 struct arm_smmu_master_domain {
        struct list_head devices_elm;
        struct arm_smmu_master *master;
+       /* SSID the attachment is installed under; IOMMU_NO_PASID for RID */
+       ioasid_t ssid;
 };
 
 static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
                                 size_t granule, bool leaf,
                                 struct arm_smmu_domain *smmu_domain);
 bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
-int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
-                           unsigned long iova, size_t size);
+int arm_smmu_atc_inv_domain_sva(struct arm_smmu_domain *smmu_domain,
+                               ioasid_t ssid, unsigned long iova, size_t size);
 
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);