www.infradead.org Git - users/dwmw2/linux.git/commitdiff
iommu/arm-smmu-v3: Make SVA allocate a normal arm_smmu_domain
authorJason Gunthorpe <jgg@nvidia.com>
Tue, 25 Jun 2024 12:37:39 +0000 (09:37 -0300)
committerWill Deacon <will@kernel.org>
Tue, 2 Jul 2024 14:39:47 +0000 (15:39 +0100)
Currently the SVA domain is a naked struct iommu_domain, allocate a struct
arm_smmu_domain instead.

This is necessary to be able to use the struct arm_smmu_master_domain
mechanism.

Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/8-v9-5cd718286059+79186-smmuv3_newapi_p2b_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

index d31caceb584984464a0e2e5558542d586159254d..aa033cd65adc5af5f6531c6641de2534ac5a427a 100644 (file)
@@ -639,7 +639,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
        }
 
        arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
-       ret = arm_smmu_set_pasid(master, NULL, id, &target);
+       ret = arm_smmu_set_pasid(master, to_smmu_domain(domain), id, &target);
        if (ret) {
                list_del(&bond->list);
                arm_smmu_mmu_notifier_put(bond->smmu_mn);
@@ -653,7 +653,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 
 static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
 {
-       kfree(domain);
+       kfree(to_smmu_domain(domain));
 }
 
 static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
@@ -664,13 +664,16 @@ static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
 struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
                                               struct mm_struct *mm)
 {
-       struct iommu_domain *domain;
+       struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+       struct arm_smmu_device *smmu = master->smmu;
+       struct arm_smmu_domain *smmu_domain;
 
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (!domain)
-               return ERR_PTR(-ENOMEM);
-       domain->type = IOMMU_DOMAIN_SVA;
-       domain->ops = &arm_smmu_sva_domain_ops;
+       smmu_domain = arm_smmu_domain_alloc();
+       if (IS_ERR(smmu_domain))
+               return ERR_CAST(smmu_domain);
+       smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
+       smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+       smmu_domain->smmu = smmu;
 
-       return domain;
+       return &smmu_domain->domain;
 }
index 7794f4ee34314be6958213dbffd15d2813e10f5f..0d6142d8011276aa0aa9aa2c105c2952a0b320a7 100644 (file)
@@ -2272,15 +2272,10 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
        }
 }
 
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+struct arm_smmu_domain *arm_smmu_domain_alloc(void)
 {
        struct arm_smmu_domain *smmu_domain;
 
-       /*
-        * Allocate the domain and initialise some of its data structures.
-        * We can't really do anything meaningful until we've added a
-        * master.
-        */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
                return ERR_PTR(-ENOMEM);
@@ -2290,6 +2285,22 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
        spin_lock_init(&smmu_domain->devices_lock);
        INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
 
+       return smmu_domain;
+}
+
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+       struct arm_smmu_domain *smmu_domain;
+
+       /*
+        * Allocate the domain and initialise some of its data structures.
+        * We can't really do anything meaningful until we've added a
+        * master.
+        */
+       smmu_domain = arm_smmu_domain_alloc();
+       if (IS_ERR(smmu_domain))
+               return ERR_CAST(smmu_domain);
+
        if (dev) {
                struct arm_smmu_master *master = dev_iommu_priv_get(dev);
                int ret;
@@ -2303,7 +2314,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
        return &smmu_domain->domain;
 }
 
-static void arm_smmu_domain_free(struct iommu_domain *domain)
+static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -3305,7 +3316,7 @@ static struct iommu_ops arm_smmu_ops = {
                .iotlb_sync             = arm_smmu_iotlb_sync,
                .iova_to_phys           = arm_smmu_iova_to_phys,
                .enable_nesting         = arm_smmu_enable_nesting,
-               .free                   = arm_smmu_domain_free,
+               .free                   = arm_smmu_domain_free_paging,
        }
 };
 
index 65b75dbfd159143a5d6d139b3317ab5460d2e79a..212c18c70fa03e2aa27b6e996abd8341fde7c891 100644 (file)
@@ -790,6 +790,8 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 extern struct xarray arm_smmu_asid_xa;
 extern struct mutex arm_smmu_asid_lock;
 
+struct arm_smmu_domain *arm_smmu_domain_alloc(void);
+
 void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
 struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
                                        u32 ssid);