From ba1057ab5d01091a6fce428c0f42a57a655aabb2 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 9 Oct 2024 12:11:46 +0800
Subject: [PATCH 01/16] drm/nouveau/tegra: Use iommu_paging_domain_alloc()

In nvkm_device_tegra_probe_iommu(), a paging domain is allocated for
@dev and attached to it on success. Use iommu_paging_domain_alloc() to
make it explicit.

Signed-off-by: Lu Baolu
Acked-by: Thierry Reding
Reviewed-by: Lyude Paul
Link: https://lore.kernel.org/r/20241009041147.28391-4-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index d1c294f00665..78a83f904bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 	mutex_init(&tdev->iommu.mutex);
 
 	if (device_iommu_mapped(dev)) {
-		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
-		if (!tdev->iommu.domain)
+		tdev->iommu.domain = iommu_paging_domain_alloc(dev);
+		if (IS_ERR(tdev->iommu.domain))
 			goto error;
 
 		/*
-- 
2.50.1

From f6440fcc9c7b7aaad2e52830ab83fa71990f76ff Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 9 Oct 2024 12:11:47 +0800
Subject: [PATCH 02/16] iommu: Remove iommu_domain_alloc()

The iommu_domain_alloc() interface is no longer used in the tree.
Remove it to avoid dead code.

There is increasing demand for supporting multiple IOMMU drivers, and
this is the last bus-based thing standing in the way of that.

Signed-off-by: Lu Baolu
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241009041147.28391-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c | 36 ------------------------------------
 include/linux/iommu.h |  6 ------
 2 files changed, 42 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e5d5b4fd322a..fe967b5fd094 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1965,42 +1965,6 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
 	return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
 }
 
-static int __iommu_domain_alloc_dev(struct device *dev, void *data)
-{
-	const struct iommu_ops **ops = data;
-
-	if (!dev_has_iommu(dev))
-		return 0;
-
-	if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
-		      "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
-		      dev_bus_name(dev)))
-		return -EBUSY;
-
-	*ops = dev_iommu_ops(dev);
-	return 0;
-}
-
-/*
- * The iommu ops in bus has been retired. Do not use this interface in
- * new drivers.
- */
-struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
-{
-	const struct iommu_ops *ops = NULL;
-	int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
-	struct iommu_domain *domain;
-
-	if (err || !ops)
-		return NULL;
-
-	domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
-	if (IS_ERR(domain))
-		return NULL;
-	return domain;
-}
-EXPORT_SYMBOL_GPL(iommu_domain_alloc);
-
 /**
  * iommu_paging_domain_alloc() - Allocate a paging domain
  * @dev: device for which the domain is allocated
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 03f9fa833db9..d93c87f0c003 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -787,7 +787,6 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
 extern int bus_iommu_probe(const struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
-extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
 struct iommu_domain *iommu_paging_domain_alloc(struct device *dev);
 extern void iommu_domain_free(struct iommu_domain *domain);
 extern int iommu_attach_device(struct iommu_domain *domain,
@@ -1079,11 +1078,6 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
 	return false;
 }
 
-static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
-{
-	return NULL;
-}
-
 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
 {
 	return ERR_PTR(-ENODEV);
-- 
2.50.1

From 541b967f5a9163b5c36b68599f1141114fbb26fa Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 28 Oct 2024 09:37:59 +0000
Subject: [PATCH 03/16] iommu: Refactor __iommu_domain_alloc()

A following patch will introduce the iommu_paging_domain_alloc_flags()
API. Hence move the domain init code to a separate function so that it
can be reused.

Also move the iommu_get_dma_cookie() setup to
iommu_setup_default_domain(), as it is required only in DMA API mode.
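
A condensed view of the resulting flow, for orientation only; the exact
code is in the diff below:

	/* All allocation paths now share one init helper. */
	domain = ops->domain_alloc(type);
	iommu_domain_init(domain, type, ops);

	/* The DMA cookie moves to iommu_setup_default_domain(): */
	if (iommu_is_dma_domain(dom))
		ret = iommu_get_dma_cookie(dom);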
Signed-off-by: Jason Gunthorpe
[Split the patch and added description - Vasant]
Signed-off-by: Vasant Hegde
Reviewed-by: Lu Baolu
Reviewed-by: Jason Gunthorpe
Reviewed-by: Kevin Tian
Reviewed-by: Yi Liu
Link: https://lore.kernel.org/r/20241028093810.5901-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c | 46 +++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fe967b5fd094..cd91c0eabb34 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1905,6 +1905,22 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
+static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
+			      const struct iommu_ops *ops)
+{
+	domain->type = type;
+	domain->owner = ops;
+	if (!domain->ops)
+		domain->ops = ops->default_domain_ops;
+
+	/*
+	 * If not already set, assume all sizes by default; the driver
+	 * may override this later
+	 */
+	if (!domain->pgsize_bitmap)
+		domain->pgsize_bitmap = ops->pgsize_bitmap;
+}
+
 static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 						 struct device *dev,
 						 unsigned int type)
@@ -1933,27 +1949,7 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	domain->type = type;
-	domain->owner = ops;
-	/*
-	 * If not already set, assume all sizes by default; the driver
-	 * may override this later
-	 */
-	if (!domain->pgsize_bitmap)
-		domain->pgsize_bitmap = ops->pgsize_bitmap;
-
-	if (!domain->ops)
-		domain->ops = ops->default_domain_ops;
-
-	if (iommu_is_dma_domain(domain)) {
-		int rc;
-
-		rc = iommu_get_dma_cookie(domain);
-		if (rc) {
-			iommu_domain_free(domain);
-			return ERR_PTR(rc);
-		}
-	}
+	iommu_domain_init(domain, type, ops);
 	return domain;
 }
 
@@ -2900,6 +2896,14 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 	if (group->default_domain == dom)
 		return 0;
 
+	if (iommu_is_dma_domain(dom)) {
+		ret = iommu_get_dma_cookie(dom);
+		if (ret) {
+			iommu_domain_free(dom);
+			return ret;
+		}
+	}
+
 	/*
 	 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
 	 * mapped before their device is attached, in order to guarantee
-- 
2.50.1

From 20858d4ebb423826b7a978d54cde195f51d87e20 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 28 Oct 2024 09:38:00 +0000
Subject: [PATCH 04/16] iommu: Introduce iommu_paging_domain_alloc_flags()

Currently, drivers call iommu_paging_domain_alloc(dev) to get an
UNMANAGED domain. This is not sufficient to support PASID with an
UNMANAGED domain, as some hardware (like AMD's) requires a certain
page table type to support PASIDs.

Also, the domain_alloc_paging op passes only the device as a parameter
for domain allocation. This is not sufficient for the AMD driver to
decide the right page table.

Instead of extending ops->domain_alloc_paging(), it was decided to
enhance ops->domain_alloc_user() so that the caller can pass various
additional flags.

Hence add the iommu_paging_domain_alloc_flags() API, which takes flags
as a parameter. The caller can pass additional flags to indicate the
type of domain required, etc. iommu_paging_domain_alloc_flags()
internally calls the appropriate callback function to allocate a
domain.
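
An illustrative (hypothetical) caller, assuming a valid @dev; note that
plain iommu_paging_domain_alloc(dev) is now simply the flags == 0 case:

	struct iommu_domain *domain;

	domain = iommu_paging_domain_alloc_flags(dev,
					IOMMU_HWPT_ALLOC_DIRTY_TRACKING);
	if (IS_ERR(domain))
		return PTR_ERR(domain);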
Signed-off-by: Jason Gunthorpe [Added description - Vasant] Signed-off-by: Vasant Hegde Reviewed-by: Jason Gunthorpe Reviewed-by: Lu Baolu Reviewed-by: Yi Liu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20241028093810.5901-3-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 32 +++++++++++++++++++++++++++----- include/linux/iommu.h | 14 +++++++++++--- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index cd91c0eabb34..d82aa6563d84 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1962,20 +1962,42 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) } /** - * iommu_paging_domain_alloc() - Allocate a paging domain + * iommu_paging_domain_alloc_flags() - Allocate a paging domain * @dev: device for which the domain is allocated + * @flags: Enum of iommufd_hwpt_alloc_flags * * Allocate a paging domain which will be managed by a kernel driver. Return - * allocated domain if successful, or a ERR pointer for failure. + * allocated domain if successful, or an ERR pointer for failure. */ -struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, + unsigned int flags) { + const struct iommu_ops *ops; + struct iommu_domain *domain; + if (!dev_has_iommu(dev)) return ERR_PTR(-ENODEV); - return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED); + ops = dev_iommu_ops(dev); + + if (ops->domain_alloc_paging && !flags) + domain = ops->domain_alloc_paging(dev); + else if (ops->domain_alloc_user) + domain = ops->domain_alloc_user(dev, flags, NULL, NULL); + else if (ops->domain_alloc && !flags) + domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); + else + return ERR_PTR(-EOPNOTSUPP); + + if (IS_ERR(domain)) + return domain; + if (!domain) + return ERR_PTR(-ENOMEM); + + iommu_domain_init(domain, IOMMU_DOMAIN_UNMANAGED, ops); + return domain; } -EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc); +EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags); void iommu_domain_free(struct iommu_domain *domain) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d93c87f0c003..aa78d911fdda 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -511,8 +511,6 @@ static inline int __iommu_copy_struct_from_user_array( * the caller iommu_domain_alloc() returns. * @domain_alloc_user: Allocate an iommu domain corresponding to the input * parameters as defined in include/uapi/linux/iommufd.h. - * Unlike @domain_alloc, it is called only by IOMMUFD and - * must fully initialize the new domain before return. 
 *                    Upon success, if the @user_data is valid and the @parent
 *                    points to a kernel-managed domain, the new domain must be
 *                    IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
@@ -787,7 +785,11 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
 extern int bus_iommu_probe(const struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
-struct iommu_domain *iommu_paging_domain_alloc(struct device *dev);
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+	return iommu_paging_domain_alloc_flags(dev, 0);
+}
 extern void iommu_domain_free(struct iommu_domain *domain);
 extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
@@ -1078,6 +1080,12 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
 	return false;
 }
 
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+						     unsigned int flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
 {
 	return ERR_PTR(-ENODEV);
-- 
2.50.1

From b7a0855eb95f6db8ac8e17596e76f7b94a790fe6 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 28 Oct 2024 09:38:01 +0000
Subject: [PATCH 05/16] iommu: Add new flag to explicitly request PASID
 capable domain

Introduce a new flag (IOMMU_HWPT_ALLOC_PASID) for the
ops->domain_alloc_user() path. If the IOMMU supports PASID, it will
allocate the domain; otherwise it returns an error.

In the error path, check for -EOPNOTSUPP and try to allocate a
non-PASID domain, so that DMA-API mode also works fine for drivers
which do not support PASID.

Also modify __iommu_group_alloc_default_domain() to call
iommu_paging_domain_alloc_flags() with the appropriate flag when
allocating a paging domain.

Signed-off-by: Jason Gunthorpe
Co-developed-by: Vasant Hegde
Signed-off-by: Vasant Hegde
Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20241028093810.5901-4-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c        | 56 +++++++++++++++++++++++++++++-------
 include/uapi/linux/iommufd.h |  8 ++++++
 2 files changed, 53 insertions(+), 11 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index cd91c0eabb34..d82aa6563d84 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "dma-iommu.h"
 #include "iommu-priv.h"
@@ -99,6 +100,9 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
 				struct iommu_group *group);
+static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
+						       unsigned int type,
+						       unsigned int flags);
 
 enum {
 	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
@@ -1585,8 +1589,30 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 static struct iommu_domain *
 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 {
+	struct device *dev = iommu_group_first_dev(group);
+	struct iommu_domain *dom;
+
 	if (group->default_domain && group->default_domain->type == req_type)
 		return group->default_domain;
+
+	/*
+	 * When allocating the DMA API domain assume that the driver is going to
+	 * use PASID and make sure the RID's domain is PASID compatible.
+	 */
+	if (req_type & __IOMMU_DOMAIN_PAGING) {
+		dom = __iommu_paging_domain_alloc_flags(dev, req_type,
+			   dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0);
+
+		/*
+		 * If driver does not support PASID feature then
+		 * try to allocate non-PASID domain
+		 */
+		if (PTR_ERR(dom) == -EOPNOTSUPP)
+			dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);
+
+		return dom;
+	}
+
 	return __iommu_group_domain_alloc(group, req_type);
 }
 
@@ -1961,16 +1987,9 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
 	return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
 }
 
-/**
- * iommu_paging_domain_alloc_flags() - Allocate a paging domain
- * @dev: device for which the domain is allocated
- * @flags: Enum of iommufd_hwpt_alloc_flags
- *
- * Allocate a paging domain which will be managed by a kernel driver. Return
- * allocated domain if successful, or an ERR pointer for failure.
- */
-struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
-						     unsigned int flags)
+static struct iommu_domain *
+__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
+				  unsigned int flags)
 {
 	const struct iommu_ops *ops;
 	struct iommu_domain *domain;
@@ -1994,9 +2013,24 @@ struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	iommu_domain_init(domain, IOMMU_DOMAIN_UNMANAGED, ops);
+	iommu_domain_init(domain, type, ops);
 	return domain;
 }
+
+/**
+ * iommu_paging_domain_alloc_flags() - Allocate a paging domain
+ * @dev: device for which the domain is allocated
+ * @flags: Bitmap of iommufd_hwpt_alloc_flags
+ *
+ * Allocate a paging domain which will be managed by a kernel driver. Return
+ * allocated domain if successful, or an ERR pointer for failure.
+ */
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+						     unsigned int flags)
+{
+	return __iommu_paging_domain_alloc_flags(dev,
+						 IOMMU_DOMAIN_UNMANAGED, flags);
+}
 EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
 
 void iommu_domain_free(struct iommu_domain *domain)
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 72010f71c5e4..0c0ed28ee113 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -359,11 +359,19 @@ struct iommu_vfio_ioas {
  *                                enforced on device attachment
 * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is
 *                             valid.
+ * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
+ *                          domain can be attached to any PASID on the device.
+ *                          Any domain attached to the non-PASID part of the
+ *                          device must also be flagged, otherwise attaching a
+ *                          PASID will be blocked.
+ *                          If the IOMMU does not support PASID it will return
+ *                          an error (-EOPNOTSUPP).
 */
 enum iommufd_hwpt_alloc_flags {
 	IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
 	IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
 	IOMMU_HWPT_FAULT_ID_VALID = 1 << 2,
+	IOMMU_HWPT_ALLOC_PASID = 1 << 3,
 };
 
 /**
-- 
2.50.1

From 60c30aa6afa2397cde39f15849d808536374b330 Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:02 +0000
Subject: [PATCH 06/16] iommu/arm-smmu-v3: Enhance domain_alloc_user() to
 allocate PASID capable domain

The core layer is modified to call domain_alloc_user() to allocate a
PASID-capable domain. Enhance arm_smmu_domain_alloc_user() to allocate
a PASID-capable domain based on the 'flags' parameter.
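
On SMMUv3 a regular paging domain already works with PASIDs, so the new
flag can simply route to the plain paging allocation. Condensed from
the diff below:

	if (flags & IOMMU_HWPT_ALLOC_PASID)
		return arm_smmu_domain_alloc_paging(dev);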
Signed-off-by: Vasant Hegde
Reviewed-by: Jason Gunthorpe
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20241028093810.5901-5-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 737c5b882355..8a193141f003 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3084,7 +3084,8 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 			   const struct iommu_user_data *user_data)
 {
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+				 IOMMU_HWPT_ALLOC_PASID;
 	struct arm_smmu_domain *smmu_domain;
 	int ret;
 
@@ -3093,6 +3094,9 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 	if (parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (flags & IOMMU_HWPT_ALLOC_PASID)
+		return arm_smmu_domain_alloc_paging(dev);
+
 	smmu_domain = arm_smmu_domain_alloc();
 	if (IS_ERR(smmu_domain))
 		return ERR_CAST(smmu_domain);
-- 
2.50.1

From b0ffdb23e94fcb30185bc618edf8608a7b0c9dfc Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:03 +0000
Subject: [PATCH 07/16] iommu/amd: Add helper function to check GIOSUP/GTSUP

amd_iommu_gt_ppr_supported() only checks for GTSUP. To support PASID
with the V2 page table we need GIOSUP as well. Hence add a new helper
function to check GIOSUP/GTSUP.

Signed-off-by: Vasant Hegde
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241028093810.5901-6-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/amd_iommu.h | 7 ++++++-
 drivers/iommu/amd/init.c      | 3 +--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6386fa4556d9..3b6c6332b09f 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -118,9 +118,14 @@ static inline bool check_feature2(u64 mask)
 	return (amd_iommu_efr2 & mask);
 }
 
+static inline bool amd_iommu_v2_pgtbl_supported(void)
+{
+	return (check_feature(FEATURE_GIOSUP) && check_feature(FEATURE_GT));
+}
+
 static inline bool amd_iommu_gt_ppr_supported(void)
 {
-	return (check_feature(FEATURE_GT) &&
+	return (amd_iommu_v2_pgtbl_supported() &&
 		check_feature(FEATURE_PPR) &&
 		check_feature(FEATURE_EPHSUP));
 }
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 43131c3a2172..7ae478d95e2b 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2071,8 +2071,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	init_iommu_perf_ctr(iommu);
 
 	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-		if (!check_feature(FEATURE_GIOSUP) ||
-		    !check_feature(FEATURE_GT)) {
+		if (!amd_iommu_v2_pgtbl_supported()) {
 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
 			amd_iommu_pgtable = AMD_IOMMU_V1;
 		}
-- 
2.50.1

From d15f55d645a8d8235593aa888d76a694c73e391c Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:04 +0000
Subject: [PATCH 08/16] iommu/amd: Move V2 page table support check to
 early_amd_iommu_init()

The amd_iommu_pgtable validation has to be done before calling
iommu_snp_enable(). It can be done immediately after reading the IOMMU
features. Hence move this check to early_amd_iommu_init().
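
Condensed, the moved check (unchanged in substance; see the diff below)
now runs during early init, before iommu_snp_enable() can observe a
stale amd_iommu_pgtable value:

	if (amd_iommu_pgtable == AMD_IOMMU_V2 &&
	    !amd_iommu_v2_pgtbl_supported()) {
		pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
		amd_iommu_pgtable = AMD_IOMMU_V1;
	}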
Signed-off-by: Vasant Hegde
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241028093810.5901-7-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/init.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7ae478d95e2b..7c883be22559 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2070,13 +2070,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 	init_iommu_perf_ctr(iommu);
 
-	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-		if (!amd_iommu_v2_pgtbl_supported()) {
-			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
-			amd_iommu_pgtable = AMD_IOMMU_V1;
-		}
-	}
-
 	if (is_rd890_iommu(iommu->dev)) {
 		int i, j;
 
@@ -3090,6 +3083,13 @@ static int __init early_amd_iommu_init(void)
 	    FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
 		amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
 
+	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+		if (!amd_iommu_v2_pgtbl_supported()) {
+			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+			amd_iommu_pgtable = AMD_IOMMU_V1;
+		}
+	}
+
 	/* Disable any previously enabled IOMMUs */
 	if (!is_kdump_kernel() || amd_iommu_disabled)
 		disable_iommus();
-- 
2.50.1

From b3c989083dabab472eb766d59b1d1fb9f11495d6 Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:05 +0000
Subject: [PATCH 09/16] iommu/amd: Separate page table setup from domain
 allocation

Currently, protection_domain_alloc() allocates a domain and also sets
up its page table. Page table setup is required for PAGING domains
only; domain types like SVA do not need one. Hence move the page table
setup code to a separate function.

Also, the SVA domain allocation path does not call
pdom_setup_pgtable(), so remove the IOMMU_DOMAIN_SVA type check there.
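
Condensed, allocation is now a two-step sequence (illustrative; the
exact code is in the diff below):

	domain = protection_domain_alloc(type, nid);	/* domain only */
	if (!domain)
		return ERR_PTR(-ENOMEM);

	ret = pdom_setup_pgtable(domain, type);		/* page table */
	if (ret) {
		domain_id_free(domain->id);
		kfree(domain);
		return ERR_PTR(ret);
	}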
Signed-off-by: Vasant Hegde
Reviewed-by: Jacob Pan
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241028093810.5901-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/iommu.c | 42 ++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 8364cd6fa47d..6285fd1afd50 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2265,28 +2265,36 @@ void protection_domain_free(struct protection_domain *domain)
 
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
-	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
-	int pgtable;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
 	domain->id = domain_id_alloc();
-	if (!domain->id)
-		goto err_free;
+	if (!domain->id) {
+		kfree(domain);
+		return NULL;
+	}
 
 	spin_lock_init(&domain->lock);
 	INIT_LIST_HEAD(&domain->dev_list);
 	INIT_LIST_HEAD(&domain->dev_data_list);
 	domain->iop.pgtbl.cfg.amd.nid = nid;
 
+	return domain;
+}
+
+static int pdom_setup_pgtable(struct protection_domain *domain,
+			      unsigned int type)
+{
+	struct io_pgtable_ops *pgtbl_ops;
+	int pgtable;
+
 	switch (type) {
 	/* No need to allocate io pgtable ops in passthrough mode */
 	case IOMMU_DOMAIN_IDENTITY:
-	case IOMMU_DOMAIN_SVA:
-		return domain;
+		return 0;
 	case IOMMU_DOMAIN_DMA:
 		pgtable = amd_iommu_pgtable;
 		break;
@@ -2298,7 +2306,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 		pgtable = AMD_IOMMU_V1;
 		break;
 	default:
-		goto err_id;
+		return -EINVAL;
 	}
 
 	switch (pgtable) {
@@ -2309,20 +2317,14 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
 		domain->pd_mode = PD_MODE_V2;
 		break;
 	default:
-		goto err_id;
+		return -EINVAL;
 	}
-
 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
 	if (!pgtbl_ops)
-		goto err_id;
+		return -ENOMEM;
 
-	return domain;
-err_id:
-	domain_id_free(domain->id);
-err_free:
-	kfree(domain);
-	return NULL;
+	return 0;
 }
 
 static inline u64 dma_max_address(void)
@@ -2345,6 +2347,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct protection_domain *domain;
 	struct amd_iommu *iommu = NULL;
+	int ret;
 
 	if (dev)
 		iommu = get_amd_iommu_from_dev(dev);
@@ -2364,6 +2367,13 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
+	ret = pdom_setup_pgtable(domain, type);
+	if (ret) {
+		domain_id_free(domain->id);
+		kfree(domain);
+		return ERR_PTR(ret);
+	}
+
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end   = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
-- 
2.50.1

From a005ef62f9922ae023aec63d673217486656b6bc Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:06 +0000
Subject: [PATCH 10/16] iommu/amd: Pass page table type as param to
 pdom_setup_pgtable()

The current code forces the v1 page table for UNMANAGED domains and the
global page table type (amd_iommu_pgtable) for the rest of the paging
domains.

The following patches add support for the domain_alloc_paging() op and
enhance domain_alloc_user() to allocate the page table based on
'flags'. Hence pass the page table type as a parameter to
pdom_setup_pgtable() so that the caller can decide the right page table
type. Also update dma_max_address() to take pgtable as a parameter.
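
Illustrative shape after the change (condensed from the diff below):

	/* The caller now chooses the page table type... */
	ret = pdom_setup_pgtable(domain, type, pgtable);

	/* ...and the aperture follows that choice, not the global. */
	domain->domain.geometry.aperture_end = dma_max_address(pgtable);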
Signed-off-by: Vasant Hegde Reviewed-by: Jacob Pan Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20241028093810.5901-9-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 43 +++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 6285fd1afd50..cb6a72564c23 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2286,28 +2286,13 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid) } static int pdom_setup_pgtable(struct protection_domain *domain, - unsigned int type) + unsigned int type, int pgtable) { struct io_pgtable_ops *pgtbl_ops; - int pgtable; - switch (type) { /* No need to allocate io pgtable ops in passthrough mode */ - case IOMMU_DOMAIN_IDENTITY: + if (!(type & __IOMMU_DOMAIN_PAGING)) return 0; - case IOMMU_DOMAIN_DMA: - pgtable = amd_iommu_pgtable; - break; - /* - * Force IOMMU v1 page table when allocating - * domain for pass-through devices. - */ - case IOMMU_DOMAIN_UNMANAGED: - pgtable = AMD_IOMMU_V1; - break; - default: - return -EINVAL; - } switch (pgtable) { case AMD_IOMMU_V1: @@ -2319,6 +2304,7 @@ static int pdom_setup_pgtable(struct protection_domain *domain, default: return -EINVAL; } + pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain); if (!pgtbl_ops) @@ -2327,9 +2313,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain, return 0; } -static inline u64 dma_max_address(void) +static inline u64 dma_max_address(int pgtable) { - if (amd_iommu_pgtable == AMD_IOMMU_V1) + if (pgtable == AMD_IOMMU_V1) return ~0ULL; /* V2 with 4/5 level page table */ @@ -2342,7 +2328,8 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu) } static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, - struct device *dev, u32 flags) + struct device *dev, + u32 flags, int pgtable) { bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; struct protection_domain *domain; @@ -2367,7 +2354,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, if (!domain) return ERR_PTR(-ENOMEM); - ret = pdom_setup_pgtable(domain, type); + ret = pdom_setup_pgtable(domain, type, pgtable); if (ret) { domain_id_free(domain->id); kfree(domain); @@ -2375,7 +2362,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, } domain->domain.geometry.aperture_start = 0; - domain->domain.geometry.aperture_end = dma_max_address(); + domain->domain.geometry.aperture_end = dma_max_address(pgtable); domain->domain.geometry.force_aperture = true; domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap; @@ -2393,8 +2380,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) { struct iommu_domain *domain; + int pgtable = amd_iommu_pgtable; + + /* + * Force IOMMU v1 page table when allocating + * domain for pass-through devices. 
+	 */
+	if (type == IOMMU_DOMAIN_UNMANAGED)
+		pgtable = AMD_IOMMU_V1;
 
-	domain = do_iommu_domain_alloc(type, NULL, 0);
+	domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
 	if (IS_ERR(domain))
 		return NULL;
 
@@ -2412,7 +2407,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	return do_iommu_domain_alloc(type, dev, flags);
+	return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V1);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
-- 
2.50.1

From ce2cd175469f6cb56e8de9bf87805206c8d19764 Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:07 +0000
Subject: [PATCH 11/16] iommu/amd: Enhance amd_iommu_domain_alloc_user()

The previous patch enhanced the core layer to check the device's PASID
capability and pass the right flags to ops->domain_alloc_user().

Enhance amd_iommu_domain_alloc_user() to allocate a domain with the
appropriate page table based on the 'flags' parameter:

- If flags is empty, allocate a domain with the default page table
  type. This will eventually replace ops->domain_alloc(). For an
  UNMANAGED domain, the core will call this interface with flags=0, so
  the AMD driver will continue to allocate a V1 page table.

- If the IOMMU_HWPT_ALLOC_PASID flag is passed, allocate a domain with
  a v2 page table.

Signed-off-by: Vasant Hegde
Reviewed-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241028093810.5901-10-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/iommu.c | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cb6a72564c23..671131c22d9d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2346,9 +2346,6 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
 		return ERR_PTR(-EINVAL);
 
-	if (dirty_tracking && !amd_iommu_hd_support(iommu))
-		return ERR_PTR(-EOPNOTSUPP);
-
 	domain = protection_domain_alloc(type,
 					 dev ? dev_to_node(dev) : NUMA_NO_NODE);
 	if (!domain)
@@ -2403,11 +2400,36 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 {
 	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+	struct amd_iommu *iommu = NULL;
+	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+				    IOMMU_HWPT_ALLOC_PASID;
+
+	if (dev)
+		iommu = get_amd_iommu_from_dev(dev);
+
+	if ((flags & ~supported_flags) || parent || user_data)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Allocate domain with v2 page table if IOMMU supports PASID.
+	 */
+	if (flags & IOMMU_HWPT_ALLOC_PASID) {
+		if (!amd_iommu_pasid_supported())
+			return ERR_PTR(-EOPNOTSUPP);
+
+		return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+	}
+
+	/* Allocate domain with v1 page table for dirty tracking */
+	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
+		if (iommu && amd_iommu_hd_support(iommu)) {
+			return do_iommu_domain_alloc(type, dev,
+						     flags, AMD_IOMMU_V1);
+		}
 
-	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
+	}
 
-	return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V1);
+	/* If nothing specific is required use the kernel commandline default */
+	return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
-- 
2.50.1

From 4402f2627d30c44f56180295e14561f1820eeacc Mon Sep 17 00:00:00 2001
From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:08 +0000
Subject: [PATCH 12/16] iommu/amd: Implement global identity domain

Implement a global identity domain. All device groups in the identity
domain will share this domain.

In the device attach path, it will allocate a per-device domain ID and
GCR3 table based on the device's capability, so that it can support
SVA.

Signed-off-by: Vasant Hegde
Reviewed-by: Jason Gunthorpe
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20241028093810.5901-11-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/amd_iommu.h |  1 +
 drivers/iommu/amd/init.c      |  3 +++
 drivers/iommu/amd/iommu.c     | 36 +++++++++++++++++++++++++++++++----
 3 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 3b6c6332b09f..38509e1019e9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -46,6 +46,7 @@ extern int amd_iommu_gpt_level;
 extern unsigned long amd_iommu_pgsize_bitmap;
 
 /* Protection domain ops */
+void amd_iommu_init_identity_domain(void);
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
 void protection_domain_free(struct protection_domain *domain);
 struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7c883be22559..45fdba48d54d 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2164,6 +2164,9 @@ static int __init amd_iommu_init_pci(void)
 	struct amd_iommu_pci_seg *pci_seg;
 	int ret;
 
+	/* Init global identity domain before registering IOMMU */
+	amd_iommu_init_identity_domain();
+
 	for_each_iommu(iommu) {
 		ret = iommu_init_pci(iommu);
 		if (ret) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 671131c22d9d..477aaf76b7ad 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -74,6 +74,9 @@ struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
 
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+				   struct device *dev);
+
 static void set_dte_entry(struct amd_iommu *iommu,
 			  struct iommu_dev_data *dev_data);
 
@@ -2263,6 +2266,14 @@ void protection_domain_free(struct protection_domain *domain)
 	kfree(domain);
 }
 
+static void protection_domain_init(struct protection_domain *domain, int nid)
+{
+	spin_lock_init(&domain->lock);
+	INIT_LIST_HEAD(&domain->dev_list);
+	INIT_LIST_HEAD(&domain->dev_data_list);
+	domain->iop.pgtbl.cfg.amd.nid = nid;
+}
+
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
 	struct protection_domain *domain;
@@ -2277,10 +2288,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 		return NULL;
 	}
 
-	spin_lock_init(&domain->lock);
-	INIT_LIST_HEAD(&domain->dev_list);
-	INIT_LIST_HEAD(&domain->dev_data_list);
-	domain->iop.pgtbl.cfg.amd.nid = nid;
+	protection_domain_init(domain, nid);
 
 	return domain;
 }
@@ -2471,6 +2479,25 @@ static struct iommu_domain blocked_domain = {
 	}
 };
 
+static struct protection_domain identity_domain;
+
+static const struct iommu_domain_ops identity_domain_ops = {
+	.attach_dev = amd_iommu_attach_device,
+};
+
+void amd_iommu_init_identity_domain(void)
+{
+	struct iommu_domain *domain = &identity_domain.domain;
+
+	domain->type = IOMMU_DOMAIN_IDENTITY;
+	domain->ops = &identity_domain_ops;
+	domain->owner = &amd_iommu_ops;
+
+	identity_domain.id = domain_id_alloc();
+
+	protection_domain_init(&identity_domain, NUMA_NO_NODE);
+}
+
 static int amd_iommu_attach_device(struct iommu_domain *dom,
 				   struct device *dev)
 {
@@ -2869,6 +2896,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.blocked_domain = &blocked_domain,
+	.identity_domain = &identity_domain.domain,
 	.domain_alloc = amd_iommu_domain_alloc,
 	.domain_alloc_user = amd_iommu_domain_alloc_user,
 	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
-- 
2.50.1

From 4208849ec7a6c46d41e623c359c5619704717140 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 28 Oct 2024 09:38:09 +0000
Subject: [PATCH 13/16] iommu: Put domain allocation in
 __iommu_group_alloc_blocking_domain()

There is no longer a reason to call __iommu_domain_alloc() to allocate
the blocking domain. All drivers that support a native blocking domain
provide it via the ops; for other drivers we should call
iommu_paging_domain_alloc().

__iommu_group_alloc_blocking_domain() is the only place that allocates
a BLOCKED domain, so move the ops->blocked_domain logic there.

Signed-off-by: Jason Gunthorpe
Signed-off-by: Vasant Hegde
Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20241028093810.5901-12-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 97ad43144736..fc6aaded94bf 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1956,8 +1956,6 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 
 	if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
 		return ops->identity_domain;
-	else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain)
-		return ops->blocked_domain;
 	else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
 		domain = ops->domain_alloc_paging(dev);
 	else if (ops->domain_alloc)
@@ -3147,22 +3145,25 @@ void iommu_device_unuse_default_domain(struct device *dev)
 
 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
 {
+	struct device *dev = iommu_group_first_dev(group);
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 	struct iommu_domain *domain;
 
 	if (group->blocking_domain)
 		return 0;
 
-	domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
-	if (IS_ERR(domain)) {
-		/*
-		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
-		 * create an empty domain instead.
-		 */
-		domain = __iommu_group_domain_alloc(group,
-						    IOMMU_DOMAIN_UNMANAGED);
-		if (IS_ERR(domain))
-			return PTR_ERR(domain);
+	if (ops->blocked_domain) {
+		group->blocking_domain = ops->blocked_domain;
+		return 0;
 	}
+
+	/*
+	 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an
+	 * empty PAGING domain instead.
+	 */
+	domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(domain))
+		return PTR_ERR(domain);
 	group->blocking_domain = domain;
 	return 0;
 }
-- 
2.50.1

From 4490ccc45fb77f77550bdd65e663af9e1cae6bcb Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 28 Oct 2024 09:38:10 +0000
Subject: [PATCH 14/16] iommu: Create __iommu_alloc_identity_domain()

Consolidate all the code to create an IDENTITY domain into one
function. This removes the legacy __iommu_domain_alloc() call from the
remaining paths and prepares it for final removal.

BLOCKED/IDENTITY/PAGING are now always allocated via a type specific
function.

[Joerg: Actually remove __iommu_domain_alloc()]

Signed-off-by: Jason Gunthorpe
Signed-off-by: Vasant Hegde
Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20241028093810.5901-13-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c | 69 ++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 41 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fc6aaded94bf..0d08a3256085 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -94,8 +94,6 @@ static const char * const iommu_group_resv_type_string[] = {
 static int iommu_bus_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data);
 static void iommu_release_device(struct device *dev);
-static struct iommu_domain *
-__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
 static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
@@ -137,6 +135,8 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
 						     struct device *dev);
 static void __iommu_group_free_device(struct iommu_group *group,
 				      struct group_device *grp_dev);
+static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
+			      const struct iommu_ops *ops);
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
@@ -1586,6 +1586,28 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
+static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	struct iommu_domain *domain;
+
+	if (ops->identity_domain)
+		return ops->identity_domain;
+
+	/* Older drivers create the identity domain via ops->domain_alloc() */
+	if (!ops->domain_alloc)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
+	if (IS_ERR(domain))
+		return domain;
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
+	return domain;
+}
+
 static struct iommu_domain *
 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 {
@@ -1613,7 +1635,10 @@ __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 		return dom;
 	}
 
-	return __iommu_group_domain_alloc(group, req_type);
+	if (req_type == IOMMU_DOMAIN_IDENTITY)
+		return __iommu_alloc_identity_domain(dev);
+
+	return ERR_PTR(-EINVAL);
 }
 
 /*
@@ -1947,44 +1972,6 @@ static void iommu_domain_init(struct iommu_domain
*domain, unsigned int type, domain->pgsize_bitmap = ops->pgsize_bitmap; } -static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops, - struct device *dev, - unsigned int type) -{ - struct iommu_domain *domain; - unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; - - if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) - return ops->identity_domain; - else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) - domain = ops->domain_alloc_paging(dev); - else if (ops->domain_alloc) - domain = ops->domain_alloc(alloc_type); - else - return ERR_PTR(-EOPNOTSUPP); - - /* - * Many domain_alloc ops now return ERR_PTR, make things easier for the - * driver by accepting ERR_PTR from all domain_alloc ops instead of - * having two rules. - */ - if (IS_ERR(domain)) - return domain; - if (!domain) - return ERR_PTR(-ENOMEM); - - iommu_domain_init(domain, type, ops); - return domain; -} - -static struct iommu_domain * -__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) -{ - struct device *dev = iommu_group_first_dev(group); - - return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type); -} - static struct iommu_domain * __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, unsigned int flags) -- 2.50.1 From d14772c0d88c387f881a577aa136e1e9b1291d07 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 29 Oct 2024 11:54:25 +0100 Subject: [PATCH 15/16] iommu: Fix prototype of iommu_paging_domain_alloc_flags() The iommu_paging_domain_alloc_flags() prototype for non-iommu kernel configurations lacks the 'static inline' prefixes. Cc: Jason Gunthorpe Cc: Vasant Hegde Fixes: 20858d4ebb42 ("iommu: Introduce iommu_paging_domain_alloc_flags()") Reviewed-by: Jason Gunthorpe Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index aa78d911fdda..522efdc7d815 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1080,7 +1080,7 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap) return false; } -struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, +static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags) { return ERR_PTR(-ENODEV); -- 2.50.1 From a33bf8d8ce7e06bf0f033865b0cea5887cd2ac8c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 29 Oct 2024 12:11:49 +0100 Subject: [PATCH 16/16] iommu: Restore iommu_flush_iotlb_all() This patch restores the iommu_flush_iotlb_all() function. Commit 69e5a17511f6 ("iommu: Remove useless flush from iommu_create_device_direct_mappings()") claims it removed the last call-site, except it did not. There is still at least one caller in drivers/gpu/drm/msm/msm_iommu.c so keep the function around until all call-sites are updated. 
Cc: Jason Gunthorpe Fixes: 69e5a17511f6 ("iommu: Remove useless flush from iommu_create_device_direct_mappings()") Acked-by: Will Deacon Reviewed-by: Jason Gunthorpe Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 522efdc7d815..8cce372a33f1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -853,6 +853,12 @@ void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); +static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + if (domain->ops->flush_iotlb_all) + domain->ops->flush_iotlb_all(domain); +} + static inline void iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) { @@ -1137,6 +1143,10 @@ static inline ssize_t iommu_map_sg(struct iommu_domain *domain, return -ENODEV; } +static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) +{ +} + static inline void iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) { -- 2.50.1
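
Taken together, the series leaves callers with type-specific allocation
paths. A hedged end-state sketch (hypothetical caller; identifiers as
used in the patches above):

	struct iommu_domain *dom;

	/* Driver-managed paging domain, default page table. */
	dom = iommu_paging_domain_alloc(dev);

	/* Paging domain that must be PASID-capable (AMD: v2 page table). */
	dom = iommu_paging_domain_alloc_flags(dev, IOMMU_HWPT_ALLOC_PASID);
	if (PTR_ERR(dom) == -EOPNOTSUPP)
		dom = iommu_paging_domain_alloc(dev); /* fall back, as the core does */

	/* IDENTITY and BLOCKED domains come from ops->identity_domain /
	 * ops->blocked_domain (or per-path fallbacks); the old bus-based
	 * iommu_domain_alloc() is gone. */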