static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
{
- const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
- if (!node)
+ /* If there's no SMMU driver at all, give up now */
+ if (!node || !iort_iommu_driver_enabled(node->type))
return -ENODEV;
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
return -ENODEV;
/*
- * If the ops look-up fails, this means that either
- * the SMMU drivers have not been probed yet or that
- * the SMMU drivers are not built in the kernel;
- * Depending on whether the SMMU drivers are built-in
- * in the kernel or not, defer the IOMMU configuration
- * or just abort it.
+ * If the SMMU drivers are enabled but not loaded/probed
+ * yet, this will defer.
*/
- ops = iommu_ops_from_fwnode(iort_fwnode);
- if (!ops)
- return iort_iommu_driver_enabled(node->type) ?
- -EPROBE_DEFER : -ENODEV;
-
- return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
+ return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
}
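For reference (not part of this diff), the readiness check used above is the existing IORT helper, roughly as in mainline drivers/acpi/arm64/iort.c:

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}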
struct iort_pci_alias_info {
#ifdef CONFIG_IOMMU_API
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *fwnode)
{
int ret;
- ret = iommu_fwspec_init(dev, fwnode, ops);
+ ret = iommu_fwspec_init(dev, fwnode);
if (ret)
return ret;
return iommu_fwspec_add_ids(dev, &id, 1);
}
#else /* !CONFIG_IOMMU_API */
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *fwnode)
{
return -ENODEV;
}
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
u32 epid)
{
- const struct iommu_ops *ops;
-
- if (!viommu)
+ if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU))
return -ENODEV;
/* We're not translating ourself */
if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
- ops = iommu_ops_from_fwnode(viommu->fwnode);
- if (!ops)
- return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
- -EPROBE_DEFER : -ENODEV;
-
- return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
+ return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode);
}
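With both hunks applied, the IORT and VIOT paths share one error contract, now enforced centrally by the core. A minimal hypothetical caller (example_acpi_xlate is an invented name, shown only to illustrate the return values) sees:

static int example_acpi_xlate(struct device *dev,
			      struct fwnode_handle *iommu_fwnode, u32 id)
{
	/*
	 * -ENODEV: no usable translation (driver disabled or no node);
	 * -EPROBE_DEFER: driver enabled but its instance has not
	 * registered this fwnode yet, so retry after it probes;
	 * 0: fwspec created and the ID recorded.
	 */
	return acpi_iommu_fwspec_init(dev, id, iommu_fwnode);
}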
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
it.cur_count = 1;
}
- err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
- &arm_smmu_ops);
+ err = iommu_fwspec_init(dev, of_fwnode_handle(smmu_dev->of_node));
if (err)
return err;
return dev->iommu->iommu_dev->ops;
}
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
+
int iommu_group_replace_domain(struct iommu_group *group,
struct iommu_domain *new_domain);
return ops;
}
-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops)
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
+ const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ if (!ops)
+ return -EPROBE_DEFER;
+
if (fwspec)
return ops == fwspec->ops ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
/* Preallocate for the overwhelmingly common case of 1 ID */
fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
- of_node_get(to_of_node(iommu_fwnode));
+ fwnode_handle_get(iommu_fwnode);
fwspec->iommu_fwnode = iommu_fwnode;
fwspec->ops = ops;
dev_iommu_fwspec_set(dev, fwspec);
return 0;
}
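For context (not part of this diff): the lookup which iommu_fwspec_init() now performs internally is the existing registration-list walk in drivers/iommu/iommu.c, reproduced roughly below; the `return ops;` context lines earlier are its tail.

const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	/* Match fwnode against instances registered by iommu_device_register() */
	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}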
- ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
+ ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np));
if (ret)
return ret;
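The recurring `&np->fwnode` to `of_fwnode_handle(np)` conversions in this series are not purely cosmetic: the macro (roughly as defined in include/linux/of.h) yields NULL for a NULL node, whereas taking the member's address directly would compute a bogus non-NULL pointer.

#define of_fwnode_handle(node)						\
	({								\
		typeof(node) __of_fwnode_handle_node = (node);		\
									\
		__of_fwnode_handle_node ?				\
			&__of_fwnode_handle_node->fwnode : NULL;	\
	})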
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
const struct iommu_ops *ops;
- struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
int ret;
- ops = iommu_ops_from_fwnode(fwnode);
- if ((ops && !ops->of_xlate) ||
- !of_device_is_available(iommu_spec->np))
+ if (!of_device_is_available(iommu_spec->np))
return -ENODEV;
- ret = iommu_fwspec_init(dev, fwnode, ops);
+ ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np));
+ if (ret == -EPROBE_DEFER)
+ return driver_deferred_probe_check_state(dev);
if (ret)
return ret;
- /*
- * The otherwise-empty fwspec handily serves to indicate the specific
- * IOMMU device we're waiting for, which will be useful if we ever get
- * a proper probe-ordering dependency mechanism in future.
- */
- if (!ops)
- return driver_deferred_probe_check_state(dev);
- if (!try_module_get(ops->owner))
+ ops = dev_iommu_fwspec_get(dev)->ops;
+ if (!ops->of_xlate || !try_module_get(ops->owner))
return -ENODEV;
ret = ops->of_xlate(dev, iommu_spec);
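Driver-side expectations are unchanged: by the time ops->of_xlate() runs, the fwspec already exists, so a typical implementation only records the master's IDs. A minimal hypothetical sketch (example_of_xlate is an invented name):

static int example_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	/* Append this master's IDs to the fwspec set up by of_iommu_xlate() */
	return iommu_fwspec_add_ids(dev, args->args, args->args_count);
}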
const struct iommu_ops *ops = smmu->iommu.ops;
int err;
- err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
+ err = iommu_fwspec_init(dev, of_fwnode_handle(dev->of_node));
if (err < 0) {
dev_err(dev, "failed to initialize fwspec: %d\n", err);
return err;
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops);
+ struct fwnode_handle *fwnode);
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id);
struct list_head sva_handles;
};
-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops);
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
if (dev->iommu)
return dev->iommu->fwspec;
else
return NULL;
}
static inline int iommu_fwspec_init(struct device *dev,
- struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *iommu_fwnode)
{
return -ENODEV;
}
return -ENODEV;
}
-static inline
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{