drm/amdgpu: Take IOMMU remapping into account for p2p checks
author    Rahul Jain <Rahul.Jain@amd.com>
          Tue, 13 Aug 2024 08:11:11 +0000 (13:41 +0530)
committer Alex Deucher <alexander.deucher@amd.com>
          Fri, 23 Aug 2024 14:53:05 +0000 (10:53 -0400)
When trying to enable p2p, amdgpu_device_is_peer_accessible() checks
whether the address_mask overlaps the aper_base and, if so, returns 0,
which disables p2p on this platform.

The IOMMU should remap the BAR addresses so the device can access them.
Hence, check whether DMA remapping is enabled for peer_adev.
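
For illustration only, a minimal, self-contained sketch of the old address
check (the dma_mask, aper_base and aper_size values below are made up and
not taken from any real platform); it shows how an aperture placed above
the peer's DMA mask fails the unconditional check that this patch skips
when the IOMMU remaps the BAR:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t dma_mask  = (1ULL << 40) - 1;   /* peer supports 40-bit DMA (hypothetical) */
          uint64_t aper_base = 0x38000000000ULL;   /* VRAM BAR placed above 2^40 (hypothetical) */
          uint64_t aper_size = 0x400000000ULL;     /* 16 GiB of visible VRAM (hypothetical) */

          uint64_t address_mask = ~dma_mask;
          uint64_t aper_limit   = aper_base + aper_size - 1;

          /* Same overlap test the driver used unconditionally before this
           * patch: any aperture address above the peer's DMA mask fails. */
          bool p2p_addressable = !((aper_base & address_mask) ||
                                   (aper_limit & address_mask));

          printf("p2p_addressable without IOMMU remapping: %s\n",
                 p2p_addressable ? "true" : "false");
          return 0;
  }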

v5: (Felix, Alex)
- fix comment as per Alex's feedback
- refactor code as per Felix's feedback

v4: (Alex)
- fix the comment and description

v3:
- remove iommu_remap variable

v2: (Alex)
- Fix as per review comments
- add new function amdgpu_device_check_iommu_remap to check whether IOMMU
  remapping is enabled

Signed-off-by: Rahul Jain <Rahul.Jain@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index ad97f03f135853ebafca16ae63d17d9aafa9402a..da06705f00264350b300f7b239f3c75b130b7e24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3957,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
                adev->ram_is_direct_mapped = true;
 }
 
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the BAR addresses of @adev are remapped by the IOMMU.
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+       struct iommu_domain *domain;
+
+       domain = iommu_get_domain_for_dev(adev->dev);
+       if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+               domain->type == IOMMU_DOMAIN_DMA_FQ))
+               return true;
+
+       return false;
+}
+#endif
+
 static const struct attribute *amdgpu_dev_attributes[] = {
        &dev_attr_pcie_replay_count.attr,
        NULL
@@ -6151,18 +6172,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
                                      struct amdgpu_device *peer_adev)
 {
 #ifdef CONFIG_HSA_AMD_P2P
-       uint64_t address_mask = peer_adev->dev->dma_mask ?
-               ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
-       resource_size_t aper_limit =
-               adev->gmc.aper_base + adev->gmc.aper_size - 1;
        bool p2p_access =
                !adev->gmc.xgmi.connected_to_cpu &&
                !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
 
-       return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
-               adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
-               !(adev->gmc.aper_base & address_mask ||
-                 aper_limit & address_mask));
+       bool is_large_bar = adev->gmc.visible_vram_size &&
+               adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+       bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+       if (!p2p_addressable) {
+               uint64_t address_mask = peer_adev->dev->dma_mask ?
+                       ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+               resource_size_t aper_limit =
+                       adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
+               p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+                                    aper_limit & address_mask);
+       }
+       return is_large_bar && p2p_access && p2p_addressable;
 #else
        return false;
 #endif