swiotlb expands the range of memory the card can access, but its path is
always slower than TTM pool allocation.
So add a condition to only use it when it is actually needed.
v2: move a bit later
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180209024410.1469-3-david1.zhou@amd.com
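
For reference, the check this patch adds boils down to comparing the highest
I/O memory address in the system against the range reachable with the device's
DMA mask. A minimal sketch of that gating logic follows; the helper name
radeon_needs_swiotlb is illustrative only, while drm_get_max_iomem() is the
helper from the drm_cache.h include added below:

    #include <drm/drm_cache.h>  /* drm_get_max_iomem() */

    /*
     * Illustrative sketch: swiotlb is only worth enabling when some I/O
     * memory lies above what dma_bits worth of address space can reach;
     * otherwise the faster ttm pool path is kept.
     */
    static bool radeon_needs_swiotlb(int dma_bits)
    {
            return drm_get_max_iomem() > ((u64)1 << dma_bits);
    }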
        struct radeon_dummy_page        dummy_page;
        bool                            shutdown;
        bool                            need_dma32;
+       bool                            need_swiotlb;
        bool                            accel_working;
        bool                            fastfb_working; /* IGP feature*/
        bool                            needs_reset, in_reset;
 
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_cache.h>
 #include <drm/radeon_drm.h>
 #include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
                pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
                pr_warn("radeon: No coherent DMA available\n");
        }
+       rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
 
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (swiotlb_nr_tbl()) {
+       if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
                return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
        }
 #endif
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (swiotlb_nr_tbl()) {
+       if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
                ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
                return;
        }
        count = ARRAY_SIZE(radeon_ttm_debugfs_list);
 
 #ifdef CONFIG_SWIOTLB
-       if (!swiotlb_nr_tbl())
+       if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
                --count;
 #endif