  * If GART size is bigger than space left then we adjust GART size.
  * Thus the function will never fail.
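+ * The GART is placed within the MC address space according to the
+ * requested @gart_placement policy.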
  */
-void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+                             enum amdgpu_gart_placement gart_placement)
 {
        const uint64_t four_gb = 0x100000000ULL;
        u64 size_af, size_bf;
                mc->gart_size = max(size_bf, size_af);
        }
 
-       if ((size_bf >= mc->gart_size && size_bf < size_af) ||
-           (size_af < mc->gart_size))
-               mc->gart_start = 0;
-       else
+       switch (gart_placement) {
+       case AMDGPU_GART_PLACEMENT_HIGH:
                mc->gart_start = max_mc_address - mc->gart_size + 1;
+               break;
+       case AMDGPU_GART_PLACEMENT_LOW:
+               mc->gart_start = 0;
+               break;
+       case AMDGPU_GART_PLACEMENT_BEST_FIT:
+       default:
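+               /* prefer the smaller of the two holes that still fits the GART */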
+               if ((size_bf >= mc->gart_size && size_bf < size_af) ||
+                   (size_af < mc->gart_size))
+                       mc->gart_start = 0;
+               else
+                       mc->gart_start = max_mc_address - mc->gart_size + 1;
+               break;
+       }
 
        mc->gart_start &= ~(four_gb - 1);
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
 
 
 #define INVALID_PFN    -1
 
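+/**
+ * enum amdgpu_gart_placement - requested placement of the GART
+ *
+ * @AMDGPU_GART_PLACEMENT_BEST_FIT: pick whichever hole around VRAM fits the
+ * GART while wasting the least address space
+ * @AMDGPU_GART_PLACEMENT_HIGH: force the GART to the top of the address space
+ * @AMDGPU_GART_PLACEMENT_LOW: force the GART to the bottom of the address space
+ */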
+enum amdgpu_gart_placement {
+       AMDGPU_GART_PLACEMENT_BEST_FIT = 0,
+       AMDGPU_GART_PLACEMENT_HIGH,
+       AMDGPU_GART_PLACEMENT_LOW,
+};
+
 struct amdgpu_gmc {
        /* FB's physical address in MMIO space (for CPU to
         * map FB). This is different compared to the agp/
 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
                              u64 base);
 void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
-                             struct amdgpu_gmc *mc);
+                             struct amdgpu_gmc *mc,
+                             enum amdgpu_gart_placement gart_placement);
 void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
                             struct amdgpu_gmc *mc);
 void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
 
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 
        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
-       amdgpu_gmc_gart_location(adev, mc);
+       amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
        if (!amdgpu_sriov_vf(adev))
                amdgpu_gmc_agp_location(adev, mc);
 
 
        base = adev->mmhub.funcs->get_fb_location(adev);
 
        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
-       amdgpu_gmc_gart_location(adev, mc);
+       amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
        if (!amdgpu_sriov_vf(adev) ||
            (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)))
                amdgpu_gmc_agp_location(adev, mc);
 
        base <<= 24;
 
        amdgpu_gmc_vram_location(adev, mc, base);
-       amdgpu_gmc_gart_location(adev, mc);
+       amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 
        base <<= 24;
 
        amdgpu_gmc_vram_location(adev, mc, base);
-       amdgpu_gmc_gart_location(adev, mc);
+       amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
 
 /**
 
        base <<= 24;
 
        amdgpu_gmc_vram_location(adev, mc, base);
-       amdgpu_gmc_gart_location(adev, mc);
+       amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
 
 /**
 
                amdgpu_gmc_sysvm_location(adev, mc);
        } else {
                amdgpu_gmc_vram_location(adev, mc, base);
-               amdgpu_gmc_gart_location(adev, mc);
+               amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_gmc_agp_location(adev, mc);
        }