bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
+
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                                 struct amdgpu_reset_context *reset_context);
 
 
        }
 }
 
+/*
+ * amdgpu_device_set_sriov_virtual_display - force virtual display on SR-IOV VFs
+ *
+ * @adev: amdgpu device pointer
+ *
+ * If the device is an SR-IOV virtual function and virtual display is not
+ * already enabled, enable it with a single CRTC so the VF is driven by the
+ * vkms-backed virtual display path instead of a real display IP block.
+ * No-op on bare-metal or when virtual display was already requested.
+ */
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
+{
+       if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
+               adev->mode_info.num_crtc = 1;
+               adev->enable_virtual_display = true;
+               DRM_INFO("virtual_display:%d, num_crtc:%d\n",
+                        adev->enable_virtual_display, adev->mode_info.num_crtc);
+       }
+}
+
 /**
  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
  *
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) ||
-           adev->enable_virtual_display ||
+       if (adev->enable_virtual_display ||
            (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
 
        return 0;
 }
 
+/*
+ * Helper for SR-IOV VFs: switch the device to virtual display mode and
+ * register the vkms IP block in place of the real display manager (DM).
+ */
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+       amdgpu_device_set_sriov_virtual_display(adev);
+       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+
 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
 {
-       if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
+       if (adev->enable_virtual_display) {
                amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                return 0;
        }
                case IP_VERSION(3, 1, 6):
                case IP_VERSION(3, 2, 0):
                case IP_VERSION(3, 2, 1):
-                       amdgpu_device_ip_block_add(adev, &dm_ip_block);
+                       if (amdgpu_sriov_vf(adev))
+                               amdgpu_discovery_set_sriov_display(adev);
+                       else
+                               amdgpu_device_ip_block_add(adev, &dm_ip_block);
                        break;
                default:
                        dev_err(adev->dev,
                case IP_VERSION(12, 0, 0):
                case IP_VERSION(12, 0, 1):
                case IP_VERSION(12, 1, 0):
-                       amdgpu_device_ip_block_add(adev, &dm_ip_block);
+                       if (amdgpu_sriov_vf(adev))
+                               amdgpu_discovery_set_sriov_display(adev);
+                       else
+                               amdgpu_device_ip_block_add(adev, &dm_ip_block);
                        break;
                default:
                        dev_err(adev->dev,
 
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
+       amdgpu_device_set_sriov_virtual_display(adev);
+
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                /* topaz has no DCE, UVD, VCE */
                amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-               if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+               if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-               if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+               if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))