if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
                adev->enable_mes = true;
 
+       /* detect hw virtualization here */
+       amdgpu_detect_virtualization(adev);
+
        if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
                r = amdgpu_discovery_init(adev);
                if (r) {
 
                                      u32 *flags);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
        void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
        void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
 
                }
        }
 }
+
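+/**
+ * amdgpu_detect_virtualization - detect SR-IOV VF / passthrough status
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Read the per-ASIC IOV function identifier register, where one exists,
+ * and set the matching bits in adev->virt.caps.
+ */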
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+       uint32_t reg;
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+               break;
+       case CHIP_VEGA10:
+       case CHIP_VEGA20:
+       case CHIP_NAVI10:
+       case CHIP_NAVI12:
+       case CHIP_ARCTURUS:
+               reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+               break;
+       default: /* other chips don't support SRIOV */
+               reg = 0;
+               break;
+       }
+
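+       /* bit0: 0 means pf and 1 means vf */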
+       if (reg & 1)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
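+       /* bit31: 0 means disable IOV and 1 means enable */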
+       if (reg & 0x80000000)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+       if (!reg) {
+               if (is_virtual_machine())       /* passthrough mode excludes sriov mode */
+                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+       }
+}
 
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/* all ASICs after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
 struct amdgpu_mm_table {
        struct amdgpu_bo        *bo;
        uint32_t                *cpu_addr;
                                        unsigned int key,
                                        unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
 #endif
 
                >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
 
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-       cik_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 
        .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
        .get_clockgating_state = nbio_v2_3_get_clockgating_state,
        .ih_control = nbio_v2_3_ih_control,
        .init_registers = nbio_v2_3_init_registers,
-       .detect_hw_virt = nbio_v2_3_detect_hw_virt,
        .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 };
 
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
 
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
-       .detect_hw_virt = nbio_v6_1_detect_hw_virt,
 };
 
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
 
        .get_clockgating_state = nbio_v7_0_get_clockgating_state,
        .ih_control = nbio_v7_0_ih_control,
        .init_registers = nbio_v7_0_init_registers,
-       .detect_hw_virt = nbio_v7_0_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
 };
 
        .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
 };
 
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
 
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
-       .detect_hw_virt = nbio_v7_4_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
        .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
        .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
 
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio.funcs->detect_hw_virt(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_nv_virt_ops;
 
 
        return 0;
 }
 
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-       si_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_VERDE:
        case CHIP_TAHITI:
 
                adev->df.funcs = &df_v1_7_funcs;
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
 
        return true;
 }
 
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       uint32_t reg = 0;
-
-       if (adev->asic_type == CHIP_TONGA ||
-           adev->asic_type == CHIP_FIJI) {
-              reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-              /* bit0: 0 means pf and 1 means vf */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-              /* bit31: 0 means disable IOV and 1 means enable */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-       }
-
-       if (reg == 0) {
-               if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS},
        {mmGRBM_STATUS2},
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-       /* in early init stage, vbios code won't work */
-       vi_detect_hw_virtualization(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_vi_virt_ops;
 
 
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  0
 #define mmRCC_CONFIG_RESERVED                                                                          0x0de4 // duplicate 
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x0de5 // duplicate 
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             0
+#endif
 
 
 // addressBlock: syshub_mmreg_ind_syshubdec
 
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
 
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1