static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask, reg_mem_engine;
-       struct nbio_hdp_flush_reg *nbio_hf_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg;
 
        if (ring->adev->flags & AMD_IS_APU)
                nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
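For context: these emit helpers only ever read through nbio_hf_reg, which is
what makes const-qualifying both the pointer and the tables safe. A minimal
standalone sketch of that access pattern, with hypothetical names rather than
the driver's actual helper:

    #include <stdint.h>

    /* Trimmed stand-in for the driver's table type. */
    struct hdp_flush_reg_sketch {
            uint32_t hdp_flush_req_offset;
            uint32_t hdp_flush_done_offset;
            uint32_t ref_and_mask_cp0;
    };

    /* The callee only reads offsets and masks and never patches the
     * table at runtime, so pointer-to-const is the honest type. */
    static uint32_t pick_ref_and_mask(const struct hdp_flush_reg_sketch *r)
    {
            return r->ref_and_mask_cp0;
    }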
 
                *flags |= AMD_CG_SUPPORT_BIF_LS;
 }
 
-struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+       .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
+       .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+       .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+       .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+       .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+       .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+       .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+       .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+       .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+       .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+       .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+       .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+       .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+       .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
+
 const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
        .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
        .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
 };
 
-int nbio_v6_1_init(struct amdgpu_device *adev)
-{
-       nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
-       nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
-       nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
-       return 0;
-}
-
 void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
 {
        uint32_t reg;
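Moving the data into a static const table is only possible because, in this
kernel, SOC15_REG_OFFSET and the GPU_HDP_FLUSH_DONE__*_MASK macros reduce to
integer constant expressions (an assumption about these headers; a
runtime-computed offset could not appear in such an initializer). A
simplified, self-contained illustration of the pattern:

    #include <stdint.h>

    /* Hypothetical stand-ins: assumed, like the real macros here, to
     * expand to integer constant expressions, which C requires for
     * initializers of objects with static storage duration. */
    #define SKETCH_REG_OFFSET(base, reg)    ((base) + (reg))
    #define SKETCH_DONE_CP0_MASK            0x00000001U

    struct flush_reg_sketch { uint32_t req, done, cp0; };

    static const struct flush_reg_sketch sketch_table = {
            .req  = SKETCH_REG_OFFSET(0x3000, 0x1a),
            .done = SKETCH_REG_OFFSET(0x3000, 0x1b),
            .cp0  = SKETCH_DONE_CP0_MASK,
    };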
 
 
 #include "soc15_common.h"
 
-extern struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
 extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
-int nbio_v6_1_init(struct amdgpu_device *adev);
 u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
 
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
 }
 
-struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
+       .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
+       .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
+       .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+       .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+       .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+       .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+       .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+       .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+       .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+       .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+       .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+       .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+       .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+       .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
+
 const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
        .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
        .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
 };
-
-int nbio_v7_0_init(struct amdgpu_device *adev)
-{
-       nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
-       nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
-       nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
-       return 0;
-}
 
 
 #include "soc15_common.h"
 
-extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
 extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
-int nbio_v7_0_init(struct amdgpu_device *adev);
 u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
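Note that the header declarations must gain the same qualifier as the
definitions: in C, an extern const declaration followed by a non-const
definition of the same object is diagnosed (GCC reports conflicting type
qualifiers), so the .h and .c changes have to land together. Roughly:

    struct flush_reg_demo { unsigned int req, done; };

    /* header (sketch): the declaration carries the qualifier */
    extern const struct flush_reg_demo demo_table;

    /* source (sketch): the definition must match, otherwise the
     * compiler rejects the mismatched qualifiers */
    const struct flush_reg_demo demo_table = { .req = 0x1a, .done = 0x1b };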
 
 static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
-       struct nbio_hdp_flush_reg *nbio_hf_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg;
 
        if (ring->adev->flags & AMD_IS_APU)
                nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
 
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
                psp_enabled = true;
 
-       /*
-        * nbio need be used for both sdma and gfx9, but only
-        * initializes once
-        */
-       switch(adev->asic_type) {
-       case CHIP_VEGA10:
-               nbio_v6_1_init(adev);
-               break;
-       case CHIP_RAVEN:
-               nbio_v7_0_init(adev);
-               break;
-       default:
-               return -EINVAL;
-       }
-
        adev->rev_id = soc15_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {