#include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
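
The two block-accessor typedefs kept above take an extra instance argument compared to the plain per-register accessors. A minimal sketch of a callback matching amdgpu_block_rreg_t, modeled on the indexed DCE audio-endpoint accessors (the helper name here is hypothetical):

/* Hypothetical accessor matching amdgpu_block_rreg_t: the middle
 * argument selects the block instance, the last one the register
 * within it; the index/data pair is serialized by a dedicated lock.
 */
static uint32_t example_block_rreg(struct amdgpu_device *adev,
				   uint32_t block_offset, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}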
 
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
-       u32 ref_and_mask_cp0;
-       u32 ref_and_mask_cp1;
-       u32 ref_and_mask_cp2;
-       u32 ref_and_mask_cp3;
-       u32 ref_and_mask_cp4;
-       u32 ref_and_mask_cp5;
-       u32 ref_and_mask_cp6;
-       u32 ref_and_mask_cp7;
-       u32 ref_and_mask_cp8;
-       u32 ref_and_mask_cp9;
-       u32 ref_and_mask_sdma0;
-       u32 ref_and_mask_sdma1;
-       u32 ref_and_mask_sdma2;
-       u32 ref_and_mask_sdma3;
-       u32 ref_and_mask_sdma4;
-       u32 ref_and_mask_sdma5;
-       u32 ref_and_mask_sdma6;
-       u32 ref_and_mask_sdma7;
-};
-
 struct amdgpu_mmio_remap {
        u32 reg_offset;
        resource_size_t bus_addr;
 };
 
-struct amdgpu_nbio_funcs {
-       const struct nbio_hdp_flush_reg *hdp_flush_reg;
-       u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
-       u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
-       u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
-       u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
-       u32 (*get_rev_id)(struct amdgpu_device *adev);
-       void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-       void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
-       u32 (*get_memsize)(struct amdgpu_device *adev);
-       void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
-                       bool use_doorbell, int doorbell_index, int doorbell_size);
-       void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
-                                  int doorbell_index, int instance);
-       void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
-                                        bool enable);
-       void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
-                                                 bool enable);
-       void (*ih_doorbell_range)(struct amdgpu_device *adev,
-                                 bool use_doorbell, int doorbell_index);
-       void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
-                                                bool enable);
-       void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
-                                               bool enable);
-       void (*get_clockgating_state)(struct amdgpu_device *adev,
-                                     u32 *flags);
-       void (*ih_control)(struct amdgpu_device *adev);
-       void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
-       void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
 struct amdgpu_df_funcs {
        void (*sw_init)(struct amdgpu_device *adev);
        void (*enable_broadcast_mode)(struct amdgpu_device *adev,
                                      bool enable);

        u32                             cg_flags;
        u32                             pg_flags;
 
+       /* nbio */
+       struct amdgpu_nbio              nbio;
+
        /* gfx */
        struct amdgpu_gfx               gfx;
 
        /* soc15 register offset based on ip, instance and segment */
        uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
-       const struct amdgpu_nbio_funcs  *nbio_funcs;
        const struct amdgpu_df_funcs    *df_funcs;
        const struct amdgpu_mmhub_funcs *mmhub_funcs;
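
For reference, the new amdgpu_nbio.h included above is expected to carry the definitions removed from this header (nbio_hdp_flush_reg and amdgpu_nbio_funcs) plus the container embedded above as the new nbio member. A minimal sketch, assuming the header keeps the names used throughout this patch:

/* amdgpu_nbio.h (sketch): pairs the per-IP callback table with its
 * matching HDP flush mask table, so callers take both from one place.
 */
struct amdgpu_nbio {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	const struct amdgpu_nbio_funcs *funcs;
};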
 
 
        unsigned long flags, address, data;
        uint32_t ficadl_val, ficadh_val;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
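
The df_v3_6.c hunks in this region are all instances of the same indirect PCIe register access; the tails of the functions are truncated here. A sketch of the full read sequence under the driver's usual locking convention (helper name hypothetical):

/* Generic shape of the indirect access these hunks retarget: fetch the
 * PCIE index/data offsets through the new nbio callbacks, then do the
 * index write and data read under pcie_idx_lock.
 */
static u32 example_indirect_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);	/* select the indirect register */
	r = RREG32(data);	/* read its value */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}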
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
 
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                switch (ring->me) {
        }
 
        gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-                              adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-                              adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                               ref_and_mask, ref_and_mask, 0x20);
 }
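
The elided middle of this function picks ref_and_mask from the table that now lives behind adev->nbio.hdp_flush_reg. A sketch of that selection, consistent with the CP0/compute split visible in the surrounding context (compute rings shift a per-MEC mask by their pipe, graphics uses CP0):

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}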
 
 
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                switch (ring->me) {
        }
 
        gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-                             adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-                             adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+                             adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+                             adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                              ref_and_mask, ref_and_mask, 0x20);
 }
 
 
        int r;
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
 
        /* size in MB on si */
        adev->gmc.mc_vram_size =
-               adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+               adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
        /* Flush HDP after it is initialized */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;
 
 
        /* size in MB on si */
        adev->gmc.mc_vram_size =
-               adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+               adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
        if (!(adev->flags & AMD_IS_APU)) {
        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 
        /* After HDP is initialized, flush HDP.*/
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
 
        /* disable irqs */
        navi10_ih_disable_interrupts(adev);
 
-       adev->nbio_funcs->ih_control(adev);
+       adev->nbio.funcs->ih_control(adev);
 
        /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
        }
        WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
 
-       adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+       adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
                                            ih->doorbell_index);
 
        tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
 
 }
 
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
-       .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
 
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
 
 #endif
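
The hunks below drop the static qualifier from the v6_1 and v7_4 flush-reg tables so the new extern declarations resolve; nbio_v2_3.c and nbio_v7_0.c presumably receive the matching one-line change, along the lines of:

-static const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {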
 
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
-       .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
 
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
 
 #endif
 
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
-       .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
 
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
 
 #endif
 
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
-       .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
 
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
 
 #endif
 
 #include "gmc_v10_0.h"
 #include "gfxhub_v2_0.h"
 #include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
 #include "nv.h"
 #include "navi10_ih.h"
 #include "gfx_v10_0.h"
 {
        unsigned long flags, address, data;
        u32 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
 
 static u32 nv_get_config_memsize(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_memsize(adev);
+       return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 nv_get_xclk(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               u32 memsize = adev->nbio_funcs->get_memsize(adev);
+               u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
                if (memsize != 0xffffffff)
                        break;
 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
 {
-       adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-       adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version nv_common_ip_block =
        if (r)
                return r;
 
-       adev->nbio_funcs = &nbio_v2_3_funcs;
+       adev->nbio.funcs = &nbio_v2_3_funcs;
+       adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio_funcs->detect_hw_virt(adev);
+       adev->nbio.funcs->detect_hw_virt(adev);
 
        switch (adev->asic_type) {
        case CHIP_NAVI10:
 
 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_rev_id(adev);
+       return adev->nbio.funcs->get_rev_id(adev);
 }
 
 static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       adev->nbio_funcs->hdp_flush(adev, ring);
+       adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void nv_invalidate_hdp(struct amdgpu_device *adev,
        /* enable aspm */
        nv_program_aspm(adev);
        /* setup nbio registers */
-       adev->nbio_funcs->init_registers(adev);
+       adev->nbio.funcs->init_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);
 
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                nv_update_hdp_mem_power_gating(adev,
                                   state == AMD_CG_STATE_GATE ? true : false);
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       adev->nbio_funcs->get_clockgating_state(adev, flags);
+       adev->nbio.funcs->get_clockgating_state(adev, flags);
 
        /* AMD_CG_SUPPORT_HDP_MGCG */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
 
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
 
        sdma_v4_0_wait_reg_mem(ring, 0, 1,
-                              adev->nbio_funcs->get_hdp_flush_done_offset(adev),
-                              adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               ref_and_mask, ref_and_mask, 10);
 }
 
 
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->me == 0)
                ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-       amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
-       amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
 
-               adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+               adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                                      ring->doorbell_index, 20);
 
                if (amdgpu_sriov_vf(adev))
 
 #include "mmhub_v1_0.h"
 #include "df_v1_7.h"
 #include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
 {
        unsigned long flags, address, data;
        u32 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
 {
        unsigned long flags, address, data;
        u64 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* read low 32 bit */
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* write low 32 bit */
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_memsize(adev);
+       return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               u32 memsize = adev->nbio_funcs->get_memsize(adev);
+               u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
                if (memsize != 0xffffffff)
                        break;
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
                                           bool enable)
 {
-       adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-       adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_rev_id(adev);
+       return adev->nbio.funcs->get_rev_id(adev);
 }
 
 int soc15_set_ip_blocks(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;
 
-       if (adev->flags & AMD_IS_APU)
-               adev->nbio_funcs = &nbio_v7_0_funcs;
-       else if (adev->asic_type == CHIP_VEGA20 ||
-               adev->asic_type == CHIP_ARCTURUS)
-               adev->nbio_funcs = &nbio_v7_4_funcs;
-       else
-               adev->nbio_funcs = &nbio_v6_1_funcs;
+       if (adev->flags & AMD_IS_APU) {
+               adev->nbio.funcs = &nbio_v7_0_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+       } else if (adev->asic_type == CHIP_VEGA20 ||
+                  adev->asic_type == CHIP_ARCTURUS) {
+               adev->nbio.funcs = &nbio_v7_4_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+       } else {
+               adev->nbio.funcs = &nbio_v6_1_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+       }
 
        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->df_funcs = &df_v3_6_funcs;
                adev->df_funcs = &df_v1_7_funcs;
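
With funcs and hdp_flush_reg now selected independently per ASIC, keeping the two assignments adjacent, as the hunk above does, is what keeps them in sync. A hedged sketch of how a hot path consumes the pair after this patch:

	/* Typical consumer: one pointer hop for the callback table and a
	 * second, independent hop for the mask table, both under adev->nbio.
	 */
	const struct nbio_hdp_flush_reg *hf = adev->nbio.hdp_flush_reg;
	u32 req  = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
	u32 done = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
	u32 mask = hf->ref_and_mask_sdma0;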
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio_funcs->detect_hw_virt(adev);
+       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
 
 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       adev->nbio_funcs->hdp_flush(adev, ring);
+       adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        ring = &adev->sdma.instance[i].ring;
-                       adev->nbio_funcs->sdma_doorbell_range(adev, i,
+                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
                                ring->use_doorbell, ring->doorbell_index,
                                adev->doorbell_index.sdma_doorbell_range);
                }
 
-               adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
                                                adev->irq.ih.doorbell_index);
        }
 }
        /* enable aspm */
        soc15_program_aspm(adev);
        /* setup nbio registers */
-       adev->nbio_funcs->init_registers(adev);
+       adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers to a hole in mmio space,
         * for the purpose of exposing those registers
         * to process space
         */
-       if (adev->nbio_funcs->remap_hdp_registers)
-               adev->nbio_funcs->remap_hdp_registers(adev);
+       if (adev->nbio.funcs->remap_hdp_registers)
+               adev->nbio.funcs->remap_hdp_registers(adev);
 
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        case CHIP_RAVEN:
        case CHIP_RENOIR:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       adev->nbio_funcs->get_clockgating_state(adev, flags);
+       adev->nbio.funcs->get_clockgating_state(adev, flags);
 
        /* AMD_CG_SUPPORT_HDP_LS */
        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
-       adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+       adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                             ring->doorbell_index, 0);
 
        ring->sched.ready = true;
 
                        continue;
                ring = &adev->vcn.inst[j].ring_dec;
 
-               adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+               adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                                     ring->doorbell_index, j);
 
                r = amdgpu_ring_test_ring(ring);
 
        /* disable irqs */
        vega10_ih_disable_interrupts(adev);
 
-       adev->nbio_funcs->ih_control(adev);
+       adev->nbio.funcs->ih_control(adev);
 
        ih = &adev->irq.ih;
        /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
 
                return ret;
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);
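
The powerplay/smu hunks that follow all repeat the same sequence: the SMU writes a table into the shared buffer, the HDP cache is flushed through the new nbio hook, and only then does the CPU copy the data out. Condensed, with the ordering made explicit:

	/* Flush the host data path cache before the CPU reads the buffer
	 * the SMU just wrote, so the copy does not see stale data.
	 */
	adev->nbio.funcs->hdp_flush(adev, NULL);
	memcpy(table_data, table->cpu_addr, table->size);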
 
                        priv->smu_tables.entry[table_id].table_id);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
 
                        priv->smu_tables.entry[table_id].table_id);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
 
                        return -EINVAL);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
 
                        return ret);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
                        return ret);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
                        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);