return 0;
 }
 
-static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-               uint64_t page_table_base)
-{
-       struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-       if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-               pr_err("trying to set page table base for wrong VMID %u\n",
-                      vmid);
-               return;
-       }
-
-       mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-
-       gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
-}
-
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
        .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
        .wave_control_execute = kgd_gfx_v9_wave_control_execute,
        .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
-                       kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-       .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
+                               kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+       .set_vm_context_page_table_base =
+                               kgd_gfx_v9_set_vm_context_page_table_base,
        .get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
 
        return 0;
 }
 
-static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
                return;
        }
 
-       mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 
        gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
                                        uint8_t vmid, uint16_t *p_pasid);
+
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+                       uint32_t vmid, uint64_t page_table_base);
 
        void (*query_ras_error_count)(struct amdgpu_device *adev,
                                        void *ras_error_status);
        void (*reset_ras_error_count)(struct amdgpu_device *adev);
+       u64 (*get_fb_location)(struct amdgpu_device *adev);
+       void (*init)(struct amdgpu_device *adev);
+       int (*gart_enable)(struct amdgpu_device *adev);
+       void (*set_fault_enable_default)(struct amdgpu_device *adev,
+                       bool value);
+       void (*gart_disable)(struct amdgpu_device *adev);
+       int (*set_clockgating)(struct amdgpu_device *adev,
+                              enum amd_clockgating_state state);
+       void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags);
+       void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base);
+       void (*update_power_gating)(struct amdgpu_device *adev,
+                                bool enable);
 };
 
 struct amdgpu_mmhub {
 
        }
 }
 
+
+static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+       adev->mmhub.funcs = &mmhub_v2_0_funcs;
+}
+
 static int gmc_v10_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v10_0_set_mmhub_funcs(adev);
        gmc_v10_0_set_gmc_funcs(adev);
        gmc_v10_0_set_irq_funcs(adev);
        gmc_v10_0_set_umc_funcs(adev);
        else
                gfxhub_v2_0_init(adev);
 
-       mmhub_v2_0_init(adev);
+       adev->mmhub.funcs->init(adev);
 
        spin_lock_init(&adev->gmc.invalidate_lock);
 
        if (r)
                return r;
 
-       r = mmhub_v2_0_gart_enable(adev);
+       r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
                gfxhub_v2_1_set_fault_enable_default(adev, value);
        else
                gfxhub_v2_0_set_fault_enable_default(adev, value);
-       mmhub_v2_0_set_fault_enable_default(adev, value);
+       adev->mmhub.funcs->set_fault_enable_default(adev, value);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
 
                gfxhub_v2_1_gart_disable(adev);
        else
                gfxhub_v2_0_gart_disable(adev);
-       mmhub_v2_0_gart_disable(adev);
+       adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
 
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = mmhub_v2_0_set_clockgating(adev, state);
+       r = adev->mmhub.funcs->set_clockgating(adev, state);
        if (r)
                return r;
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       mmhub_v2_0_get_clockgating(adev, flags);
+       adev->mmhub.funcs->get_clockgating(adev, flags);
 
        if (adev->asic_type == CHIP_SIENNA_CICHLID ||
            adev->asic_type == CHIP_NAVY_FLOUNDER)
 
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
-       case CHIP_VEGA20:
-               adev->mmhub.funcs = &mmhub_v1_0_funcs;
-               break;
        case CHIP_ARCTURUS:
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
                break;
        default:
+               adev->mmhub.funcs = &mmhub_v1_0_funcs;
                break;
        }
 }
 {
        u64 base = 0;
 
-       if (adev->asic_type == CHIP_ARCTURUS)
-               base = mmhub_v9_4_get_fb_location(adev);
-       else if (!amdgpu_sriov_vf(adev))
-               base = mmhub_v1_0_get_fb_location(adev);
+       if (!amdgpu_sriov_vf(adev))
+               base = adev->mmhub.funcs->get_fb_location(adev);
 
        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gfxhub_v1_0_init(adev);
-       if (adev->asic_type == CHIP_ARCTURUS)
-               mmhub_v9_4_init(adev);
-       else
-               mmhub_v1_0_init(adev);
+
+       adev->mmhub.funcs->init(adev);
 
        spin_lock_init(&adev->gmc.invalidate_lock);
 
        if (r)
                return r;
 
-       if (adev->asic_type == CHIP_ARCTURUS)
-               r = mmhub_v9_4_gart_enable(adev);
-       else
-               r = mmhub_v1_0_gart_enable(adev);
+       r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
                                                golden_settings_vega10_hdp,
                                                ARRAY_SIZE(golden_settings_vega10_hdp));
 
+       if (adev->mmhub.funcs->update_power_gating)
+               adev->mmhub.funcs->update_power_gating(adev, true);
+
        switch (adev->asic_type) {
-       case CHIP_RAVEN:
-               /* TODO for renoir */
-               mmhub_v1_0_update_power_gating(adev, true);
-               break;
        case CHIP_ARCTURUS:
                WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
                break;
 
        if (!amdgpu_sriov_vf(adev)) {
                gfxhub_v1_0_set_fault_enable_default(adev, value);
-               if (adev->asic_type == CHIP_ARCTURUS)
-                       mmhub_v9_4_set_fault_enable_default(adev, value);
-               else
-                       mmhub_v1_0_set_fault_enable_default(adev, value);
+               adev->mmhub.funcs->set_fault_enable_default(adev, value);
        }
        for (i = 0; i < adev->num_vmhubs; ++i)
                gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
        gfxhub_v1_0_gart_disable(adev);
-       if (adev->asic_type == CHIP_ARCTURUS)
-               mmhub_v9_4_gart_disable(adev);
-       else
-               mmhub_v1_0_gart_disable(adev);
+       adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_ARCTURUS)
-               mmhub_v9_4_set_clockgating(adev, state);
-       else
-               mmhub_v1_0_set_clockgating(adev, state);
+       adev->mmhub.funcs->set_clockgating(adev, state);
 
        athub_v1_0_set_clockgating(adev, state);
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_ARCTURUS)
-               mmhub_v9_4_get_clockgating(adev, flags);
-       else
-               mmhub_v1_0_get_clockgating(adev, flags);
+       adev->mmhub.funcs->get_clockgating(adev, flags);
 
        athub_v1_0_get_clockgating(adev, flags);
 }
 
 #define mmDAGB0_CNTL_MISC2_RV 0x008f
 #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
 
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
        u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
        u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
        return base;
 }
 
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
        }
 }
 
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                bool enable)
 {
        if (amdgpu_sriov_vf(adev))
        }
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
        if (amdgpu_sriov_vf(adev)) {
                /*
        return 0;
 }
 
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
        u32 tmp;
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
        u32 tmp;
 
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
-void mmhub_v1_0_init(struct amdgpu_device *adev)
+static void mmhub_v1_0_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 
                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
 }
 
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
 {
        if (amdgpu_sriov_vf(adev))
        return 0;
 }
 
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
        int data, data1;
 
        .ras_late_init = amdgpu_mmhub_ras_late_init,
        .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+       .get_fb_location = mmhub_v1_0_get_fb_location,
+       .init = mmhub_v1_0_init,
+       .gart_enable = mmhub_v1_0_gart_enable,
+       .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
+       .gart_disable = mmhub_v1_0_gart_disable,
+       .set_clockgating = mmhub_v1_0_set_clockgating,
+       .get_clockgating = mmhub_v1_0_get_clockgating,
+       .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
+       .update_power_gating = mmhub_v1_0_update_power_gating,
 };
 
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
 
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
-                                        bool value);
-void mmhub_v1_0_init(struct amdgpu_device *adev);
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
-                              enum amd_clockgating_state state);
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
-                                bool enable);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
-
 #endif
 
                MMVM_L2_PROTECTION_FAULT_STATUS, RW));
 }
 
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
        }
 }
 
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
        /* GART Enable. */
        mmhub_v2_0_init_gart_aperture_regs(adev);
        return 0;
 }
 
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
        u32 tmp;
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
        u32 tmp;
 
        .get_invalidate_req = mmhub_v2_0_get_invalidate_req,
 };
 
-void mmhub_v2_0_init(struct amdgpu_device *adev)
+static void mmhub_v2_0_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 
        }
 }
 
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
 {
        if (amdgpu_sriov_vf(adev))
        return 0;
 }
 
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
        int data, data1;
 
        if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_LS;
 }
+
+const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
+       .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .init = mmhub_v2_0_init,
+       .gart_enable = mmhub_v2_0_gart_enable,
+       .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
+       .gart_disable = mmhub_v2_0_gart_disable,
+       .set_clockgating = mmhub_v2_0_set_clockgating,
+       .get_clockgating = mmhub_v2_0_get_clockgating,
+       .setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
+};
 
 #ifndef __MMHUB_V2_0_H__
 #define __MMHUB_V2_0_H__
 
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
-                                        bool value);
-void mmhub_v2_0_init(struct amdgpu_device *adev);
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
-                              enum amd_clockgating_state state);
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
+extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs;
 
 #endif
 
 #define MMHUB_NUM_INSTANCES                    2
 #define MMHUB_INSTANCE_REGISTER_OFFSET         0x3000
 
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
 {
        /* The base should be same b/t 2 mmhubs on Arcturus. Read one here. */
        u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
                            (u32)(adev->gmc.gart_end >> 44));
 }
 
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        int i;
        }
 }
 
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
 {
        int i;
 
        return 0;
 }
 
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
        u32 tmp;
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
        u32 tmp;
        int i;
        }
 }
 
-void mmhub_v9_4_init(struct amdgpu_device *adev)
+static void mmhub_v9_4_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
                {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
        }
 }
 
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
 {
        if (amdgpu_sriov_vf(adev))
        return 0;
 }
 
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
        int data, data1;
 
        .ras_late_init = amdgpu_mmhub_ras_late_init,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
        .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+       .get_fb_location = mmhub_v9_4_get_fb_location,
+       .init = mmhub_v9_4_init,
+       .gart_enable = mmhub_v9_4_gart_enable,
+       .set_fault_enable_default = mmhub_v9_4_set_fault_enable_default,
+       .gart_disable = mmhub_v9_4_gart_disable,
+       .set_clockgating = mmhub_v9_4_set_clockgating,
+       .get_clockgating = mmhub_v9_4_get_clockgating,
+       .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
 };
 
 
 extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
 
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev,
-                                        bool value);
-void mmhub_v9_4_init(struct amdgpu_device *adev);
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
-                              enum amd_clockgating_state state);
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
-
 #endif