if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;
 
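+       /* df.funcs may be NULL when no DF implementation is hooked up */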
+       if (!pe->adev->df.funcs || !pe->adev->df.funcs->pmc_start)
+               return;
+
        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;
 
                                                  pmu);
        u64 count, prev;
 
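+       /* nothing to read back without a DF counter-read callback */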
+       if (!pe->adev->df.funcs || !pe->adev->df.funcs->pmc_get_count)
+               return;
+
        do {
                prev = local64_read(&hwc->prev_count);
 
        if (hwc->state & PERF_HES_UPTODATE)
                return;
 
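+       /* leave the event as-is if the DF stop callback is missing */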
+       if (!pe->adev->df.funcs || !pe->adev->df.funcs->pmc_stop)
+               return;
+
        switch (hwc->config_base) {
        case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
        case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
                                                  struct amdgpu_pmu_entry,
                                                  pmu);
 
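+       /* the counter cannot be armed without a DF pmc_start hook */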
+       if (!pe->adev->df.funcs || !pe->adev->df.funcs->pmc_start)
+               return -EINVAL;
+
        switch (pe->pmu_perf_type) {
        case AMDGPU_PMU_PERF_TYPE_DF:
                hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);
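+
+       /* pmc_stop is needed below; bail out early if it is absent */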
+       if (!pe->adev->df.funcs || !pe->adev->df.funcs->pmc_stop)
+               return;
 
        amdgpu_perf_stop(event, PERF_EF_UPDATE);
 
 
        ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
        ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
 
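+       /* need both FICA accessors to read and clear the error counters */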
+       if (!adev->df.funcs || !adev->df.funcs->get_fica ||
+           !adev->df.funcs->set_fica)
+               return -EINVAL;
+
        fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
        if (fica_out != 0x1f)
                pr_err("xGMI error counters not enabled!\n");
 
        if (!amdgpu_sriov_vf(adev) &&
            (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
                if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
-                       if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
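+                       /* skip when the parity RMW hook is not implemented */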
+                       if (adev->df.funcs &&
+                           adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                }
        }
                        chansize = 64;
                else
                        chansize = 128;
-
-               numchan = adev->df.funcs->get_hbm_channel_number(adev);
-               adev->gmc.vram_width = numchan * chansize;
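+               /* derive vram_width only when the channel count is readable */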
+               if (adev->df.funcs &&
+                   adev->df.funcs->get_hbm_channel_number) {
+                       numchan = adev->df.funcs->get_hbm_channel_number(adev);
+                       adev->gmc.vram_width = numchan * chansize;
+               }
        }
 
        adev->gmc.vram_type = vram_type;
 
        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_add_irq_id(adev);
 
-       adev->df.funcs->sw_init(adev);
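+       /* run DF software init only when the callback is provided */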
+       if (adev->df.funcs &&
+           adev->df.funcs->sw_init)
+               adev->df.funcs->sw_init(adev);
 
        return 0;
 }
        if (adev->nbio.ras_funcs &&
            adev->nbio.ras_funcs->ras_fini)
                adev->nbio.ras_funcs->ras_fini(adev);
-       adev->df.funcs->sw_fini(adev);
+
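+       /* mirror the sw_init guard on teardown */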
+       if (adev->df.funcs &&
+           adev->df.funcs->sw_fini)
+               adev->df.funcs->sw_fini(adev);
        return 0;
 }