We always want those function pointers to be set up correctly.
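
For context: each of these set_*_funcs helpers is called exactly once
per device, from the corresponding IP block's early_init callback, so
there is no earlier assignment the removed NULL checks could have been
protecting. A minimal sketch of that call flow, reconstructed from the
driver's usual pattern rather than taken from this patch:

    static int cik_ih_early_init(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;

            /* early_init runs once per device, so the pointer is
             * still NULL here; install it unconditionally. */
            cik_ih_set_interrupt_funcs(adev);

            return 0;
    }
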
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 
 static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &cik_ih_funcs;
+       adev->irq.ih_funcs = &cik_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version cik_ih_ip_block =
 
 
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mman.buffer_funcs == NULL) {
-               adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-       }
+       adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        struct drm_gpu_scheduler *sched;
        unsigned i;
 
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_rqs[i] =
+                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
+       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
 
 
 static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &cz_ih_funcs;
+       adev->irq.ih_funcs = &cz_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version cz_ih_ip_block =
 
 
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.funcs == NULL)
-               adev->mode_info.funcs = &dce_v10_0_display_funcs;
+       adev->mode_info.funcs = &dce_v10_0_display_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
 
 
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.funcs == NULL)
-               adev->mode_info.funcs = &dce_v11_0_display_funcs;
+       adev->mode_info.funcs = &dce_v11_0_display_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
 
 
 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.funcs == NULL)
-               adev->mode_info.funcs = &dce_v6_0_display_funcs;
+       adev->mode_info.funcs = &dce_v6_0_display_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
 
 
 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.funcs == NULL)
-               adev->mode_info.funcs = &dce_v8_0_display_funcs;
+       adev->mode_info.funcs = &dce_v8_0_display_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
 
 
 static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.funcs == NULL)
-               adev->mode_info.funcs = &dce_virtual_display_funcs;
+       adev->mode_info.funcs = &dce_virtual_display_funcs;
 }
 
 static int dce_virtual_pageflip(struct amdgpu_device *adev,
 
 
 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gmc.gmc_funcs == NULL)
-               adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
+       adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 
 
 static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gmc.gmc_funcs == NULL)
-               adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
+       adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 
 
 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gmc.gmc_funcs == NULL)
-               adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
+       adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 
 
 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gmc.gmc_funcs == NULL)
-               adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+       adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 
 
 static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &iceland_ih_funcs;
+       adev->irq.ih_funcs = &iceland_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version iceland_ih_ip_block =
 
 
 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mman.buffer_funcs == NULL) {
-               adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-       }
+       adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        struct drm_gpu_scheduler *sched;
        unsigned i;
 
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_rqs[i] =
+                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
+       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
 
 
 static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mman.buffer_funcs == NULL) {
-               adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-       }
+       adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
+       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        struct drm_gpu_scheduler *sched;
        unsigned i;
 
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_rqs[i] =
+                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
+       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
 
 
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mman.buffer_funcs == NULL) {
-               adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-       }
+       adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
 
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        struct drm_gpu_scheduler *sched;
        unsigned i;
 
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_rqs[i] =
+                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
+       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
 
 
 static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mman.buffer_funcs == NULL) {
-               adev->mman.buffer_funcs = &si_dma_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-       }
+       adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+       adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
 
 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        struct drm_gpu_scheduler *sched;
        unsigned i;
 
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_rqs[i] =
+                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        }
+       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version si_dma_ip_block =
 
 
 static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &si_ih_funcs;
+       adev->irq.ih_funcs = &si_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version si_ih_ip_block =
 
 
 static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &tonga_ih_funcs;
+       adev->irq.ih_funcs = &tonga_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version tonga_ih_ip_block =
 
 
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-       if (adev->irq.ih_funcs == NULL)
-               adev->irq.ih_funcs = &vega10_ih_funcs;
+       adev->irq.ih_funcs = &vega10_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version vega10_ih_ip_block =