}
 
        if (amdgpu_sriov_vf(adev)) {
-               r = amdgpu_map_static_csa(adev, &fpriv->vm);
+               r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
                if (r)
                        goto out_suspend;
        }
        if (amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-               amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
-               fpriv->vm.csa_bo_va = NULL;
+               amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+               fpriv->csa_va = NULL;
                amdgpu_bo_unreserve(adev->virt.csa_obj);
        }
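
The two hunks above only retarget the callers; the field move itself (dropping csa_bo_va from struct amdgpu_vm and adding csa_va to struct amdgpu_fpriv) is outside this excerpt. A minimal sketch of what those callers assume, with the exact layout and the remaining members guessed rather than taken from the patch:

	struct amdgpu_fpriv {
		struct amdgpu_vm	vm;
		struct amdgpu_bo_va	*csa_va;	/* per-file CSA mapping, replaces vm.csa_bo_va */
		/* ... remaining members unchanged ... */
	};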
 
 
  * address within META_DATA init package to support SRIOV gfx preemption.
  */
 
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo_va **bo_va)
 {
-       int r;
-       struct amdgpu_bo_va *bo_va;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
+       int r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
                return r;
        }
 
-       bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-       if (!bo_va) {
+       *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+       if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }
 
-       r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
-                                  AMDGPU_CSA_SIZE);
+       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+                               AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, bo_va);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
 
-       r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
-                                               AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-                                               AMDGPU_PTE_EXECUTABLE);
+       r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+                            AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+                            AMDGPU_PTE_EXECUTABLE);
 
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, bo_va);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
 
-       vm->csa_bo_va = bo_va;
        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
 }
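
For context only: every error path above backs off with ttm_eu_backoff_reservation() because the function first reserves the static CSA BO together with the VM page directory; that setup is elided from this hunk and untouched by the patch. A rough sketch of the usual ttm_eu_reserve_buffers() pattern those backoff calls pair with (anything beyond the identifiers visible in the hunk is an assumption, not part of the patch):

	csa_tv.bo = &adev->virt.csa_obj->tbo;	/* validate the static CSA BO */
	csa_tv.shared = true;
	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);	/* plus the VM page directory */

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs, err=%d\n", r);
		return r;
	}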
 
 
 struct amdgpu_vm;
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo_va **bo_va);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);