                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;
 
-               ttm_bo_reference(bo);
+               ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
                ttm_bo_unreserve(bo);
 
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                               ttm_bo_reference(bo);
+                               ttm_bo_get(bo);
                                up_read(&vmf->vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                                ttm_bo_unref(&bo);
 
        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
-       (void)ttm_bo_reference(bo);
+       ttm_bo_get(bo);
 }
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
        if (vma->vm_pgoff != 0)
                return -EACCES;
 
+       ttm_bo_get(bo);
+
        vma->vm_ops = &ttm_bo_vm_ops;
-       vma->vm_private_data = ttm_bo_reference(bo);
+       vma->vm_private_data = bo;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
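
For reference, a minimal sketch (not part of this patch) of the plain get/put refcount pattern the conversion moves toward: ttm_bo_get() takes an extra reference before dropping mmap_sem and blocking, and ttm_bo_put() (introduced alongside ttm_bo_get() and used once the remaining ttm_bo_unref() calls are converted in a follow-up) drops it afterwards. The helper hold_bo_across_wait() and the exact header path are illustrative assumptions, not code from the patch.

/*
 * Illustrative sketch only: mirrors the fault-handler hunks above.
 * ttm_bo_get()/ttm_bo_put() are the TTM refcount helpers (declared in
 * include/drm/ttm/ttm_bo_api.h at the time of this series);
 * hold_bo_across_wait() is a made-up example function.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <linux/dma-fence.h>

static void hold_bo_across_wait(struct ttm_buffer_object *bo)
{
	/* Pin the BO with an extra reference so it cannot be freed
	 * while this thread sleeps without mmap_sem held. */
	ttm_bo_get(bo);

	/* Block until the move fence signals; the return value is
	 * ignored here, as in the fault handler above. */
	(void) dma_fence_wait(bo->moving, true);

	/* Drop the extra reference. Unlike the old ttm_bo_unref(&bo),
	 * ttm_bo_put() does not clear the caller's pointer. */
	ttm_bo_put(bo);
}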