struct nouveau_uvma_region *reg;
        int ret;
 
-       if (!drm_gpuvm_interval_empty(&uvmm->umgr, addr, range))
+       if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
                return -ENOSPC;
 
        ret = nouveau_uvma_region_alloc(&reg);
 {
        struct nouveau_uvmm *uvmm = reg->uvmm;
 
-       return drm_gpuvm_interval_empty(&uvmm->umgr,
+       return drm_gpuvm_interval_empty(&uvmm->base,
                                        reg->va.addr,
                                        reg->va.range);
 }
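For context, the hunk above sits in the small helper that checks whether a region still contains mappings; a sketch of how that helper reads after the patch (the function name and signature are inferred for illustration, they are not part of this excerpt):

    static bool
    nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
    {
            struct nouveau_uvmm *uvmm = reg->uvmm;

            /* A region counts as empty when no GPU VA mappings
             * intersect its address range.
             */
            return drm_gpuvm_interval_empty(&uvmm->base,
                                            reg->va.addr,
                                            reg->va.range);
    }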
        uvma->region = args->region;
        uvma->kind = args->kind;
 
-       drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+       drm_gpuva_map(&uvmm->base, &uvma->va, op);
 
        /* Keep a reference until this uvma is destroyed. */
        nouveau_uvma_gem_get(uvma);
                                goto unwind_continue;
                        }
 
-                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
                                                                op->va.addr,
                                                                op->va.range);
                        if (IS_ERR(op->ops)) {
                        ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
                                                            op->ops);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                op->reg = NULL;
                                goto unwind_continue;
                                }
                        }
 
-                       op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
                                                              op->va.addr,
                                                              op->va.range,
                                                              op->gem.obj,
                                                          op->va.range,
                                                          op->flags & 0xff);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                goto unwind_continue;
                        }
                        break;
                }
                case OP_UNMAP:
-                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
                                                                op->va.addr,
                                                                op->va.range);
                        if (IS_ERR(op->ops)) {
                        ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
                                                            op->ops);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                goto unwind_continue;
                        }
                        break;
                }
 
-               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+               drm_gpuva_ops_free(&uvmm->base, op->ops);
                op->ops = NULL;
                op->reg = NULL;
        }
                }
 
                if (!IS_ERR_OR_NULL(op->ops))
-                       drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                       drm_gpuva_ops_free(&uvmm->base, op->ops);
 
                if (obj)
                        drm_gem_object_put(obj);
        uvmm->kernel_managed_addr = kernel_managed_addr;
        uvmm->kernel_managed_size = kernel_managed_size;
 
-       drm_gpuvm_init(&uvmm->umgr, cli->name,
+       drm_gpuvm_init(&uvmm->base, cli->name,
                       NOUVEAU_VA_SPACE_START,
                       NOUVEAU_VA_SPACE_END,
                       kernel_managed_addr, kernel_managed_size,
        return 0;
 
 out_free_gpuva_mgr:
-       drm_gpuvm_destroy(&uvmm->umgr);
+       drm_gpuvm_destroy(&uvmm->base);
 out_unlock:
        mutex_unlock(&cli->mutex);
        return ret;
        wait_event(entity->job.wq, list_empty(&entity->job.list.head));
 
        nouveau_uvmm_lock(uvmm);
-       drm_gpuvm_for_each_va_safe(va, next, &uvmm->umgr) {
+       drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
                struct nouveau_uvma *uvma = uvma_from_va(va);
                struct drm_gem_object *obj = va->gem.obj;
 
-               if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+               if (unlikely(va == &uvmm->base.kernel_alloc_node))
                        continue;
 
                drm_gpuva_remove(va);
 
        mutex_lock(&cli->mutex);
        nouveau_vmm_fini(&uvmm->vmm);
-       drm_gpuvm_destroy(&uvmm->umgr);
+       drm_gpuvm_destroy(&uvmm->base);
        mutex_unlock(&cli->mutex);
 
        dma_resv_fini(&uvmm->resv);
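The common thread through all of these hunks is that the DRM GPU VM embedded in nouveau's per-client VM is now reached through a member named base instead of umgr. A rough sketch of the layout this implies, limited to the fields this excerpt actually touches (field types, ordering, and anything marked omitted are assumptions, not taken from the patch):

    /* Sketch of struct nouveau_uvmm as implied by this diff; only
     * members referenced above are shown.
     */
    struct nouveau_uvmm {
            struct nouveau_vmm vmm;          /* nouveau_vmm_fini(&uvmm->vmm) */
            struct drm_gpuvm base;           /* formerly "umgr"; passed to drm_gpuvm_*() */

            u64 kernel_managed_addr;         /* kernel-reserved VA window handed */
            u64 kernel_managed_size;         /* to drm_gpuvm_init()              */

            struct dma_resv resv;            /* dma_resv_fini(&uvmm->resv) */
            /* locks, region tracking, etc. omitted */
    };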