        return ret;
 }
 
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
-                          void *data,
-                          struct drm_file *file_priv)
-{
-       struct nouveau_cli *cli = nouveau_cli(file_priv);
-       struct drm_nouveau_vm_init *init = data;
-
-       return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
-                                init->kernel_managed_size);
-}
-
 static int
 nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
 {
@@ … @@
 }
 
 int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
-                 u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+                          void *data,
+                          struct drm_file *file_priv)
 {
+       struct nouveau_uvmm *uvmm;
+       struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_device *drm = cli->drm->dev;
        struct drm_gem_object *r_obj;
-       u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
+       struct drm_nouveau_vm_init *init = data;
+       u64 kernel_managed_end;
        int ret;
 
-       mutex_init(&uvmm->mutex);
-       mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
-       mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+       if (check_add_overflow(init->kernel_managed_addr,
+                              init->kernel_managed_size,
+                              &kernel_managed_end))
+               return -EINVAL;
+
+       if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+               return -EINVAL;
 
        mutex_lock(&cli->mutex);
 
@@ … @@
                goto out_unlock;
        }
 
-       if (kernel_managed_end <= kernel_managed_addr) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
-               ret = -EINVAL;
+       uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+       if (!uvmm) {
+               ret = -ENOMEM;
                goto out_unlock;
        }
 
        r_obj = drm_gpuvm_resv_object_alloc(drm);
        if (!r_obj) {
+               kfree(uvmm);
                ret = -ENOMEM;
                goto out_unlock;
        }
 
+       mutex_init(&uvmm->mutex);
+       mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+       mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+
        drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
                       NOUVEAU_VA_SPACE_START,
                       NOUVEAU_VA_SPACE_END,
-                      kernel_managed_addr, kernel_managed_size,
+                      init->kernel_managed_addr,
+                      init->kernel_managed_size,
                       NULL);
        /* GPUVM takes care from here on. */
        drm_gem_object_put(r_obj);
 
        ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
                            cli->vmm.vmm.object.oclass, RAW,
-                           kernel_managed_addr, kernel_managed_size,
-                           NULL, 0, &cli->uvmm.vmm.vmm);
+                           init->kernel_managed_addr,
+                           init->kernel_managed_size,
+                           NULL, 0, &uvmm->vmm.vmm);
        if (ret)
                goto out_gpuvm_fini;
 
-       cli->uvmm.vmm.cli = cli;
+       uvmm->vmm.cli = cli;
+       cli->uvmm.ptr = uvmm;
        mutex_unlock(&cli->mutex);
 
        return 0;
 
 out_gpuvm_fini:
        drm_gpuvm_destroy(&uvmm->base);
+       kfree(uvmm);
 out_unlock:
        mutex_unlock(&cli->mutex);
        return ret;
@@ … @@
        struct nouveau_cli *cli = uvmm->vmm.cli;
        struct nouveau_sched_entity *entity = &cli->sched_entity;
        struct drm_gpuva *va, *next;
 
-       if (!cli)
-               return;
-
        rmb(); /* for list_empty to work without lock */
        wait_event(entity->job.wq, list_empty(&entity->job.list.head));
 
        mutex_lock(&cli->mutex);
        nouveau_vmm_fini(&uvmm->vmm);
        drm_gpuvm_destroy(&uvmm->base);
+       kfree(uvmm);
        mutex_unlock(&cli->mutex);
 }
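
A note on the validation above: with a plain u64 addition, a large
kernel_managed_size wraps kernel_managed_end around past zero, so the
"> NOUVEAU_VA_SPACE_END" bound never fires. The following is a minimal
standalone sketch of that failure mode, built on the
__builtin_add_overflow() compiler builtin that the kernel's
check_add_overflow() macro wraps; the bound constant below is a
stand-in, not the driver's value.

/*
 * Illustrative sketch, not part of the patch: why the addr + size
 * bounds check needs check_add_overflow(). A plain addition lets a
 * huge size wrap the end address past zero and slip under the bound.
 */
#include <stdint.h>
#include <stdio.h>

#define VA_SPACE_END 0x8000000000000000ULL /* stand-in bound */

static int validate_range(uint64_t addr, uint64_t size)
{
        uint64_t end;

        /* Rejects the wrapped case that "addr + size" would hide. */
        if (__builtin_add_overflow(addr, size, &end))
                return -1;

        if (end > VA_SPACE_END)
                return -1;

        return 0;
}

int main(void)
{
        /* Plain addition: 0x1000 + (2^64 - 1) wraps to 0xfff. */
        printf("%d\n", validate_range(0x1000, UINT64_MAX)); /* -1 */
        printf("%d\n", validate_range(0x1000, 0x2000));     /*  0 */
        return 0;
}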
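The structural change here is that struct nouveau_uvmm is no longer
embedded in struct nouveau_cli but allocated per VM_INIT ioctl, and
cli->uvmm.ptr is set only once construction has fully succeeded, with
every error path freeing the allocation again. Below is a userspace
sketch of that allocate-construct-publish pattern with the same goto
unwinding; all names are invented for the sketch, not the driver's API.

/*
 * Illustrative sketch, not the driver's code: heap-allocate the
 * object, construct it fully, and only then publish the pointer
 * under the client lock; every error path unwinds and frees.
 */
#include <pthread.h>
#include <stdlib.h>
#include <errno.h>

struct vmm {                    /* stand-in for struct nouveau_uvmm */
        int placeholder;
};

struct client {                 /* stand-in for struct nouveau_cli */
        pthread_mutex_t lock;
        struct vmm *vmm;        /* published pointer, guarded by lock */
};

static int client_vm_init(struct client *cli)
{
        struct vmm *vmm;
        int ret;

        pthread_mutex_lock(&cli->lock);

        if (cli->vmm) {                 /* already initialized */
                ret = -EEXIST;
                goto out_unlock;
        }

        vmm = calloc(1, sizeof(*vmm));  /* kzalloc() stand-in */
        if (!vmm) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        /* ... construct vmm; on failure free(vmm), goto out_unlock ... */

        cli->vmm = vmm;                 /* publish only when fully set up */
        pthread_mutex_unlock(&cli->lock);
        return 0;

out_unlock:
        pthread_mutex_unlock(&cli->lock);
        return ret;
}

Because the pointer is published last and only under the lock, any
reader that takes the lock sees either NULL or a fully constructed
object, which is in the same spirit as the "if (!cli)" guard the patch
drops from the fini path above.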
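Finally, the fini path waits for the scheduler entity's job list to
drain (the wait_event() above) before destroying and freeing the uvmm,
since freeing first would let in-flight jobs touch freed memory. Below
is a compact userspace analogue of that drain-before-free ordering,
with a condition variable standing in for wait_event(); again, all
names are invented for the sketch.

/*
 * Drain-before-free sketch, not the driver's API: workers decrement a
 * pending-job counter; teardown blocks until it reaches zero and only
 * then frees the shared state, mirroring the wait_event() in fini.
 */
#include <pthread.h>
#include <stdlib.h>

struct job_queue {
        pthread_mutex_t lock;
        pthread_cond_t  idle;   /* signalled when pending drops to 0 */
        int             pending;
};

static void job_done(struct job_queue *q)
{
        pthread_mutex_lock(&q->lock);
        if (--q->pending == 0)
                pthread_cond_signal(&q->idle);
        pthread_mutex_unlock(&q->lock);
}

static void queue_fini(struct job_queue *q)
{
        pthread_mutex_lock(&q->lock);
        while (q->pending > 0)          /* wait_event() analogue */
                pthread_cond_wait(&q->idle, &q->lock);
        pthread_mutex_unlock(&q->lock);

        /* No job can still reference q; only now is freeing safe. */
        pthread_cond_destroy(&q->idle);
        pthread_mutex_destroy(&q->lock);
        free(q);
}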