static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
                           size_t page_size, bool *is_vram)
 {
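+       /* A NULL VMA is not backed by memory, so there is no DMA address */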
+       if (xe_vma_is_null(vma)) {
+               *is_vram = false;
+               return 0;
+       }
+
        if (xe_vma_is_userptr(vma)) {
                struct xe_res_cursor cur;
                u64 page;
        if (unlikely(flags & XE_PTE_FLAG_READ_ONLY))
                pte &= ~XE_PAGE_RW;
 
+       /* NULL PTEs have no backing memory; the NULL bit marks them for HW */
+       if (unlikely(flags & XE_PTE_FLAG_NULL))
+               pte |= XE_PTE_NULL;
+
        /* FIXME: I don't think the PPAT handling is correct for MTL */
 
        switch (cache) {
        if (next - xe_walk->va_curs_start > xe_walk->curs->size)
                return false;
 
+       /* NULL VMAs do not have DMA addresses */
+       if (xe_walk->pte_flags & XE_PTE_FLAG_NULL)
+               return true;
+
        /* Is the DMA address huge PTE size aligned? */
        size = next - addr;
        dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
        if (next > xe_walk->l0_end_addr)
                return false;
 
+       /* NULL VMAs do not have DMA addresses */
+       if (xe_walk->pte_flags & XE_PTE_FLAG_NULL)
+               return true;
+
        xe_res_next(&curs, addr - xe_walk->va_curs_start);
        for (; addr < next; addr += SZ_64K) {
                if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
        /* Is this a leaf entry? */
        if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
                struct xe_res_cursor *curs = xe_walk->curs;
+               bool is_null = xe_walk->pte_flags & XE_PTE_FLAG_NULL;
 
                XE_WARN_ON(xe_walk->va_curs_start != addr);
 
-               pte = __pte_encode(xe_res_dma(curs) + xe_walk->dma_offset,
+               /* NULL VMAs have no backing store, so encode a zero address */
+               pte = __pte_encode(is_null ? 0 :
+                                  xe_res_dma(curs) + xe_walk->dma_offset,
                                   xe_walk->cache, xe_walk->pte_flags,
                                   level);
                pte |= xe_walk->default_pte;
                if (unlikely(ret))
                        return ret;
 
-               xe_res_next(curs, next - addr);
+               /* A NULL VMA has no resource cursor to advance */
+               if (!is_null)
+                       xe_res_next(curs, next - addr);
                xe_walk->va_curs_start = next;
                *action = ACTION_CONTINUE;
 
                xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
                xe_walk.cache = XE_CACHE_WB;
        } else {
-               if (!xe_vma_is_userptr(vma) && bo->flags & XE_BO_SCANOUT_BIT)
+               if (!xe_vma_has_no_bo(vma) && bo->flags & XE_BO_SCANOUT_BIT)
                        xe_walk.cache = XE_CACHE_WT;
                else
                        xe_walk.cache = XE_CACHE_WB;
        }
-       if (!xe_vma_is_userptr(vma) && xe_bo_is_stolen(bo))
+       if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
                xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
 
        xe_bo_assert_held(bo);
-       if (xe_vma_is_userptr(vma))
-               xe_res_first_sg(vma->userptr.sg, 0, vma->end - vma->start + 1,
-                               &curs);
-       else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
-               xe_res_first(bo->ttm.resource, vma->bo_offset,
-                            vma->end - vma->start + 1, &curs);
-       else
-               xe_res_first_sg(xe_bo_get_sg(bo), vma->bo_offset,
-                               vma->end - vma->start + 1, &curs);
+
+       if (!xe_vma_is_null(vma)) {
+               if (xe_vma_is_userptr(vma))
+                       xe_res_first_sg(vma->userptr.sg, 0,
+                                       vma->end - vma->start + 1, &curs);
+               else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
+                       xe_res_first(bo->ttm.resource, vma->bo_offset,
+                                    vma->end - vma->start + 1, &curs);
+               else
+                       xe_res_first_sg(xe_bo_get_sg(bo), vma->bo_offset,
+                                       vma->end - vma->start + 1, &curs);
+       } else {
+               /*
+                * NULL VMAs have no backing resource; only the cursor size is
+                * needed so the page-table walk covers the whole range.
+                */
+               curs.size = vma->end - vma->start + 1;
+       }
 
        ret = xe_pt_walk_range(&pt->base, pt->level, vma->start, vma->end + 1,
                                &xe_walk.base);
 
        if (xe_vma_is_userptr(vma))
                lockdep_assert_held_read(&vm->userptr.notifier_lock);
-       else
+       else if (!xe_vma_is_null(vma))
                dma_resv_assert_held(vma->bo->ttm.base.resv);
 
        dma_resv_assert_held(&vm->resv);
                                   DMA_RESV_USAGE_KERNEL :
                                   DMA_RESV_USAGE_BOOKKEEP);
 
-               if (!xe_vma_is_userptr(vma) && !vma->bo->vm)
+               if (!xe_vma_has_no_bo(vma) && !vma->bo->vm)
                        dma_resv_add_fence(vma->bo->ttm.base.resv, fence,
                                           DMA_RESV_USAGE_BOOKKEEP);
                xe_pt_commit_bind(vma, entries, num_entries, rebind,
                                   DMA_RESV_USAGE_BOOKKEEP);
 
                /* This fence will be installed by caller when doing eviction */
-               if (!xe_vma_is_userptr(vma) && !vma->bo->vm)
+               if (!xe_vma_has_no_bo(vma) && !vma->bo->vm)
                        dma_resv_add_fence(vma->bo->ttm.base.resv, fence,
                                           DMA_RESV_USAGE_BOOKKEEP);
                xe_pt_commit_unbind(vma, entries, num_entries,
 
                goto out_unlock;
 
        list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
-               if (xe_vma_is_userptr(vma) || vma->destroyed)
+               if (xe_vma_has_no_bo(vma) || vma->destroyed)
                        continue;
 
                err = xe_bo_validate(vma->bo, vm, false);
                                    u64 bo_offset_or_userptr,
                                    u64 start, u64 end,
                                    bool read_only,
+                                   bool is_null,
                                    u64 tile_mask)
 {
        struct xe_vma *vma;
        vma->vm = vm;
        vma->start = start;
        vma->end = end;
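+       /* pte_flags now carries multiple properties, so start from zero */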
+       vma->pte_flags = 0;
        if (read_only)
-               vma->pte_flags = XE_PTE_FLAG_READ_ONLY;
+               vma->pte_flags |= XE_PTE_FLAG_READ_ONLY;
+       if (is_null)
+               vma->pte_flags |= XE_PTE_FLAG_NULL;
 
        if (tile_mask) {
                vma->tile_mask = tile_mask;
                vma->bo_offset = bo_offset_or_userptr;
                vma->bo = xe_bo_get(bo);
                list_add_tail(&vma->bo_link, &bo->vmas);
-       } else /* userptr */ {
-               u64 size = end - start + 1;
-               int err;
+       } else /* userptr or null */ {
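+               /* NULL VMAs have no userptr to track, so skip the MMU notifier */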
+               if (!is_null) {
+                       u64 size = end - start + 1;
+                       int err;
 
-               vma->userptr.ptr = bo_offset_or_userptr;
+                       vma->userptr.ptr = bo_offset_or_userptr;
 
-               err = mmu_interval_notifier_insert(&vma->userptr.notifier,
-                                                  current->mm,
-                                                  vma->userptr.ptr, size,
-                                                  &vma_userptr_notifier_ops);
-               if (err) {
-                       kfree(vma);
-                       vma = ERR_PTR(err);
-                       return vma;
+                       err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+                                                          current->mm,
+                                                          vma->userptr.ptr, size,
+                                                          &vma_userptr_notifier_ops);
+                       if (err) {
+                               kfree(vma);
+                               vma = ERR_PTR(err);
+                               return vma;
+                       }
+
+                       vma->userptr.notifier_seq = LONG_MAX;
                }
 
-               vma->userptr.notifier_seq = LONG_MAX;
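+               /* Both userptr and NULL VMAs hold a reference on the VM */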
                xe_vm_get(vm);
        }
 
                 */
                mmu_interval_notifier_remove(&vma->userptr.notifier);
                xe_vm_put(vm);
+       } else if (xe_vma_is_null(vma)) {
+               /* NULL VMAs hold a VM reference but no BO or notifier */
+               xe_vm_put(vm);
        } else {
                xe_bo_put(vma->bo);
        }
                list_del_init(&vma->userptr.invalidate_link);
                spin_unlock(&vm->userptr.invalidated_lock);
                list_del(&vma->userptr_link);
-       } else {
+       } else if (!xe_vma_is_null(vma)) {
                xe_bo_assert_held(vma->bo);
                list_del(&vma->bo_link);
 
        while (vm->vmas.rb_node) {
                struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
 
-               if (xe_vma_is_userptr(vma)) {
+               if (xe_vma_has_no_bo(vma)) {
                        down_read(&vm->userptr.notifier_lock);
                        vma->destroyed = true;
                        up_read(&vm->userptr.notifier_lock);
                rb_erase(&vma->vm_node, &vm->vmas);
 
                /* easy case, remove from VMA? */
-               if (xe_vma_is_userptr(vma) || vma->bo->vm) {
+               if (xe_vma_has_no_bo(vma) || vma->bo->vm) {
                        xe_vma_destroy(vma, NULL);
                        continue;
                }
 
        XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
 
-       if (!xe_vma_is_userptr(vma)) {
+       if (!xe_vma_has_no_bo(vma)) {
                err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
                if (err)
                        return err;
                                          lookup->start - 1,
                                          (first->pte_flags &
                                           XE_PTE_FLAG_READ_ONLY),
+                                         (first->pte_flags &
+                                          XE_PTE_FLAG_NULL),
                                          first->tile_mask);
                if (first->bo)
                        xe_bo_unlock(first->bo, &ww);
                        err = -ENOMEM;
                        goto unwind;
                }
-               if (!first->bo) {
+               if (xe_vma_is_userptr(first)) {
                        err = xe_vma_userptr_pin_pages(new_first);
                        if (err)
                                goto unwind;
                                         last->end,
                                         (last->pte_flags &
                                          XE_PTE_FLAG_READ_ONLY),
+                                        (last->pte_flags & XE_PTE_FLAG_NULL),
                                         last->tile_mask);
                if (last->bo)
                        xe_bo_unlock(last->bo, &ww);
                        err = -ENOMEM;
                        goto unwind;
                }
-               if (!last->bo) {
+               if (xe_vma_is_userptr(last)) {
                        err = xe_vma_userptr_pin_pages(new_last);
                        if (err)
                                goto unwind;
                      *next;
        struct rb_node *node;
 
-       if (!xe_vma_is_userptr(vma)) {
+       if (!xe_vma_has_no_bo(vma)) {
                if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
                        return ERR_PTR(-EINVAL);
        }
        while ((node = rb_next(node))) {
                if (!xe_vma_cmp_vma_cb(lookup, node)) {
                        __vma = to_xe_vma(node);
-                       if (!xe_vma_is_userptr(__vma)) {
+                       if (!xe_vma_has_no_bo(__vma)) {
                                if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
                                        goto flush_list;
                        }
        while ((node = rb_prev(node))) {
                if (!xe_vma_cmp_vma_cb(lookup, node)) {
                        __vma = to_xe_vma(node);
-                       if (!xe_vma_is_userptr(__vma)) {
+                       if (!xe_vma_has_no_bo(__vma)) {
                                if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
                                        goto flush_list;
                        }
 
        switch (VM_BIND_OP(op)) {
        case XE_VM_BIND_OP_MAP:
-               XE_BUG_ON(!bo);
-
-               err = xe_bo_lock(bo, &ww, 0, true);
-               if (err)
-                       return ERR_PTR(err);
+               if (bo) {
+                       err = xe_bo_lock(bo, &ww, 0, true);
+                       if (err)
+                               return ERR_PTR(err);
+               }
                vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
                                    addr + range - 1,
                                    op & XE_VM_BIND_FLAG_READONLY,
+                                   op & XE_VM_BIND_FLAG_NULL,
                                    tile_mask);
-               xe_bo_unlock(bo, &ww);
+               if (bo)
+                       xe_bo_unlock(bo, &ww);
                if (!vma)
                        return ERR_PTR(-ENOMEM);
 
                xe_vm_insert_vma(vm, vma);
-               if (!bo->vm) {
+               if (bo && !bo->vm) {
                        vm_insert_extobj(vm, vma);
                        err = add_preempt_fences(vm, bo);
                        if (err) {
                vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
                                    addr + range - 1,
                                    op & XE_VM_BIND_FLAG_READONLY,
+                                   op & XE_VM_BIND_FLAG_NULL,
                                    tile_mask);
                if (!vma)
                        return ERR_PTR(-ENOMEM);
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS        \
        (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
-        XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
+        XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
+        XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS        \
        (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
-        XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
+        XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
 
                u32 obj = (*bind_ops)[i].obj;
                u64 obj_offset = (*bind_ops)[i].obj_offset;
                u32 region = (*bind_ops)[i].region;
+               bool is_null = op & XE_VM_BIND_FLAG_NULL;
 
                if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
                    XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
                if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
                                 XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
+                   XE_IOCTL_ERR(xe, obj && is_null) ||
+                   XE_IOCTL_ERR(xe, obj_offset && is_null) ||
+                   XE_IOCTL_ERR(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
+                                is_null) ||
                    XE_IOCTL_ERR(xe, !obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
+                                VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
+                                !is_null) ||
                    XE_IOCTL_ERR(xe, !obj &&
                                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_ERR(xe, addr &&
        int ret;
 
        XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
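+       /* NULL VMAs have no backing store, so there is nothing to invalidate */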
+       XE_WARN_ON(xe_vma_is_null(vma));
        trace_xe_vma_usm_invalidate(vma);
 
        /* Check that we don't race with page-table updates */
        for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
                struct xe_vma *vma = to_xe_vma(node);
                bool is_userptr = xe_vma_is_userptr(vma);
+               bool is_null = xe_vma_is_null(vma);
 
-               if (is_userptr) {
+               if (is_null) {
+                       addr = 0;
+               } else if (is_userptr) {
                        struct xe_res_cursor cur;
 
                        if (vma->userptr.sg) {
                }
                drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
                           vma->start, vma->end, vma->end - vma->start + 1ull,
-                          addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
+                          addr, is_null ? "NULL" : is_userptr ? "USR" :
+                          is_vram ? "VRAM" : "SYS");
        }
        up_read(&vm->lock);