drm/xe: Convert multiple bind ops into single job
author Matthew Brost <matthew.brost@intel.com>
Thu, 4 Jul 2024 04:16:49 +0000 (21:16 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Thu, 4 Jul 2024 05:28:04 +0000 (22:28 -0700)
This aligns with the uAPI, where an array of binds, or a single bind that
results in multiple GPUVA ops, is considered a single atomic operation.

The design is roughly:
- xe_vma_ops is a list of xe_vma_op (GPUVA op)
- each xe_vma_op resolves to 0-3 PT ops
- xe_vma_ops creates a single job
- if at any point during binding a failure occurs, xe_vma_ops contains the
  information necessary to unwind the PT and VMA (GPUVA) state
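
A minimal, hypothetical sketch of the nesting described above (the struct and
function names here are illustrative only; the real definitions live in
xe_pt_types.h and xe_vm_types.h and carry far more state):

	#include <stdbool.h>

	/*
	 * Illustrative stand-in for struct xe_vm_pgtable_update_op: a GPUVA
	 * op resolves to 0-3 of these, each carrying its page-table entries.
	 */
	struct pt_op {
		bool bind;                /* bind vs. unbind/clear */
		unsigned int num_entries; /* page-table updates in this PT op */
	};

	/* Illustrative stand-in for the per-tile struct xe_vm_pgtable_update_ops. */
	struct pt_update_ops {
		struct pt_op *ops;
		unsigned int num_ops;
	};

	static void emit_entry(const struct pt_op *op, unsigned int idx)
	{
		/* Placeholder for populate()/clear() of one pgtable update. */
	}

	/* A single bind job walks every PT op of every GPUVA op. */
	static void emit_single_job(const struct pt_update_ops *pt_update_ops)
	{
		unsigned int i, j;

		for (i = 0; i < pt_update_ops->num_ops; ++i)
			for (j = 0; j < pt_update_ops->ops[i].num_entries; ++j)
				emit_entry(&pt_update_ops->ops[i], j);
	}

This mirrors the double loop used by xe_migrate_update_pgtables_cpu() and
__xe_migrate_update_pgtables() in the diff below.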

v2:
 - add missing dma-resv slot reservation (CI, testing)
v4:
 - Fix TLB invalidation (Paulo)
 - Add missing xe_sched_job_last_fence_add/test_dep check (Inspection)
v5:
 - Invert i, j usage (Matthew Auld)
 - Add helper to test and add job dep (Matthew Auld)
 - Return on anything but -ETIME for cpu bind (Matthew Auld)
 - Return -ENOBUFS if suballoc of BB fails due to size (Matthew Auld)
 - s/do/Do (Matthew Auld)
 - Add missing comma (Matthew Auld)
 - Do not assign return value to xe_range_fence_insert (Matthew Auld)
v6:
 - s/0x1ff/MAX_PTE_PER_SDI (Matthew Auld, CI)
 - Check for a too-large SA in Xe to avoid triggering WARN (Matthew Auld)
 - Fix checkpatch issues
v7:
 - Rebase
 - Support more than 510 PTE updates in a bind job (Paulo, mesa testing)
v8:
 - Rebase

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-5-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_bo_types.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_migrate.h
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_pt.h
drivers/gpu/drm/xe/xe_pt_types.h
drivers/gpu/drm/xe/xe_sa.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 86422e113d39625043e16780a831b718f3464584..02d68873558a6db40fe7ce8491f2ea1413585b1e 100644
@@ -58,6 +58,8 @@ struct xe_bo {
 #endif
        /** @freed: List node for delayed put. */
        struct llist_node freed;
+       /** @update_index: Update index if PT BO */
+       int update_index;
        /** @created: Whether the bo has passed initial creation */
        bool created;
 
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ef5ad0efc5ddf17b098ef07f6a2e3d64cb484cbe..fa23a7e7ec4352470762b3404c9e3316077dc06d 100644
@@ -1125,6 +1125,7 @@ err_sync:
 }
 
 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+                         const struct xe_vm_pgtable_update_op *pt_op,
                          const struct xe_vm_pgtable_update *update,
                          struct xe_migrate_pt_update *pt_update)
 {
@@ -1159,8 +1160,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
                bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
                bb->cs[bb->len++] = lower_32_bits(addr);
                bb->cs[bb->len++] = upper_32_bits(addr);
-               ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
-                             update);
+               if (pt_op->bind)
+                       ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
+                                     ofs, chunk, update);
+               else
+                       ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
+                                  ofs, chunk, update);
 
                bb->len += chunk * 2;
                ofs += chunk;
@@ -1185,114 +1190,58 @@ struct migrate_test_params {
 
 static struct dma_fence *
 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
-                              struct xe_vm *vm, struct xe_bo *bo,
-                              const struct  xe_vm_pgtable_update *updates,
-                              u32 num_updates, bool wait_vm,
                               struct xe_migrate_pt_update *pt_update)
 {
        XE_TEST_DECLARE(struct migrate_test_params *test =
                        to_migrate_test_params
                        (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
-       struct dma_fence *fence;
+       struct xe_vm *vm = pt_update->vops->vm;
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &pt_update->vops->pt_update_ops[pt_update->tile_id];
        int err;
-       u32 i;
+       u32 i, j;
 
        if (XE_TEST_ONLY(test && test->force_gpu))
                return ERR_PTR(-ETIME);
 
-       if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
-                                         DMA_RESV_USAGE_KERNEL))
-               return ERR_PTR(-ETIME);
-
-       if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
-                                              DMA_RESV_USAGE_BOOKKEEP))
-               return ERR_PTR(-ETIME);
-
        if (ops->pre_commit) {
                pt_update->job = NULL;
                err = ops->pre_commit(pt_update);
                if (err)
                        return ERR_PTR(err);
        }
-       for (i = 0; i < num_updates; i++) {
-               const struct xe_vm_pgtable_update *update = &updates[i];
-
-               ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
-                             update->ofs, update->qwords, update);
-       }
-
-       if (vm) {
-               trace_xe_vm_cpu_bind(vm);
-               xe_device_wmb(vm->xe);
-       }
-
-       fence = dma_fence_get_stub();
-
-       return fence;
-}
 
-static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
-                       struct xe_sync_entry *syncs, u32 num_syncs)
-{
-       struct dma_fence *fence;
-       int i;
-
-       for (i = 0; i < num_syncs; i++) {
-               fence = syncs[i].fence;
-
-               if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                      &fence->flags))
-                       return false;
-       }
-       if (q) {
-               fence = xe_exec_queue_last_fence_get(q, vm);
-               if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-                       dma_fence_put(fence);
-                       return false;
+       for (i = 0; i < pt_update_ops->num_ops; ++i) {
+               const struct xe_vm_pgtable_update_op *pt_op =
+                       &pt_update_ops->ops[i];
+
+               for (j = 0; j < pt_op->num_entries; j++) {
+                       const struct xe_vm_pgtable_update *update =
+                               &pt_op->entries[j];
+
+                       if (pt_op->bind)
+                               ops->populate(pt_update, m->tile,
+                                             &update->pt_bo->vmap, NULL,
+                                             update->ofs, update->qwords,
+                                             update);
+                       else
+                               ops->clear(pt_update, m->tile,
+                                          &update->pt_bo->vmap, NULL,
+                                          update->ofs, update->qwords, update);
                }
-               dma_fence_put(fence);
        }
 
-       return true;
+       trace_xe_vm_cpu_bind(vm);
+       xe_device_wmb(vm->xe);
+
+       return dma_fence_get_stub();
 }
 
-/**
- * xe_migrate_update_pgtables() - Pipelined page-table update
- * @m: The migrate context.
- * @vm: The vm we'll be updating.
- * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @q: The exec queue to be used for the update or NULL if the default
- * migration engine is to be used.
- * @updates: An array of update descriptors.
- * @num_updates: Number of descriptors in @updates.
- * @syncs: Array of xe_sync_entry to await before updating. Note that waits
- * will block the engine timeline.
- * @num_syncs: Number of entries in @syncs.
- * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
- * pointers to callback functions and, if subclassed, private arguments to
- * those.
- *
- * Perform a pipelined page-table update. The update descriptors are typically
- * built under the same lock critical section as a call to this function. If
- * using the default engine for the updates, they will be performed in the
- * order they grab the job_mutex. If different engines are used, external
- * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
- * touch the same page-table, which might be a higher-level page-directory.
- * If no pipelining is needed, then updates may be performed by the cpu.
- *
- * Return: A dma_fence that, when signaled, indicates the update completion.
- */
-struct dma_fence *
-xe_migrate_update_pgtables(struct xe_migrate *m,
-                          struct xe_vm *vm,
-                          struct xe_bo *bo,
-                          struct xe_exec_queue *q,
-                          const struct xe_vm_pgtable_update *updates,
-                          u32 num_updates,
-                          struct xe_sync_entry *syncs, u32 num_syncs,
-                          struct xe_migrate_pt_update *pt_update)
+static struct dma_fence *
+__xe_migrate_update_pgtables(struct xe_migrate *m,
+                            struct xe_migrate_pt_update *pt_update,
+                            struct xe_vm_pgtable_update_ops *pt_update_ops)
 {
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
        struct xe_tile *tile = m->tile;
@@ -1301,59 +1250,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
        struct xe_sched_job *job;
        struct dma_fence *fence;
        struct drm_suballoc *sa_bo = NULL;
-       struct xe_vma *vma = pt_update->vma;
        struct xe_bb *bb;
-       u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+       u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
+       u32 num_updates = 0, current_update = 0;
        u64 addr;
        int err = 0;
-       bool usm = !q && xe->info.has_usm;
-       bool first_munmap_rebind = vma &&
-               vma->gpuva.flags & XE_VMA_FIRST_REBIND;
-       struct xe_exec_queue *q_override = !q ? m->q : q;
-       u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+       bool is_migrate = pt_update_ops->q == m->q;
+       bool usm = is_migrate && xe->info.has_usm;
+
+       for (i = 0; i < pt_update_ops->num_ops; ++i) {
+               struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+               struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+               num_updates += pt_op->num_entries;
+               for (j = 0; j < pt_op->num_entries; ++j) {
+                       u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
+                                                   MAX_PTE_PER_SDI);
 
-       /* Use the CPU if no in syncs and engine is idle */
-       if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
-               fence =  xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
-                                                       num_updates,
-                                                       first_munmap_rebind,
-                                                       pt_update);
-               if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
-                       return fence;
+                       /* align noop + MI_STORE_DATA_IMM cmd prefix */
+                       batch_size += 4 * num_cmds + updates[j].qwords * 2;
+               }
        }
 
        /* fixed + PTE entries */
        if (IS_DGFX(xe))
-               batch_size = 2;
+               batch_size += 2;
        else
-               batch_size = 6 + num_updates * 2;
-
-       for (i = 0; i < num_updates; i++) {
-               u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
+               batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
+                       num_updates * 2;
 
-               /* align noop + MI_STORE_DATA_IMM cmd prefix */
-               batch_size += 4 * num_cmds + updates[i].qwords * 2;
-       }
-
-       /*
-        * XXX: Create temp bo to copy from, if batch_size becomes too big?
-        *
-        * Worst case: Sum(2 * (each lower level page size) + (top level page size))
-        * Should be reasonably bound..
-        */
-       xe_tile_assert(tile, batch_size < SZ_128K);
-
-       bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+       bb = xe_bb_new(gt, batch_size, usm);
        if (IS_ERR(bb))
                return ERR_CAST(bb);
 
        /* For sysmem PTE's, need to map them in our hole.. */
        if (!IS_DGFX(xe)) {
+               u32 ptes, ofs;
+
                ppgtt_ofs = NUM_KERNEL_PDE - 1;
-               if (q) {
-                       xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+               if (!is_migrate) {
+                       u32 num_units = DIV_ROUND_UP(num_updates,
+                                                    NUM_VMUSA_WRITES_PER_UNIT);
 
-                       sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+                       if (num_units > m->vm_update_sa.size) {
+                               err = -ENOBUFS;
+                               goto err_bb;
+                       }
+                       sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
                                                 GFP_KERNEL, true, 0);
                        if (IS_ERR(sa_bo)) {
                                err = PTR_ERR(sa_bo);
@@ -1369,18 +1312,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
                }
 
                /* Map our PT's to gtt */
-               bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
-               bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
-               bb->cs[bb->len++] = 0; /* upper_32_bits */
-
-               for (i = 0; i < num_updates; i++) {
-                       struct xe_bo *pt_bo = updates[i].pt_bo;
+               i = 0;
+               j = 0;
+               ptes = num_updates;
+               ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+               while (ptes) {
+                       u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+                       u32 idx = 0;
+
+                       bb->cs[bb->len++] = MI_STORE_DATA_IMM |
+                               MI_SDI_NUM_QW(chunk);
+                       bb->cs[bb->len++] = ofs;
+                       bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+                       for (; i < pt_update_ops->num_ops; ++i) {
+                               struct xe_vm_pgtable_update_op *pt_op =
+                                       &pt_update_ops->ops[i];
+                               struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+                               for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
+                                       struct xe_vm *vm = pt_update->vops->vm;
+                                       struct xe_bo *pt_bo = updates[j].pt_bo;
+
+                                       if (idx == chunk)
+                                               goto next_cmd;
+
+                                       xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+                                       /* Map a PT at most once */
+                                       if (pt_bo->update_index < 0)
+                                               pt_bo->update_index = current_update;
+
+                                       addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+                                                                        XE_CACHE_WB, 0);
+                                       bb->cs[bb->len++] = lower_32_bits(addr);
+                                       bb->cs[bb->len++] = upper_32_bits(addr);
+                               }
 
-                       xe_tile_assert(tile, pt_bo->size == SZ_4K);
+                               j = 0;
+                       }
 
-                       addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
-                       bb->cs[bb->len++] = lower_32_bits(addr);
-                       bb->cs[bb->len++] = upper_32_bits(addr);
+next_cmd:
+                       ptes -= chunk;
+                       ofs += chunk * sizeof(u64);
                }
 
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1388,19 +1362,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
                addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
                        (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
-               for (i = 0; i < num_updates; i++)
-                       write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
-                                     &updates[i], pt_update);
+               for (i = 0; i < pt_update_ops->num_ops; ++i) {
+                       struct xe_vm_pgtable_update_op *pt_op =
+                               &pt_update_ops->ops[i];
+                       struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+                       for (j = 0; j < pt_op->num_entries; ++j) {
+                               struct xe_bo *pt_bo = updates[j].pt_bo;
+
+                               write_pgtable(tile, bb, addr +
+                                             pt_bo->update_index * XE_PAGE_SIZE,
+                                             pt_op, &updates[j], pt_update);
+                       }
+               }
        } else {
                /* phys pages, no preamble required */
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;
 
-               for (i = 0; i < num_updates; i++)
-                       write_pgtable(tile, bb, 0, &updates[i], pt_update);
+               for (i = 0; i < pt_update_ops->num_ops; ++i) {
+                       struct xe_vm_pgtable_update_op *pt_op =
+                               &pt_update_ops->ops[i];
+                       struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+                       for (j = 0; j < pt_op->num_entries; ++j)
+                               write_pgtable(tile, bb, 0, pt_op, &updates[j],
+                                             pt_update);
+               }
        }
 
-       job = xe_bb_create_migration_job(q ?: m->q, bb,
+       job = xe_bb_create_migration_job(pt_update_ops->q, bb,
                                         xe_migrate_batch_base(m, usm),
                                         update_idx);
        if (IS_ERR(job)) {
@@ -1408,46 +1399,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
                goto err_sa;
        }
 
-       /* Wait on BO move */
-       if (bo) {
-               err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
-                                           DMA_RESV_USAGE_KERNEL);
-               if (err)
-                       goto err_job;
-       }
-
-       /*
-        * Munmap style VM unbind, need to wait for all jobs to be complete /
-        * trigger preempts before moving forward
-        */
-       if (first_munmap_rebind) {
-               err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
-                                           DMA_RESV_USAGE_BOOKKEEP);
-               if (err)
-                       goto err_job;
-       }
-
-       err = xe_sched_job_last_fence_add_dep(job, vm);
-       for (i = 0; !err && i < num_syncs; i++)
-               err = xe_sync_entry_add_deps(&syncs[i], job);
-
-       if (err)
-               goto err_job;
-
        if (ops->pre_commit) {
                pt_update->job = job;
                err = ops->pre_commit(pt_update);
                if (err)
                        goto err_job;
        }
-       if (!q)
+       if (is_migrate)
                mutex_lock(&m->job_mutex);
 
        xe_sched_job_arm(job);
        fence = dma_fence_get(&job->drm.s_fence->finished);
        xe_sched_job_push(job);
 
-       if (!q)
+       if (is_migrate)
                mutex_unlock(&m->job_mutex);
 
        xe_bb_free(bb, fence);
@@ -1464,6 +1429,40 @@ err_bb:
        return ERR_PTR(err);
 }
 
+/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaing of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+                          struct xe_migrate_pt_update *pt_update)
+
+{
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &pt_update->vops->pt_update_ops[pt_update->tile_id];
+       struct dma_fence *fence;
+
+       fence =  xe_migrate_update_pgtables_cpu(m, pt_update);
+
+       /* -ETIME indicates a job is needed, anything else is legit error */
+       if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
+               return fence;
+
+       return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
+}
+
 /**
  * xe_migrate_wait() - Complete all operations using the xe_migrate context
  * @m: Migrate context to wait for.
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index a5bcaafe4a99d7d58b8f22d95f1774aa280d658b..453e0ecf503487533e89380ba44712008634f285 100644
@@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops {
                         struct xe_tile *tile, struct iosys_map *map,
                         void *pos, u32 ofs, u32 num_qwords,
                         const struct xe_vm_pgtable_update *update);
+       /**
+        * @clear: Clear a command buffer or page-table with ptes.
+        * @pt_update: Embeddable callback argument.
+        * @tile: The tile for the current operation.
+        * @map: struct iosys_map into the memory to be populated.
+        * @pos: If @map is NULL, map into the memory to be populated.
+        * @ofs: qword offset into @map, unused if @map is NULL.
+        * @num_qwords: Number of qwords to write.
+        * @update: Information about the PTEs to be inserted.
+        *
+        * This interface is intended to be used as a callback into the
+        * page-table system to populate command buffers or shared
+        * page-tables with PTEs.
+        */
+       void (*clear)(struct xe_migrate_pt_update *pt_update,
+                     struct xe_tile *tile, struct iosys_map *map,
+                     void *pos, u32 ofs, u32 num_qwords,
+                     const struct xe_vm_pgtable_update *update);
 
        /**
         * @pre_commit: Callback to be called just before arming the
@@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops {
 struct xe_migrate_pt_update {
        /** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
        const struct xe_migrate_pt_update_ops *ops;
-       /** @vma: The vma we're updating the pagetable for. */
-       struct xe_vma *vma;
+       /** @vops: VMA operations */
+       struct xe_vma_ops *vops;
        /** @job: The job if a GPU page-table update. NULL otherwise */
        struct xe_sched_job *job;
-       /** @start: Start of update for the range fence */
-       u64 start;
-       /** @last: Last of update for the range fence */
-       u64 last;
        /** @tile_id: Tile ID of the update */
        u8 tile_id;
 };
@@ -96,12 +110,6 @@ struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
 
 struct dma_fence *
 xe_migrate_update_pgtables(struct xe_migrate *m,
-                          struct xe_vm *vm,
-                          struct xe_bo *bo,
-                          struct xe_exec_queue *q,
-                          const struct xe_vm_pgtable_update *updates,
-                          u32 num_updates,
-                          struct xe_sync_entry *syncs, u32 num_syncs,
                           struct xe_migrate_pt_update *pt_update);
 
 void xe_migrate_wait(struct xe_migrate *m);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ade9e7a3a0adb4a480a46cbdafbafd5e32990927..f46f46d4681906ce881f1a5137d539b68e3085b9 100644
@@ -9,12 +9,15 @@
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_drm_client.h"
+#include "xe_exec_queue.h"
 #include "xe_gt.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_migrate.h"
 #include "xe_pt_types.h"
 #include "xe_pt_walk.h"
 #include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
 #include "xe_trace.h"
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
@@ -325,6 +328,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
        entry->pt = parent;
        entry->flags = 0;
        entry->qwords = 0;
+       entry->pt_bo->update_index = -1;
 
        if (alloc_entries) {
                entry->pt_entries = kmalloc_array(XE_PDES,
@@ -864,9 +868,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
 
        lockdep_assert_held(&vm->lock);
 
-       if (xe_vma_is_userptr(vma))
-               lockdep_assert_held_read(&vm->userptr.notifier_lock);
-       else if (!xe_vma_is_null(vma))
+       if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
                dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
 
        xe_vm_assert_held(vm);
@@ -888,10 +890,8 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
                if (!rebind)
                        pt->num_live += entries[i].qwords;
 
-               if (!pt->level) {
-                       kfree(entries[i].pt_entries);
+               if (!pt->level)
                        continue;
-               }
 
                pt_dir = as_xe_pt_dir(pt);
                for (j = 0; j < entries[i].qwords; j++) {
@@ -904,10 +904,18 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
 
                        pt_dir->children[j_] = &newpte->base;
                }
-               kfree(entries[i].pt_entries);
        }
 }
 
+static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
+                           u32 num_entries)
+{
+       u32 i;
+
+       for (i = 0; i < num_entries; i++)
+               kfree(entries[i].pt_entries);
+}
+
 static int
 xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
                   struct xe_vm_pgtable_update *entries, u32 *num_entries)
@@ -926,12 +934,13 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
 
 static void xe_vm_dbg_print_entries(struct xe_device *xe,
                                    const struct xe_vm_pgtable_update *entries,
-                                   unsigned int num_entries)
+                                   unsigned int num_entries, bool bind)
 #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
 {
        unsigned int i;
 
-       vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+       vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
+              num_entries);
        for (i = 0; i < num_entries; i++) {
                const struct xe_vm_pgtable_update *entry = &entries[i];
                struct xe_pt *xe_pt = entry->pt;
@@ -952,66 +961,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
 {}
 #endif
 
-#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-
-static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
 {
-       u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
-       static u32 count;
+       int i;
 
-       if (count++ % divisor == divisor - 1) {
-               struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+       for (i = 0; i < num_syncs; i++) {
+               struct dma_fence *fence = syncs[i].fence;
 
-               uvma->userptr.divisor = divisor << 1;
-               spin_lock(&vm->userptr.invalidated_lock);
-               list_move_tail(&uvma->userptr.invalidate_link,
-                              &vm->userptr.invalidated);
-               spin_unlock(&vm->userptr.invalidated_lock);
-               return true;
+               if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                      &fence->flags))
+                       return false;
        }
 
-       return false;
+       return true;
 }
 
-#else
+static int job_test_add_deps(struct xe_sched_job *job,
+                            struct dma_resv *resv,
+                            enum dma_resv_usage usage)
+{
+       if (!job) {
+               if (!dma_resv_test_signaled(resv, usage))
+                       return -ETIME;
 
-static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+               return 0;
+       }
+
+       return xe_sched_job_add_deps(job, resv, usage);
+}
+
+static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
 {
-       return false;
+       struct xe_bo *bo = xe_vma_bo(vma);
+
+       xe_bo_assert_held(bo);
+
+       if (bo && !bo->vm)
+               return job_test_add_deps(job, bo->ttm.base.resv,
+                                        DMA_RESV_USAGE_KERNEL);
+
+       return 0;
 }
 
-#endif
+static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
+                      struct xe_sched_job *job)
+{
+       int err = 0;
 
-/**
- * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
- * @base: Base we derive from.
- * @bind: Whether this is a bind or an unbind operation. A bind operation
- *        makes the pre-commit callback error with -EAGAIN if it detects a
- *        pending invalidation.
- * @locked: Whether the pre-commit callback locked the userptr notifier lock
- *          and it needs unlocking.
- */
-struct xe_pt_migrate_pt_update {
-       struct xe_migrate_pt_update base;
-       bool bind;
-       bool locked;
-};
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+                       break;
+
+               err = vma_add_deps(op->map.vma, job);
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               if (op->remap.prev)
+                       err = vma_add_deps(op->remap.prev, job);
+               if (!err && op->remap.next)
+                       err = vma_add_deps(op->remap.next, job);
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
+               break;
+       default:
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+       }
+
+       return err;
+}
 
-/*
- * This function adds the needed dependencies to a page-table update job
- * to make sure racing jobs for separate bind engines don't race writing
- * to the same page-table range, wreaking havoc. Initially use a single
- * fence for the entire VM. An optimization would use smaller granularity.
- */
 static int xe_pt_vm_dependencies(struct xe_sched_job *job,
-                                struct xe_range_fence_tree *rftree,
-                                u64 start, u64 last)
+                                struct xe_vm *vm,
+                                struct xe_vma_ops *vops,
+                                struct xe_vm_pgtable_update_ops *pt_update_ops,
+                                struct xe_range_fence_tree *rftree)
 {
        struct xe_range_fence *rtfence;
        struct dma_fence *fence;
-       int err;
+       struct xe_vma_op *op;
+       int err = 0, i;
+
+       xe_vm_assert_held(vm);
+
+       if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
+               return -ETIME;
 
-       rtfence = xe_range_fence_tree_first(rftree, start, last);
+       if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
+               return -ETIME;
+
+       if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
+               err = job_test_add_deps(job, xe_vm_resv(vm),
+                                       pt_update_ops->wait_vm_bookkeep ?
+                                       DMA_RESV_USAGE_BOOKKEEP :
+                                       DMA_RESV_USAGE_KERNEL);
+               if (err)
+                       return err;
+       }
+
+       rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
+                                           pt_update_ops->last);
        while (rtfence) {
                fence = rtfence->fence;
 
@@ -1029,80 +1080,173 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
                                return err;
                }
 
-               rtfence = xe_range_fence_tree_next(rtfence, start, last);
+               rtfence = xe_range_fence_tree_next(rtfence,
+                                                  pt_update_ops->start,
+                                                  pt_update_ops->last);
        }
 
-       return 0;
+       list_for_each_entry(op, &vops->list, link) {
+               err = op_add_deps(vm, op, job);
+               if (err)
+                       return err;
+       }
+
+       if (job)
+               err = xe_sched_job_last_fence_add_dep(job, vm);
+       else
+               err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
+
+       for (i = 0; job && !err && i < vops->num_syncs; i++)
+               err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+
+       return err;
 }
 
 static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
-       struct xe_range_fence_tree *rftree =
-               &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+       struct xe_vma_ops *vops = pt_update->vops;
+       struct xe_vm *vm = vops->vm;
+       struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &vops->pt_update_ops[pt_update->tile_id];
+
+       return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+                                    pt_update_ops, rftree);
+}
+
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+       u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+       static u32 count;
+
+       if (count++ % divisor == divisor - 1) {
+               uvma->userptr.divisor = divisor << 1;
+               return true;
+       }
 
-       return xe_pt_vm_dependencies(pt_update->job, rftree,
-                                    pt_update->start, pt_update->last);
+       return false;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
-       struct xe_pt_migrate_pt_update *userptr_update =
-               container_of(pt_update, typeof(*userptr_update), base);
-       struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
-       unsigned long notifier_seq = uvma->userptr.notifier_seq;
-       struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-       int err = xe_pt_vm_dependencies(pt_update->job,
-                                       &vm->rftree[pt_update->tile_id],
-                                       pt_update->start,
-                                       pt_update->last);
+       return false;
+}
 
-       if (err)
-               return err;
+#endif
 
-       userptr_update->locked = false;
+static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+                            struct xe_vm_pgtable_update_ops *pt_update)
+{
+       struct xe_userptr_vma *uvma;
+       unsigned long notifier_seq;
 
-       /*
-        * Wait until nobody is running the invalidation notifier, and
-        * since we're exiting the loop holding the notifier lock,
-        * nobody can proceed invalidating either.
-        *
-        * Note that we don't update the vma->userptr.notifier_seq since
-        * we don't update the userptr pages.
-        */
-       do {
-               down_read(&vm->userptr.notifier_lock);
-               if (!mmu_interval_read_retry(&uvma->userptr.notifier,
-                                            notifier_seq))
-                       break;
+       lockdep_assert_held_read(&vm->userptr.notifier_lock);
 
-               up_read(&vm->userptr.notifier_lock);
+       if (!xe_vma_is_userptr(vma))
+               return 0;
+
+       uvma = to_userptr_vma(vma);
+       notifier_seq = uvma->userptr.notifier_seq;
 
-               if (userptr_update->bind)
-                       return -EAGAIN;
+       if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+               return 0;
 
-               notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
-       } while (true);
+       if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+                                    notifier_seq) &&
+           !xe_pt_userptr_inject_eagain(uvma))
+               return 0;
 
-       /* Inject errors to test_whether they are handled correctly */
-       if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
-               up_read(&vm->userptr.notifier_lock);
+       if (xe_vm_in_fault_mode(vm)) {
                return -EAGAIN;
-       }
+       } else {
+               spin_lock(&vm->userptr.invalidated_lock);
+               list_move_tail(&uvma->userptr.invalidate_link,
+                              &vm->userptr.invalidated);
+               spin_unlock(&vm->userptr.invalidated_lock);
 
-       userptr_update->locked = true;
+               if (xe_vm_in_preempt_fence_mode(vm)) {
+                       struct dma_resv_iter cursor;
+                       struct dma_fence *fence;
+                       long err;
+
+                       dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+                                           DMA_RESV_USAGE_BOOKKEEP);
+                       dma_resv_for_each_fence_unlocked(&cursor, fence)
+                               dma_fence_enable_sw_signaling(fence);
+                       dma_resv_iter_end(&cursor);
+
+                       err = dma_resv_wait_timeout(xe_vm_resv(vm),
+                                                   DMA_RESV_USAGE_BOOKKEEP,
+                                                   false, MAX_SCHEDULE_TIMEOUT);
+                       XE_WARN_ON(err <= 0);
+               }
+       }
 
        return 0;
 }
 
-static const struct xe_migrate_pt_update_ops bind_ops = {
-       .populate = xe_vm_populate_pgtable,
-       .pre_commit = xe_pt_pre_commit,
-};
+static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+                           struct xe_vm_pgtable_update_ops *pt_update)
+{
+       int err = 0;
 
-static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
-       .populate = xe_vm_populate_pgtable,
-       .pre_commit = xe_pt_userptr_pre_commit,
-};
+       lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+                       break;
+
+               err = vma_check_userptr(vm, op->map.vma, pt_update);
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               if (op->remap.prev)
+                       err = vma_check_userptr(vm, op->remap.prev, pt_update);
+               if (!err && op->remap.next)
+                       err = vma_check_userptr(vm, op->remap.next, pt_update);
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
+                                       pt_update);
+               break;
+       default:
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+       }
+
+       return err;
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+       struct xe_vm *vm = pt_update->vops->vm;
+       struct xe_vma_ops *vops = pt_update->vops;
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &vops->pt_update_ops[pt_update->tile_id];
+       struct xe_vma_op *op;
+       int err;
+
+       err = xe_pt_pre_commit(pt_update);
+       if (err)
+               return err;
+
+       down_read(&vm->userptr.notifier_lock);
+
+       list_for_each_entry(op, &vops->list, link) {
+               err = op_check_userptr(vm, op, pt_update_ops);
+               if (err) {
+                       up_read(&vm->userptr.notifier_lock);
+                       break;
+               }
+       }
+
+       return err;
+}
 
 struct invalidation_fence {
        struct xe_gt_tlb_invalidation_fence base;
@@ -1200,190 +1344,6 @@ static int invalidation_fence_init(struct xe_gt *gt,
        return ret && ret != -ENOENT ? ret : 0;
 }
 
-static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
-                                      struct xe_pt_migrate_pt_update *update,
-                                      struct xe_vm_pgtable_update *entries,
-                                      u32 num_entries)
-{
-       int i, level = 0;
-
-       for (i = 0; i < num_entries; i++) {
-               const struct xe_vm_pgtable_update *entry = &entries[i];
-
-               if (entry->pt->level > level)
-                       level = entry->pt->level;
-       }
-
-       /* Greedy (non-optimal) calculation but simple */
-       update->base.start = ALIGN_DOWN(xe_vma_start(vma),
-                                       0x1ull << xe_pt_shift(level));
-       update->base.last = ALIGN(xe_vma_end(vma),
-                                 0x1ull << xe_pt_shift(level)) - 1;
-}
-
-/**
- * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
- * address range.
- * @tile: The tile to bind for.
- * @vma: The vma to bind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before binding the built tree to the live vm tree.
- * @num_syncs: Number of @sync entries.
- * @rebind: Whether we're rebinding this vma to the same address range without
- * an unbind in-between.
- *
- * This function builds a page-table tree (see xe_pt_stage_bind() for more
- * information on page-table building), and the xe_vm_pgtable_update entries
- * abstracting the operations needed to attach it to the main vm tree. It
- * then takes the relevant locks and updates the metadata side of the main
- * vm tree and submits the operations for pipelined attachment of the
- * gpu page-table to the vm main tree, (which can be done either by the
- * cpu and the GPU).
- *
- * Return: A valid dma-fence representing the pipelined attachment operation
- * on success, an error pointer on error.
- */
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-                struct xe_sync_entry *syncs, u32 num_syncs,
-                bool rebind)
-{
-       struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
-       struct xe_pt_migrate_pt_update bind_pt_update = {
-               .base = {
-                       .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
-                       .vma = vma,
-                       .tile_id = tile->id,
-               },
-               .bind = true,
-       };
-       struct xe_vm *vm = xe_vma_vm(vma);
-       u32 num_entries;
-       struct dma_fence *fence;
-       struct invalidation_fence *ifence = NULL;
-       struct xe_range_fence *rfence;
-       int err;
-
-       bind_pt_update.locked = false;
-       xe_bo_assert_held(xe_vma_bo(vma));
-       xe_vm_assert_held(vm);
-
-       vm_dbg(&xe_vma_vm(vma)->xe->drm,
-              "Preparing bind, with range [%llx...%llx) engine %p.\n",
-              xe_vma_start(vma), xe_vma_end(vma), q);
-
-       err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
-       if (err)
-               goto err;
-
-       err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
-       if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
-               err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
-       if (err)
-               goto err;
-
-       xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
-       xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
-       xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
-                                  num_entries);
-
-       /*
-        * If rebind, we have to invalidate TLB on !LR vms to invalidate
-        * cached PTEs point to freed memory. on LR vms this is done
-        * automatically when the context is re-enabled by the rebind worker,
-        * or in fault mode it was invalidated on PTE zapping.
-        *
-        * If !rebind, and scratch enabled VMs, there is a chance the scratch
-        * PTE is already cached in the TLB so it needs to be invalidated.
-        * on !LR VMs this is done in the ring ops preceding a batch, but on
-        * non-faulting LR, in particular on user-space batch buffer chaining,
-        * it needs to be done here.
-        */
-       if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
-               ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
-               if (!ifence)
-                       return ERR_PTR(-ENOMEM);
-       } else if (rebind && !xe_vm_in_lr_mode(vm)) {
-               /* We bump also if batch_invalidate_tlb is true */
-               vm->tlb_flush_seqno++;
-       }
-
-       rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
-       if (!rfence) {
-               kfree(ifence);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       fence = xe_migrate_update_pgtables(tile->migrate,
-                                          vm, xe_vma_bo(vma), q,
-                                          entries, num_entries,
-                                          syncs, num_syncs,
-                                          &bind_pt_update.base);
-       if (!IS_ERR(fence)) {
-               bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
-               LLIST_HEAD(deferred);
-               int err;
-
-               err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
-                                           &xe_range_fence_kfree_ops,
-                                           bind_pt_update.base.start,
-                                           bind_pt_update.base.last, fence);
-               if (err)
-                       dma_fence_wait(fence, false);
-
-               /* TLB invalidation must be done before signaling rebind */
-               if (ifence) {
-                       int err = invalidation_fence_init(tile->primary_gt,
-                                                         ifence, fence,
-                                                         xe_vma_start(vma),
-                                                         xe_vma_end(vma),
-                                                         xe_vma_vm(vma)->usm.asid);
-                       if (err) {
-                               dma_fence_put(fence);
-                               kfree(ifence);
-                               return ERR_PTR(err);
-                       }
-                       fence = &ifence->base.base;
-               }
-
-               /* add shared fence now for pagetable delayed destroy */
-               dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
-                                  last_munmap_rebind ?
-                                  DMA_RESV_USAGE_KERNEL :
-                                  DMA_RESV_USAGE_BOOKKEEP);
-
-               if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
-                       dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
-                                          DMA_RESV_USAGE_BOOKKEEP);
-               xe_pt_commit_bind(vma, entries, num_entries, rebind,
-                                 bind_pt_update.locked ? &deferred : NULL);
-
-               /* This vma is live (again?) now */
-               vma->tile_present |= BIT(tile->id);
-
-               if (bind_pt_update.locked) {
-                       to_userptr_vma(vma)->userptr.initial_bind = true;
-                       up_read(&vm->userptr.notifier_lock);
-                       xe_bo_put_commit(&deferred);
-               }
-               if (!rebind && last_munmap_rebind &&
-                   xe_vm_in_preempt_fence_mode(vm))
-                       xe_vm_queue_rebind_worker(vm);
-       } else {
-               kfree(rfence);
-               kfree(ifence);
-               if (bind_pt_update.locked)
-                       up_read(&vm->userptr.notifier_lock);
-               xe_pt_abort_bind(vma, entries, num_entries);
-       }
-
-       return fence;
-
-err:
-       return ERR_PTR(err);
-}
-
 struct xe_pt_stage_unbind_walk {
        /** @base: The pagewalk base-class. */
        struct xe_pt_walk base;
@@ -1534,8 +1494,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
                                  void *ptr, u32 qword_ofs, u32 num_qwords,
                                  const struct xe_vm_pgtable_update *update)
 {
-       struct xe_vma *vma = pt_update->vma;
-       u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+       struct xe_vm *vm = pt_update->vops->vm;
+       u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
        int i;
 
        if (map && map->is_iomem)
@@ -1579,151 +1539,487 @@ xe_pt_commit_unbind(struct xe_vma *vma,
        }
 }
 
-static const struct xe_migrate_pt_update_ops unbind_ops = {
-       .populate = xe_migrate_clear_pgtable_callback,
+static void
+xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
+                                struct xe_vma *vma)
+{
+       u32 current_op = pt_update_ops->current_op;
+       struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+       int i, level = 0;
+       u64 start, last;
+
+       for (i = 0; i < pt_op->num_entries; i++) {
+               const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
+
+               if (entry->pt->level > level)
+                       level = entry->pt->level;
+       }
+
+       /* Greedy (non-optimal) calculation but simple */
+       start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
+       last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+
+       if (start < pt_update_ops->start)
+               pt_update_ops->start = start;
+       if (last > pt_update_ops->last)
+               pt_update_ops->last = last;
+}
+
+static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
+{
+       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+               return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
+                                              xe->info.tile_count);
+
+       return 0;
+}
+
+static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
+                          struct xe_vm_pgtable_update_ops *pt_update_ops,
+                          struct xe_vma *vma)
+{
+       u32 current_op = pt_update_ops->current_op;
+       struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+       struct llist_head *deferred = &pt_update_ops->deferred;
+       int err;
+
+       xe_bo_assert_held(xe_vma_bo(vma));
+
+       vm_dbg(&xe_vma_vm(vma)->xe->drm,
+              "Preparing bind, with range [%llx...%llx)\n",
+              xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+       pt_op->vma = NULL;
+       pt_op->bind = true;
+       pt_op->rebind = BIT(tile->id) & vma->tile_present;
+
+       err = vma_reserve_fences(tile_to_xe(tile), vma);
+       if (err)
+               return err;
+
+       err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+                                &pt_op->num_entries);
+       if (!err) {
+               xe_tile_assert(tile, pt_op->num_entries <=
+                              ARRAY_SIZE(pt_op->entries));
+               xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+                                       pt_op->num_entries, true);
+
+               xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+               ++pt_update_ops->current_op;
+               pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+
+               /*
+                * If rebind, we have to invalidate TLB on !LR vms to invalidate
+                * cached PTEs point to freed memory. On LR vms this is done
+                * automatically when the context is re-enabled by the rebind worker,
+                * or in fault mode it was invalidated on PTE zapping.
+                *
+                * If !rebind, and scratch enabled VMs, there is a chance the scratch
+                * PTE is already cached in the TLB so it needs to be invalidated.
+                * On !LR VMs this is done in the ring ops preceding a batch, but on
+                * non-faulting LR, in particular on user-space batch buffer chaining,
+                * it needs to be done here.
+                */
+               if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
+                    xe_vm_in_preempt_fence_mode(vm)))
+                       pt_update_ops->needs_invalidation = true;
+               else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
+                       /* We bump also if batch_invalidate_tlb is true */
+                       vm->tlb_flush_seqno++;
+
+               /* FIXME: Don't commit right away */
+               vma->tile_staged |= BIT(tile->id);
+               pt_op->vma = vma;
+               xe_pt_commit_bind(vma, pt_op->entries, pt_op->num_entries,
+                                 pt_op->rebind, deferred);
+       }
+
+       return err;
+}
+
+static int unbind_op_prepare(struct xe_tile *tile,
+                            struct xe_vm_pgtable_update_ops *pt_update_ops,
+                            struct xe_vma *vma)
+{
+       u32 current_op = pt_update_ops->current_op;
+       struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+       struct llist_head *deferred = &pt_update_ops->deferred;
+       int err;
+
+       if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
+               return 0;
+
+       xe_bo_assert_held(xe_vma_bo(vma));
+
+       vm_dbg(&xe_vma_vm(vma)->xe->drm,
+              "Preparing unbind, with range [%llx...%llx)\n",
+              xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+       /*
+        * Wait for invalidation to complete. Can corrupt internal page table
+        * state if an invalidation is running while preparing an unbind.
+        */
+       if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
+               mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
+
+       pt_op->vma = vma;
+       pt_op->bind = false;
+       pt_op->rebind = false;
+
+       err = vma_reserve_fences(tile_to_xe(tile), vma);
+       if (err)
+               return err;
+
+       pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+
+       xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+                               pt_op->num_entries, false);
+       xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+       ++pt_update_ops->current_op;
+       pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+       pt_update_ops->needs_invalidation = true;
+
+       /* FIXME: Don't commit right away */
+       xe_pt_commit_unbind(vma, pt_op->entries, pt_op->num_entries,
+                           deferred);
+
+       return 0;
+}
+
+static int op_prepare(struct xe_vm *vm,
+                     struct xe_tile *tile,
+                     struct xe_vm_pgtable_update_ops *pt_update_ops,
+                     struct xe_vma_op *op)
+{
+       int err = 0;
+
+       xe_vm_assert_held(vm);
+
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+                       break;
+
+               err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+               pt_update_ops->wait_vm_kernel = true;
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               err = unbind_op_prepare(tile, pt_update_ops,
+                                       gpuva_to_vma(op->base.remap.unmap->va));
+
+               if (!err && op->remap.prev) {
+                       err = bind_op_prepare(vm, tile, pt_update_ops,
+                                             op->remap.prev);
+                       pt_update_ops->wait_vm_bookkeep = true;
+               }
+               if (!err && op->remap.next) {
+                       err = bind_op_prepare(vm, tile, pt_update_ops,
+                                             op->remap.next);
+                       pt_update_ops->wait_vm_bookkeep = true;
+               }
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               err = unbind_op_prepare(tile, pt_update_ops,
+                                       gpuva_to_vma(op->base.unmap.va));
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               err = bind_op_prepare(vm, tile, pt_update_ops,
+                                     gpuva_to_vma(op->base.prefetch.va));
+               pt_update_ops->wait_vm_kernel = true;
+               break;
+       default:
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+       }
+
+       return err;
+}
+
+static void
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
+{
+       init_llist_head(&pt_update_ops->deferred);
+       pt_update_ops->start = ~0x0ull;
+       pt_update_ops->last = 0x0ull;
+}
+
+/**
+ * xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare the PT update operations, which includes updating internal PT
+ * state, allocating memory for page tables, populating page tables being
+ * pruned in, and creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &vops->pt_update_ops[tile->id];
+       struct xe_vma_op *op;
+       int err;
+
+       lockdep_assert_held(&vops->vm->lock);
+       xe_vm_assert_held(vops->vm);
+
+       xe_pt_update_ops_init(pt_update_ops);
+
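+       /*
+        * Reserve a fence slot per tile in the VM dma-resv up front; the
+        * resulting job fence is installed in xe_pt_update_ops_run().
+        */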
+       err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+                                     tile_to_xe(tile)->info.tile_count);
+       if (err)
+               return err;
+
+       list_for_each_entry(op, &vops->list, link) {
+               err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+               if (err)
+                       return err;
+       }
+
+       xe_tile_assert(tile, pt_update_ops->current_op <=
+                      pt_update_ops->num_ops);
+
+       return 0;
+}
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+                          struct xe_vm_pgtable_update_ops *pt_update_ops,
+                          struct xe_vma *vma, struct dma_fence *fence)
+{
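+       /*
+        * Only external BOs have their own dma-resv and need the fence added
+        * here; VM private BOs share the VM dma-resv, which is handled in
+        * xe_pt_update_ops_run().
+        */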
+       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+               dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+                                  pt_update_ops->wait_vm_bookkeep ?
+                                  DMA_RESV_USAGE_KERNEL :
+                                  DMA_RESV_USAGE_BOOKKEEP);
+       vma->tile_present |= BIT(tile->id);
+       vma->tile_staged &= ~BIT(tile->id);
+       if (xe_vma_is_userptr(vma)) {
+               lockdep_assert_held_read(&vm->userptr.notifier_lock);
+               to_userptr_vma(vma)->userptr.initial_bind = true;
+       }
+
+       /*
+        * Kick the rebind worker if this bind triggers preempt fences and we
+        * are not in the rebind worker.
+        */
+       if (pt_update_ops->wait_vm_bookkeep &&
+           xe_vm_in_preempt_fence_mode(vm) &&
+           !current->mm)
+               xe_vm_queue_rebind_worker(vm);
+}
+
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+                            struct xe_vm_pgtable_update_ops *pt_update_ops,
+                            struct xe_vma *vma, struct dma_fence *fence)
+{
+       if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+               dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+                                  pt_update_ops->wait_vm_bookkeep ?
+                                  DMA_RESV_USAGE_KERNEL :
+                                  DMA_RESV_USAGE_BOOKKEEP);
+       vma->tile_present &= ~BIT(tile->id);
+       if (!vma->tile_present) {
+               list_del_init(&vma->combined_links.rebind);
+               if (xe_vma_is_userptr(vma)) {
+                       lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+                       spin_lock(&vm->userptr.invalidated_lock);
+                       list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+                       spin_unlock(&vm->userptr.invalidated_lock);
+               }
+       }
+}
+
+static void op_commit(struct xe_vm *vm,
+                     struct xe_tile *tile,
+                     struct xe_vm_pgtable_update_ops *pt_update_ops,
+                     struct xe_vma_op *op, struct dma_fence *fence)
+{
+       xe_vm_assert_held(vm);
+
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+                       break;
+
+               bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               unbind_op_commit(vm, tile, pt_update_ops,
+                                gpuva_to_vma(op->base.remap.unmap->va), fence);
+
+               if (op->remap.prev)
+                       bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+                                      fence);
+               if (op->remap.next)
+                       bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+                                      fence);
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               unbind_op_commit(vm, tile, pt_update_ops,
+                                gpuva_to_vma(op->base.unmap.va), fence);
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               bind_op_commit(vm, tile, pt_update_ops,
+                              gpuva_to_vma(op->base.prefetch.va), fence);
+               break;
+       default:
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+       }
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+       .populate = xe_vm_populate_pgtable,
+       .clear = xe_migrate_clear_pgtable_callback,
        .pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
-       .populate = xe_migrate_clear_pgtable_callback,
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+       .populate = xe_vm_populate_pgtable,
+       .clear = xe_migrate_clear_pgtable_callback,
        .pre_commit = xe_pt_userptr_pre_commit,
 };
 
 /**
- * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
- * address range.
- * @tile: The tile to unbind for.
- * @vma: The vma to unbind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
- * @num_syncs: Number of @sync entries.
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
  *
- * This function builds a the xe_vm_pgtable_update entries abstracting the
- * operations needed to detach the page-table tree to be destroyed from the
- * man vm tree.
- * It then takes the relevant locks and submits the operations for
- * pipelined detachment of the gpu page-table from  the vm main tree,
- * (which can be done either by the cpu and the GPU), Finally it frees the
- * detached page-table tree.
+ * Run the PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations (leaf insertion /
+ * removal), and installing the job fence in various places.
  *
- * Return: A valid dma-fence representing the pipelined detachment operation
- * on success, an error pointer on error.
+ * Return: fence on success, an error pointer on failure.
  */
 struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-                  struct xe_sync_entry *syncs, u32 num_syncs)
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 {
-       struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
-       struct xe_pt_migrate_pt_update unbind_pt_update = {
-               .base = {
-                       .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
-                       &unbind_ops,
-                       .vma = vma,
-                       .tile_id = tile->id,
-               },
-       };
-       struct xe_vm *vm = xe_vma_vm(vma);
-       u32 num_entries;
-       struct dma_fence *fence = NULL;
-       struct invalidation_fence *ifence;
+       struct xe_vm *vm = vops->vm;
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &vops->pt_update_ops[tile->id];
+       struct dma_fence *fence;
+       struct invalidation_fence *ifence = NULL;
        struct xe_range_fence *rfence;
-       int err;
-
-       LLIST_HEAD(deferred);
+       struct xe_vma_op *op;
+       int err = 0;
+       struct xe_migrate_pt_update update = {
+               .ops = pt_update_ops->needs_userptr_lock ?
+                       &userptr_migrate_ops :
+                       &migrate_ops,
+               .vops = vops,
+               .tile_id = tile->id,
+       };
 
-       xe_bo_assert_held(xe_vma_bo(vma));
+       lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       vm_dbg(&xe_vma_vm(vma)->xe->drm,
-              "Preparing unbind, with range [%llx...%llx) engine %p.\n",
-              xe_vma_start(vma), xe_vma_end(vma), q);
-
-       num_entries = xe_pt_stage_unbind(tile, vma, entries);
-       xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
-       xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
-       xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
-                                  num_entries);
+       if (!pt_update_ops->current_op) {
+               xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
 
-       err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
-       if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
-               err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
-       if (err)
-               return ERR_PTR(err);
+               return dma_fence_get_stub();
+       }
 
-       ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
-       if (!ifence)
-               return ERR_PTR(-ENOMEM);
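+       /*
+        * Allocate the TLB invalidation fence up front so an allocation
+        * failure is handled before any job is created or submitted.
+        */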
+       if (pt_update_ops->needs_invalidation) {
+               ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+               if (!ifence)
+                       return ERR_PTR(-ENOMEM);
+       }
 
        rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
        if (!rfence) {
-               kfree(ifence);
-               return ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
+               goto free_ifence;
        }
 
-       /*
-        * Even if we were already evicted and unbind to destroy, we need to
-        * clear again here. The eviction may have updated pagetables at a
-        * lower level, because it needs to be more conservative.
-        */
-       fence = xe_migrate_update_pgtables(tile->migrate,
-                                          vm, NULL, q ? q :
-                                          vm->q[tile->id],
-                                          entries, num_entries,
-                                          syncs, num_syncs,
-                                          &unbind_pt_update.base);
-       if (!IS_ERR(fence)) {
-               int err;
-
-               err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
-                                           &xe_range_fence_kfree_ops,
-                                           unbind_pt_update.base.start,
-                                           unbind_pt_update.base.last, fence);
-               if (err)
-                       dma_fence_wait(fence, false);
+       fence = xe_migrate_update_pgtables(tile->migrate, &update);
+       if (IS_ERR(fence)) {
+               err = PTR_ERR(fence);
+               goto free_rfence;
+       }
 
-               /* TLB invalidation must be done before signaling unbind */
+       if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+                                 &xe_range_fence_kfree_ops,
+                                 pt_update_ops->start,
+                                 pt_update_ops->last, fence))
+               dma_fence_wait(fence, false);
+
+       /* TLB invalidation must be done before signaling rebind */
+       if (ifence) {
                err = invalidation_fence_init(tile->primary_gt, ifence, fence,
-                                             xe_vma_start(vma),
-                                             xe_vma_end(vma),
-                                             xe_vma_vm(vma)->usm.asid);
-               if (err) {
-                       dma_fence_put(fence);
-                       kfree(ifence);
-                       return ERR_PTR(err);
-               }
+                                             pt_update_ops->start,
+                                             pt_update_ops->last,
+                                             vm->usm.asid);
+               if (err)
+                       goto put_fence;
                fence = &ifence->base.base;
+       }
 
-               /* add shared fence now for pagetable delayed destroy */
-               dma_resv_add_fence(xe_vm_resv(vm), fence,
-                                  DMA_RESV_USAGE_BOOKKEEP);
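+       /*
+        * Install the fence in the VM dma-resv: in the kernel slot when all
+        * future VM activity must be staged behind these updates
+        * (wait_vm_bookkeep), otherwise in the bookkeep slot.
+        */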
+       dma_resv_add_fence(xe_vm_resv(vm), fence,
+                          pt_update_ops->wait_vm_bookkeep ?
+                          DMA_RESV_USAGE_KERNEL :
+                          DMA_RESV_USAGE_BOOKKEEP);
 
-               /* This fence will be installed by caller when doing eviction */
-               if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
-                       dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
-                                          DMA_RESV_USAGE_BOOKKEEP);
-               xe_pt_commit_unbind(vma, entries, num_entries,
-                                   unbind_pt_update.locked ? &deferred : NULL);
-               vma->tile_present &= ~BIT(tile->id);
-       } else {
-               kfree(rfence);
-               kfree(ifence);
-       }
+       list_for_each_entry(op, &vops->list, link)
+               op_commit(vops->vm, tile, pt_update_ops, op, fence);
 
-       if (!vma->tile_present)
-               list_del_init(&vma->combined_links.rebind);
+       if (pt_update_ops->needs_userptr_lock)
+               up_read(&vm->userptr.notifier_lock);
 
-       if (unbind_pt_update.locked) {
-               xe_tile_assert(tile, xe_vma_is_userptr(vma));
+       return fence;
 
-               if (!vma->tile_present) {
-                       spin_lock(&vm->userptr.invalidated_lock);
-                       list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
-                       spin_unlock(&vm->userptr.invalidated_lock);
-               }
+put_fence:
+       if (pt_update_ops->needs_userptr_lock)
                up_read(&vm->userptr.notifier_lock);
-               xe_bo_put_commit(&deferred);
+       dma_fence_put(fence);
+free_rfence:
+       kfree(rfence);
+free_ifence:
+       kfree(ifence);
+
+       return ERR_PTR(err);
+}
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing the destruction of page table memory.
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+       struct xe_vm_pgtable_update_ops *pt_update_ops =
+               &vops->pt_update_ops[tile->id];
+       int i;
+
+       lockdep_assert_held(&vops->vm->lock);
+       xe_vm_assert_held(vops->vm);
+
+       /* FIXME: Not 100% correct */
+       for (i = 0; i < pt_update_ops->num_ops; ++i) {
+               struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+               if (pt_op->bind)
+                       xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
        }
+       xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
 
-       return fence;
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state.
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+       lockdep_assert_held(&vops->vm->lock);
+       xe_vm_assert_held(vops->vm);
+
+       /* FIXME: Just kill VM for now + cleanup PTs */
+       xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+       xe_vm_kill(vops->vm, false);
 }
index 71a4fbfcff43a4db53d0ff138cdfffd194cffdbe..9ab386431caddb3d1172c5ecfa34f842df97d052 100644 (file)
@@ -17,6 +17,7 @@ struct xe_sync_entry;
 struct xe_tile;
 struct xe_vm;
 struct xe_vma;
+struct xe_vma_ops;
 
 /* Largest huge pte is currently 1GiB. May become device dependent. */
 #define MAX_HUGEPTE_LEVEL 2
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
 
 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
 
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-                struct xe_sync_entry *syncs, u32 num_syncs,
-                bool rebind);
-
-struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-                  struct xe_sync_entry *syncs, u32 num_syncs);
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+                                      struct xe_vma_ops *vops);
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
 
 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
 
index 2093150f461e42a14fdccba8944dcb4775e373fd..384cc04de71946f33c43e44bb96199565d542307 100644 (file)
@@ -78,6 +78,8 @@ struct xe_vm_pgtable_update {
 struct xe_vm_pgtable_update_op {
        /** @entries: entries to update for this operation */
        struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+       /** @vma: VMA for operation, operation not valid if NULL */
+       struct xe_vma *vma;
        /** @num_entries: number of entries for this update operation */
        u32 num_entries;
        /** @bind: is a bind */
@@ -86,4 +88,38 @@ struct xe_vm_pgtable_update_op {
        bool rebind;
 };
 
+/** struct xe_vm_pgtable_update_ops - page table update operations */
+struct xe_vm_pgtable_update_ops {
+       /** @ops: operations */
+       struct xe_vm_pgtable_update_op *ops;
+       /** @deferred: deferred list to destroy PT entries */
+       struct llist_head deferred;
+       /** @q: exec queue for PT operations */
+       struct xe_exec_queue *q;
+       /** @start: start address of ops */
+       u64 start;
+       /** @last: last address of ops */
+       u64 last;
+       /** @num_ops: number of operations */
+       u32 num_ops;
+       /** @current_op: current operation */
+       u32 current_op;
+       /** @needs_userptr_lock: Needs userptr lock */
+       bool needs_userptr_lock;
+       /** @needs_invalidation: Needs invalidation */
+       bool needs_invalidation;
+       /**
+        * @wait_vm_bookkeep: PT operations need to wait until VM is idle
+        * (bookkeep dma-resv slots are idle) and stage all future VM activity
+        * behind these operations (install PT operations into VM kernel
+        * dma-resv slot).
+        */
+       bool wait_vm_bookkeep;
+       /**
+        * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
+        * slots are idle.
+        */
+       bool wait_vm_kernel;
+};
+
 #endif
index 8941522b7705d7ddd9f996f064ff709744280e92..f3060979e63f052e5edc5eea589c65df682ee8de 100644 (file)
@@ -84,6 +84,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
                                  unsigned int size)
 {
+       /*
+        * BB too large; return -ENOBUFS, indicating the user should split the
+        * array of binds into smaller chunks.
+        */
+       if (size > sa_manager->base.size)
+               return ERR_PTR(-ENOBUFS);
+
        return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
 }
 
index 6677874af5a444fcbde8eede35c93331bad9c266..73cc6b0efcef7a494d631f8a39ba24e1f3d161bb 100644 (file)
@@ -313,7 +313,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
 
 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
 
-/*
+/**
  * xe_vm_kill() - VM Kill
  * @vm: The VM.
  * @unlocked: Flag indicating the VM's dma-resv is not held
@@ -321,7 +321,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
  * Kill the VM by setting the banned flag, indicating the VM is no longer
  * available for use. If in preempt fence mode, also kill all exec queues
  * attached to the VM.
  */
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
 {
        struct xe_exec_queue *q;
 
@@ -798,7 +798,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
        struct xe_vma *vma, *next;
        struct xe_vma_ops vops;
        struct xe_vma_op *op, *next_op;
-       int err;
+       int err, i;
 
        lockdep_assert_held(&vm->lock);
        if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
@@ -806,6 +806,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
                return 0;
 
        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
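+       /*
+        * Rebinds wait for the VM to be idle and stage all future VM activity
+        * behind them (wait_vm_bookkeep).
+        */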
+       for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+               vops.pt_update_ops[i].wait_vm_bookkeep = true;
 
        xe_vm_assert_held(vm);
        list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
@@ -850,6 +852,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
        struct dma_fence *fence = NULL;
        struct xe_vma_ops vops;
        struct xe_vma_op *op, *next_op;
+       struct xe_tile *tile;
+       u8 id;
        int err;
 
        lockdep_assert_held(&vm->lock);
@@ -857,6 +861,11 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
        xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+       for_each_tile(tile, vm->xe, id) {
+               vops.pt_update_ops[id].wait_vm_bookkeep = true;
+               vops.pt_update_ops[tile->id].q =
+                       xe_tile_migrate_exec_queue(tile);
+       }
 
        err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
        if (err)
@@ -1697,147 +1706,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
        return q ? q : vm->q[0];
 }
 
-static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
-                struct xe_sync_entry *syncs, u32 num_syncs,
-                bool first_op, bool last_op)
-{
-       struct xe_vm *vm = xe_vma_vm(vma);
-       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-       struct xe_tile *tile;
-       struct dma_fence *fence = NULL;
-       struct dma_fence **fences = NULL;
-       struct dma_fence_array *cf = NULL;
-       int cur_fence = 0;
-       int number_tiles = hweight8(vma->tile_present);
-       int err;
-       u8 id;
-
-       trace_xe_vma_unbind(vma);
-
-       if (number_tiles > 1) {
-               fences = kmalloc_array(number_tiles, sizeof(*fences),
-                                      GFP_KERNEL);
-               if (!fences)
-                       return ERR_PTR(-ENOMEM);
-       }
-
-       for_each_tile(tile, vm->xe, id) {
-               if (!(vma->tile_present & BIT(id)))
-                       goto next;
-
-               fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
-                                          first_op ? syncs : NULL,
-                                          first_op ? num_syncs : 0);
-               if (IS_ERR(fence)) {
-                       err = PTR_ERR(fence);
-                       goto err_fences;
-               }
-
-               if (fences)
-                       fences[cur_fence++] = fence;
-
-next:
-               if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
-                       q = list_next_entry(q, multi_gt_list);
-       }
-
-       if (fences) {
-               cf = dma_fence_array_create(number_tiles, fences,
-                                           vm->composite_fence_ctx,
-                                           vm->composite_fence_seqno++,
-                                           false);
-               if (!cf) {
-                       --vm->composite_fence_seqno;
-                       err = -ENOMEM;
-                       goto err_fences;
-               }
-       }
-
-       fence = cf ? &cf->base : !fence ?
-               xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-
-       return fence;
-
-err_fences:
-       if (fences) {
-               while (cur_fence)
-                       dma_fence_put(fences[--cur_fence]);
-               kfree(fences);
-       }
-
-       return ERR_PTR(err);
-}
-
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
-              struct xe_sync_entry *syncs, u32 num_syncs,
-              u8 tile_mask, bool first_op, bool last_op)
-{
-       struct xe_tile *tile;
-       struct dma_fence *fence;
-       struct dma_fence **fences = NULL;
-       struct dma_fence_array *cf = NULL;
-       struct xe_vm *vm = xe_vma_vm(vma);
-       int cur_fence = 0;
-       int number_tiles = hweight8(tile_mask);
-       int err;
-       u8 id;
-
-       trace_xe_vma_bind(vma);
-
-       if (number_tiles > 1) {
-               fences = kmalloc_array(number_tiles, sizeof(*fences),
-                                      GFP_KERNEL);
-               if (!fences)
-                       return ERR_PTR(-ENOMEM);
-       }
-
-       for_each_tile(tile, vm->xe, id) {
-               if (!(tile_mask & BIT(id)))
-                       goto next;
-
-               fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
-                                        first_op ? syncs : NULL,
-                                        first_op ? num_syncs : 0,
-                                        vma->tile_present & BIT(id));
-               if (IS_ERR(fence)) {
-                       err = PTR_ERR(fence);
-                       goto err_fences;
-               }
-
-               if (fences)
-                       fences[cur_fence++] = fence;
-
-next:
-               if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
-                       q = list_next_entry(q, multi_gt_list);
-       }
-
-       if (fences) {
-               cf = dma_fence_array_create(number_tiles, fences,
-                                           vm->composite_fence_ctx,
-                                           vm->composite_fence_seqno++,
-                                           false);
-               if (!cf) {
-                       --vm->composite_fence_seqno;
-                       err = -ENOMEM;
-                       goto err_fences;
-               }
-       }
-
-       return cf ? &cf->base : fence;
-
-err_fences:
-       if (fences) {
-               while (cur_fence)
-                       dma_fence_put(fences[--cur_fence]);
-               kfree(fences);
-       }
-
-       return ERR_PTR(err);
-}
-
 static struct xe_user_fence *
 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
 {
@@ -1853,48 +1721,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
        return NULL;
 }
 
-static struct dma_fence *
-xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
-          struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
-          u8 tile_mask, bool immediate, bool first_op, bool last_op)
-{
-       struct dma_fence *fence;
-       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
-       xe_vm_assert_held(vm);
-       xe_bo_assert_held(bo);
-
-       if (immediate) {
-               fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
-                                      first_op, last_op);
-               if (IS_ERR(fence))
-                       return fence;
-       } else {
-               xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
-
-               fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-       }
-
-       return fence;
-}
-
-static struct dma_fence *
-xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
-            struct xe_exec_queue *q, struct xe_sync_entry *syncs,
-            u32 num_syncs, bool first_op, bool last_op)
-{
-       struct dma_fence *fence;
-
-       xe_vm_assert_held(vm);
-       xe_bo_assert_held(xe_vma_bo(vma));
-
-       fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
-       if (IS_ERR(fence))
-               return fence;
-
-       return fence;
-}
-
 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
                                    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
                                    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
@@ -2035,21 +1861,6 @@ static const u32 region_to_mem_type[] = {
        XE_PL_VRAM1,
 };
 
-static struct dma_fence *
-xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-              struct xe_exec_queue *q, struct xe_sync_entry *syncs,
-              u32 num_syncs, bool first_op, bool last_op)
-{
-       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
-       if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
-               return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
-                                 vma->tile_mask, true, first_op, last_op);
-       } else {
-               return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-       }
-}
-
 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
                             bool post_commit)
 {
@@ -2337,13 +2148,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
        return err;
 }
 
-static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
-                                  struct drm_gpuva_ops *ops,
-                                  struct xe_sync_entry *syncs, u32 num_syncs,
-                                  struct xe_vma_ops *vops, bool last)
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
+                                  struct xe_vma_ops *vops)
 {
        struct xe_device *xe = vm->xe;
-       struct xe_vma_op *last_op = NULL;
        struct drm_gpuva_op *__op;
        struct xe_tile *tile;
        u8 id, tile_mask = 0;
@@ -2357,19 +2165,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
        drm_gpuva_for_each_op(__op, ops) {
                struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
                struct xe_vma *vma;
-               bool first = list_empty(&vops->list);
                unsigned int flags = 0;
 
                INIT_LIST_HEAD(&op->link);
                list_add_tail(&op->link, &vops->list);
-
-               if (first) {
-                       op->flags |= XE_VMA_OP_FIRST;
-                       op->num_syncs = num_syncs;
-                       op->syncs = syncs;
-               }
-
-               op->q = q;
                op->tile_mask = tile_mask;
 
                switch (op->base.op) {
@@ -2482,197 +2281,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                }
                case DRM_GPUVA_OP_UNMAP:
                case DRM_GPUVA_OP_PREFETCH:
+                       /* FIXME: Need to skip some prefetch ops */
                        xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
                        break;
                default:
                        drm_warn(&vm->xe->drm, "NOT POSSIBLE");
                }
 
-               last_op = op;
-
                err = xe_vma_op_commit(vm, op);
                if (err)
                        return err;
        }
 
-       /* FIXME: Unhandled corner case */
-       XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
-
-       if (!last_op)
-               return 0;
-
-       if (last) {
-               last_op->flags |= XE_VMA_OP_LAST;
-               last_op->num_syncs = num_syncs;
-               last_op->syncs = syncs;
-       }
-
        return 0;
 }
 
-static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
-                                   struct xe_vma_op *op)
-{
-       struct dma_fence *fence = NULL;
-
-       lockdep_assert_held(&vm->lock);
-
-       xe_vm_assert_held(vm);
-       xe_bo_assert_held(xe_vma_bo(vma));
-
-       switch (op->base.op) {
-       case DRM_GPUVA_OP_MAP:
-               fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
-                                  op->syncs, op->num_syncs,
-                                  op->tile_mask,
-                                  op->map.immediate || !xe_vm_in_fault_mode(vm),
-                                  op->flags & XE_VMA_OP_FIRST,
-                                  op->flags & XE_VMA_OP_LAST);
-               break;
-       case DRM_GPUVA_OP_REMAP:
-       {
-               bool prev = !!op->remap.prev;
-               bool next = !!op->remap.next;
-
-               if (!op->remap.unmap_done) {
-                       if (prev || next)
-                               vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
-                       fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
-                                            op->num_syncs,
-                                            op->flags & XE_VMA_OP_FIRST,
-                                            op->flags & XE_VMA_OP_LAST &&
-                                            !prev && !next);
-                       if (IS_ERR(fence))
-                               break;
-                       op->remap.unmap_done = true;
-               }
-
-               if (prev) {
-                       op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
-                       dma_fence_put(fence);
-                       fence = xe_vm_bind(vm, op->remap.prev, op->q,
-                                          xe_vma_bo(op->remap.prev), op->syncs,
-                                          op->num_syncs,
-                                          op->remap.prev->tile_mask, true,
-                                          false,
-                                          op->flags & XE_VMA_OP_LAST && !next);
-                       op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
-                       if (IS_ERR(fence))
-                               break;
-                       op->remap.prev = NULL;
-               }
-
-               if (next) {
-                       op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
-                       dma_fence_put(fence);
-                       fence = xe_vm_bind(vm, op->remap.next, op->q,
-                                          xe_vma_bo(op->remap.next),
-                                          op->syncs, op->num_syncs,
-                                          op->remap.next->tile_mask, true,
-                                          false, op->flags & XE_VMA_OP_LAST);
-                       op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
-                       if (IS_ERR(fence))
-                               break;
-                       op->remap.next = NULL;
-               }
-
-               break;
-       }
-       case DRM_GPUVA_OP_UNMAP:
-               fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
-                                    op->num_syncs, op->flags & XE_VMA_OP_FIRST,
-                                    op->flags & XE_VMA_OP_LAST);
-               break;
-       case DRM_GPUVA_OP_PREFETCH:
-               fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
-                                      op->flags & XE_VMA_OP_FIRST,
-                                      op->flags & XE_VMA_OP_LAST);
-               break;
-       default:
-               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
-       }
-
-       if (IS_ERR(fence))
-               trace_xe_vma_fail(vma);
-
-       return fence;
-}
-
-static struct dma_fence *
-__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
-                   struct xe_vma_op *op)
-{
-       struct dma_fence *fence;
-       int err;
-
-retry_userptr:
-       fence = op_execute(vm, vma, op);
-       if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
-               lockdep_assert_held_write(&vm->lock);
-
-               if (op->base.op == DRM_GPUVA_OP_REMAP) {
-                       if (!op->remap.unmap_done)
-                               vma = gpuva_to_vma(op->base.remap.unmap->va);
-                       else if (op->remap.prev)
-                               vma = op->remap.prev;
-                       else
-                               vma = op->remap.next;
-               }
-
-               if (xe_vma_is_userptr(vma)) {
-                       err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
-                       if (!err)
-                               goto retry_userptr;
-
-                       fence = ERR_PTR(err);
-                       trace_xe_vma_fail(vma);
-               }
-       }
-
-       return fence;
-}
-
-static struct dma_fence *
-xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
-{
-       struct dma_fence *fence = ERR_PTR(-ENOMEM);
-
-       lockdep_assert_held(&vm->lock);
-
-       switch (op->base.op) {
-       case DRM_GPUVA_OP_MAP:
-               fence = __xe_vma_op_execute(vm, op->map.vma, op);
-               break;
-       case DRM_GPUVA_OP_REMAP:
-       {
-               struct xe_vma *vma;
-
-               if (!op->remap.unmap_done)
-                       vma = gpuva_to_vma(op->base.remap.unmap->va);
-               else if (op->remap.prev)
-                       vma = op->remap.prev;
-               else
-                       vma = op->remap.next;
-
-               fence = __xe_vma_op_execute(vm, vma, op);
-               break;
-       }
-       case DRM_GPUVA_OP_UNMAP:
-               fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
-                                           op);
-               break;
-       case DRM_GPUVA_OP_PREFETCH:
-               fence = __xe_vma_op_execute(vm,
-                                           gpuva_to_vma(op->base.prefetch.va),
-                                           op);
-               break;
-       default:
-               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
-       }
-
-       return fence;
-}
-
 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
                             bool post_commit, bool prev_post_commit,
                             bool next_post_commit)
@@ -2858,23 +2481,110 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
        return 0;
 }
 
+static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+       struct xe_exec_queue *q = vops->q;
+       struct xe_tile *tile;
+       int number_tiles = 0;
+       u8 id;
+
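+       /*
+        * For tiles without an exec queue already assigned, use the supplied
+        * queue (walking its multi-GT list) or fall back to the VM's default
+        * queue for that tile.
+        */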
+       for_each_tile(tile, vm->xe, id) {
+               if (vops->pt_update_ops[id].num_ops)
+                       ++number_tiles;
+
+               if (vops->pt_update_ops[id].q)
+                       continue;
+
+               if (q) {
+                       vops->pt_update_ops[id].q = q;
+                       if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+                               q = list_next_entry(q, multi_gt_list);
+               } else {
+                       vops->pt_update_ops[id].q = vm->q[id];
+               }
+       }
+
+       return number_tiles;
+}
+
 static struct dma_fence *ops_execute(struct xe_vm *vm,
                                     struct xe_vma_ops *vops)
 {
-       struct xe_vma_op *op, *next;
+       struct xe_tile *tile;
        struct dma_fence *fence = NULL;
+       struct dma_fence **fences = NULL;
+       struct dma_fence_array *cf = NULL;
+       int number_tiles = 0, current_fence = 0, err;
+       u8 id;
 
-       list_for_each_entry_safe(op, next, &vops->list, link) {
-               dma_fence_put(fence);
-               fence = xe_vma_op_execute(vm, op);
-               if (IS_ERR(fence)) {
-                       drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
-                                op->base.op, PTR_ERR(fence));
-                       fence = ERR_PTR(-ENOSPC);
-                       break;
+       number_tiles = vm_ops_setup_tile_args(vm, vops);
+       if (number_tiles == 0)
+               return ERR_PTR(-ENODATA);
+
+       if (number_tiles > 1) {
+               fences = kmalloc_array(number_tiles, sizeof(*fences),
+                                      GFP_KERNEL);
+               if (!fences)
+                       return ERR_PTR(-ENOMEM);
+       }
+
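+       /*
+        * Two phases: first prepare (stage) the PT updates for every tile
+        * with work to do, then run them, collecting one job fence per tile.
+        */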
+       for_each_tile(tile, vm->xe, id) {
+               if (!vops->pt_update_ops[id].num_ops)
+                       continue;
+
+               err = xe_pt_update_ops_prepare(tile, vops);
+               if (err) {
+                       fence = ERR_PTR(err);
+                       goto err_out;
                }
        }
 
+       for_each_tile(tile, vm->xe, id) {
+               if (!vops->pt_update_ops[id].num_ops)
+                       continue;
+
+               fence = xe_pt_update_ops_run(tile, vops);
+               if (IS_ERR(fence))
+                       goto err_out;
+
+               if (fences)
+                       fences[current_fence++] = fence;
+       }
+
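+       /* Combine the per-tile job fences into a single composite fence */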
+       if (fences) {
+               cf = dma_fence_array_create(number_tiles, fences,
+                                           vm->composite_fence_ctx,
+                                           vm->composite_fence_seqno++,
+                                           false);
+               if (!cf) {
+                       --vm->composite_fence_seqno;
+                       fence = ERR_PTR(-ENOMEM);
+                       goto err_out;
+               }
+               fence = &cf->base;
+       }
+
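+       /* Commit destruction of stale page-table memory on each tile */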
+       for_each_tile(tile, vm->xe, id) {
+               if (!vops->pt_update_ops[id].num_ops)
+                       continue;
+
+               xe_pt_update_ops_fini(tile, vops);
+       }
+
+       return fence;
+
+err_out:
+       for_each_tile(tile, vm->xe, id) {
+               if (!vops->pt_update_ops[id].num_ops)
+                       continue;
+
+               xe_pt_update_ops_abort(tile, vops);
+       }
+       while (current_fence)
+               dma_fence_put(fences[--current_fence]);
+       kfree(fences);
+       kfree(cf);
+
        return fence;
 }
 
@@ -2955,12 +2665,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
                fence = ops_execute(vm, vops);
                if (IS_ERR(fence)) {
                        err = PTR_ERR(fence);
-                       /* FIXME: Killing VM rather than proper error handling */
-                       xe_vm_kill(vm, false);
                        goto unlock;
-               } else {
-                       vm_bind_ioctl_ops_fini(vm, vops, fence);
                }
+
+               vm_bind_ioctl_ops_fini(vm, vops, fence);
        }
 
 unlock:
@@ -3317,8 +3025,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        goto unwind_ops;
                }
 
-               err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
-                                             &vops, i == args->num_binds - 1);
+               err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
                if (err)
                        goto unwind_ops;
        }
index b481608b12f1bfa605371dde8bbb977019136531..c864dba35e1d5cf75b6a83edda80929294fd5f71 100644 (file)
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
        return drm_gpuvm_resv(&vm->gpuvm);
 }
 
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
 /**
  * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
  * @vm: The vm
index 211c88801182957b9c1fe3be2a87de8c0eecaa47..27d651093d3073272a2082e746ebcd745c2a3330 100644 (file)
@@ -26,14 +26,12 @@ struct xe_vm_pgtable_update_op;
 #define XE_VMA_READ_ONLY       DRM_GPUVA_USERBITS
 #define XE_VMA_DESTROYED       (DRM_GPUVA_USERBITS << 1)
 #define XE_VMA_ATOMIC_PTE_BIT  (DRM_GPUVA_USERBITS << 2)
-#define XE_VMA_FIRST_REBIND    (DRM_GPUVA_USERBITS << 3)
-#define XE_VMA_LAST_REBIND     (DRM_GPUVA_USERBITS << 4)
-#define XE_VMA_PTE_4K          (DRM_GPUVA_USERBITS << 5)
-#define XE_VMA_PTE_2M          (DRM_GPUVA_USERBITS << 6)
-#define XE_VMA_PTE_1G          (DRM_GPUVA_USERBITS << 7)
-#define XE_VMA_PTE_64K         (DRM_GPUVA_USERBITS << 8)
-#define XE_VMA_PTE_COMPACT     (DRM_GPUVA_USERBITS << 9)
-#define XE_VMA_DUMPABLE                (DRM_GPUVA_USERBITS << 10)
+#define XE_VMA_PTE_4K          (DRM_GPUVA_USERBITS << 3)
+#define XE_VMA_PTE_2M          (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_1G          (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_64K         (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_COMPACT     (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_DUMPABLE                (DRM_GPUVA_USERBITS << 8)
 
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
@@ -100,6 +98,9 @@ struct xe_vma {
         */
        u8 tile_present;
 
+       /** @tile_staged: bind is staged for this VMA */
+       u8 tile_staged;
+
        /**
         * @pat_index: The pat index to use when encoding the PTEs for this vma.
         */
@@ -315,31 +316,18 @@ struct xe_vma_op_prefetch {
 
 /** enum xe_vma_op_flags - flags for VMA operation */
 enum xe_vma_op_flags {
-       /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
-       XE_VMA_OP_FIRST                 = BIT(0),
-       /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
-       XE_VMA_OP_LAST                  = BIT(1),
        /** @XE_VMA_OP_COMMITTED: VMA operation committed */
-       XE_VMA_OP_COMMITTED             = BIT(2),
+       XE_VMA_OP_COMMITTED             = BIT(0),
        /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
-       XE_VMA_OP_PREV_COMMITTED        = BIT(3),
+       XE_VMA_OP_PREV_COMMITTED        = BIT(1),
        /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
-       XE_VMA_OP_NEXT_COMMITTED        = BIT(4),
+       XE_VMA_OP_NEXT_COMMITTED        = BIT(2),
 };
 
 /** struct xe_vma_op - VMA operation */
 struct xe_vma_op {
        /** @base: GPUVA base operation */
        struct drm_gpuva_op base;
-       /** @q: exec queue for this operation */
-       struct xe_exec_queue *q;
-       /**
-        * @syncs: syncs for this operation, only used on first and last
-        * operation
-        */
-       struct xe_sync_entry *syncs;
-       /** @num_syncs: number of syncs */
-       u32 num_syncs;
        /** @link: async operation link */
        struct list_head link;
        /** @flags: operation flags */
@@ -363,19 +351,14 @@ struct xe_vma_ops {
        struct list_head list;
        /** @vm: VM */
        struct xe_vm *vm;
-       /** @q: exec queue these operations */
+       /** @q: exec queue for VMA operations */
        struct xe_exec_queue *q;
        /** @syncs: syncs for these operations */
        struct xe_sync_entry *syncs;
        /** @num_syncs: number of syncs */
        u32 num_syncs;
        /** @pt_update_ops: page table update operations */
-       struct {
-               /** @ops: operations */
-               struct xe_vm_pgtable_update_op *ops;
-               /** @num_ops: number of operations */
-               u32 num_ops;
-       } pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+       struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
 };
 
 #endif