for (i = 0; i < num_vmas; i++) {
/*TODO: Extend attributes to bo based vmas */
- if (!xe_vma_is_cpu_addr_mirror(vmas[i]))
- continue;
-
- vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
-
- /* Till multi-device support is not added migration_policy
- * is of no use and can be ignored.
- */
- vmas[i]->attr.preferred_loc.migration_policy =
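+ /* Skip invalidation when the preferred location is unchanged
+ * or the VMA is not a CPU address mirror.
+ */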
+ if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
+ vmas[i]->attr.preferred_loc.migration_policy ==
+ op->preferred_mem_loc.migration_policy) ||
+ !xe_vma_is_cpu_addr_mirror(vmas[i])) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+ /* Until multi-device support is added, migration_policy
+ * is of no use and can be ignored.
+ */
+ vmas[i]->attr.preferred_loc.migration_policy =
op->preferred_mem_loc.migration_policy;
+ }
}
}
xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
for (i = 0; i < num_vmas; i++) {
- if ((xe_vma_is_userptr(vmas[i]) &&
- !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
- xe->info.has_device_atomics_on_smem)))
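+ /* Userptr VMAs are not updated unless DRM_XE_ATOMIC_DEVICE is
+ * requested and device atomics on SMEM are supported, so they
+ * need no invalidation.
+ */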
+ if (xe_vma_is_userptr(vmas[i]) &&
+ !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
+ xe->info.has_device_atomics_on_smem)) {
+ vmas[i]->skip_invalidation = true;
continue;
+ }
+
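+ /* Only invalidate when the atomic_access attribute actually changes. */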
+ if (vmas[i]->attr.atomic_access == op->atomic.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.atomic_access = op->atomic.val;
+ }
- vmas[i]->attr.atomic_access = op->atomic.val;
bo = xe_vma_bo(vmas[i]);
- if (!bo)
+ if (!bo || bo->attr.atomic_access == op->atomic.val)
continue;
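+ /* The BO attribute changes below, so this VMA still needs invalidation. */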
+ vmas[i]->skip_invalidation = false;
xe_bo_assert_held(bo);
bo->attr.atomic_access = op->atomic.val;
xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
- for (i = 0; i < num_vmas; i++)
- vmas[i]->attr.pat_index = op->pat_index.val;
+ for (i = 0; i < num_vmas; i++) {
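+ /* Only invalidate when the PAT index actually changes. */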
+ if (vmas[i]->attr.pat_index == op->pat_index.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.pat_index = op->pat_index.val;
+ }
+ }
}
typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
{
struct drm_gpuva *gpuva;
struct xe_tile *tile;
- u8 id, tile_mask;
+ u8 id, tile_mask = 0;
lockdep_assert_held_write(&vm->lock);
false, MAX_SCHEDULE_TIMEOUT) <= 0)
XE_WARN_ON(1);
- tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
-
drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
struct xe_vma *vma = gpuva_to_vma(gpuva);
- if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_null(vma))
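+ /* Skip NULL VMAs and VMAs whose madvise attributes did not change. */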
+ if (vma->skip_invalidation || xe_vma_is_null(vma))
continue;
- for_each_tile(tile, vm->xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- tile_mask |= BIT(id);
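+ /* CPU address mirror VMAs are invalidated via their SVM ranges,
+ * all other VMAs via per-tile PT zap.
+ */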
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
+ } else {
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_mask |= BIT(id);
/*
* WRITE_ONCE pairs with READ_ONCE
*/
WRITE_ONCE(vma->tile_invalidated,
vma->tile_invalidated | BIT(id));
+ }
}
}
}