__entry->offset, __entry->flags)
 );
 
-TRACE_EVENT(amdgpu_vm_bo_update,
+DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
            TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
            TP_ARGS(mapping),
            TP_STRUCT__entry(
                      __entry->soffset, __entry->eoffset, __entry->flags)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_page,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint32_t flags),
 
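The event class consolidates what used to be a full TRACE_EVENT: the fields, assignments, and format string are declared once, and each DEFINE_EVENT generates its own trace_<name>() call plus a trace_<name>_enabled() test from that single body. The class body itself falls between the hunks shown above; the sketch below restores its likely shape, with the TP_printk arguments taken from the surviving context line, while the field types, format string, and TP_fast_assign expressions are assumptions.

DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     /* types assumed; names match the TP_printk context above */
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u32, flags)
			     ),
	    TP_fast_assign(
			   /* assumed: range taken from the mapping's VA interval */
			   __entry->soffset = mapping->it.start;
			   __entry->eoffset = mapping->it.last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);

With the shared class in place, each further mapping event costs a short DEFINE_EVENT stanza instead of another full TRACE_EVENT.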
                        return r;
        }
 
+       if (trace_amdgpu_vm_bo_mapping_enabled()) {
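+               /*
+                * Walk both lists: invalids are spliced into valids
+                * just below, so together they are the complete set
+                * of mappings for this BO at update time.
+                */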
+               list_for_each_entry(mapping, &bo_va->valids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+
+               list_for_each_entry(mapping, &bo_va->invalids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+       }
+
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
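On the C side, trace_amdgpu_vm_bo_mapping_enabled() is the per-event helper the tracepoint machinery generates; it compiles down to a static-branch test, so both list walks are skipped at essentially no cost while the event is off. A minimal sketch of the same guard pattern (the event and structure names are illustrative, not part of the patch):

	/*
	 * Guard pattern sketch: trace_<event>_enabled() compiles to a
	 * static-branch test, so the walk only runs while the event is
	 * enabled.  "my_event" and "struct item" are illustrative names.
	 */
	if (trace_my_event_enabled()) {
		struct item *it;

		list_for_each_entry(it, &head, list)
			trace_my_event(it);
	}

At runtime the new event is switched on through tracefs, e.g. by writing 1 to events/amdgpu/amdgpu_vm_bo_mapping/enable under the tracing mount, which flips the static key the _enabled() helper tests.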