                 dma_addr_t dma;
 
                for_each_sgt_daddr(dma, iter, vma->pages) {
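+                       /* serialise use of the single GGTT error-capture slot */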
+                       mutex_lock(&ggtt->error_mutex);
                        ggtt->vm.insert_page(&ggtt->vm, dma, slot,
                                             I915_CACHE_NONE, 0);
                        mb();
 
                         s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
                         ret = compress_page(compress,
                                             (void  __force *)s, dst,
                                             true);
                        io_mapping_unmap(s);
+
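+                       /* order the copy before scrubbing the slot's PTE */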
+                       mb();
+                       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+                       mutex_unlock(&ggtt->error_mutex);
                        if (ret)
                                break;
                }
        return error_uc;
 }
 
-static void gt_capture_prepare(struct intel_gt_coredump *gt)
-{
-       struct i915_ggtt *ggtt = gt->_gt->ggtt;
-
-       mutex_lock(&ggtt->error_mutex);
-}
-
-static void gt_capture_finish(struct intel_gt_coredump *gt)
-{
-       struct i915_ggtt *ggtt = gt->_gt->ggtt;
-
-       if (drm_mm_node_allocated(&ggtt->error_capture))
-               ggtt->vm.clear_range(&ggtt->vm,
-                                    ggtt->error_capture.start,
-                                    PAGE_SIZE);
-
-       mutex_unlock(&ggtt->error_mutex);
-}
-
 /* Capture all registers which don't fit into another category. */
 static void gt_record_regs(struct intel_gt_coredump *gt)
 {
                return NULL;
        }
 
-       gt_capture_prepare(gt);
-
        return compress;
 }
 
        if (!compress)
                return;
 
-       gt_capture_finish(gt);
-
        compress_fini(compress);
        kfree(compress);
 }