#include <drm/ttm/ttm_range_manager.h>
 
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
 
 #include "amdgpu.h"
 #include "amdgpu_object.h"
        }
 }
 
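+/*
+ * amdgpu_ttm_access_memory_sdma - read or write a page of a VRAM BO via SDMA
+ *
+ * Copies one page between @bo and a preallocated GTT bounce buffer with a
+ * single SDMA job, avoiding the MMIO aperture.  Only whole-page accesses
+ * are supported; anything else must take the MMIO slowpath.
+ */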
+static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
+                                         unsigned long offset, void *buf,
+                                         int len, int write)
+{
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+       struct amdgpu_res_cursor src_mm;
+       struct amdgpu_job *job;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       unsigned int num_dw;
+       int r, idx;
+
+       if (len != PAGE_SIZE)
+               return -EINVAL;
+
+       if (!adev->mman.sdma_access_ptr)
+               return -EACCES;
+
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
+               return -ENODEV;
+
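+       /* For writes, stage the payload in the bounce buffer first. */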
+       if (write)
+               memcpy(adev->mman.sdma_access_ptr, buf, len);
+
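+       /* Size the IB for one copy packet, padded to an 8-dword boundary. */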
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               goto out;
+
+       /* Map the caller's offset into the BO through a resource cursor. */
+       amdgpu_res_first(abo->tbo.resource, offset, PAGE_SIZE, &src_mm);
+       src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+               src_mm.start;
+       dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+       if (write)
+               swap(src_addr, dst_addr);
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+                               PAGE_SIZE, false);
+
+       amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+
+       r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r) {
+               amdgpu_job_free(job);
+               goto out;
+       }
+
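+       /* Wait for the copy; an expired SDMA timeout fails the access. */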
+       if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+               r = -ETIMEDOUT;
+       dma_fence_put(fence);
+
+       if (!(r || write))
+               memcpy(buf, adev->mman.sdma_access_ptr, len);
+out:
+       drm_dev_exit(idx);
+       return r;
+}
+
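+/*
+ * Post-mortem debugging is assumed when GPU recovery is disabled or any
+ * engine timeout is infinite.  The SDMA fastpath must be avoided then:
+ * the engine may itself be hung, and submitting a copy job would disturb
+ * the state under inspection.
+ */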
+static inline bool amdgpu_ttm_allow_post_mortem_debug(struct amdgpu_device *adev)
+{
+       return amdgpu_gpu_recovery == 0 ||
+               adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+               adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+               adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+               adev->video_timeout == MAX_SCHEDULE_TIMEOUT;
+}
+
 /**
  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *
        if (bo->resource->mem_type != TTM_PL_VRAM)
                return -EIO;
 
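+       /*
+        * Prefer the SDMA fastpath unless post-mortem debugging is
+        * requested; any failure falls through to the MMIO slowpath.
+        */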
+       if (!amdgpu_ttm_allow_post_mortem_debug(adev) &&
+           !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
+               return len;
+
        amdgpu_res_first(bo->resource, offset, len, &cursor);
        while (cursor.remaining) {
                size_t count, size = cursor.size;
                return r;
        }
 
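+       /*
+        * Preallocate a one-page GTT bounce buffer for SDMA-based debug
+        * access to VRAM.  On failure the MMIO slowpath is used instead.
+        */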
+       if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_GTT,
+                               &adev->mman.sdma_access_bo, NULL,
+                               &adev->mman.sdma_access_ptr))
+               DRM_WARN("Debug VRAM access will use slowpath MM access\n");
+
        return 0;
 }
 
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+       /* Free the SDMA bounce buffer before its TTM device is torn down. */
+       amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+                               &adev->mman.sdma_access_ptr);
        ttm_device_fini(&adev->mman.bdev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
 }