        if (bo->pin_count > 0)
                amdgpu_bo_subtract_pin_size(bo);
 
-       if (bo->kfd_bo)
-               amdgpu_amdkfd_unreserve_memory_limit(bo);
-
        amdgpu_bo_kunmap(bo);
 
        if (bo->gem_base.import_attach)
        trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
+/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers before the memory is released, so that their
+ * contents cannot be leaked to whatever gets the space next.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+{
+       struct dma_fence *fence = NULL;
+       struct amdgpu_bo *abo;
+       int r;
+
+       if (!amdgpu_bo_is_amdgpu_bo(bo))
+               return;
+
+       abo = ttm_to_amdgpu_bo(bo);
+
+       if (abo->kfd_bo)
+               amdgpu_amdkfd_unreserve_memory_limit(abo);
+
+       if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+           !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+               return;
+
+       reservation_object_lock(bo->resv, NULL);
+
+       r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->resv, &fence);
+       if (!WARN_ON(r)) {
+               amdgpu_bo_fence(abo, fence, false);
+               dma_fence_put(fence);
+       }
+
+       reservation_object_unlock(bo->resv);
+}
+
 /**
  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
  * @bo: pointer to a buffer object
 
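For context, a buffer object opts in to this behaviour at creation time via the AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE flag. Below is a minimal sketch of a kernel-internal caller requesting it, assuming the amdgpu_bo_param/amdgpu_bo_create helpers as they exist in this kernel; the example_ function name is hypothetical and error handling is elided:

	/* Hypothetical caller: create a VRAM BO whose contents get
	 * poisoned with AMDGPU_POISON once the BO is finally released. */
	static int example_create_wiped_bo(struct amdgpu_device *adev,
					   unsigned long size,
					   struct amdgpu_bo **bo_ptr)
	{
		struct amdgpu_bo_param bp = {
			.size		= size,
			.byte_align	= PAGE_SIZE,
			.domain		= AMDGPU_GEM_DOMAIN_VRAM,
			.flags		= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE,
			.type		= ttm_bo_type_kernel,
			.resv		= NULL,
		};

		/* amdgpu_bo_release_notify() above fences the wipe against
		 * bo->resv, so the fill completes before the space is reused. */
		return amdgpu_bo_create(adev, &bp, bo_ptr);
	}
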
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem);
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
 
        if (r)
                goto error;
 
+       /* clear the space being freed */
+       if (old_mem->mem_type == TTM_PL_VRAM &&
+           (ttm_to_amdgpu_bo(bo)->flags &
+            AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+               struct dma_fence *wipe_fence = NULL;
+
+               r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
+                                      NULL, &wipe_fence);
+               if (r) {
+                       goto error;
+               } else if (wipe_fence) {
+                       dma_fence_put(fence);
+                       fence = wipe_fence;
+               }
+       }
+
        /* Always block for VM page tables before committing the new location */
        if (bo->type == ttm_bo_type_kernel)
                r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
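Both call sites use the existing fill helper; its signature as of this series (from amdgpu_ttm.h) is:

	int amdgpu_fill_buffer(struct amdgpu_bo *bo,
			       uint32_t src_data,
			       struct reservation_object *resv,
			       struct dma_fence **fence);

The release path passes bo->resv so the fill is ordered against pending work on the buffer, while the move path above passes NULL, presumably because the wipe is submitted to the same ring as the copy and is ordered by submission. The copy fence is then dropped in favour of the wipe fence, so ttm_bo_move_accel_cleanup() ends up waiting for both.
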
        .move = &amdgpu_bo_move,
        .verify_access = &amdgpu_verify_access,
        .move_notify = &amdgpu_bo_move_notify,
+       .release_notify = &amdgpu_bo_release_notify,
        .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
        .io_mem_free = &amdgpu_ttm_io_mem_free,
 
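The new hook registered above is invoked by TTM during final object teardown. An abridged sketch of the TTM side (paraphrased; the real ttm_bo_release() in ttm_bo.c contains considerably more logic):

	static void ttm_bo_release(struct kref *kref)
	{
		struct ttm_buffer_object *bo =
			container_of(kref, struct ttm_buffer_object, kref);

		/* Fires once per BO while bo->resv is still valid, so the
		 * wipe fence added in amdgpu_bo_release_notify() is in
		 * place before TTM frees or reuses the VRAM. */
		if (bo->bdev->driver->release_notify)
			bo->bdev->driver->release_notify(bo);

		/* ... remainder of teardown unchanged ... */
	}
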
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE   512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS        2
 
+#define AMDGPU_POISON  0xd0bed0be
+
 struct amdgpu_mman {
        struct ttm_bo_device            bdev;
        bool                            mem_global_referenced;