struct amdgpu_fpriv *fpriv = file->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
 
-       struct amdgpu_mem_stats stats;
+       struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };
        ktime_t usage[AMDGPU_HW_IP_NUM];
-       unsigned int hw_ip;
+       const char *pl_name[] = {
+               [TTM_PL_VRAM] = "vram",
+               [TTM_PL_TT] = "gtt",
+               [TTM_PL_SYSTEM] = "cpu",
+       };
+       unsigned int hw_ip, i;
        int ret;
 
-       memset(&stats, 0, sizeof(stats));
-
        ret = amdgpu_bo_reserve(vm->root.bo, false);
        if (ret)
                return;
 
-       amdgpu_vm_get_memory(vm, &stats);
+       amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
        amdgpu_bo_unreserve(vm->root.bo);
 
        amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
         */
 
        drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
-       drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
-       drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
-       drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+
+       for (i = 0; i < TTM_PL_PRIV; i++)
+               drm_print_memory_stats(p,
+                                      &stats[i].drm,
+                                      DRM_GEM_OBJECT_RESIDENT |
+                                      DRM_GEM_OBJECT_PURGEABLE,
+                                      pl_name[i]);
+
+       /* Legacy amdgpu keys, aliases for drm-resident-memory-<region>: */
+       drm_printf(p, "drm-memory-vram:\t%llu KiB\n",
+                  stats[TTM_PL_VRAM].total/1024UL);
+       drm_printf(p, "drm-memory-gtt: \t%llu KiB\n",
+                  stats[TTM_PL_TT].total/1024UL);
+       drm_printf(p, "drm-memory-cpu: \t%llu KiB\n",
+                  stats[TTM_PL_SYSTEM].total/1024UL);
+
+       /* Amdgpu specific memory accounting keys: */
        drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n",
-                  stats.visible_vram/1024UL);
+                  stats[TTM_PL_VRAM].visible/1024UL);
        drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
-                  stats.evicted_vram/1024UL);
+                  stats[TTM_PL_VRAM].evicted/1024UL);
        drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n",
-                  stats.evicted_visible_vram/1024UL);
+                  stats[TTM_PL_VRAM].evicted_visible/1024UL);
        drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
-                  stats.requested_vram/1024UL);
+                  stats[TTM_PL_VRAM].requested/1024UL);
        drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n",
-                  stats.requested_visible_vram/1024UL);
+                  stats[TTM_PL_VRAM].requested_visible/1024UL);
        drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
-                  stats.requested_gtt/1024UL);
-       drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
-       drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
-       drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+                  stats[TTM_PL_TT].requested/1024UL);
 
        for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
                if (!usage[hw_ip])
 
 }
 
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
-                         struct amdgpu_mem_stats *stats)
+                         struct amdgpu_mem_stats *stats,
+                         unsigned int sz)
 {
+       const unsigned int domain_to_pl[] = {
+               [ilog2(AMDGPU_GEM_DOMAIN_CPU)]      = TTM_PL_SYSTEM,
+               [ilog2(AMDGPU_GEM_DOMAIN_GTT)]      = TTM_PL_TT,
+               [ilog2(AMDGPU_GEM_DOMAIN_VRAM)]     = TTM_PL_VRAM,
+               [ilog2(AMDGPU_GEM_DOMAIN_GDS)]      = AMDGPU_PL_GDS,
+               [ilog2(AMDGPU_GEM_DOMAIN_GWS)]      = AMDGPU_PL_GWS,
+               [ilog2(AMDGPU_GEM_DOMAIN_OA)]       = AMDGPU_PL_OA,
+               [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
+       };
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_resource *res = bo->tbo.resource;
+       struct drm_gem_object *obj = &bo->tbo.base;
        uint64_t size = amdgpu_bo_size(bo);
-       struct drm_gem_object *obj;
-       bool shared;
-
-       /* Abort if the BO doesn't currently have a backing store */
-       if (!res)
-               return;
+       unsigned int type;
 
-       obj = &bo->tbo.base;
-       shared = drm_gem_object_is_shared_for_memory_stats(obj);
+       if (!res) {
+               /*
+                * If there is no backing store, fall back to one of the
+                * preferred domains for basic stats. We take the MSB since
+                * that should give a reasonable view.
+                */
+               BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
+                            TTM_PL_VRAM < TTM_PL_SYSTEM);
+               type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
+               if (!type)
+                       return;
+               type--;
+               if (drm_WARN_ON_ONCE(&adev->ddev,
+                                    type >= ARRAY_SIZE(domain_to_pl)))
+                       return;
+               type = domain_to_pl[type];
+       } else {
+               type = res->mem_type;
+       }
 
-       switch (res->mem_type) {
+       /* Squash some into 'cpu' to keep the legacy userspace view. */
+       switch (type) {
        case TTM_PL_VRAM:
-               stats->vram += size;
-               if (amdgpu_res_cpu_visible(adev, res))
-                       stats->visible_vram += size;
-               if (shared)
-                       stats->vram_shared += size;
-               break;
        case TTM_PL_TT:
-               stats->gtt += size;
-               if (shared)
-                       stats->gtt_shared += size;
-               break;
        case TTM_PL_SYSTEM:
+               break;
        default:
-               stats->cpu += size;
-               if (shared)
-                       stats->cpu_shared += size;
+               type = TTM_PL_SYSTEM;
                break;
        }
 
+       if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
+               return;
+
+       /* DRM stats common fields: */
+
+       stats[type].total += size;
+       if (drm_gem_object_is_shared_for_memory_stats(obj))
+               stats[type].drm.shared += size;
+       else
+               stats[type].drm.private += size;
+
+       if (res) {
+               stats[type].drm.resident += size;
+
+               if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
+                       stats[type].drm.active += size;
+               else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+                       stats[type].drm.purgeable += size;
+
+               if (type == TTM_PL_VRAM && amdgpu_res_cpu_visible(adev, res))
+                       stats[type].visible += size;
+       }
+
+       /* amdgpu specific stats: */
+
        if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
-               stats->requested_vram += size;
+               stats[TTM_PL_VRAM].requested += size;
                if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-                       stats->requested_visible_vram += size;
+                       stats[TTM_PL_VRAM].requested_visible += size;
 
-               if (res->mem_type != TTM_PL_VRAM) {
-                       stats->evicted_vram += size;
+               if (type != TTM_PL_VRAM) {
+                       stats[TTM_PL_VRAM].evicted += size;
                        if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-                               stats->evicted_visible_vram += size;
+                               stats[TTM_PL_VRAM].evicted_visible += size;
                }
        } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
-               stats->requested_gtt += size;
+               stats[TTM_PL_TT].requested += size;
        }
 }
 
 
 };
 
 struct amdgpu_mem_stats {
-       /* current VRAM usage, includes visible VRAM */
-       uint64_t vram;
-       /* current shared VRAM usage, includes visible VRAM */
-       uint64_t vram_shared;
-       /* current visible VRAM usage */
-       uint64_t visible_vram;
-       /* current GTT usage */
-       uint64_t gtt;
-       /* current shared GTT usage */
-       uint64_t gtt_shared;
-       /* current system memory usage */
-       uint64_t cpu;
-       /* current shared system memory usage */
-       uint64_t cpu_shared;
-       /* sum of evicted buffers, includes visible VRAM */
-       uint64_t evicted_vram;
-       /* sum of evicted buffers due to CPU access */
-       uint64_t evicted_visible_vram;
-       /* how much userspace asked for, includes vis.VRAM */
-       uint64_t requested_vram;
-       /* how much userspace asked for */
-       uint64_t requested_visible_vram;
-       /* how much userspace asked for */
-       uint64_t requested_gtt;
+       struct drm_memory_stats drm;
+
+       uint64_t total;
+       uint64_t visible;
+       uint64_t evicted;
+       uint64_t evicted_visible;
+       uint64_t requested;
+       uint64_t requested_visible;
 };
 
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
-                         struct amdgpu_mem_stats *stats);
+                         struct amdgpu_mem_stats *stats,
+                         unsigned int size);
 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                            uint32_t domain);
 
 
 #define AMDGPU_PL_OA           (TTM_PL_PRIV + 2)
 #define AMDGPU_PL_PREEMPT      (TTM_PL_PRIV + 3)
 #define AMDGPU_PL_DOORBELL     (TTM_PL_PRIV + 4)
+#define __AMDGPU_PL_LAST       (TTM_PL_PRIV + 4)
 
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE   512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS        2
 
 }
 
 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
-                                   struct amdgpu_mem_stats *stats)
+                                   struct amdgpu_mem_stats *stats,
+                                   unsigned int size)
 {
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo *bo = bo_va->base.bo;
            !dma_resv_trylock(bo->tbo.base.resv))
                return;
 
-       amdgpu_bo_get_memory(bo, stats);
+       amdgpu_bo_get_memory(bo, stats, size);
        if (!amdgpu_vm_is_bo_always_valid(vm, bo))
                dma_resv_unlock(bo->tbo.base.resv);
 }
 
 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
-                         struct amdgpu_mem_stats *stats)
+                         struct amdgpu_mem_stats *stats,
+                         unsigned int size)
 {
        struct amdgpu_bo_va *bo_va, *tmp;
 
        spin_lock(&vm->status_lock);
        list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
 
        list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
 
        list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
 
        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
 
        list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
 
        list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
-               amdgpu_vm_bo_get_memory(bo_va, stats);
+               amdgpu_vm_bo_get_memory(bo_va, stats, size);
        spin_unlock(&vm->status_lock);
 }
 
 
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
-                         struct amdgpu_mem_stats *stats);
+                         struct amdgpu_mem_stats *stats,
+                         unsigned int size);
 
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct amdgpu_bo_vm *vmbo, bool immediate);