struct nvkm_memory {
        const struct nvkm_memory_func *func;
        const struct nvkm_memory_ptrs *ptrs;
+       /* Reference count; initialised in nvkm_memory_ctor(), taken by
+        * nvkm_memory_ref(), dropped by nvkm_memory_unref().  The object
+        * is destroyed when the last reference goes away.
+        */
+       struct kref kref;
        struct nvkm_tags *tags;
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
 int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
                    u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+/* nvkm_memory_ref() takes an additional reference (NULL-safe) and returns
+ * the same pointer.  nvkm_memory_unref() drops one reference and NULLs the
+ * caller's pointer; the object is freed when the last reference is dropped.
+ */
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
 int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
                         void (*clear)(struct nvkm_device *, u32, u32),
                         struct nvkm_tags **);
 
                if (gpuobj->parent)
                        nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
                nvkm_mm_fini(&gpuobj->heap);
-               nvkm_memory_del(&gpuobj->memory);
+               nvkm_memory_unref(&gpuobj->memory);
                kfree(*pgpuobj);
                *pgpuobj = NULL;
        }
 
                 struct nvkm_memory *memory)
 {
        memory->func = func;
+       kref_init(&memory->kref);
+}
+
+/* kref release callback: runs when the final reference is dropped via
+ * nvkm_memory_unref().  The type-specific dtor may return a different
+ * pointer (the allocation embedding the nvkm_memory), and that pointer
+ * is what gets kfree()d.
+ */
+static void
+nvkm_memory_del(struct kref *kref)
+{
+       struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+       /* An object with no func was never properly constructed; warn and
+        * skip the free rather than guess how to tear it down (matches the
+        * behaviour of the old nvkm_memory_del()).
+        */
+       if (!WARN_ON(!memory->func)) {
+               if (memory->func->dtor)
+                       memory = memory->func->dtor(memory);
+               kfree(memory);
+       }
+}
 
+/* Drop the caller's reference to *pmemory and clear the pointer.  The
+ * object itself is only destroyed (in nvkm_memory_del()) when this was
+ * the last outstanding reference.  NULL-safe.
+ */
 void
-nvkm_memory_del(struct nvkm_memory **pmemory)
+nvkm_memory_unref(struct nvkm_memory **pmemory)
 {
        struct nvkm_memory *memory = *pmemory;
-       if (memory && !WARN_ON(!memory->func)) {
-               if (memory->func->dtor)
-                       *pmemory = memory->func->dtor(memory);
-               kfree(*pmemory);
+       if (memory) {
+               kref_put(&memory->kref, nvkm_memory_del);
                *pmemory = NULL;
        }
 }
 
+/* Take an additional reference on a memory object; NULL-safe.  Returns
+ * the pointer passed in, so calls can be used inline in assignments.
+ */
+struct nvkm_memory *
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+       if (memory)
+               kref_get(&memory->kref);
+       return memory;
+}
+
 int
 nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
                u64 size, u32 align, bool zero,
 
        const u32 base = falcon->addr;
 
        if (!suspend) {
-               nvkm_memory_del(&falcon->core);
+               nvkm_memory_unref(&falcon->core);
                if (falcon->external) {
                        vfree(falcon->data.data);
                        vfree(falcon->code.data);
 
 {
        struct gf100_fifo *fifo = gf100_fifo(base);
        nvkm_vm_put(&fifo->user.bar);
-       nvkm_memory_del(&fifo->user.mem);
-       nvkm_memory_del(&fifo->runlist.mem[0]);
-       nvkm_memory_del(&fifo->runlist.mem[1]);
+       nvkm_memory_unref(&fifo->user.mem);
+       nvkm_memory_unref(&fifo->runlist.mem[0]);
+       nvkm_memory_unref(&fifo->runlist.mem[1]);
        return fifo;
 }
 
 
        int i;
 
        nvkm_vm_put(&fifo->user.bar);
-       nvkm_memory_del(&fifo->user.mem);
+       nvkm_memory_unref(&fifo->user.mem);
 
        for (i = 0; i < fifo->runlist_nr; i++) {
-               nvkm_memory_del(&fifo->runlist[i].mem[1]);
-               nvkm_memory_del(&fifo->runlist[i].mem[0]);
+               nvkm_memory_unref(&fifo->runlist[i].mem[1]);
+               nvkm_memory_unref(&fifo->runlist[i].mem[0]);
        }
 
        return fifo;
 
 nv50_fifo_dtor(struct nvkm_fifo *base)
 {
        struct nv50_fifo *fifo = nv50_fifo(base);
-       nvkm_memory_del(&fifo->runlist[1]);
-       nvkm_memory_del(&fifo->runlist[0]);
+       nvkm_memory_unref(&fifo->runlist[1]);
+       nvkm_memory_unref(&fifo->runlist[0]);
        return fifo;
 }
 
 
        }
 
 done:
-       nvkm_memory_del(&chan);
+       nvkm_memory_unref(&chan);
        return ret;
 }
 
 
                        nvkm_vm_unmap(&chan->data[i].vma);
                        nvkm_vm_put(&chan->data[i].vma);
                }
-               nvkm_memory_del(&chan->data[i].mem);
+               nvkm_memory_unref(&chan->data[i].mem);
        }
 
        if (chan->mmio_vma.node) {
                nvkm_vm_unmap(&chan->mmio_vma);
                nvkm_vm_put(&chan->mmio_vma);
        }
-       nvkm_memory_del(&chan->mmio);
+       nvkm_memory_unref(&chan->mmio);
        return chan;
 }
 
 
 nv20_gr_chan_dtor(struct nvkm_object *object)
 {
        struct nv20_gr_chan *chan = nv20_gr_chan(object);
-       nvkm_memory_del(&chan->inst);
+       nvkm_memory_unref(&chan->inst);
        return chan;
 }
 
 nv20_gr_dtor(struct nvkm_gr *base)
 {
        struct nv20_gr *gr = nv20_gr(base);
-       nvkm_memory_del(&gr->ctxtab);
+       nvkm_memory_unref(&gr->ctxtab);
        return gr;
 }
 
 
        nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
        if (!suspend)
-               nvkm_memory_del(&xtensa->gpu_fw);
+               nvkm_memory_unref(&xtensa->gpu_fw);
        return 0;
 }
 
 
 
        nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
        nvkm_gpuobj_del(&bar->bar[1].pgd);
-       nvkm_memory_del(&bar->bar[1].mem);
+       nvkm_memory_unref(&bar->bar[1].mem);
 
        if (bar->bar[0].vm) {
-               nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
+               nvkm_memory_unref(&bar->bar[0].vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
        }
        nvkm_gpuobj_del(&bar->bar[0].pgd);
-       nvkm_memory_del(&bar->bar[0].mem);
+       nvkm_memory_unref(&bar->bar[0].mem);
        return bar;
 }
 
 
        nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
        nvkm_gpuobj_del(&bar->bar2);
        if (bar->bar2_vm) {
-               nvkm_memory_del(&bar->bar2_vm->pgt[0].mem[0]);
+               nvkm_memory_unref(&bar->bar2_vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &bar->bar2_vm, bar->pgd);
        }
        nvkm_gpuobj_del(&bar->pgd);
 
        struct nvkm_fb *fb = nvkm_fb(subdev);
        int i;
 
-       nvkm_memory_del(&fb->mmu_wr);
-       nvkm_memory_del(&fb->mmu_rd);
+       nvkm_memory_unref(&fb->mmu_wr);
+       nvkm_memory_unref(&fb->mmu_rd);
 
        for (i = 0; i < fb->tile.regions; i++)
                fb->func->tile.fini(fb, i, &fb->tile.region[i]);
 
 
 done:
        if (ret)
-               nvkm_memory_del(&memory);
+               nvkm_memory_unref(&memory);
        *pmemory = memory;
        return ret;
 }
 
 nv04_instmem_dtor(struct nvkm_instmem *base)
 {
        struct nv04_instmem *imem = nv04_instmem(base);
-       nvkm_memory_del(&imem->base.ramfc);
-       nvkm_memory_del(&imem->base.ramro);
+       nvkm_memory_unref(&imem->base.ramfc);
+       nvkm_memory_unref(&imem->base.ramro);
        nvkm_ramht_del(&imem->base.ramht);
-       nvkm_memory_del(&imem->base.vbios);
+       nvkm_memory_unref(&imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        return imem;
 }
 
 nv40_instmem_dtor(struct nvkm_instmem *base)
 {
        struct nv40_instmem *imem = nv40_instmem(base);
-       nvkm_memory_del(&imem->base.ramfc);
-       nvkm_memory_del(&imem->base.ramro);
+       nvkm_memory_unref(&imem->base.ramfc);
+       nvkm_memory_unref(&imem->base.ramro);
        nvkm_ramht_del(&imem->base.ramht);
-       nvkm_memory_del(&imem->base.vbios);
+       nvkm_memory_unref(&imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        if (imem->iomem)
                iounmap(imem->iomem);
 
 
                mmu->func->flush(vm);
 
-               nvkm_memory_del(&pgt);
+               nvkm_memory_unref(&pgt);
        }
 }
 
 
        struct nv04_mmu *mmu = nv04_mmu(base);
        struct nvkm_device *device = mmu->base.subdev.device;
        if (mmu->base.vmm) {
-               nvkm_memory_del(&mmu->base.vmm->pgt[0].mem[0]);
+               nvkm_memory_unref(&mmu->base.vmm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &mmu->base.vmm, NULL);
        }
        if (mmu->nullp) {