        /* CPU mapping */
        u32 *vaddr;
-       struct list_head vaddr_node;
-       /* How many clients are using vaddr? */
-       u32 use_cpt;
 };
 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
 
 struct gk20a_instobj_dma {
        struct gk20a_instobj base;
 
-       u32 *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
 };
 struct gk20a_instobj_iommu {
        struct gk20a_instobj base;
 
+       /* to link into gk20a_instmem::vaddr_lru */
+       struct list_head vaddr_node;
+       /* how many clients are using vaddr? */
+       u32 use_cpt;
+
        /* will point to the higher half of pages */
        dma_addr_t *dma_addrs;
        /* array of base.mem->size pages (+ dma_addr_ts) */
 
        /* Only used by DMA API */
        struct dma_attrs attrs;
-
-       void __iomem * (*cpu_map)(struct nvkm_memory *);
 };
 #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 
        return (u64)gk20a_instobj(memory)->mem.size << 12;
 }
 
-static void __iomem *
-gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
-{
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-       struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
-       struct device *dev = node->base.imem->base.subdev.device->dev;
-       int npages = nvkm_memory_size(memory) >> 12;
-       struct page *pages[npages];
-       int i;
-
-       /* we shouldn't see a gk20a on anything but arm/arm64 anyways */
-       /* phys_to_page does not exist on all platforms... */
-       pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
-       for (i = 1; i < npages; i++)
-               pages[i] = pages[0] + i;
-
-       return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-#else
-       BUG();
-       return NULL;
-#endif
-}
-
-static void __iomem *
-gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
-{
-       struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
-       int npages = nvkm_memory_size(memory) >> 12;
-
-       return vmap(node->pages, npages, VM_MAP,
-                   pgprot_writecombine(PAGE_KERNEL));
-}
-
 /*
  * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
  */
 static void
-gk20a_instobj_recycle_vaddr(struct gk20a_instobj *obj)
+gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
 {
-       struct gk20a_instmem *imem = obj->imem;
+       struct gk20a_instmem *imem = obj->base.imem;
        /* there should not be any user left... */
        WARN_ON(obj->use_cpt);
        list_del(&obj->vaddr_node);
-       vunmap(obj->vaddr);
-       obj->vaddr = NULL;
-       imem->vaddr_use -= nvkm_memory_size(&obj->memory);
+       vunmap(obj->base.vaddr);
+       obj->base.vaddr = NULL;
+       imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
        nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
                   imem->vaddr_max);
 }
                if (list_empty(&imem->vaddr_lru))
                        break;
 
-               gk20a_instobj_recycle_vaddr(list_first_entry(&imem->vaddr_lru,
-                                            struct gk20a_instobj, vaddr_node));
+               gk20a_instobj_iommu_recycle_vaddr(
+                               list_first_entry(&imem->vaddr_lru,
+                               struct gk20a_instobj_iommu, vaddr_node));
        }
 }
 
 static void __iomem *
-gk20a_instobj_acquire(struct nvkm_memory *memory)
+gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
 {
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+
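+       /* flush the L2 so the CPU sees any pending GPU writes; the DMA
+        * API's permanent CPU mapping can then be returned directly
+        */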
+       nvkm_ltc_flush(ltc);
+
+       return node->vaddr;
+}
+
+static void __iomem *
+gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
+{
+       struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+       struct gk20a_instmem *imem = node->base.imem;
+       struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
        const u64 size = nvkm_memory_size(memory);
        unsigned long flags;
 
 
        spin_lock_irqsave(&imem->lock, flags);
 
-       if (node->vaddr) {
+       if (node->base.vaddr) {
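+               /* a CPU mapping already exists, reuse it */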
                if (!node->use_cpt) {
                        /* remove from LRU list since mapping in use again */
                        list_del(&node->vaddr_node);
        /* try to free some address space if we reached the limit */
        gk20a_instmem_vaddr_gc(imem, size);
 
-       node->vaddr = imem->cpu_map(memory);
-
-       if (!node->vaddr) {
+       /* map the pages, write-combined to keep CPU accesses uncached */
+       node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
+                               pgprot_writecombine(PAGE_KERNEL));
+       if (!node->base.vaddr) {
                nvkm_error(&imem->base.subdev, "cannot map instobj - "
                           "this is not going to end well...\n");
                goto out;
        node->use_cpt++;
        spin_unlock_irqrestore(&imem->lock, flags);
 
-       return node->vaddr;
+       return node->base.vaddr;
 }
 
 static void
-gk20a_instobj_release(struct nvkm_memory *memory)
+gk20a_instobj_release_dma(struct nvkm_memory *memory)
 {
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+
+       /* in case we got a write-combined mapping */
+       wmb();
+       nvkm_ltc_invalidate(ltc);
+}
+
+static void
+gk20a_instobj_release_iommu(struct nvkm_memory *memory)
+{
+       struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+       struct gk20a_instmem *imem = node->base.imem;
+       struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
        unsigned long flags;
 
        spin_lock_irqsave(&imem->lock, flags);
        nvkm_vm_map_at(vma, offset, &node->mem);
 }
 
-/*
- * Clear the CPU mapping of an instobj if it exists
- */
-static void
-gk20a_instobj_dtor(struct gk20a_instobj *node)
-{
-       struct gk20a_instmem *imem = node->imem;
-       unsigned long flags;
-
-       spin_lock_irqsave(&imem->lock, flags);
-
-       /* vaddr has already been recycled */
-       if (!node->vaddr)
-               goto out;
-
-       gk20a_instobj_recycle_vaddr(node);
-
-out:
-       spin_unlock_irqrestore(&imem->lock, flags);
-}
-
 static void *
 gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 {
        struct gk20a_instmem *imem = node->base.imem;
        struct device *dev = imem->base.subdev.device->dev;
 
-       gk20a_instobj_dtor(&node->base);
-
-       if (unlikely(!node->cpuaddr))
+       if (unlikely(!node->base.vaddr))
                goto out;
 
-       dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
+       dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
                       node->handle, &imem->attrs);
 
 out:
        struct gk20a_instmem *imem = node->base.imem;
        struct device *dev = imem->base.subdev.device->dev;
        struct nvkm_mm_node *r;
+       unsigned long flags;
        int i;
 
-       gk20a_instobj_dtor(&node->base);
-
        if (unlikely(list_empty(&node->base.mem.regions)))
                goto out;
 
+       spin_lock_irqsave(&imem->lock, flags);
+
+       /* recycle the vaddr, unless it has already been reclaimed */
+       if (node->base.vaddr)
+               gk20a_instobj_iommu_recycle_vaddr(node);
+
+       spin_unlock_irqrestore(&imem->lock, flags);
+
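+       /* release the IOMMU mapping and free the backing pages */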
        r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
                             rl_entry);
 
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
-       .acquire = gk20a_instobj_acquire,
-       .release = gk20a_instobj_release,
+       .acquire = gk20a_instobj_acquire_dma,
+       .release = gk20a_instobj_release_dma,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
-       .acquire = gk20a_instobj_acquire,
-       .release = gk20a_instobj_release,
+       .acquire = gk20a_instobj_acquire_iommu,
+       .release = gk20a_instobj_release_iommu,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
 
        nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
 
-       node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
-                                       &node->handle, GFP_KERNEL,
-                                       &imem->attrs);
-       if (!node->cpuaddr) {
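+       /* allocate the buffer and keep the DMA API's CPU mapping for the
+        * whole life of the object
+        */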
+       node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
+                                          &node->handle, GFP_KERNEL,
+                                          &imem->attrs);
+       if (!node->base.vaddr) {
                nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }
                imem->mm = &tdev->iommu.mm;
                imem->domain = tdev->iommu.domain;
                imem->iommu_pgshift = tdev->iommu.pgshift;
-               imem->cpu_map = gk20a_instobj_cpu_map_iommu;
                imem->iommu_bit = tdev->func->iommu_bit;
 
                nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
                init_dma_attrs(&imem->attrs);
-               /* We will access the memory through our own mapping */
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
-               imem->cpu_map = gk20a_instobj_cpu_map_dma;
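+               /* CPU access goes through the DMA API's kernel mapping */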
 
                nvkm_info(&imem->base.subdev, "using DMA API\n");
        }