 #define nvxx_therm(a) nvxx_device(a)->therm
 #define nvxx_volt(a) nvxx_device(a)->volt
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
-#include <engine/sw.h>
 
 #define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvxx_device(a)->gr
 
        void *data;
        int (*ntfy)(const void *, u32, const void *, u32);
 
-       struct nvkm_vm *vm;
-
        struct list_head umem;
        spinlock_t lock;
 };
 
 #ifndef __NVKM_OS_H__
 #define __NVKM_OS_H__
 #include <nvif/os.h>
-#define nvkm_vmm nvkm_vm
 
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
        iowrite32_native(lower_32_bits(_v), &_p[0]);                           \
        iowrite32_native(upper_32_bits(_v), &_p[1]);                           \
 } while(0)
-
 #endif
 
 #define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
-struct nvkm_mem {
-       struct nvkm_mm_node *mem;
-       dma_addr_t *pages;
-       u32 memtype;
-       u64 offset;
-       u64 size;
-       struct sg_table *sg;
-
-       struct nvkm_memory *memory;
-};
-
 struct nvkm_fb_tile {
        struct nvkm_mm_node *tag;
        u32 addr;
        struct nvkm_memory *mmu_wr;
 };
 
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
 void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
        int (*prog)(struct nvkm_ram *);
        void (*tidy)(struct nvkm_ram *);
 };
-
-extern const u8 gf100_pte_storage_type_map[256];
 #endif
 
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
-       struct nvkm_memory *mem[2];
-       u32 refcount[2];
-};
 
 struct nvkm_vma {
        struct list_head head;
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
        struct nvkm_tags *tags; /* Compression tag reference. */
-
-       struct nvkm_vma *node;
-       struct nvkm_vm *vm;
-       u64 offset;
-       u32 access;
 };
 
-struct nvkm_vm {
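+/* Virtual address space, renamed from the old struct nvkm_vm. */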
+struct nvkm_vmm {
        const struct nvkm_vmm_func *func;
        struct nvkm_mmu *mmu;
        const char *name;
        void *nullp;
 };
 
-int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-                struct lock_class_key *, struct nvkm_vm **);
-int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_memory *inst);
-int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
-                struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-
 int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
                 struct lock_class_key *, const char *name, struct nvkm_vmm **);
 struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
        const struct nvkm_mmu_func *func;
        struct nvkm_subdev subdev;
 
-       u64 limit;
        u8  dma_bits;
-       u8  lpg_shift;
 
        int heap_nr;
        struct {
 
        }
 
        cli->mem = &mems[ret];
-
-       if (1) {
-               cli->vm = cli->vmm.vm;
-               nvxx_client(&cli->base)->vm = cli->vm;
-       }
-
        return 0;
 done:
        if (ret)
 
        struct nouveau_vmm vmm;
        const struct nvif_mclass *mem;
 
-       struct nvkm_vm *vm;
        struct list_head head;
        void *abi16;
        struct list_head objects;
 
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
-       struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;
 
-       if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
-               NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
-               return -EINVAL;
-       }
-
        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
 
                return ret;
 
        vmm->cli = cli;
-       vmm->vm = nvkm_uvmm(vmm->vmm.object.priv)->vmm;
        return 0;
 }
 
 #ifndef __NOUVEAU_VMA_H__
 #define __NOUVEAU_VMA_H__
-#include <subdev/mmu/uvmm.h>
 #include <nvif/vmm.h>
 struct nouveau_bo;
 struct nouveau_mem;
 
 #include <engine/gr.h>
 #include <engine/mpeg.h>
 
-bool
-nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
-       return fb->func->memtype_valid(fb, memtype);
-}
-
 void
 nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
 {
 
 #include <core/memory.h>
 #include <core/option.h>
 
-extern const u8 gf100_pte_storage_type_map[256];
-
-bool
-gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
-       u8 memtype = (tile_flags & 0x0000ff00) >> 8;
-       return likely((gf100_pte_storage_type_map[memtype] != 0xff));
-}
-
 void
 gf100_fb_intr(struct nvkm_fb *base)
 {
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gf100_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 17,
 };
 
 
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gf108_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 17,
 };
 
 
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gk104_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 17,
 };
 
 
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 17,
 };
 
 
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm107_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 17,
 };
 
 
        .init_page = gm200_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm200_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 0 /* per-instance. */,
 };
 
 
        .init = gm200_fb_init,
        .init_page = gm200_fb_init_page,
        .intr = gf100_fb_intr,
-       .memtype_valid = gf100_fb_memtype_valid,
        .default_bigpage = 0 /* per-instance. */,
 };
 
 
        .init_page = gm200_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .ram_new = gp100_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
 
        .init = gp100_fb_init,
        .init_page = gm200_fb_init_page,
        .ram_new = gp100_ram_new,
-       .memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
 
        .init = gm200_fb_init,
        .init_page = gm200_fb_init_page,
        .intr = gf100_fb_intr,
-       .memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
 
 #include "ram.h"
 #include "regsnv04.h"
 
-bool
-nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
-       if (!(tile_flags & 0xff00))
-               return true;
-       return false;
-}
-
 static void
 nv04_fb_init(struct nvkm_fb *fb)
 {
 nv04_fb = {
        .init = nv04_fb_init,
        .ram_new = nv04_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv10_fb_tile_fini,
        .tile.prog = nv10_fb_tile_prog,
        .ram_new = nv10_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv10_fb_tile_fini,
        .tile.prog = nv10_fb_tile_prog,
        .ram_new = nv1a_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv20_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv20_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv20_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv20_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv20_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv20_fb_tile_prog,
        .ram_new = nv40_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv41_fb_tile_prog,
        .ram_new = nv41_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv44_fb_tile_prog,
        .ram_new = nv44_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv44_fb_tile_prog,
        .ram_new = nv44_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv41_fb_tile_prog,
        .ram_new = nv41_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv41_fb_tile_prog,
        .ram_new = nv49_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
        .tile.fini = nv20_fb_tile_fini,
        .tile.prog = nv44_fb_tile_prog,
        .ram_new = nv44_ram_new,
-       .memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
 
 #include <core/enum.h>
 #include <engine/fifo.h>
 
-int
-nv50_fb_memtype[0x80] = {
-       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
-       1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
-       0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
-       1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
 static int
 nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
 {
        return fb->func->ram_new(&fb->base, pram);
 }
 
-static bool
-nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
-       return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
-}
-
 static const struct nvkm_enum vm_dispatch_subclients[] = {
        { 0x00000000, "GRCTX" },
        { 0x00000001, "NOTIFY" },
        .init = nv50_fb_init,
        .intr = nv50_fb_intr,
        .ram_new = nv50_fb_ram_new,
-       .memtype_valid = nv50_fb_memtype_valid,
 };
 
 int
 
 
 int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
                 struct nvkm_fb **pfb);
-extern int nv50_fb_memtype[0x80];
 #endif
 
 
        int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
-       bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
-
        u8 default_bigpage;
 };
 
                 int index, struct nvkm_fb **);
 int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-
 void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
 
 int gf100_fb_oneinit(struct nvkm_fb *);
 int gf100_fb_init_page(struct nvkm_fb *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
 
 int gm200_fb_init_page(struct nvkm_fb *);
 #endif
 
                .mem = vram->mn,
        };
 
-       if (vma->vm) {
-               struct nvkm_mem mem = {
-                       .mem = vram->mn,
-                       .memory = &vram->memory,
-               };
-               nvkm_vm_map_at(vma, offset, &mem);
-               return 0;
-       }
-
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 
 #include "priv.h"
 
 #include <core/memory.h>
-#include <core/mm.h>
 #include <core/tegra.h>
-#include <subdev/fb.h>
 #include <subdev/ltc.h>
 #include <subdev/mmu.h>
 
                .mem = node->mn,
        };
 
-       if (vma->vm) {
-               struct nvkm_mem mem = {
-                       .mem = node->mn,
-                       .memory = &node->memory,
-               };
-               nvkm_vm_map_at(vma, 0, &mem);
-               return 0;
-       }
-
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 
        return pt;
 }
 
-static void
-nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
-            struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
-            struct nvkm_vmm_map *map)
-{
-       union {
-               struct nv50_vmm_map_v0 nv50;
-               struct gf100_vmm_map_v0 gf100;
-       } args;
-       struct nvkm_vmm *vmm = vma->vm;
-       void *argv = NULL;
-       u32 argc = 0;
-       int ret;
-
-       map->memory = mem->memory;
-       map->page = page;
-
-       if (vmm->func->valid) {
-               switch (vmm->mmu->subdev.device->card_type) {
-               case NV_50:
-                       args.nv50.version = 0;
-                       args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
-                       args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
-                       args.nv50.kind = (mem->memtype & 0x07f);
-                       args.nv50.comp = (mem->memtype & 0x180) >> 7;
-                       argv = &args.nv50;
-                       argc = sizeof(args.nv50);
-                       break;
-               case NV_C0:
-               case NV_E0:
-               case GM100:
-               case GP100: {
-                       args.gf100.version = 0;
-                       args.gf100.vol = (nvkm_memory_target(map->memory) != NVKM_MEM_TARGET_VRAM);
-                       args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
-                       args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
-                       args.gf100.kind = (mem->memtype & 0x0ff);
-                       argv = &args.gf100;
-                       argc = sizeof(args.gf100);
-               }
-                       break;
-               default:
-                       break;
-               }
-
-               ret = vmm->func->valid(vmm, argv, argc, map);
-               if (WARN_ON(ret))
-                       return;
-       }
-
-       mutex_lock(&vmm->mutex);
-       nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
-                                    vma->node->size, map, fn);
-       mutex_unlock(&vmm->mutex);
-
-       nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device, &vma->node->tags);
-       nvkm_memory_unref(&vma->node->memory);
-       vma->node->memory = nvkm_memory_ref(map->memory);
-       vma->node->tags = map->tags;
-}
-
 void
 nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
 {
        INIT_LIST_HEAD(&mmu->ptp.list);
 }
 
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
-       const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
-       if (page->desc->func->unmap) {
-               struct nvkm_vmm_map map = { .mem = node->mem };
-               nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
-               return;
-       }
-}
-
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
-                    struct nvkm_mem *mem)
-{
-       const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
-       if (page->desc->func->unmap) {
-               struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
-               nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
-               return;
-       }
-}
-
-static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
-              struct nvkm_mem *mem)
-{
-       const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
-       if (page->desc->func->unmap) {
-               struct nvkm_vmm_map map = { .dma = mem->pages };
-               nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
-               return;
-       }
-}
-
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
-{
-       if (node->sg)
-               nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
-       else
-       if (node->pages)
-               nvkm_vm_map_sg(vma, 0, node->size << 12, node);
-       else
-               nvkm_vm_map_at(vma, 0, node);
-}
-
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
-       nvkm_vmm_unmap(vma->vm, vma->node);
-}
-
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
-           struct nvkm_vma *vma)
-{
-       int ret;
-
-       mutex_lock(&vm->mutex);
-       ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
-                                 size, &vma->node);
-       mutex_unlock(&vm->mutex);
-       if (ret)
-               return ret;
-
-       vma->memory = NULL;
-       vma->tags = NULL;
-       vma->vm = NULL;
-       nvkm_vm_ref(vm, &vma->vm, NULL);
-       vma->offset = vma->addr = vma->node->addr;
-       vma->access = access;
-       return 0;
-}
-
-void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
-       nvkm_vmm_put(vma->vm, &vma->node);
-       nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
-{
-       return nvkm_vmm_boot(vm);
-}
-
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-           struct lock_class_key *key, struct nvkm_vm **pvm)
-{
-       struct nvkm_mmu *mmu = device->mmu;
-
-       *pvm = NULL;
-       if (mmu->func->vmm.ctor) {
-               int ret = mmu->func->vmm.ctor(mmu, mm_offset,
-                                             offset + length - mm_offset,
-                                             NULL, 0, key, "legacy", pvm);
-               if (ret) {
-                       nvkm_vm_ref(NULL, pvm, NULL);
-                       return ret;
-               }
-
-               return ret;
-       }
-
-       return -EINVAL;
-}
-
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
-{
-       if (ref) {
-               if (inst) {
-                       int ret = nvkm_vmm_join(ref, inst);
-                       if (ret)
-                               return ret;
-               }
-
-               nvkm_vmm_ref(ref);
-       }
-
-       if (*ptr) {
-               nvkm_vmm_part(*ptr, inst);
-               nvkm_vmm_unref(ptr);
-       }
-
-       *ptr = ref;
-       return 0;
-}
-
 static void
 nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
 {
 {
        nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
        mmu->func = func;
-       mmu->limit = func->limit;
        mmu->dma_bits = func->dma_bits;
-       mmu->lpg_shift = func->lpg_shift;
        nvkm_mmu_ptc_init(mmu);
        mmu->user.ctor = nvkm_ummu_new;
        mmu->user.base = func->mmu.user;
 
 
 static const struct nvkm_mmu_func
 g84_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 16,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
 
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
-const u8 gf100_pte_storage_type_map[256] =
-{
-       0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
-       0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
-       0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
-       0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
-       0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
-       0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
-       0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
-       0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
-       0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
-       0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
-       0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
-       0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
-       0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
-       0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
-       0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
-       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
-       0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
-};
-
 const u8 *
 gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
 {
-       *count = ARRAY_SIZE(gf100_pte_storage_type_map);
-       return gf100_pte_storage_type_map;
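+       /* Same kind mapping as the old gf100_pte_storage_type_map table,
+        * now kept local to this function. */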
+       static const u8
+       kind[256] = {
+               0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+               0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+               0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+               0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+               0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+               0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+               0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+               0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+               0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+               0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+               0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+               0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+               0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+               0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+               0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+               0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+               0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+       };
+
+       *count = ARRAY_SIZE(kind);
+       return kind;
 }
 
 static const struct nvkm_mmu_func
 gf100_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
 
 
 static const struct nvkm_mmu_func
 gk104_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
 
 
 static const struct nvkm_mmu_func
 gk20a_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
 
 
 static const struct nvkm_mmu_func
 gm200_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
        .vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
 
 static const struct nvkm_mmu_func
 gm200_mmu_fixed = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
 
 
 static const struct nvkm_mmu_func
 gm20b_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
        .vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
 
 static const struct nvkm_mmu_func
 gm20b_mmu_fixed = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 17,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
 
 
 static const struct nvkm_mmu_func
 gp100_mmu = {
-       .limit = (1ULL << 49),
        .dma_bits = 47,
-       .lpg_shift = 16,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
 
 
 static const struct nvkm_mmu_func
 gp10b_mmu = {
-       .limit = (1ULL << 49),
        .dma_bits = 47,
-       .lpg_shift = 16,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
 
 
 #include <nvif/class.h>
 
-#define NV04_PDMA_SIZE (128 * 1024 * 1024)
-
 const struct nvkm_mmu_func
 nv04_mmu = {
-       .limit = NV04_PDMA_SIZE,
        .dma_bits = 32,
-       .lpg_shift = 12,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
 
 
 #include <nvif/class.h>
 
-#define NV41_GART_SIZE (512 * 1024 * 1024)
-
 static void
 nv41_mmu_init(struct nvkm_mmu *mmu)
 {
 static const struct nvkm_mmu_func
 nv41_mmu = {
        .init = nv41_mmu_init,
-       .limit = NV41_GART_SIZE,
        .dma_bits = 39,
-       .lpg_shift = 12,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
 
 
 #include <nvif/class.h>
 
-#define NV44_GART_SIZE (512 * 1024 * 1024)
-
 static void
 nv44_mmu_init(struct nvkm_mmu *mmu)
 {
 static const struct nvkm_mmu_func
 nv44_mmu = {
        .init = nv44_mmu_init,
-       .limit = NV44_GART_SIZE,
        .dma_bits = 39,
-       .lpg_shift = 12,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 
 
 static const struct nvkm_mmu_func
 nv50_mmu = {
-       .limit = (1ULL << 40),
        .dma_bits = 40,
-       .lpg_shift = 16,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
        .mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
 
 struct nvkm_mmu_func {
        void (*init)(struct nvkm_mmu *);
 
-       u64 limit;
        u8  dma_bits;
-       u8  lpg_shift;
 
        struct {
                struct nvkm_sclass user;
 
                                                              func->unmap);
 }
 
-void
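+/* No external callers remain now that the legacy nvkm_vm frontend is gone. */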
+static void
 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size, struct nvkm_vmm_map *map,
                  nvkm_vmm_pte_func func)
 
 void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
-void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
-                      u64 addr, u64 size, struct nvkm_vmm_map *,
-                      nvkm_vmm_pte_func);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
 void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);