struct nvkm_mm_node {
        struct list_head nl_entry;
        struct list_head fl_entry;
-       struct list_head rl_entry;
+       struct nvkm_mm_node *next;
 
 #define NVKM_MM_HEAP_ANY 0x00
        u8  heap;
                  u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
+
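+/* true if the allocation consists of a single region, i.e. has no chained nodes */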
+static inline bool
+nvkm_mm_contiguous(struct nvkm_mm_node *node)
+{
+       return !node->next;
+}
 #endif
 
        u8  page_shift;
 
        struct nvkm_mm_node *tag;
-       struct list_head regions;
+       struct nvkm_mm_node *mem;
        dma_addr_t *pages;
        u32 memtype;
        u64 offset;
 
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
                        if (bo->mem.mem_type == TTM_PL_VRAM) {
                                struct nvkm_mem *mem = bo->mem.mm_node;
-                               if (!list_is_singular(&mem->regions))
+                               if (!nvkm_mm_contiguous(mem->mem))
                                        evict = true;
                        }
                        nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
 
                if (!this)
                        return -ENOMEM;
 
+               this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                if (!this)
                        return -ENOMEM;
 
+               this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
 
 {
        struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
        struct nvkm_mm *mm = &ram->vram;
-       struct nvkm_mm_node *r;
+       struct nvkm_mm_node **node, *r;
        struct nvkm_mem *mem;
        int type = (memtype & 0x0ff);
        int back = (memtype & 0x800);
        if (!mem)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&mem->regions);
        mem->size = size;
 
        mutex_lock(&ram->fb->subdev.mutex);
        }
        mem->memtype = type;
 
+       node = &mem->mem;
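+       /* link each allocated region onto the chain rooted at mem->mem */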
        do {
                if (back)
                        ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
                        return ret;
                }
 
-               list_add_tail(&r->rl_entry, &mem->regions);
+               *node = r;
+               node = &r->next;
                size -= r->length;
        } while (size);
        mutex_unlock(&ram->fb->subdev.mutex);
 
-       r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-       mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+       mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
        *pmem = mem;
        return 0;
 }
 
 void
 __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
-       struct nvkm_mm_node *this;
-
-       while (!list_empty(&mem->regions)) {
-               this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
-
-               list_del(&this->rl_entry);
-               nvkm_mm_free(&ram->vram, &this);
+       struct nvkm_mm_node *next = mem->mem;
+       struct nvkm_mm_node *node;
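+       /* free every region in the chain back to the vram allocator */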
+       while ((node = next)) {
+               next = node->next;
+               nvkm_mm_free(&ram->vram, &node);
        }
-
        nvkm_mm_free(&ram->tags, &mem->tag);
 }
 
 {
        struct nvkm_mm *heap = &ram->vram;
        struct nvkm_mm *tags = &ram->tags;
-       struct nvkm_mm_node *r;
+       struct nvkm_mm_node **node, *r;
        struct nvkm_mem *mem;
        int comp = (memtype & 0x300) >> 8;
        int type = (memtype & 0x07f);
                        comp = 0;
        }
 
-       INIT_LIST_HEAD(&mem->regions);
        mem->memtype = (comp << 7) | type;
        mem->size = max;
 
        type = nv50_fb_memtype[type];
+       node = &mem->mem;
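+       /* chain the allocated regions together, starting at mem->mem */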
        do {
                if (back)
                        ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
                        return ret;
                }
 
-               list_add_tail(&r->rl_entry, &mem->regions);
+               *node = r;
+               node = &r->next;
                max -= r->length;
        } while (max);
        mutex_unlock(&ram->fb->subdev.mutex);
 
-       r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-       mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+       mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
        *pmem = mem;
        return 0;
 }
 
        struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
        struct gk20a_instmem *imem = node->base.imem;
        struct device *dev = imem->base.subdev.device->dev;
-       struct nvkm_mm_node *r;
+       struct nvkm_mm_node *r = node->base.mem.mem;
        unsigned long flags;
        int i;
 
-       if (unlikely(list_empty(&node->base.mem.regions)))
+       if (unlikely(!r))
                goto out;
 
        spin_lock_irqsave(&imem->lock, flags);
 
        spin_unlock_irqrestore(&imem->lock, flags);
 
-       r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
-                            rl_entry);
-
        /* clear IOMMU bit to unmap pages */
        r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
        node->r.length = (npages << PAGE_SHIFT) >> 12;
 
        node->base.mem.offset = node->handle;
-
-       INIT_LIST_HEAD(&node->base.mem.regions);
-       list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
-
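+       /* the whole allocation is described by the single region node->r */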
+       node->base.mem.mem = &node->r;
        return 0;
 }
 
        r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
        node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-
-       INIT_LIST_HEAD(&node->base.mem.regions);
-       list_add_tail(&r->rl_entry, &node->base.mem.regions);
-
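+       /* the single region r describes the whole mapping */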
+       node->base.mem.mem = r;
        return 0;
 
 release_area:
 
 {
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
-       struct nvkm_mm_node *r;
+       struct nvkm_mm_node *r = node->mem;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 end, len;
 
        delta = 0;
-       list_for_each_entry(r, &node->regions, rl_entry) {
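+       /* walk the chain of regions backing this memory object */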
+       while (r) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;
 
 
                        delta += (u64)len << vma->node->type;
                }
-       }
+               r = r->next;
+       }
 
        mmu->func->flush(vm);
 }