        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *vm = &dev_priv->gtt.base;
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        size_t total_obj_size, total_gtt_size;
        int count, ret;
 
        if (ret)
                return ret;
 
+       /* FIXME: the user of this interface might want more than just GGTT */
        switch (list) {
        case ACTIVE_LIST:
                seq_puts(m, "Active:\n");
        }
 
        total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(obj, head, mm_list) {
-               seq_puts(m, "   ");
-               describe_obj(m, obj);
-               seq_putc(m, '\n');
-               total_obj_size += obj->base.size;
-               total_gtt_size += i915_gem_obj_ggtt_size(obj);
+       list_for_each_entry(vma, head, mm_list) {
+               seq_printf(m, "   ");
+               describe_obj(m, vma->obj);
+               seq_printf(m, "\n");
+               total_obj_size += vma->obj->base.size;
+               total_gtt_size += vma->node.size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
-static int i915_gem_object_info(struct seq_file *m, void *data)
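+/*
+ * Like count_objects(), but walks a list of vmas. It still accounts the
+ * GGTT size of each binding, since only GGTT lists use it so far.
+ */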
+#define count_vmas(list, member) do { \
+       list_for_each_entry(vma, list, member) { \
+               size += i915_gem_obj_ggtt_size(vma->obj); \
+               ++count; \
+               if (vma->obj->map_and_fenceable) { \
+                       mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+                       ++mappable_count; \
+               } \
+       } \
+} while (0)
+
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_file *file;
+       struct i915_vma *vma;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&vm->active_list, mm_list);
+       count_vmas(&vm->active_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&vm->inactive_list, mm_list);
+       count_vmas(&vm->inactive_list, mm_list);
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct i915_address_space *vm;
+       struct i915_vma *vma, *x;
        int ret;
 
        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
                i915_gem_retire_requests(dev);
 
        if (val & DROP_BOUND) {
-               list_for_each_entry_safe(obj, next, &vm->inactive_list,
-                                        mm_list) {
-                       if (obj->pin_count)
-                               continue;
-
-                       ret = i915_gem_object_ggtt_unbind(obj);
-                       if (ret)
-                               goto unlock;
+               list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
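+                       /* Unbind unpinned vmas on this vm's inactive list. */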
+                       list_for_each_entry_safe(vma, x, &vm->inactive_list,
+                                                mm_list) {
+                               if (vma->obj->pin_count)
+                                       continue;
+
+                               ret = i915_vma_unbind(vma);
+                               if (ret)
+                                       goto unlock;
+                       }
                }
        }
 
 
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
 
+       /** This vma's place on the active/inactive lists */
+       struct list_head mm_list;
+
        struct list_head vma_link; /* Link in the object's VMA list */
 };
 
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       /** This object's place on the active/inactive lists */
        struct list_head ring_list;
-       struct list_head mm_list;
        /** This object's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
 
 
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
                obj->active = 1;
        }
 
-       /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &vm->active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &vm->inactive_list);
+       list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
        i915_gem_gtt_finish_object(obj);
        i915_gem_object_unpin_pages(obj);
 
-       list_del(&obj->mm_list);
+       list_del(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
                obj->map_and_fenceable = true;
                goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &vm->inactive_list);
+       list_add_tail(&vma->mm_list, &vm->inactive_list);
 
        /* Keep GGTT vmas first to make debug easier */
        if (i915_is_ggtt(vm))
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list,
-                              &dev_priv->gtt.base.inactive_list);
+       if (i915_gem_object_is_inactive(obj)) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
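+               /* Not every object has a GGTT vma any more; check first. */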
+               if (vma)
+                       list_move_tail(&vma->mm_list,
+                                      &dev_priv->gtt.base.inactive_list);
+       }
 
        return 0;
 }
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
-       INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
                return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
        vma->vm = vm;
        vma->obj = obj;
 
 
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
+               struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+               struct i915_address_space *ggtt = &dev_priv->gtt.base;
                from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
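+               /* move_to_active() no longer handles mm_list; do it here. */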
+               list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
                i915_gem_object_move_to_active(from->obj, ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
 
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj, &vm->inactive_list, mm_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+       list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(obj, &vm->active_list, mm_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+       list_for_each_entry(vma, &vm->active_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_address_space *vm;
-       struct drm_i915_gem_object *obj, *next;
+       struct i915_vma *vma, *next;
        bool lists_empty = true;
        int ret;
 
 
        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
-                       if (obj->pin_count == 0)
-                               WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)));
+               list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+                       if (vma->obj->pin_count == 0)
+                               WARN_ON(i915_vma_unbind(vma));
        }
 
        return 0;
 
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
+               /* FIXME: This lookup gets fixed later <-- danvet */
+               list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
                i915_gem_object_move_to_active(obj, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
 
        obj->has_global_gtt_mapping = 1;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &ggtt->inactive_list);
+       list_add_tail(&vma->mm_list, &ggtt->inactive_list);
 
        return obj;
 
 
 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int i = 0;
 
-       list_for_each_entry(obj, head, mm_list) {
-               capture_bo(err++, obj);
+       list_for_each_entry(vma, head, mm_list) {
+               capture_bo(err++, vma->obj);
                if (++i == count)
                        break;
        }
 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                             struct intel_ring_buffer *ring)
 {
-       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        u32 seqno;
 
        }
 
        seqno = ring->get_seqno(ring, false);
-       list_for_each_entry(obj, &vm->active_list, mm_list) {
-               if (obj->ring != ring)
-                       continue;
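+       /* The batch vma may be bound in any address space; search them all. */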
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               list_for_each_entry(vma, &vm->active_list, mm_list) {
+                       obj = vma->obj;
+                       if (obj->ring != ring)
+                               continue;
 
-               if (i915_seqno_passed(seqno, obj->last_read_seqno))
-                       continue;
+                       if (i915_seqno_passed(seqno, obj->last_read_seqno))
+                               continue;
 
-               if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
-                       continue;
+                       if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+                               continue;
 
-               /* We need to copy these to an anonymous buffer as the simplest
-                * method to avoid being overwritten by userspace.
-                */
-               return i915_error_object_create(dev_priv, obj);
+                       /* We need to copy these to an anonymous buffer as the simplest
+                        * method to avoid being overwritten by userspace.
+                        */
+                       return i915_error_object_create(dev_priv, obj);
+               }
        }
 
        return NULL;
                                     struct drm_i915_error_state *error)
 {
        struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        int i;
 
        i = 0;
-       list_for_each_entry(obj, &vm->active_list, mm_list)
+       list_for_each_entry(vma, &vm->active_list, mm_list)
                i++;
        error->active_bo_count = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)