for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
                if (ctx->engine[n].state)
-                       per_file_stats(0, ctx->engine[n].state, data);
+                       per_file_stats(0, ctx->engine[n].state->obj, data);
                if (ctx->engine[n].ring)
                        per_file_stats(0, ctx->engine[n].ring->obj, data);
        }
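
Everything in this patch follows from one type change: per-engine context
state is now tracked by a struct i915_vma instead of by the GEM object
directly, so call sites must reach the object via vma->obj. A minimal
sketch of the relationship assumed throughout, reduced to the fields this
patch actually dereferences (the real i915 definitions carry much more):

    struct i915_vma {
        struct drm_i915_gem_object *obj; /* backing storage */
        struct drm_mm_node node;         /* GGTT address in node.start */
        unsigned int flags;              /* e.g. I915_VMA_GLOBAL_BIND */
    };
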
                        seq_printf(m, "%s: ", engine->name);
                        seq_putc(m, ce->initialised ? 'I' : 'i');
                        if (ce->state)
-                               describe_obj(m, ce->state);
+                               describe_obj(m, ce->state->obj);
                        if (ce->ring)
                                describe_ctx_ring(m, ce->ring);
                        seq_putc(m, '\n');
                              struct i915_gem_context *ctx,
                              struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+       struct i915_vma *vma = ctx->engine[engine->id].state;
        struct page *page;
-       uint32_t *reg_state;
        int j;
-       unsigned long ggtt_offset = 0;
 
        seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
 
-       if (ctx_obj == NULL) {
-               seq_puts(m, "\tNot allocated\n");
+       if (!vma) {
+               seq_puts(m, "\tFake context\n");
                return;
        }
 
-       if (!i915_gem_obj_ggtt_bound(ctx_obj))
-               seq_puts(m, "\tNot bound in GGTT\n");
-       else
-               ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+       if (vma->flags & I915_VMA_GLOBAL_BIND)
+               seq_printf(m, "\tBound in GGTT at 0x%08x\n",
+                          lower_32_bits(vma->node.start));
 
-       if (i915_gem_object_get_pages(ctx_obj)) {
-               seq_puts(m, "\tFailed to get pages for context object\n");
+       if (i915_gem_object_get_pages(vma->obj)) {
+               seq_puts(m, "\tFailed to get pages for context object\n\n");
                return;
        }
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       if (!WARN_ON(page == NULL)) {
-               reg_state = kmap_atomic(page);
+       page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
+       if (page) {
+               u32 *reg_state = kmap_atomic(page);
 
                for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-                       seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                                  ggtt_offset + 4096 + (j * 4),
+                       seq_printf(m,
+                                  "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                  j * 4,
                                   reg_state[j], reg_state[j + 1],
                                   reg_state[j + 2], reg_state[j + 3]);
                }
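
Two notes on the dump rewrite above. The register dump now prints offsets
relative to the state page ("[0x%04x]", j * 4) rather than absolute GGTT
addresses, since the context may not be bound in the GGTT at dump time;
the old 4096 literal was the state-page offset (LRC_STATE_PN * PAGE_SIZE).
The doubled newline in the failure message presumably preserves the
blank-line separator the normal path emits after a full dump. Finally,
kmap_atomic() must be paired with kunmap_atomic() once the loop finishes,
which the elided tail of this hunk is expected to do:

    u32 *reg_state = kmap_atomic(page);
    /* ... read-only walk of the register state ... */
    kunmap_atomic(reg_state);

(The loop bound 0x600 / sizeof(u32) / 4 together with j += 4 covers only
the first 0x180 bytes of the 0x600-byte window; that quirk predates this
patch and is carried over unchanged.)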
 
        u32 ggtt_alignment;
 
        struct intel_context {
-               struct drm_i915_gem_object *state;
+               struct i915_vma *state;
                struct intel_ring *ring;
-               struct i915_vma *lrc_vma;
                uint32_t *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
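
This struct change is the heart of the patch: state becomes the VMA, and
the separately cached lrc_vma is redundant because ce->state now is that
VMA. The translation applied mechanically by the remaining hunks:

    /* before                          after
     * ce->state      (GEM object)     ce->state->obj
     * ce->lrc_vma    (GGTT binding)   ce->state
     * ce->lrc_vma->node.start         ce->state->node.start
     */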
 
                if (ce->ring)
                        intel_ring_free(ce->ring);
 
-               i915_gem_object_put(ce->state);
+               i915_vma_put(ce->state);
        }
 
        list_del(&ctx->link);
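
i915_vma_put() takes over from i915_gem_object_put() when freeing the
context. Judging by its use here, it is a thin wrapper that drops the
reference held on the VMA's backing object; a sketch under that
assumption:

    static inline void i915_vma_put(struct i915_vma *vma)
    {
        i915_gem_object_put(vma->obj);
    }
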
        ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
        if (dev_priv->hw_context_size) {
-               struct drm_i915_gem_object *obj =
-                               i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+               struct drm_i915_gem_object *obj;
+               struct i915_vma *vma;
+
+               obj = i915_gem_alloc_context_obj(dev,
+                                                dev_priv->hw_context_size);
                if (IS_ERR(obj)) {
                        ret = PTR_ERR(obj);
                        goto err_out;
                }
-               ctx->engine[RCS].state = obj;
+
+               vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+               if (IS_ERR(vma)) {
+                       i915_gem_object_put(obj);
+                       ret = PTR_ERR(vma);
+                       goto err_out;
+               }
+
+               ctx->engine[RCS].state = vma;
        }
 
        /* Default context will never have a file_priv */
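
Note the error handling above: if i915_vma_create() fails, the caller
still owns the bare object and must drop it explicitly. Once the VMA is
stored, the object reference travels with it until context free. The
resulting lifecycle, using only names from this patch:

    /*
     * alloc: obj = i915_gem_alloc_context_obj(...);
     *        vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
     *        ctx->engine[RCS].state = vma;      keeps the obj reference
     *
     * use:   i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
     *        ... vma->node.start is a valid GGTT offset while pinned ...
     *        i915_vma_unpin(vma);
     *
     * free:  i915_vma_put(ctx->engine[RCS].state);   drops the reference
     */
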
                struct intel_context *ce = &ctx->engine[engine->id];
 
                if (ce->state)
-                       i915_gem_object_ggtt_unpin(ce->state);
+                       i915_vma_unpin(ce->state);
 
                i915_gem_context_put(ctx);
        }
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring,
-                       i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-                       flags);
+       intel_ring_emit(ring, req->ctx->engine[RCS].state->node.start | flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
        struct i915_gem_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
        struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+       struct i915_vma *vma = to->engine[RCS].state;
        struct i915_gem_context *from;
        u32 hw_flags;
        int ret, i;
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
-                                      to->ggtt_alignment, 0);
+       ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
        if (ret)
                return ret;
 
         *
         * XXX: We need a real interface to do this instead of trickery.
         */
-       ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
+       ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
        if (ret)
-               goto unpin_out;
+               goto err;
 
        if (needs_pd_load_pre(ppgtt, engine, to)) {
                /* Older GENs and non render rings still want the load first,
                trace_switch_mm(engine, to);
                ret = ppgtt->switch_mm(ppgtt, req);
                if (ret)
-                       goto unpin_out;
+                       goto err;
        }
 
        if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
        if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
                ret = mi_set_context(req, hw_flags);
                if (ret)
-                       goto unpin_out;
+                       goto err;
        }
 
        /* The backing object for the context is done after switching to the
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               struct drm_i915_gem_object *obj = from->engine[RCS].state;
-
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
-
-               /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_ggtt_unpin(obj);
+               i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+               /* state is kept alive until the next request */
+               i915_vma_unpin(from->engine[RCS].state);
                i915_gem_context_put(from);
        }
        engine->last_context = i915_gem_context_get(to);
 
        return 0;
 
-unpin_out:
-       i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+       i915_vma_unpin(vma);
        return ret;
 }
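
Two details in the switch path deserve a callout. First, i915_vma_pin()
needs an explicit PIN_GLOBAL: the old i915_gem_object_ggtt_pin() helper
implied a GGTT binding, the raw VMA API does not. Second, intel_ring_emit()
takes a u32, so the u64 node.start OR-ed with flags for MI_SET_CONTEXT is
implicitly truncated; that is safe because GGTT offsets fit in 32 bits,
matching the lower_32_bits() use in the debugfs hunk. The pin/unpin
balance after the rewrite:

    /* success: 'to' stays pinned as engine->last_context; 'from' is
     * unpinned here but kept alive by the active reference taken in
     * i915_vma_move_to_active() until the request completes.
     * failure: a single 'err' label suffices, since the only unwind
     * step left is i915_vma_unpin(vma). */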
 
 
                                        i915_error_ggtt_object_create(dev_priv,
                                                                      engine->scratch.obj);
 
-                       ee->ctx =
-                               i915_error_ggtt_object_create(dev_priv,
-                                                             request->ctx->engine[i].state);
+                       if (request->ctx->engine[i].state) {
+                               ee->ctx = i915_error_ggtt_object_create(dev_priv,
+                                                                       request->ctx->engine[i].state->obj);
+                       }
 
                        if (request->pid) {
                                struct task_struct *task;
 
                lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
                /* The state page is after PPHWSP */
-               gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
+               gfx_addr = ce->state->node.start;
                lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
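
Assuming gfx_addr is a 32-bit GuC address (the GuC only deals in GGTT
offsets), the assignment from the u64 node.start narrows implicitly. It
follows the same pattern as the rest of the patch, though spelling it

    gfx_addr = lower_32_bits(ce->state->node.start);

would make the truncation visible.
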
        /* any value greater than GUC_POWER_D0 */
        data[1] = GUC_POWER_D1;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = ctx->engine[RCS].state->node.start;
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
        data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
        data[1] = GUC_POWER_D0;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = ctx->engine[RCS].state->node.start;
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
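
The suspend and resume actions differ only in action code and power
state; both now read the shared page from the kernel context's VMA. A
possible follow-up could hoist the lookup into a helper (the name below
is hypothetical, not part of this patch):

    /* Hypothetical helper, not in this patch. */
    static u32 guc_shared_data_offset(struct i915_gem_context *ctx)
    {
        return lower_32_bits(ctx->engine[RCS].state->node.start);
    }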
 
 
        desc = ctx->desc_template;                              /* bits  3-4  */
        desc |= engine->ctx_desc_template;                      /* bits  0-11 */
-       desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+       desc |= ce->state->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
                                                                /* bits 12-31 */
        desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
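
The plain OR works because every term occupies a disjoint bit field:
node.start + LRC_PPHWSP_PN * PAGE_SIZE is page-aligned, so its low 12
bits are zero and cannot clobber the template in bits 0-11, while hw_id
sits above bit 32:

    /* 52      32               12         0
     *  | hw_id | GGTT address   | template |   (per the comments above)
     */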
 
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ctx->i915;
        struct intel_context *ce = &ctx->engine[engine->id];
        void *vaddr;
        u32 *lrc_reg_state;
        if (ce->pin_count++)
                return 0;
 
-       ret = i915_gem_object_ggtt_pin(ce->state, NULL,
-                                      0, GEN8_LR_CONTEXT_ALIGN,
-                                      PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+                          PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
        if (ret)
                goto err;
 
-       vaddr = i915_gem_object_pin_map(ce->state, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
-               goto unpin_ctx_obj;
+               goto unpin_vma;
        }
 
        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
        if (ret)
                goto unpin_map;
 
-       ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
        intel_lr_context_descriptor_update(ctx, engine);
 
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
        ce->lrc_reg_state = lrc_reg_state;
-       ce->state->dirty = true;
+       ce->state->obj->dirty = true;
 
        /* Invalidate GuC TLB. */
-       if (i915.enable_guc_submission)
+       if (i915.enable_guc_submission) {
+               struct drm_i915_private *dev_priv = ctx->i915;
                I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+       }
 
        i915_gem_context_get(ctx);
        return 0;
 
 unpin_map:
-       i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
-       i915_gem_object_ggtt_unpin(ce->state);
+       i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+       __i915_vma_unpin(ce->state);
 err:
        ce->pin_count = 0;
        return ret;
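
Notes on the pin path above: PIN_OFFSET_BIAS | GUC_WOPCM_TOP keeps the
context image above the GuC's reserved WOPCM range (the usual i915
rationale: the GuC cannot address GGTT offsets below its WOPCM top), and
PIN_GLOBAL is again spelled out. On the error path, __i915_vma_unpin()
is the unchecked variant; judging by this usage, the split is roughly:

    static inline void i915_vma_unpin(struct i915_vma *vma)
    {
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
        __i915_vma_unpin(vma);  /* the raw pin-count decrement */
    }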
 
        intel_ring_unpin(ce->ring);
 
-       i915_gem_object_unpin_map(ce->state);
-       i915_gem_object_ggtt_unpin(ce->state);
-
-       ce->lrc_vma = NULL;
-       ce->lrc_desc = 0;
-       ce->lrc_reg_state = NULL;
+       i915_gem_object_unpin_map(ce->state->obj);
+       i915_vma_unpin(ce->state);
 
        i915_gem_context_put(ctx);
 }
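
With lrc_vma gone, unpin no longer clears lrc_desc and lrc_reg_state.
That is safe as long as every reader is gated on pin_count: the next
intel_lr_context_pin() recomputes both (via the descriptor update and a
fresh pin_map), so stale values are never observed while unpinned.
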
 }
 
 static int
-lrc_setup_hws(struct intel_engine_cs *engine,
-             struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
        void *hws;
 
        /* The HWSP is part of the default context object in LRC mode. */
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
-                                      LRC_PPHWSP_PN * PAGE_SIZE;
-       hws = i915_gem_object_pin_map(dctx_obj, I915_MAP_WB);
+       engine->status_page.gfx_addr =
+               vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+       hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(hws))
                return PTR_ERR(hws);
        engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-       engine->status_page.obj = dctx_obj;
+       engine->status_page.obj = vma->obj;
 
        return 0;
 }
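
The HWSP shares the context image: both the GGTT address and the CPU
pointer are offset by LRC_PPHWSP_PN * PAGE_SIZE into the same object.
Layout assumed by these constants:

    /* page LRC_PPHWSP_PN : per-process HWSP, doubles as the status page
     * page LRC_STATE_PN  : execlist register state (dumped in debugfs)
     */

As with the GuC hunks, assigning the u64 node.start to the (presumably
32-bit) status_page.gfx_addr relies on GGTT offsets fitting in 32 bits.
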
 {
        struct drm_i915_gem_object *ctx_obj;
        struct intel_context *ce = &ctx->engine[engine->id];
+       struct i915_vma *vma;
        uint32_t context_size;
        struct intel_ring *ring;
        int ret;
                return PTR_ERR(ctx_obj);
        }
 
+       vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto error_deref_obj;
+       }
+
        ring = intel_engine_create_ring(engine, ctx->ring_size);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
        }
 
        ce->ring = ring;
-       ce->state = ctx_obj;
+       ce->state = vma;
        ce->initialised = engine->init_context == NULL;
 
        return 0;
        intel_ring_free(ring);
 error_deref_obj:
        i915_gem_object_put(ctx_obj);
-       ce->ring = NULL;
-       ce->state = NULL;
        return ret;
 }
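
Two consequences of the rewrite above. The old ce->ring = NULL and
ce->state = NULL resets could be dropped because ce is now written only
after the last failure point, so the error paths never see partially
initialised fields. And if ring creation fails after i915_vma_create()
succeeded, no separate VMA teardown is needed: i915_gem_object_put()
releases the object and the VMA goes with it, consistent with the VMA
not holding its own object reference (see the creation hunk earlier).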
 
 
        for_each_engine(engine, dev_priv) {
                struct intel_context *ce = &ctx->engine[engine->id];
-               struct drm_i915_gem_object *ctx_obj = ce->state;
                void *vaddr;
                uint32_t *reg_state;
 
-               if (!ctx_obj)
+               if (!ce->state)
                        continue;
 
-               vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+               vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
                if (WARN_ON(IS_ERR(vaddr)))
                        continue;
 
                reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               i915_gem_object_unpin_map(ctx_obj);
+               ce->state->obj->dirty = true;
+               i915_gem_object_unpin_map(ce->state->obj);
 
                ce->ring->head = 0;
                ce->ring->tail = 0;
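
The dirty marking moved next to the unmap, which reads naturally as
write-then-publish:

    reg_state[CTX_RING_HEAD+1] = 0;  /* CPU writes through the kmap... */
    reg_state[CTX_RING_TAIL+1] = 0;
    ce->state->obj->dirty = true;    /* ...so tell GEM the pages changed
                                      * before dropping the mapping */
    i915_gem_object_unpin_map(ce->state->obj);

Without obj->dirty, a later swap-out could discard the zeroed HEAD/TAIL.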
 
                return 0;
 
        if (ce->state) {
-               ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
-                                              ctx->ggtt_alignment, PIN_HIGH);
+               ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
+                                  PIN_GLOBAL | PIN_HIGH);
                if (ret)
                        goto error;
        }
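
Same conversion as the LRC pin: the GGTT binding that
i915_gem_object_ggtt_pin() used to imply becomes an explicit PIN_GLOBAL,
with the PIN_HIGH placement hint carried over:

    /* old: i915_gem_object_ggtt_pin(obj, NULL, 0, align, PIN_HIGH)
     * new: i915_vma_pin(vma, 0, align, PIN_GLOBAL | PIN_HIGH) */

The matching unpin in the final hunk below completes the conversion.
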
                return;
 
        if (ce->state)
-               i915_gem_object_ggtt_unpin(ce->state);
+               i915_vma_unpin(ce->state);
 
        i915_gem_context_put(ctx);
 }