intel_runtime_pm_get(dev_priv);
 
        for_each_ring(ring, dev_priv, ring_id) {
-               struct intel_ctx_submit_request *head_req = NULL;
+               struct drm_i915_gem_request *head_req = NULL;
                int count = 0;
                unsigned long flags;
 
                spin_lock_irqsave(&ring->execlist_lock, flags);
                list_for_each(cursor, &ring->execlist_queue)
                        count++;
                head_req = list_first_entry_or_null(&ring->execlist_queue,
-                               struct intel_ctx_submit_request, execlist_link);
+                               struct drm_i915_gem_request, execlist_link);
                spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
                seq_printf(m, "\t%d requests in queue\n", count);
                if (head_req) {
                        struct drm_i915_gem_object *ctx_obj;
 
-                       ctx_obj = head_req->request->ctx->engine[ring_id].state;
+                       ctx_obj = head_req->ctx->engine[ring_id].state;
                        seq_printf(m, "\tHead request id: %u\n",
                                   intel_execlists_ctx_id(ctx_obj));
                        seq_printf(m, "\tHead request tail: %u\n",
-                                  head_req->request->tail);
+                                  head_req->tail);
                }
 
                seq_putc(m, '\n');
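
The id printed above comes from intel_execlists_ctx_id(), also used by
execlists_check_remove_request() below. For reference, a sketch of that
helper as it stands in intel_lrc.c (bodies may differ slightly by tree):

	u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
	{
		u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);

		/* LRCA is required to be 4K aligned, so the more
		 * significant 20 bits are globally unique */
		return lrca >> 12;
	}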
 
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        struct intel_ringbuffer *ringbuf;
-       u32 request_ring_position, request_start;
+       u32 request_start;
        int ret;
 
        request = ring->outstanding_lazy_request;
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request_ring_position = intel_ring_get_tail(ringbuf);
+       request->postfix = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists) {
                ret = ring->emit_request(ringbuf, request);
        }
 
        request->head = request_start;
-       request->postfix = request_ring_position;
+       request->tail = intel_ring_get_tail(ringbuf);
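
For clarity, the three ring positions now recorded on the request, summarised
from the surrounding code and the retained comment (the exact set of
consumers, e.g. hangcheck and error capture, is an assumption):

	/*
	 *  request->head    - start of the request in the ring
	 *  request->postfix - tail sampled before the closing flush and
	 *                     seqno breadcrumb are emitted, so it never
	 *                     over-estimates how far the head has advanced
	 *  request->tail    - tail sampled after emit_request(), i.e. the
	 *                     true end of the request
	 */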
 
        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
         * pinned in place.
         */
        while (!list_empty(&ring->execlist_queue)) {
-               struct intel_ctx_submit_request *submit_req;
+               struct drm_i915_gem_request *submit_req;
 
                submit_req = list_first_entry(&ring->execlist_queue,
-                               struct intel_ctx_submit_request,
+                               struct drm_i915_gem_request,
                                execlist_link);
                list_del(&submit_req->execlist_link);
                intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(submit_req->request->ctx);
+               i915_gem_context_unreference(submit_req->ctx);
-               kfree(submit_req);
+               i915_gem_request_unreference(submit_req);
        }
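
The bare kfree() of the wrapper gives way to dropping a reference. A sketch of
the kref-based helpers this code now leans on, as declared in i915_drv.h
(bodies abbreviated and may differ slightly by tree):

	static inline void
	i915_gem_request_reference(struct drm_i915_gem_request *req)
	{
		kref_get(&req->ref);
	}

	static inline void
	i915_gem_request_unreference(struct drm_i915_gem_request *req)
	{
		WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
		kref_put(&req->ref, i915_gem_request_free);
	}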
 
 
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
-       struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
-       struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+       struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+       struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
 
        assert_spin_locked(&ring->execlist_lock);
 
                                 execlist_link) {
                if (!req0) {
                        req0 = cursor;
-               } else if (req0->request->ctx == cursor->request->ctx) {
+               } else if (req0->ctx == cursor->ctx) {
                        /* Same ctx: ignore first request, as second request
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
 
        WARN_ON(req1 && req1->elsp_submitted);
 
-       execlists_submit_contexts(ring, req0->request->ctx, req0->request->tail,
-                                 req1 ? req1->request->ctx : NULL,
-                                 req1 ? req1->request->tail : 0);
+       execlists_submit_contexts(ring, req0->ctx, req0->tail,
+                                 req1 ? req1->ctx : NULL,
+                                 req1 ? req1->tail : 0);
 
        req0->elsp_submitted++;
        if (req1)
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
                                           u32 request_id)
 {
-       struct intel_ctx_submit_request *head_req;
+       struct drm_i915_gem_request *head_req;
 
        assert_spin_locked(&ring->execlist_lock);
 
        head_req = list_first_entry_or_null(&ring->execlist_queue,
-                                           struct intel_ctx_submit_request,
+                                           struct drm_i915_gem_request,
                                            execlist_link);
 
        if (head_req != NULL) {
                struct drm_i915_gem_object *ctx_obj =
-                               head_req->request->ctx->engine[ring->id].state;
+                               head_req->ctx->engine[ring->id].state;
                if (intel_execlists_ctx_id(ctx_obj) == request_id) {
                        WARN(head_req->elsp_submitted == 0,
                             "Never submitted head request\n");
                                   u32 tail,
                                   struct drm_i915_gem_request *request)
 {
-       struct intel_ctx_submit_request *req = NULL, *cursor;
+       struct drm_i915_gem_request *cursor;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        unsigned long flags;
        int num_elements = 0;
 
-       req = kzalloc(sizeof(*req), GFP_KERNEL);
-       if (req == NULL)
-               return -ENOMEM;
-
        if (to != ring->default_context)
                intel_lr_context_pin(ring, to);
 
                if (request == NULL)
                        return -ENOMEM;
                request->ring = ring;
+               request->ctx = to;
+               kref_init(&request->ref);
        } else {
+               i915_gem_request_reference(request);
                WARN_ON(to != request->ctx);
        }
-       request->ctx = to;
        request->tail = tail;
-       req->request = request;
-       i915_gem_request_reference(request);
-       i915_gem_context_reference(req->request->ctx);
+       i915_gem_context_reference(request->ctx);
 
        intel_runtime_pm_get(dev_priv);
 
                        break;
 
        if (num_elements > 2) {
-               struct intel_ctx_submit_request *tail_req;
+               struct drm_i915_gem_request *tail_req;
 
                tail_req = list_last_entry(&ring->execlist_queue,
-                                          struct intel_ctx_submit_request,
+                                          struct drm_i915_gem_request,
                                           execlist_link);
 
-               if (to == tail_req->request->ctx) {
+               if (to == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
                        list_add_tail(&tail_req->execlist_link,
                                &ring->execlist_retired_req_list);
                }
        }
 
-       list_add_tail(&req->execlist_link, &ring->execlist_queue);
+       list_add_tail(&request->execlist_link, &ring->execlist_queue);
        if (num_elements == 0)
                execlists_context_unqueue(ring);
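
With the temporary-request path rewritten above, queue ownership works out as
follows (a sketch; the retire and reset paths drop exactly these references):

	/*
	 * Ownership of a queued request:
	 *  - no caller request: kzalloc() + kref_init() leaves ref == 1,
	 *    owned by ring->execlist_queue;
	 *  - caller-supplied request: i915_gem_request_reference() takes
	 *    an extra ref on behalf of ring->execlist_queue.
	 * Either way the queue also holds one context reference, and the
	 * retire/reset paths drop the request/context pair together.
	 */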
 
 
 void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 {
-       struct intel_ctx_submit_request *req, *tmp;
+       struct drm_i915_gem_request *req, *tmp;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        unsigned long flags;
        struct list_head retired_list;
        spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-               struct intel_context *ctx = req->request->ctx;
+               struct intel_context *ctx = req->ctx;
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[ring->id].state;
 
                        intel_lr_context_unpin(ring, ctx);
                intel_runtime_pm_put(dev_priv);
                i915_gem_context_unreference(ctx);
-               i915_gem_request_unreference(req->request);
                list_del(&req->execlist_link);
-               kfree(req);
+               i915_gem_request_unreference(req);
        }
 }
 
 
                               u64 exec_start, u32 flags);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
-/**
- * struct intel_ctx_submit_request - queued context submission request
- * @ctx: Context to submit to the ELSP.
- * @ring: Engine to submit it to.
- * @tail: how far in the context's ringbuffer this request goes to.
- * @execlist_link: link in the submission queue.
- * @work: workqueue for processing this request in a bottom half.
- * @elsp_submitted: no. of times this request has been sent to the ELSP.
- *
- * The ELSP only accepts two elements at a time, so we queue context/tail
- * pairs on a given queue (ring->execlist_queue) until the hardware is
- * available. The queue serves a double purpose: we also use it to keep track
- * of the up to 2 contexts currently in the hardware (usually one in execution
- * and the other queued up by the GPU): We only remove elements from the head
- * of the queue when the hardware informs us that an element has been
- * completed.
- *
- * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
- */
-struct intel_ctx_submit_request {
-       struct list_head execlist_link;
-
-       int elsp_submitted;
-
-       struct drm_i915_gem_request *request;
-};
-
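
For the hunks above to compile, the two members that survive from the deleted
struct have to migrate into struct drm_i915_gem_request. A sketch of the
matching i915_drv.h hunk (the placement within the struct is an assumption):

 struct drm_i915_gem_request {
+       /** execlist queue entry for submission to be processed */
+       struct list_head execlist_link;
+
+       /** No. of times this request has been sent to the ELSP */
+       int elsp_submitted;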
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
 void intel_execlists_retire_requests(struct intel_engine_cs *ring);