        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
        ret = 0;
-       if (seqno == ring->outstanding_lazy_seqno)
+       if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
                ret = i915_add_request(ring, NULL);
 
        return ret;
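
For reference, i915_gem_request_get_seqno() used above is the NULL-safe seqno accessor from the request-conversion series; a minimal sketch, assuming the inline helper in i915_drv.h, looks roughly like:

	static inline uint32_t
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		/* A NULL request reads back as seqno 0, so callers such as the
		 * comparison above need no separate NULL check. */
		return req ? req->seqno : 0;
	}

This is why the outstanding_lazy_request pointer can be tested and dereferenced through the accessor even when no request has been allocated yet.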
        u32 request_ring_position, request_start;
        int ret;
 
-       request = ring->preallocated_lazy_request;
+       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
                return -ENOMEM;
 
                        return ret;
        }
 
-       request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;
        }
 
        trace_i915_gem_request_add(ring, request->seqno);
-       ring->outstanding_lazy_seqno = 0;
-       ring->preallocated_lazy_request = NULL;
+       ring->outstanding_lazy_request = NULL;
 
        i915_queue_hangcheck(ring->dev);
 
                i915_gem_free_request(request);
        }
 
-       /* These may not have been flush before the reset, do so now */
-       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-       ring->outstanding_lazy_seqno = 0;
+       /* This may not have been flushed before the reset, so clean it now */
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 }
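
i915_gem_request_assign() used in the cleanup path is the reference-counting pointer-assignment helper; a hedged sketch, assuming the usual kref-based reference/unreference helpers on drm_i915_gem_request, is:

	static inline void
	i915_gem_request_assign(struct drm_i915_gem_request **pdst,
				struct drm_i915_gem_request *src)
	{
		/* Take a reference on the new request (if any) before dropping
		 * the reference held via the old pointer, then swap. */
		if (src)
			i915_gem_request_reference(src);

		if (*pdst)
			i915_gem_request_unreference(*pdst);

		*pdst = src;
	}

Assigning NULL therefore drops the ring's reference on the stale outstanding lazy request during reset cleanup.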
 
 void i915_gem_restore_fences(struct drm_device *dev)
 
        }
 }
 
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
-                                   struct intel_context *ctx)
+static int logical_ring_alloc_request(struct intel_engine_cs *ring,
+                                     struct intel_context *ctx)
 {
        struct drm_i915_gem_request *request;
        int ret;
 
-       /* XXX: The aim is to replace seqno values with request structures.
-        * A step along the way is to switch to using the PLR in preference
-        * to the OLS. That requires the PLR to only be valid when the OLS is
-        * also valid. I.e., the two must be kept in step. */
-
-       if (ring->outstanding_lazy_seqno) {
-               WARN_ON(ring->preallocated_lazy_request == NULL);
+       if (ring->outstanding_lazy_request)
                return 0;
-       }
-       WARN_ON(ring->preallocated_lazy_request != NULL);
 
        request = kmalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
 
        kref_init(&request->ref);
 
-       ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+       ret = i915_gem_get_seqno(ring->dev, &request->seqno);
        if (ret) {
                intel_lr_context_unpin(ring, ctx);
                kfree(request);
        request->ctx = ctx;
        i915_gem_context_reference(request->ctx);
 
-       ring->preallocated_lazy_request = request;
+       ring->outstanding_lazy_request = request;
        return 0;
 }
 
                return ret;
 
        /* Preallocate the olr before touching the ring */
-       ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+       ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
        if (ret)
                return ret;
 
                                (ring->status_page.gfx_addr +
                                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+       intel_logical_ring_emit(ringbuf,
+               i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance_and_submit(ringbuf);
 
        intel_logical_ring_stop(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-       ring->outstanding_lazy_seqno = 0;
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
        if (ring->cleanup)
                ring->cleanup(ring);
 
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
+               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
+               seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
                                           PIPE_CONTROL_FLUSH_ENABLE);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+               intel_ring_emit(signaller, seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
+               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
+               seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+               intel_ring_emit(signaller, seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
                intel_ring_emit(signaller, 0);
        for_each_ring(useless, dev_priv, i) {
                u32 mbox_reg = signaller->semaphore.mbox.signal[i];
                if (mbox_reg != GEN6_NOSYNC) {
+                       u32 seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit(signaller, mbox_reg);
-                       intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+                       intel_ring_emit(signaller, seqno);
                }
        }
 
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);
 
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
 
        intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
-       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-       ring->outstanding_lazy_seqno = 0;
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
        if (ring->cleanup)
                ring->cleanup(ring);
        int ret;
 
        /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_seqno) {
+       if (ring->outstanding_lazy_request) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
 }
 
 static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
 {
        int ret;
        struct drm_i915_gem_request *request;
 
-       /* XXX: The aim is to replace seqno values with request structures.
-        * A step along the way is to switch to using the PLR in preference
-        * to the OLS. That requires the PLR to only be valid when the OLS
-        * is also valid. I.e., the two must be kept in step. */
-
-       if (ring->outstanding_lazy_seqno) {
-               WARN_ON(ring->preallocated_lazy_request == NULL);
+       if (ring->outstanding_lazy_request)
                return 0;
-       }
-
-       WARN_ON(ring->preallocated_lazy_request != NULL);
 
        request = kmalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
 
        kref_init(&request->ref);
 
-       ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+       ret = i915_gem_get_seqno(ring->dev, &request->seqno);
        if (ret) {
                kfree(request);
                return ret;
        }
 
-       ring->preallocated_lazy_request = request;
+       ring->outstanding_lazy_request = request;
        return 0;
 }
 
                return ret;
 
        /* Preallocate the olr before touching the ring */
-       ret = intel_ring_alloc_seqno(ring);
+       ret = intel_ring_alloc_request(ring);
        if (ret)
                return ret;
 
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       BUG_ON(ring->outstanding_lazy_seqno);
+       BUG_ON(ring->outstanding_lazy_request);
 
        if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);