         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       request_start = intel_ring_get_tail(ring);
+       request_start = ring->tail;
        reserved_tail = request->reserved_space;
        request->reserved_space = 0;
 
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request->postfix = intel_ring_get_tail(ring);
+       request->postfix = ring->tail;
 
        if (i915.enable_execlists) {
                ret = engine->emit_request(request);
        } else {
                ret = engine->add_request(request);
 
-               request->tail = intel_ring_get_tail(ring);
+               request->tail = ring->tail;
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
        /* Sanity check that the reserved size was large enough. */
-       ret = intel_ring_get_tail(ring) - request_start;
+       ret = ring->tail - request_start;
        if (ret < 0)
                ret += ring->size;
        WARN_ONCE(ret > reserved_tail,
 
 
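A minimal, self-contained sketch of the circular-buffer arithmetic behind the sanity check in the hunk above (the ret < 0 wrap-around handling and the comparison against the reserved size). The struct and function names below are illustrative stand-ins, not part of the driver:

/* Illustrative types and names only; not the i915 code. */
#include <assert.h>

struct example_ring {
	unsigned int size;	/* total ring size in bytes */
	unsigned int tail;	/* current write offset */
};

/* Bytes emitted between a recorded start offset and the current tail. */
static unsigned int ring_bytes_used(const struct example_ring *ring,
				    unsigned int request_start)
{
	int used = (int)ring->tail - (int)request_start;

	if (used < 0)		/* the tail wrapped past the end of the ring */
		used += ring->size;

	return (unsigned int)used;
}

int main(void)
{
	struct example_ring ring = { .size = 4096, .tail = 0 };

	/* No wrap: 512 - 128 = 384 bytes consumed. */
	ring.tail = 512;
	assert(ring_bytes_used(&ring, 128) == 384);

	/* Wrap: emission started near the end, tail restarted at the front. */
	ring.tail = 64;
	assert(ring_bytes_used(&ring, 4000) == 160);

	/* The hunk above warns if the bytes used exceed what was reserved. */
	assert(ring_bytes_used(&ring, 4000) <= 192 /* hypothetical reserved_tail */);

	return 0;
}
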
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ring *ring)
-{
-       return ring->tail;
-}
-
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case