Only execbuffer needs all the parameters of i915_add_request(). By
putting __i915_add_request() behind a macro, all current callsites
become cleaner. A following patch will introduce a new parameter for
__i915_add_request(); with this patch in place, only the relevant
callsite will reflect the change, making the commit smaller and
easier to understand.
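
For illustration, a minimal sketch of how a later parameter addition
would look under this scheme (new_param here is hypothetical, not the
parameter the following patch actually adds):

	/* Only the wrapper function and its direct callers change;
	 * every i915_add_request(ring, seqno) callsite stays intact. */
	int __i915_add_request(struct intel_ring_buffer *ring,
			       struct drm_file *file,
			       void *new_param, /* hypothetical */
			       u32 *seqno);
	#define i915_add_request(ring, seqno) \
		__i915_add_request(ring, NULL, NULL, seqno)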
v2: _i915_add_request as the function name (Chris Wilson)
v3: rename to __i915_add_request and fix the ordering of params (Ben Widawsky)
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int i915_add_request(struct intel_ring_buffer *ring,
-                    struct drm_file *file,
-                    u32 *seqno);
+int __i915_add_request(struct intel_ring_buffer *ring,
+                      struct drm_file *file,
+                      u32 *seqno);
+#define i915_add_request(ring, seqno) \
+       __i915_add_request(ring, NULL, seqno)
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 
        ret = 0;
        if (seqno == ring->outstanding_lazy_request)
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
 
        return ret;
 }
        return 0;
 }
 
-int
-i915_add_request(struct intel_ring_buffer *ring,
-                struct drm_file *file,
-                u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+                      struct drm_file *file,
+                      u32 *out_seqno)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                if (ring->gpu_caches_dirty)
-                       i915_add_request(ring, NULL, NULL);
+                       i915_add_request(ring, NULL);
 
                idle &= list_empty(&ring->request_list);
        }
 
                from->obj->dirty = 1;
                BUG_ON(from->obj->ring != ring);
 
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
                if (ret) {
                        /* Too late, we've already scheduled a context switch.
                         * Try to undo the change so that the hw state is
 
        ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)i915_add_request(ring, file, NULL);
+       (void)__i915_add_request(ring, file, NULL);
 }
 
 static int
 
        int ret;
 
        BUG_ON(overlay->last_flip_req);
-       ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+       ret = i915_add_request(ring, &overlay->last_flip_req);
        if (ret)
                return ret;
 
        intel_ring_emit(ring, flip_addr);
        intel_ring_advance(ring);
 
-       return i915_add_request(ring, NULL, &overlay->last_flip_req);
+       return i915_add_request(ring, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 
 
        /* We need to add any requests required to flush the objects and ring */
        if (ring->outstanding_lazy_request) {
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
        }