There are a number of places where the driver needs a request but isn't
working on behalf of any specific user or context. At present, we
associate such requests with the per-engine default context. A future
patch will abolish those per-engine context pointers, but we can already
eliminate many of the references to them just by letting the allocator
accept NULL as shorthand for "an appropriate context for this ring", so
that callers don't need to know how the "appropriate context" is found
(e.g. per-ring vs. per-device).

So this patch renames the existing i915_gem_request_alloc() and makes it
local (static inline), then adds a wrapper under the old name that
substitutes the default context when NULL is passed and has a nicer
calling convention (it returns the request rather than requiring a
pointer to an output parameter). All callers are then converted to the
new convention:
OLD:
	err = i915_gem_request_alloc(ring, user_ctx, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, user_ctx);
	if (IS_ERR(req)) ...
OLD:
	err = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, NULL);
	if (IS_ERR(req)) ...
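For reference, the wrapper introduced below boils down to the following
shape (__i915_gem_request_alloc() being the renamed original, which
keeps the old three-argument signature):

	struct drm_i915_gem_request *
	i915_gem_request_alloc(struct intel_engine_cs *engine,
			       struct intel_context *ctx)
	{
		struct drm_i915_gem_request *req;
		int err;

		/* NULL means "an appropriate context for this engine" */
		if (ctx == NULL)
			ctx = engine->default_context;

		err = __i915_gem_request_alloc(engine, ctx, &req);
		return err ? ERR_PTR(err) : req;
	}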
v4:	Rebased
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-2-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
 
 
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 
        kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+                        struct intel_context *ctx,
+                        struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
        struct drm_i915_gem_request *req;
        return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx)
+{
+       struct drm_i915_gem_request *req;
+       int err;
+
+       if (ctx == NULL)
+               ctx = engine->default_context;
+       err = __i915_gem_request_alloc(engine, ctx, &req);
+       return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
        intel_ring_reserved_space_cancel(req->ringbuf);
                        return 0;
 
                if (*to_req == NULL) {
-                       ret = i915_gem_request_alloc(to, to->default_context, to_req);
-                       if (ret)
-                               return ret;
+                       struct drm_i915_gem_request *req;
+
+                       req = i915_gem_request_alloc(to, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
+
+                       *to_req = req;
                }
 
                trace_i915_gem_ring_sync_to(*to_req, from, from_req);
                if (!i915.enable_execlists) {
                        struct drm_i915_gem_request *req;
 
-                       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-                       if (ret)
-                               return ret;
+                       req = i915_gem_request_alloc(ring, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
                        if (ret) {
        for_each_ring(ring, dev_priv, i) {
                struct drm_i915_gem_request *req;
 
-               WARN_ON(!ring->default_context);
-
-               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-               if (ret) {
+               req = i915_gem_request_alloc(ring, NULL);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
                        i915_gem_cleanup_ringbuffer(dev);
                        goto out;
                }
 
                       struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *req = NULL;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_i915_gem_exec_object2 shadow_exec_entry;
                params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
        /* Allocate a request for this batch buffer nice and early. */
-       ret = i915_gem_request_alloc(ring, ctx, &params->request);
-       if (ret)
+       req = i915_gem_request_alloc(ring, ctx);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto err_batch_unpin;
+       }
 
-       ret = i915_gem_request_add_to_client(params->request, file);
+       ret = i915_gem_request_add_to_client(req, file);
        if (ret)
                goto err_batch_unpin;
 
        params->dispatch_flags          = dispatch_flags;
        params->batch_obj               = batch_obj;
        params->ctx                     = ctx;
+       params->request                 = req;
 
        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
         * must be freed again. If it was submitted then it is being tracked
         * on the active request list and no clean up is required here.
         */
-       if (ret && params->request)
-               i915_gem_request_cancel(params->request);
+       if (ret && req)
+               i915_gem_request_cancel(req);
 
        mutex_unlock(&dev->struct_mutex);
 
 
                                        obj->last_write_req);
        } else {
                if (!request) {
-                       ret = i915_gem_request_alloc(ring, ring->default_context, &request);
-                       if (ret)
+                       request = i915_gem_request_alloc(ring, NULL);
+                       if (IS_ERR(request)) {
+                               ret = PTR_ERR(request);
                                goto cleanup_unpin;
+                       }
                }
 
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 
        if (ctx != ring->default_context && ring->init_context) {
                struct drm_i915_gem_request *req;
 
-               ret = i915_gem_request_alloc(ring,
-                       ctx, &req);
-               if (ret) {
-                       DRM_ERROR("ring create req: %d\n",
-                               ret);
+               req = i915_gem_request_alloc(ring, ctx);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       DRM_ERROR("ring create req: %d\n", ret);
                        goto error_ringbuf;
                }
 
 
        WARN_ON(overlay->active);
        WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 4);
        if (ret) {
        if (tmp & (1 << 17))
                DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 2);
        if (ret) {
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 6);
        if (ret) {
                /* synchronous slowpath */
                struct drm_i915_gem_request *req;
 
-               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-               if (ret)
-                       return ret;
+               req = i915_gem_request_alloc(ring, NULL);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
 
                ret = intel_ring_begin(req, 2);
                if (ret) {