i915_gem_tiling.o \
          i915_gem_userptr.o \
          i915_gemfs.o \
+         i915_globals.o \
          i915_query.o \
          i915_request.o \
          i915_scheduler.o \
 
        return 0;
 }
 
-void __exit i915_global_active_exit(void)
+void i915_global_active_shrink(void)
+{
+       kmem_cache_shrink(global.slab_cache);
+}
+
+void i915_global_active_exit(void)
 {
        kmem_cache_destroy(global.slab_cache);
 }
 
 #endif
 
 int i915_global_active_init(void);
+void i915_global_active_shrink(void);
 void i915_global_active_exit(void);
 
 #endif /* _I915_ACTIVE_H_ */
 
        struct kmem_cache *objects;
        struct kmem_cache *vmas;
        struct kmem_cache *luts;
-       struct kmem_cache *requests;
-       struct kmem_cache *dependencies;
-       struct kmem_cache *priorities;
 
        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gemfs.h"
+#include "i915_globals.h"
 #include "i915_reset.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
        if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
                i915->gt.epoch = 1;
 
+       i915_globals_unpark();
+
        intel_enable_gt_powersave(i915);
        i915_update_gfx_val(i915);
        if (INTEL_GEN(i915) >= 6)
         * filled slabs to prioritise allocating from the mostly full slabs,
         * with the aim of reducing fragmentation.
         */
-       kmem_cache_shrink(i915->priorities);
-       kmem_cache_shrink(i915->dependencies);
-       kmem_cache_shrink(i915->requests);
        kmem_cache_shrink(i915->luts);
        kmem_cache_shrink(i915->vmas);
        kmem_cache_shrink(i915->objects);
+
+       i915_globals_park();
 }
 
 struct sleep_rcu_work {
        if (!dev_priv->luts)
                goto err_vmas;
 
-       dev_priv->requests = KMEM_CACHE(i915_request,
-                                       SLAB_HWCACHE_ALIGN |
-                                       SLAB_RECLAIM_ACCOUNT |
-                                       SLAB_TYPESAFE_BY_RCU);
-       if (!dev_priv->requests)
-               goto err_luts;
-
-       dev_priv->dependencies = KMEM_CACHE(i915_dependency,
-                                           SLAB_HWCACHE_ALIGN |
-                                           SLAB_RECLAIM_ACCOUNT);
-       if (!dev_priv->dependencies)
-               goto err_requests;
-
-       dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-       if (!dev_priv->priorities)
-               goto err_dependencies;
-
        INIT_LIST_HEAD(&dev_priv->gt.active_rings);
        INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
 
        return 0;
 
-err_dependencies:
-       kmem_cache_destroy(dev_priv->dependencies);
-err_requests:
-       kmem_cache_destroy(dev_priv->requests);
-err_luts:
-       kmem_cache_destroy(dev_priv->luts);
 err_vmas:
        kmem_cache_destroy(dev_priv->vmas);
 err_objects:
 
        cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
 
-       kmem_cache_destroy(dev_priv->priorities);
-       kmem_cache_destroy(dev_priv->dependencies);
-       kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->luts);
        kmem_cache_destroy(dev_priv->vmas);
        kmem_cache_destroy(dev_priv->objects);
 
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "i915_active.h"
+#include "i915_globals.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+int __init i915_globals_init(void)
+{
+       int err;
+
+       err = i915_global_active_init();
+       if (err)
+               return err;
+
+       err = i915_global_request_init();
+       if (err)
+               goto err_active;
+
+       err = i915_global_scheduler_init();
+       if (err)
+               goto err_request;
+
+       return 0;
+
+err_request:
+       i915_global_request_exit();
+err_active:
+       i915_global_active_exit();
+       return err;
+}
+
+static void i915_globals_shrink(void)
+{
+       /*
+        * kmem_cache_shrink() discards empty slabs and reorders partially
+        * filled slabs to prioritise allocating from the mostly full slabs,
+        * with the aim of reducing fragmentation.
+        */
+       i915_global_active_shrink();
+       i915_global_request_shrink();
+       i915_global_scheduler_shrink();
+}
+
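+/*
+ * "active" counts unparks not yet balanced by a park (i.e. whether the GT is
+ * awake and the caches are in use), while "epoch" is bumped both when a
+ * deferred park is queued and on every unpark, so the park worker can tell
+ * that we woke up again during its RCU grace period and skip the shrink.
+ */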
+static atomic_t active;
+static atomic_t epoch;
+struct park_work {
+       struct rcu_work work;
+       int epoch;
+};
+
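+/* Runs on system_wq once an RCU grace period has elapsed since we parked */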
+static void __i915_globals_park(struct work_struct *work)
+{
+       struct park_work *wrk = container_of(work, typeof(*wrk), work.work);
+
+       /* Confirm nothing woke up in the last grace period */
+       if (wrk->epoch == atomic_read(&epoch))
+               i915_globals_shrink();
+
+       kfree(wrk);
+}
+
+void i915_globals_park(void)
+{
+       struct park_work *wrk;
+
+       /*
+        * Defer shrinking the global slab caches (and other work) until
+        * after an RCU grace period has completed with no activity. This
+        * is to try to reduce the latency impact on the consumers caused
+        * by us shrinking the caches at the same time as they are trying to
+        * allocate, with the assumption being that if we idle long enough
+        * for an RCU grace period to elapse since the last use, it is likely
+        * to be a while before we need the caches again.
+        */
+       if (!atomic_dec_and_test(&active))
+               return;
+
+       wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
+       if (!wrk)
+               return;
+
+       wrk->epoch = atomic_inc_return(&epoch);
+       INIT_RCU_WORK(&wrk->work, __i915_globals_park);
+       queue_rcu_work(system_wq, &wrk->work);
+}
+
+void i915_globals_unpark(void)
+{
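+       /* Bump the epoch first so that any already-queued park worker backs off */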
+       atomic_inc(&epoch);
+       atomic_inc(&active);
+}
+
+void __exit i915_globals_exit(void)
+{
+       /* Flush any residual park_work */
+       rcu_barrier();
+       flush_scheduled_work();
+
+       i915_global_scheduler_exit();
+       i915_global_request_exit();
+       i915_global_active_exit();
+
+       /* And ensure that our TYPESAFE_BY_RCU slabs are truly destroyed */
+       rcu_barrier();
+}
 
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_GLOBALS_H_
+#define _I915_GLOBALS_H_
+
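+/*
+ * init/exit are called once from module load/unload (see i915_pci.c), while
+ * park/unpark bracket GT idle transitions so the global slab caches are only
+ * shrunk while the device is idle.
+ */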
+int i915_globals_init(void);
+void i915_globals_park(void);
+void i915_globals_unpark(void);
+void i915_globals_exit(void);
+
+#endif /* _I915_GLOBALS_H_ */
 
 
 #include <drm/drm_drv.h>
 
-#include "i915_active.h"
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "i915_selftest.h"
 
 #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
        bool use_kms = true;
        int err;
 
-       i915_global_active_init();
+       err = i915_globals_init();
+       if (err)
+               return err;
 
        err = i915_mock_selftests();
        if (err)
                return;
 
        pci_unregister_driver(&i915_pci_driver);
-       i915_global_active_exit();
+       i915_globals_exit();
 }
 
 module_init(i915_init);
 
 #include "i915_active.h"
 #include "i915_reset.h"
 
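+/* Driver-wide slab caches, managed from i915_globals.c rather than per device */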
+static struct i915_global_request {
+       struct kmem_cache *slab_requests;
+       struct kmem_cache *slab_dependencies;
+} global;
+
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
        return "i915";
         */
        i915_sw_fence_fini(&rq->submit);
 
-       kmem_cache_free(rq->i915->requests, rq);
+       kmem_cache_free(global.slab_requests, rq);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
 
        unreserve_gt(request->i915);
 
-       i915_sched_node_fini(request->i915, &request->sched);
+       i915_sched_node_fini(&request->sched);
        i915_request_put(request);
 }
 
        ring_retire_requests(ring);
 
 out:
-       return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+       return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
 static int add_timeline_barrier(struct i915_request *rq)
         *
         * Do not use kmem_cache_zalloc() here!
         */
-       rq = kmem_cache_alloc(i915->requests,
+       rq = kmem_cache_alloc(global.slab_requests,
                              GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
                rq = i915_request_alloc_slow(ce);
        GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
-       kmem_cache_free(i915->requests, rq);
+       kmem_cache_free(global.slab_requests, rq);
 err_unreserve:
        unreserve_gt(i915);
        intel_context_unpin(ce);
                return 0;
 
        if (to->engine->schedule) {
-               ret = i915_sched_node_add_dependency(to->i915,
-                                                    &to->sched,
-                                                    &from->sched);
+               ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
                if (ret < 0)
                        return ret;
        }
 #include "selftests/mock_request.c"
 #include "selftests/i915_request.c"
 #endif
+
+int __init i915_global_request_init(void)
+{
+       global.slab_requests = KMEM_CACHE(i915_request,
+                                         SLAB_HWCACHE_ALIGN |
+                                         SLAB_RECLAIM_ACCOUNT |
+                                         SLAB_TYPESAFE_BY_RCU);
+       if (!global.slab_requests)
+               return -ENOMEM;
+
+       global.slab_dependencies = KMEM_CACHE(i915_dependency,
+                                             SLAB_HWCACHE_ALIGN |
+                                             SLAB_RECLAIM_ACCOUNT);
+       if (!global.slab_dependencies)
+               goto err_requests;
+
+       return 0;
+
+err_requests:
+       kmem_cache_destroy(global.slab_requests);
+       return -ENOMEM;
+}
+
+void i915_global_request_shrink(void)
+{
+       kmem_cache_shrink(global.slab_dependencies);
+       kmem_cache_shrink(global.slab_requests);
+}
+
+void i915_global_request_exit(void)
+{
+       kmem_cache_destroy(global.slab_dependencies);
+       kmem_cache_destroy(global.slab_requests);
+}
 
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
+#include "i915_selftest.h"
 #include "i915_sw_fence.h"
 
 #include <uapi/drm/i915_drm.h>
        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_link;
+
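+       /* Mock-engine state used by the selftests; compiled out otherwise */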
+       I915_SELFTEST_DECLARE(struct {
+               struct list_head link;
+               unsigned long delay;
+       } mock;)
 };
 
 #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 void i915_retire_requests(struct drm_i915_private *i915);
 
+int i915_global_request_init(void);
+void i915_global_request_shrink(void);
+void i915_global_request_exit(void);
+
 #endif /* I915_REQUEST_H */
 
 #include "i915_request.h"
 #include "i915_scheduler.h"
 
+static struct i915_global_scheduler {
+       struct kmem_cache *slab_dependencies;
+       struct kmem_cache *slab_priorities;
+} global;
+
 static DEFINE_SPINLOCK(schedule_lock);
 
 static const struct i915_request *
 }
 
 static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
 {
-       return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+       return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
 }
 
 static void
-i915_dependency_free(struct drm_i915_private *i915,
-                    struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
 {
-       kmem_cache_free(i915->dependencies, dep);
+       kmem_cache_free(global.slab_dependencies, dep);
 }
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
        return ret;
 }
 
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
-                                  struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
 {
        struct i915_dependency *dep;
 
-       dep = i915_dependency_alloc(i915);
+       dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;
 
        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_ALLOC))
-               i915_dependency_free(i915, dep);
+               i915_dependency_free(dep);
 
        return 0;
 }
 
-void i915_sched_node_fini(struct drm_i915_private *i915,
-                         struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
 {
        struct i915_dependency *dep, *tmp;
 
 
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
-                       i915_dependency_free(i915, dep);
+                       i915_dependency_free(dep);
        }
 
        /* Remove ourselves from everyone who depends upon us */
 
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
-                       i915_dependency_free(i915, dep);
+                       i915_dependency_free(dep);
        }
 
        spin_unlock(&schedule_lock);
        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
-               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+               p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */
 
        spin_unlock_bh(&schedule_lock);
 }
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+       kmem_cache_free(global.slab_priorities, p);
+}
+
+int __init i915_global_scheduler_init(void)
+{
+       global.slab_dependencies = KMEM_CACHE(i915_dependency,
+                                             SLAB_HWCACHE_ALIGN);
+       if (!global.slab_dependencies)
+               return -ENOMEM;
+
+       global.slab_priorities = KMEM_CACHE(i915_priolist,
+                                           SLAB_HWCACHE_ALIGN);
+       if (!global.slab_priorities)
+               goto err_dependencies;
+
+       return 0;
+
+err_dependencies:
+       kmem_cache_destroy(global.slab_dependencies);
+       return -ENOMEM;
+}
+
+void i915_global_scheduler_shrink(void)
+{
+       kmem_cache_shrink(global.slab_dependencies);
+       kmem_cache_shrink(global.slab_priorities);
+}
+
+void i915_global_scheduler_exit(void)
+{
+       kmem_cache_destroy(global.slab_dependencies);
+       kmem_cache_destroy(global.slab_priorities);
+}
 
 #define I915_DEPENDENCY_ALLOC BIT(0)
 };
 
+struct i915_priolist {
+       struct list_head requests[I915_PRIORITY_COUNT];
+       struct rb_node node;
+       unsigned long used;
+       int priority;
+};
+
+#define priolist_for_each_request(it, plist, idx) \
+       for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+               list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
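+/*
+ * As above, but consume the priolist: ffs() walks the populated levels and
+ * each level's bit in ->used is cleared once its list has been visited.
+ */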
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+       for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+               list_for_each_entry_safe(it, n, \
+                                        &(plist)->requests[idx - 1], \
+                                        sched.link)
+
 void i915_sched_node_init(struct i915_sched_node *node);
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_dependency *dep,
                                      unsigned long flags);
 
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
-                                  struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal);
 
-void i915_sched_node_fini(struct drm_i915_private *i915,
-                         struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
 
 void i915_schedule(struct i915_request *request,
                   const struct i915_sched_attr *attr);
 struct list_head *
 i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
 
+void __i915_priolist_free(struct i915_priolist *p);
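+/*
+ * The NORMAL priority level uses the engine's embedded default_priolist and
+ * is never allocated from the slab, so only dynamically allocated priolists
+ * are handed back to __i915_priolist_free().
+ */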
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+       if (p->priority != I915_PRIORITY_NORMAL)
+               __i915_priolist_free(p);
+}
+
+int i915_global_scheduler_init(void);
+void i915_global_scheduler_shrink(void);
+void i915_global_scheduler_exit(void);
+
 #endif /* _I915_SCHEDULER_H_ */
 
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               if (p->priority != I915_PRIORITY_NORMAL)
-                       kmem_cache_free(engine->i915->priorities, p);
+               i915_priolist_free(p);
        }
 done:
        execlists->queue_priority_hint =
 
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               if (p->priority != I915_PRIORITY_NORMAL)
-                       kmem_cache_free(engine->i915->priorities, p);
+               i915_priolist_free(p);
        }
 
 done:
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
-               if (p->priority != I915_PRIORITY_NORMAL)
-                       kmem_cache_free(engine->i915->priorities, p);
+               i915_priolist_free(p);
        }
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
 #define _VECS(n) (VECS + (n))
 };
 
-struct i915_priolist {
-       struct list_head requests[I915_PRIORITY_COUNT];
-       struct rb_node node;
-       unsigned long used;
-       int priority;
-};
-
-#define priolist_for_each_request(it, plist, idx) \
-       for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
-               list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist, idx) \
-       for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
-               list_for_each_entry_safe(it, n, \
-                                        &(plist)->requests[idx - 1], \
-                                        sched.link)
-
 struct st_preempt_hang {
        struct completion completion;
        unsigned int count;
 
        kfree(ring);
 }
 
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
 {
        return list_first_entry_or_null(&engine->hw_queue,
-                                       struct mock_request,
-                                       link);
+                                       struct i915_request,
+                                       mock.link);
 }
 
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
 {
-       list_del_init(&request->link);
-       i915_request_mark_complete(&request->base);
-       GEM_BUG_ON(!i915_request_completed(&request->base));
+       list_del_init(&request->mock.link);
+       i915_request_mark_complete(request);
+       GEM_BUG_ON(!i915_request_completed(request));
 
-       intel_engine_queue_breadcrumbs(request->base.engine);
+       intel_engine_queue_breadcrumbs(request->engine);
 }
 
 static void hw_delay_complete(struct timer_list *t)
 {
        struct mock_engine *engine = from_timer(engine, t, hw_delay);
-       struct mock_request *request;
+       struct i915_request *request;
        unsigned long flags;
 
        spin_lock_irqsave(&engine->hw_lock, flags);
         * requeue the timer for the next delayed request.
         */
        while ((request = first_request(engine))) {
-               if (request->delay) {
-                       mod_timer(&engine->hw_delay, jiffies + request->delay);
+               if (request->mock.delay) {
+                       mod_timer(&engine->hw_delay,
+                                 jiffies + request->mock.delay);
                        break;
                }
 
 
 static int mock_request_alloc(struct i915_request *request)
 {
-       struct mock_request *mock = container_of(request, typeof(*mock), base);
-
-       INIT_LIST_HEAD(&mock->link);
-       mock->delay = 0;
+       INIT_LIST_HEAD(&request->mock.link);
+       request->mock.delay = 0;
 
        return 0;
 }
 
 static void mock_submit_request(struct i915_request *request)
 {
-       struct mock_request *mock = container_of(request, typeof(*mock), base);
        struct mock_engine *engine =
                container_of(request->engine, typeof(*engine), base);
        unsigned long flags;
        i915_request_submit(request);
 
        spin_lock_irqsave(&engine->hw_lock, flags);
-       list_add_tail(&mock->link, &engine->hw_queue);
-       if (mock->link.prev == &engine->hw_queue) {
-               if (mock->delay)
-                       mod_timer(&engine->hw_delay, jiffies + mock->delay);
+       list_add_tail(&request->mock.link, &engine->hw_queue);
+       if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+               if (request->mock.delay)
+                       mod_timer(&engine->hw_delay,
+                                 jiffies + request->mock.delay);
                else
-                       advance(mock);
+                       advance(request);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 {
        struct mock_engine *mock =
                container_of(engine, typeof(*mock), base);
-       struct mock_request *request, *rn;
+       struct i915_request *request, *rn;
 
        del_timer_sync(&mock->hw_delay);
 
        spin_lock_irq(&mock->hw_lock);
-       list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+       list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
                advance(request);
        spin_unlock_irq(&mock->hw_lock);
 }
 
 
        destroy_workqueue(i915->wq);
 
-       kmem_cache_destroy(i915->priorities);
-       kmem_cache_destroy(i915->dependencies);
-       kmem_cache_destroy(i915->requests);
        kmem_cache_destroy(i915->vmas);
        kmem_cache_destroy(i915->objects);
 
        if (!i915->vmas)
                goto err_objects;
 
-       i915->requests = KMEM_CACHE(mock_request,
-                                   SLAB_HWCACHE_ALIGN |
-                                   SLAB_RECLAIM_ACCOUNT |
-                                   SLAB_TYPESAFE_BY_RCU);
-       if (!i915->requests)
-               goto err_vmas;
-
-       i915->dependencies = KMEM_CACHE(i915_dependency,
-                                       SLAB_HWCACHE_ALIGN |
-                                       SLAB_RECLAIM_ACCOUNT);
-       if (!i915->dependencies)
-               goto err_requests;
-
-       i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-       if (!i915->priorities)
-               goto err_dependencies;
-
        i915_timelines_init(i915);
 
        INIT_LIST_HEAD(&i915->gt.active_rings);
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        i915_timelines_fini(i915);
-       kmem_cache_destroy(i915->priorities);
-err_dependencies:
-       kmem_cache_destroy(i915->dependencies);
-err_requests:
-       kmem_cache_destroy(i915->requests);
-err_vmas:
        kmem_cache_destroy(i915->vmas);
 err_objects:
        kmem_cache_destroy(i915->objects);
 
             unsigned long delay)
 {
        struct i915_request *request;
-       struct mock_request *mock;
 
-       /* NB the i915->requests slab cache is enlarged to fit mock_request */
+       /* NB the mock state is embedded directly in struct i915_request */
        request = i915_request_alloc(engine, context);
        if (IS_ERR(request))
                return NULL;
 
-       mock = container_of(request, typeof(*mock), base);
-       mock->delay = delay;
-
-       return &mock->base;
+       request->mock.delay = delay;
+       return request;
 }
 
 bool mock_cancel_request(struct i915_request *request)
 {
-       struct mock_request *mock = container_of(request, typeof(*mock), base);
        struct mock_engine *engine =
                container_of(request->engine, typeof(*engine), base);
        bool was_queued;
 
        spin_lock_irq(&engine->hw_lock);
-       was_queued = !list_empty(&mock->link);
-       list_del_init(&mock->link);
+       was_queued = !list_empty(&request->mock.link);
+       list_del_init(&request->mock.link);
        spin_unlock_irq(&engine->hw_lock);
 
        if (was_queued)
 
 
 #include "../i915_request.h"
 
-struct mock_request {
-       struct i915_request base;
-
-       struct list_head link;
-       unsigned long delay;
-};
-
 struct i915_request *
 mock_request(struct intel_engine_cs *engine,
             struct i915_gem_context *context,