        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_guc_client *client = guc->execbuf_client;
-       struct execlist_port *port = engine->execlist_port;
-       unsigned int engine_id = engine->id;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct execlist_port *port = execlists->port;
+       const unsigned int engine_id = engine->id;
        unsigned int n;
 
-       for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+       for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
                struct drm_i915_gem_request *rq;
                unsigned int count;
 
 
 static void i915_guc_dequeue(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct execlist_port *port = execlists->port;
        struct drm_i915_gem_request *last = NULL;
        bool submit = false;
        struct rb_node *rb;
                port++;
 
        spin_lock_irq(&engine->timeline->lock);
-       rb = engine->execlist_first;
-       GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+       rb = execlists->first;
+       GEM_BUG_ON(rb_first(&execlists->queue) != rb);
        while (rb) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct drm_i915_gem_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        if (last && rq->ctx != last->ctx) {
-                               if (port != engine->execlist_port) {
+                               if (port != execlists->port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
                }
 
                rb = rb_next(rb);
-               rb_erase(&p->node, &engine->execlist_queue);
+               rb_erase(&p->node, &execlists->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       engine->execlist_first = rb;
+       execlists->first = rb;
        if (submit) {
                port_assign(port, last);
                i915_guc_submit(engine);
 
 static void i915_guc_irq_handler(unsigned long data)
 {
-       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+       struct execlist_port *port = engine->execlists.port;
        struct drm_i915_gem_request *rq;
 
        rq = port_request(&port[0]);
         * and it is guaranteed that it will remove the work item from the
         * queue before our request is completed.
         */
-       BUILD_BUG_ON(ARRAY_SIZE(engine->execlist_port) *
+       BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
                     sizeof(struct guc_wq_item) *
                     I915_NUM_ENGINES > GUC_WQ_SIZE);
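
[Editor's note: the BUILD_BUG_ON above encodes the worst case described in the comment: every engine may have one work item outstanding per port, and all of them must fit in the shared GuC work queue before any are consumed. Below is a minimal standalone sketch of the same compile-time capacity check; the sizes are made-up placeholders, not the driver's constants.]

/* Standalone sketch of the worst-case work queue capacity check.
 * All values are illustrative assumptions, not i915/GuC constants.
 */
#include <assert.h>

#define SKETCH_NUM_PORTS    2          /* ports per engine (assumed) */
#define SKETCH_NUM_ENGINES  5          /* engines sharing the queue (assumed) */
#define SKETCH_WQ_ITEM_SIZE 16         /* bytes per work item (assumed) */
#define SKETCH_WQ_SIZE      (4 * 1024) /* total work queue size (assumed) */

/* Worst case: every engine has a work item pending for each of its ports. */
static_assert(SKETCH_NUM_PORTS * SKETCH_WQ_ITEM_SIZE * SKETCH_NUM_ENGINES <=
              SKETCH_WQ_SIZE,
              "work queue too small for worst-case submission");

int main(void) { return 0; }
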
 
        guc_interrupts_capture(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
+               struct intel_engine_execlists * const execlists = &engine->execlists;
                /* The tasklet was initialised by execlists, and may be in
                 * a state of flux (across a reset) and so we just want to
                 * take over the callback without changing any other state
                 * in the tasklet.
                 */
-               engine->irq_tasklet.func = i915_guc_irq_handler;
+               execlists->irq_tasklet.func = i915_guc_irq_handler;
                clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-               tasklet_schedule(&engine->irq_tasklet);
+               tasklet_schedule(&execlists->irq_tasklet);
        }
 
        return 0;
 
                struct i915_priotree *pt,
                int prio)
 {
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
 
-       if (unlikely(engine->no_priolist))
+       if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;
 
 find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
-       parent = &engine->execlist_queue.rb_node;
+       parent = &execlists->queue.rb_node;
        while (*parent) {
                rb = *parent;
                p = rb_entry(rb, typeof(*p), node);
        }
 
        if (prio == I915_PRIORITY_NORMAL) {
-               p = &engine->default_priolist;
+               p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
-                       engine->no_priolist = true;
+                       execlists->no_priolist = true;
                        goto find_priolist;
                }
        }
        p->priority = prio;
        INIT_LIST_HEAD(&p->requests);
        rb_link_node(&p->node, rb, parent);
-       rb_insert_color(&p->node, &engine->execlist_queue);
+       rb_insert_color(&p->node, &execlists->queue);
 
        if (first)
-               engine->execlist_first = &p->node;
+               execlists->first = &p->node;
 
        return ptr_pack_bits(p, first, 1);
 }
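
[Editor's note: the function above keeps one request list per priority in an rb-tree, so execution order is "most positive priority first, FIFO within a priority". Below is a toy model of that ordering using made-up request and bucket types, not the driver's i915_priolist/rb-tree machinery.]

/* Toy model of the priority ordering above: per-priority buckets,
 * highest priority drained first, FIFO within a bucket.
 */
#include <stdio.h>

#define NPRIO 3                         /* toy priority levels, 0 = lowest */

struct toy_req { int id; int prio; };

int main(void)
{
        struct toy_req reqs[] = {
                { .id = 1, .prio = 0 }, { .id = 2, .prio = 2 },
                { .id = 3, .prio = 2 }, { .id = 4, .prio = 1 },
        };
        int order[NPRIO][4], count[NPRIO] = { 0 };

        /* Insert: append to the bucket for the request's priority (FIFO). */
        for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
                order[reqs[i].prio][count[reqs[i].prio]++] = reqs[i].id;

        /* Dequeue: walk the buckets from the highest priority down. */
        for (int p = NPRIO - 1; p >= 0; p--)
                for (int i = 0; i < count[p]; i++)
                        printf("run request %d (prio %d)\n", order[p][i], p);

        return 0;                       /* prints requests 2, 3, 4, 1 */
}
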
 
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlists.port;
        u32 __iomem *elsp =
                engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
-       for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+       for (n = ARRAY_SIZE(engine->execlists.port); n--; ) {
                struct drm_i915_gem_request *rq;
                unsigned int count;
                u64 desc;
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *last;
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlists.port;
        struct rb_node *rb;
        bool submit = false;
 
         */
 
        spin_lock_irq(&engine->timeline->lock);
-       rb = engine->execlist_first;
-       GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+       rb = engine->execlists.first;
+       GEM_BUG_ON(rb_first(&engine->execlists.queue) != rb);
        while (rb) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct drm_i915_gem_request *rq, *rn;
                                 * combine this request with the last, then we
                                 * are done.
                                 */
-                               if (port != engine->execlist_port) {
+                               if (port != engine->execlists.port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
                }
 
                rb = rb_next(rb);
-               rb_erase(&p->node, &engine->execlist_queue);
+               rb_erase(&p->node, &engine->execlists.queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       engine->execlist_first = rb;
+       engine->execlists.first = rb;
        if (submit)
                port_assign(port, last);
        spin_unlock_irq(&engine->timeline->lock);
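
[Editor's note: the dequeue loop above coalesces consecutive requests that share a context into a single port and stops once the second port has been claimed by a different context. Below is a toy model of that behaviour; the two-port limit, request IDs and context IDs are illustrative, not the driver's state.]

/* Toy model of the dequeue coalescing: same-context requests share a
 * port, and a third distinct context stays queued for the next pass.
 */
#include <stdio.h>

struct toy_req { int id; int ctx; };

int main(void)
{
        struct toy_req queue[] = {
                { 1, 10 }, { 2, 10 }, { 3, 20 }, { 4, 20 }, { 5, 30 },
        };
        int nports = 2, port = 0, last_ctx = -1;

        for (unsigned int i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
                if (last_ctx != -1 && queue[i].ctx != last_ctx) {
                        if (++port == nports)
                                break;  /* both ports claimed: stop dequeuing */
                }
                printf("port[%d] <- request %d (ctx %d)\n",
                       port, queue[i].id, queue[i].ctx);
                last_ctx = queue[i].ctx;
        }
        return 0;       /* 1,2 -> port[0]; 3,4 -> port[1]; 5 stays queued */
}
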
 
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct execlist_port *port = execlists->port;
        struct drm_i915_gem_request *rq, *rn;
        struct rb_node *rb;
        unsigned long flags;
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
        /* Cancel the requests on the HW and clear the ELSP tracker. */
-       for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+       for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
                i915_gem_request_put(port_request(&port[n]));
-       memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+       memset(execlists->port, 0, sizeof(execlists->port));
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->timeline->requests, link) {
        }
 
        /* Flush the queued requests to the timeline list (for retiring). */
-       rb = engine->execlist_first;
+       rb = execlists->first;
        while (rb) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 
                }
 
                rb = rb_next(rb);
-               rb_erase(&p->node, &engine->execlist_queue);
+               rb_erase(&p->node, &execlists->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       engine->execlist_queue = RB_ROOT;
-       engine->execlist_first = NULL;
+       execlists->queue = RB_ROOT;
+       execlists->first = NULL;
        GEM_BUG_ON(port_isset(&port[0]));
 
        /*
 
 static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
 {
-       const struct execlist_port *port = engine->execlist_port;
+       const struct execlist_port *port = engine->execlists.port;
 
        return port_count(&port[0]) + port_count(&port[1]) < 2;
 }
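
[Editor's note: the readiness rule above allows another dequeue only while fewer than two submissions are outstanding across the two ports; the per-port counts include a lite-restore resubmission of port[0]. A small sketch with hand-picked counts, not real port state:]

#include <stdbool.h>
#include <stdio.h>

/* Same rule as above: ready while the summed submission counts stay below 2. */
static bool toy_elsp_ready(unsigned int count0, unsigned int count1)
{
        return count0 + count1 < 2;
}

int main(void)
{
        printf("idle (0,0): %d\n", toy_elsp_ready(0, 0));         /* 1: ready */
        printf("one port (1,0): %d\n", toy_elsp_ready(1, 0));     /* 1: ready */
        printf("both ports (1,1): %d\n", toy_elsp_ready(1, 1));   /* 0: busy */
        printf("lite restore (2,0): %d\n", toy_elsp_ready(2, 0)); /* 0: busy */
        return 0;
}
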
  */
 static void intel_lrc_irq_handler(unsigned long data)
 {
-       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct execlist_port *port = execlists->port;
        struct drm_i915_private *dev_priv = engine->i915;
 
        /* We can skip acquiring intel_runtime_pm_get() here as it was taken
         */
        GEM_BUG_ON(!dev_priv->gt.awake);
 
-       intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
+       intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);
 
        /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
         * imposing the cost of a locked atomic transaction when submitting a
                unsigned int head, tail;
 
                /* However GVT emulation depends upon intercepting CSB mmio */
-               if (unlikely(engine->csb_use_mmio)) {
+               if (unlikely(execlists->csb_use_mmio)) {
                        buf = (u32 * __force)
                                (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
-                       engine->csb_head = -1; /* force mmio read of CSB ptrs */
+                       execlists->csb_head = -1; /* force mmio read of CSB ptrs */
                }
 
                /* The write will be ordered by the uncached read (itself
                 * is set and we do a new loop.
                 */
                __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-               if (unlikely(engine->csb_head == -1)) { /* following a reset */
+               if (unlikely(execlists->csb_head == -1)) { /* following a reset */
                        head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
                        tail = GEN8_CSB_WRITE_PTR(head);
                        head = GEN8_CSB_READ_PTR(head);
-                       engine->csb_head = head;
+                       execlists->csb_head = head;
                } else {
                        const int write_idx =
                                intel_hws_csb_write_index(dev_priv) -
                                I915_HWS_CSB_BUF0_INDEX;
 
-                       head = engine->csb_head;
+                       head = execlists->csb_head;
                        tail = READ_ONCE(buf[write_idx]);
                }
+
                while (head != tail) {
                        struct drm_i915_gem_request *rq;
                        unsigned int status;
                                   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
                }
 
-               if (head != engine->csb_head) {
-                       engine->csb_head = head;
+               if (head != execlists->csb_head) {
+                       execlists->csb_head = head;
                        writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
                               dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
                }
        if (execlists_elsp_ready(engine))
                execlists_dequeue(engine);
 
-       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+       intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
 }
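
[Editor's note: the handler above consumes the context-status buffer as a small ring: it starts from the head cached in execlists->csb_head (re-read via mmio after a reset), walks forward to the tail written by the hardware with wraparound, then writes the new head back as the read pointer. Below is a minimal sketch of that consumption pattern; the ring size and entry values are illustrative, not hardware values.]

/* Sketch of head/tail ring consumption with wraparound. */
#include <stdio.h>

#define TOY_CSB_ENTRIES 6               /* illustrative ring size (assumed) */

int main(void)
{
        unsigned int csb[TOY_CSB_ENTRIES] = { 0, 0, 0, 0x11, 0x22, 0x33 };
        unsigned int head = 3, tail = 0; /* hardware wrote 3..5, then wrapped */

        while (head != tail) {
                /* Process one status entry, then advance with wraparound. */
                printf("csb[%u] = 0x%x\n", head, csb[head]);
                if (++head == TOY_CSB_ENTRIES)
                        head = 0;
        }
        /* head now equals tail; the caller would store it as the new read
         * pointer so the next interrupt resumes from here.
         */
        return 0;
}
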
 
 static void insert_request(struct intel_engine_cs *engine,
 
        list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
        if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine))
-               tasklet_hi_schedule(&engine->irq_tasklet);
+               tasklet_hi_schedule(&engine->execlists.irq_tasklet);
 }
 
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 
        insert_request(engine, &request->priotree, request->priotree.priority);
 
-       GEM_BUG_ON(!engine->execlist_first);
+       GEM_BUG_ON(!engine->execlists.first);
        GEM_BUG_ON(list_empty(&request->priotree.link));
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        int ret;
 
        ret = intel_mocs_init_engine(engine);
        I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
                   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-       engine->csb_head = -1;
+       execlists->csb_head = -1;
 
        /* After a GPU reset, we may have requests to replay */
-       if (!i915_modparams.enable_guc_submission && engine->execlist_first)
-               tasklet_schedule(&engine->irq_tasklet);
+       if (!i915_modparams.enable_guc_submission && execlists->first)
+               tasklet_schedule(&execlists->irq_tasklet);
 
        return 0;
 }
 static void reset_common_ring(struct intel_engine_cs *engine,
                              struct drm_i915_gem_request *request)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct execlist_port *port = execlists->port;
        struct drm_i915_gem_request *rq, *rn;
        struct intel_context *ce;
        unsigned long flags;
         * guessing the missed context-switch events by looking at what
         * requests were completed.
         */
-       for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+       for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
                i915_gem_request_put(port_request(&port[n]));
-       memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+       memset(execlists->port, 0, sizeof(execlists->port));
 
        /* Push back any incomplete requests for replay after the reset. */
        list_for_each_entry_safe_reverse(rq, rn,
         * Tasklet cannot be active at this point due to intel_mark_active/idle
         * so this is just for documentation.
         */
-       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
-               tasklet_kill(&engine->irq_tasklet);
+       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
+               tasklet_kill(&engine->execlists.irq_tasklet);
 
        dev_priv = engine->i915;
 
        engine->submit_request = execlists_submit_request;
        engine->cancel_requests = execlists_cancel_requests;
        engine->schedule = execlists_schedule;
-       engine->irq_tasklet.func = intel_lrc_irq_handler;
+       engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
 }
 
 static void
        /* Intentionally left blank. */
        engine->buffer = NULL;
 
-       engine->csb_use_mmio = irq_handler_force_mmio(dev_priv);
+       engine->execlists.csb_use_mmio = irq_handler_force_mmio(dev_priv);
 
        fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
                                                    RING_ELSP(engine),
                                                     RING_CONTEXT_STATUS_BUF_BASE(engine),
                                                     FW_REG_READ);
 
-       engine->fw_domains = fw_domains;
+       engine->execlists.fw_domains = fw_domains;
 
-       tasklet_init(&engine->irq_tasklet,
+       tasklet_init(&engine->execlists.irq_tasklet,
                     intel_lrc_irq_handler, (unsigned long)engine);
 
        logical_ring_default_vfuncs(engine);
 
        int priority;
 };
 
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * the driver and the hardware for the execlist mode of submission.
+ */
+struct intel_engine_execlists {
+       /**
+        * @irq_tasklet: softirq tasklet for the bottom-half handler
+        */
+       struct tasklet_struct irq_tasklet;
+
+       /**
+        * @default_priolist: priority list for I915_PRIORITY_NORMAL
+        */
+       struct i915_priolist default_priolist;
+
+       /**
+        * @no_priolist: priority lists disabled
+        */
+       bool no_priolist;
+
+       /**
+        * @port: execlist port states
+        *
+        * For each hardware ELSP (ExecList Submission Port) we keep
+        * track of the last request and the number of times we submitted
+        * that port to hw. We then count the number of times the hw reports
+        * a context completion or preemption. As only one context can
+        * be active on hw at any time, we limit resubmission of a context
+        * to port[0]. This is called a Lite Restore of the context.
+        */
+       struct execlist_port {
+               /**
+                * @request_count: combined request pointer and submission count
+                */
+               struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, e) ((p) - (e)->execlists.port)
+
+               /**
+                * @context_id: context ID for port
+                */
+               GEM_DEBUG_DECL(u32 context_id);
+       } port[2];
+
+       /**
+        * @queue: queue of requests, in priority lists
+        */
+       struct rb_root queue;
+
+       /**
+        * @first: leftmost (highest priority) node in @queue
+        */
+       struct rb_node *first;
+
+       /**
+        * @fw_domains: forcewake domains for irq tasklet
+        */
+       unsigned int fw_domains;
+
+       /**
+        * @csb_head: context status buffer head
+        */
+       unsigned int csb_head;
+
+       /**
+        * @csb_use_mmio: access the CSB through mmio, instead of the HWSP
+        */
+       bool csb_use_mmio;
+};
+
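
[Editor's note: the port_pack()/port_unpack() macros above store the submission count in the low EXECLIST_COUNT_BITS bits of the request pointer, which relies on the pointer having at least that alignment. Below is a standalone sketch of the same packing trick with stand-in names, not the i915 ptr_pack_bits() helpers.]

/* Sketch of packing a small submission count into the low bits of an
 * aligned request pointer.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_COUNT_BITS 2
#define TOY_COUNT_MASK ((1u << TOY_COUNT_BITS) - 1)

struct toy_request { int seqno; } __attribute__((aligned(1 << TOY_COUNT_BITS)));

static void *toy_port_pack(struct toy_request *rq, unsigned int count)
{
        return (void *)((uintptr_t)rq | (count & TOY_COUNT_MASK));
}

static struct toy_request *toy_port_unpack(void *packed, unsigned int *count)
{
        *count = (uintptr_t)packed & TOY_COUNT_MASK;
        return (struct toy_request *)((uintptr_t)packed & ~(uintptr_t)TOY_COUNT_MASK);
}

int main(void)
{
        struct toy_request rq = { .seqno = 42 };
        struct toy_request *cur;
        unsigned int count;

        void *slot = toy_port_pack(&rq, 1);     /* first submission: count 1 */
        cur = toy_port_unpack(slot, &count);
        slot = toy_port_pack(cur, count + 1);   /* lite restore: count 1 -> 2 */

        toy_port_unpack(slot, &count);
        printf("seqno=%d submissions=%u\n", rq.seqno, count);  /* 42, 2 */
        return 0;
}
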
 #define INTEL_ENGINE_CS_MAX_NAME 8
 
 struct intel_engine_cs {
                u32     *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
        } semaphore;
 
-       /* Execlists */
-       struct tasklet_struct irq_tasklet;
-       struct i915_priolist default_priolist;
-       bool no_priolist;
-       struct execlist_port {
-               struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
-               GEM_DEBUG_DECL(u32 context_id);
-       } execlist_port[2];
-       struct rb_root execlist_queue;
-       struct rb_node *execlist_first;
-       unsigned int fw_domains;
-       unsigned int csb_head;
-       bool csb_use_mmio;
+       struct intel_engine_execlists execlists;
 
        /* Contexts are pinned whilst they are active on the GPU. The last
         * context executed remains active whilst the GPU is idle - the