                if (i915_modparams.enable_execlists) {
                        const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+                       struct intel_engine_execlists * const execlists = &engine->execlists;
                        u32 ptr, read, write;
                        unsigned int idx;
 
                        ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
                        read = GEN8_CSB_READ_PTR(ptr);
                        write = GEN8_CSB_WRITE_PTR(ptr);
                        seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-                                  read, engine->execlists.csb_head,
+                                  read, execlists->csb_head,
                                   write,
                                   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
                                   yesno(test_bit(ENGINE_IRQ_EXECLIST,
                                                  &engine->irq_posted)));
 
                        rcu_read_lock();
-                       for (idx = 0; idx < ARRAY_SIZE(engine->execlists.port); idx++) {
+                       for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
                                unsigned int count;
 
-                               rq = port_unpack(&engine->execlists.port[idx],
-                                                &count);
+                               rq = port_unpack(&execlists->port[idx], &count);
                                if (rq) {
                                        seq_printf(m, "\t\tELSP[%d] count=%d, ",
                                                   idx, count);
                                        print_request(m, rq, "rq: ");
                                } else {
                                        seq_printf(m, "\t\tELSP[%d] idle\n", idx);
                                }
                        }
                        rcu_read_unlock();
 
                        spin_lock_irq(&engine->timeline->lock);
-                       for (rb = engine->execlists.first; rb; rb = rb_next(rb)) {
+                       for (rb = execlists->first; rb; rb = rb_next(rb)) {
                                struct i915_priolist *p =
                                        rb_entry(rb, typeof(*p), node);
 
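/*
 * Aside on port_unpack() above: each execlist_port stores its request and
 * its submission count in a single tagged pointer, with the count packed
 * into the EXECLIST_COUNT_BITS low bits. A minimal sketch of the unpacking
 * (the real helpers are port_request()/port_count(), built on
 * ptr_mask_bits()/ptr_unmask_bits()):
 *
 *	unsigned long packed = (unsigned long)p->request_count;
 *	count = packed & ((1UL << EXECLIST_COUNT_BITS) - 1);
 *	rq = (struct drm_i915_gem_request *)
 *	     (packed & ~((1UL << EXECLIST_COUNT_BITS) - 1));
 */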
 
                        u32 seqno;
                        u32 head;
                        u32 tail;
-               } *requests, execlist[2];
+               } *requests, execlist[EXECLIST_MAX_PORTS];
+               unsigned int num_ports;
 
                struct drm_i915_error_waiter {
                        char comm[TASK_COMM_LEN];
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
                               const struct drm_i915_error_engine *ee)
 {
+       int n;
+
        err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
        err_printf(m, "  START: 0x%08x\n", ee->start);
        err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
                   jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
        err_printf(m, "  engine reset count: %u\n", ee->reset_count);
 
-       error_print_request(m, "  ELSP[0]: ", &ee->execlist[0]);
-       error_print_request(m, "  ELSP[1]: ", &ee->execlist[1]);
+       for (n = 0; n < ee->num_ports; n++) {
+               err_printf(m, "  ELSP[%d]:", n);
+               error_print_request(m, " ", &ee->execlist[n]);
+       }
+
        error_print_context(m, "  Active context: ", &ee->context);
 }
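/*
 * This loop replaces the two hard-coded ELSP[0]/ELSP[1] lines: the error
 * state now emits one "  ELSP[n]: ..." line per port that actually held a
 * request at capture time, whatever the engine's port count.
 */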
 
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
 {
-       const struct execlist_port *port = engine->execlists.port;
+       const struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned int n;
 
-       for (n = 0; n < ARRAY_SIZE(engine->execlists.port); n++) {
-               struct drm_i915_gem_request *rq = port_request(&port[n]);
+       for (n = 0; n < execlists_num_ports(execlists); n++) {
+               struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
 
                if (!rq)
                        break;
 
                record_request(rq, &ee->execlist[n]);
        }
+
+       ee->num_ports = n;
 }
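/*
 * Note the two different counts in play: execlists_num_ports() is the
 * engine's capacity, while ee->num_ports records how many ports were
 * occupied when the error was captured (ports are filled in order, so the
 * loop can stop at the first empty one). error_print_engine() above then
 * walks only the populated entries.
 */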
 
 static void record_context(struct drm_i915_error_context *e,
 
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
        struct drm_i915_gem_request *last = NULL;
+       const struct execlist_port * const last_port =
+               &execlists->port[execlists->port_mask];
        bool submit = false;
        struct rb_node *rb;
 
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        if (last && rq->ctx != last->ctx) {
-                               if (port != execlists->port) {
+                               if (port == last_port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
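/*
 * The condition is intentionally inverted, not just renamed. The old
 * "port != execlists->port" meant "we have already advanced past the
 * first port, so another context will not fit", which is only correct
 * for exactly two ports. "port == last_port" states the limit directly
 * ("no room left to start another context") and holds for any port count.
 */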
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
+       const struct execlist_port * const last_port =
+               &execlists->port[execlists->port_mask];
        struct drm_i915_gem_request *rq;
 
        rq = port_request(&port[0]);
        while (rq && i915_gem_request_completed(rq)) {
                trace_i915_gem_request_out(rq);
                i915_gem_request_put(rq);

                execlists_port_complete(execlists, port);

                rq = port_request(&port[0]);
        }
 
-       if (!port_isset(&port[1]))
+       if (!port_isset(last_port))
                i915_guc_dequeue(engine);
 }
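/*
 * Same generalisation on the completion side: new work is pulled in only
 * while the final port is free. With the default port_mask == 1,
 * last_port is &port[1], so this is behaviourally identical to the old
 * !port_isset(&port[1]) check.
 */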
 
 
 
        execlists->csb_use_mmio = csb_force_mmio(engine->i915);
 
+       execlists->port_mask = 1;
+       BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+       GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+
        execlists->queue = RB_ROOT;
        execlists->first = NULL;
 }
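/*
 * port_mask is deliberately stored as "number of ports - 1" so a port
 * index can be wrapped with "idx & execlists->port_mask"; the
 * BUILD_BUG_ON_NOT_POWER_OF_2() guards the assumption behind that trick.
 * A hypothetical configuration wanting four ports (not part of this
 * patch; EXECLIST_MAX_PORTS would have to grow too) would simply set
 *
 *	execlists->port_mask = 3;
 *
 * and every execlists_num_ports()/last_port user above adapts for free.
 */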
 
                engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
-       for (n = ARRAY_SIZE(engine->execlists.port); n--; ) {
+       for (n = execlists_num_ports(&engine->execlists); n--; ) {
                struct drm_i915_gem_request *rq;
                unsigned int count;
                u64 desc;
        struct drm_i915_gem_request *last;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
+       const struct execlist_port * const last_port =
+               &execlists->port[execlists->port_mask];
        struct rb_node *rb;
        bool submit = false;
 
                                 * combine this request with the last, then we
                                 * are done.
                                 */
-                               if (port != execlists->port) {
+                               if (port == last_port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
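/*
 * Sanity check against the removed code: with port_mask == 1, last_port
 * is &execlists->port[1], so "port == last_port" fires in exactly the
 * states where the old "port != execlists->port" fired, i.e. once the
 * first port is filled and the second context cannot be coalesced.
 */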
 
                 * @context_id: context ID for port
                 */
                GEM_DEBUG_DECL(u32 context_id);
-       } port[2];
+
+#define EXECLIST_MAX_PORTS 2
+       } port[EXECLIST_MAX_PORTS];
+
+       /**
+        * @port_mask: number of execlist ports - 1
+        */
+       unsigned int port_mask;
 
        /**
         * @queue: queue of requests, in priority lists
        u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
+{
+       return execlists->port_mask + 1;
+}
+
 static inline void
 execlists_port_complete(struct intel_engine_execlists * const execlists,
                        struct execlist_port * const port)
 {
-       struct execlist_port * const port1 = &execlists->port[1];
+       const unsigned int m = execlists->port_mask;
 
        GEM_BUG_ON(port_index(port, execlists) != 0);
 
-       *port = *port1;
-       memset(port1, 0, sizeof(struct execlist_port));
+       memmove(port, port + 1, m * sizeof(struct execlist_port));
+       memset(port + m, 0, sizeof(struct execlist_port));
 }
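/*
 * The memmove generalises the old fixed two-port shuffle. Worked example
 * with a hypothetical four-port engine (port_mask == 3): completing the
 * request in port[0] slides ports 1..3 down one slot and clears the tail:
 *
 *	before: [rq0][rq1][rq2][rq3]
 *	after:  [rq1][rq2][rq3][ - ]
 *
 * With port_mask == 1 this reduces to the previous
 * "*port = *port1; memset(port1, 0, ...)" pair.
 */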
 
 static inline unsigned int