struct nvkm_event event;
        } nonstall;
 
+       struct {
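+               /* Maximum time, in ms, to wait for channel-related operations
+                * (e.g. a runlist update) before giving up; see the TODO at
+                * the point of initialisation for the CTXSW_TIMEOUT caveat.
+                */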
+               u32 chan_msec;
+       } timeout;
+
        int nr;
        struct list_head chan;
        spinlock_t lock;
 
        fifo->func = func;
        INIT_LIST_HEAD(&fifo->runqs);
        INIT_LIST_HEAD(&fifo->runls);
+       /*TODO: Needs to be >CTXSW_TIMEOUT, so RC (channel recovery) can run
+        *      before this is hit.  The CTXSW_TIMEOUT HW default seems to
+        *      differ between GPUs, so use a large number for now until we
+        *      support changing it.
+        */
+       fifo->timeout.chan_msec = 10000;
        spin_lock_init(&fifo->lock);
        mutex_init(&fifo->mutex);
 
 
        .intr_0_names = gf100_runq_intr_0_names,
 };
 
+static bool
+gf100_runl_pending(struct nvkm_runl *runl)
+{
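+       /* Bit 20 of 0x00227c is set while a runlist submitted via 0x002274
+        * is still being processed; the old wait_event loop polled this
+        * same bit.
+        */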
+       return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000;
+}
+
 void
 gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 {
        struct gf100_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
+       struct nvkm_runl *runl = nvkm_runl_first(&fifo->base);
        struct nvkm_memory *cur;
        int nr = 0;
        int target;
                                    (target << 28));
        nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
 
-       if (wait_event_timeout(fifo->runlist.wait,
-                              !(nvkm_rd32(device, 0x00227c) & 0x00100000),
-                              msecs_to_jiffies(2000)) == 0)
-               nvkm_error(subdev, "runlist update timeout\n");
+       runl->func->wait(runl);
        mutex_unlock(&fifo->base.mutex);
 }
 
 
 static const struct nvkm_runl_func
 gf100_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gf100_runl_pending,
 };
 
 static void
 }
 
 static void
-gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
+gf100_fifo_intr_runlist(struct nvkm_fifo *fifo)
 {
-       struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+       struct nvkm_subdev *subdev = &fifo->engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x002a00);
 
        if (intr & 0x10000000) {
-               wake_up(&fifo->runlist.wait);
                nvkm_wr32(device, 0x002a00, 0x10000000);
                intr &= ~0x10000000;
        }
        }
 
        if (stat & 0x40000000) {
-               gf100_fifo_intr_runlist(gf100_fifo(fifo));
+               gf100_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }
 
        if (ret)
                return ret;
 
-       init_waitqueue_head(&fifo->runlist.wait);
-
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
                              0x1000, false, &fifo->user.mem);
        if (ret)
 
        struct {
                struct nvkm_memory *mem[2];
                int active;
-               wait_queue_head_t wait;
        } runlist;
 
        struct {
 
        .intr_0_names = gk104_runq_intr_0_names,
 };
 
+bool
+gk104_runl_pending(struct nvkm_runl *runl)
+{
+       struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
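+       /* Per-runlist variant of the gf100 check: bit 20 of
+        * 0x002284 + (id * 0x08) is set while that runlist's last
+        * submission is still being processed (see the old poll loop).
+        */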
+       return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
+}
+
 void
 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
                          struct nvkm_memory *mem, int nr)
 {
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
+       struct nvkm_runl *rl = nvkm_runl_get(&fifo->base, runl, 0);
        int target;
 
        switch (nvkm_memory_target(mem)) {
                                    (target << 28));
        nvkm_wr32(device, 0x002274, (runl << 20) | nr);
 
-       if (nvkm_msec(device, 2000,
-               if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
-                       break;
-       ) < 0)
-               nvkm_error(subdev, "runlist %d update timeout\n", runl);
+       rl->func->wait(rl);
 }
 
 void
 
 static const struct nvkm_runl_func
 gk104_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gk104_runl_pending,
 };
 
 int
 }
 
 void
-gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
+gk104_fifo_intr_runlist(struct nvkm_fifo *fifo)
 {
-       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       struct nvkm_device *device = fifo->engine.subdev.device;
+       struct nvkm_runl *runl;
        u32 mask = nvkm_rd32(device, 0x002a00);
-       while (mask) {
-               int runl = __ffs(mask);
-               wake_up(&fifo->runlist[runl].wait);
-               nvkm_wr32(device, 0x002a00, 1 << runl);
-               mask &= ~(1 << runl);
+
+       nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
+               nvkm_wr32(device, 0x002a00, BIT(runl->id));
        }
 }
 
        }
 
        if (stat & 0x40000000) {
-               gk104_fifo_intr_runlist(gk104_fifo(fifo));
+               gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }
 
                                return ret;
                }
 
-               init_waitqueue_head(&fifo->runlist[i].wait);
                INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
                INIT_LIST_HEAD(&fifo->runlist[i].chan);
        }
 
        struct {
                struct nvkm_memory *mem[2];
                int next;
-               wait_queue_head_t wait;
                struct list_head cgrp;
                struct list_head chan;
                u32 engm;
 void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
 void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
                              struct gk104_fifo_engine_status *status);
-void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
 void *gk104_fifo_dtor(struct nvkm_fifo *base);
 int gk104_fifo_oneinit(struct nvkm_fifo *);
 void gk104_fifo_init(struct nvkm_fifo *base);
 
 
 const struct nvkm_runl_func
 gk110_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gk104_runl_pending,
 };
 
 int
 
 
 const struct nvkm_runl_func
 gm107_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gk104_runl_pending,
 };
 
 static const struct nvkm_enum
 
 
 static const struct nvkm_runl_func
 gp100_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gk104_runl_pending,
 };
 
 static const struct nvkm_enum
 
 
 static const struct nvkm_runl_func
 gv100_runl = {
+       .wait = nv50_runl_wait,
+       .pending = gk104_runl_pending,
 };
 
 const struct nvkm_enum
 
 #include "chid.h"
 #include "runl.h"
 
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
 #include "nv50.h"
 #include "channv50.h"
 
-#include <core/gpuobj.h>
-
 #include <nvif/class.h>
 
 static const struct nvkm_chan_func
        mutex_unlock(&fifo->base.mutex);
 }
 
+static bool
+nv50_runl_pending(struct nvkm_runl *runl)
+{
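+       /* Bit 8 of 0x0032ec appears to be set while a runlist (playlist)
+        * update is outstanding on these GPUs; treat it as the pending flag.
+        */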
+       return nvkm_rd32(runl->fifo->engine.subdev.device, 0x0032ec) & 0x00000100;
+}
+
+int
+nv50_runl_wait(struct nvkm_runl *runl)
+{
+       struct nvkm_fifo *fifo = runl->fifo;
+
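+       /* Poll the pending flag for up to fifo->timeout.chan_msec ms,
+        * sleeping briefly between reads; returning from inside the
+        * nvkm_msec() body returns from this function with success.
+        */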
+       nvkm_msec(fifo->engine.subdev.device, fifo->timeout.chan_msec,
+               if (!nvkm_runl_update_pending(runl))
+                       return 0;
+               usleep_range(1, 2);
+       );
+
+       nvkm_error(&fifo->engine.subdev, "runlist %d update timeout\n", runl->id);
+       return -ETIMEDOUT;
+}
+
 const struct nvkm_runl_func
 nv50_runl = {
+       .wait = nv50_runl_wait,
+       .pending = nv50_runl_pending,
 };
 
 void
 
 int nv50_fifo_chid_nr(struct nvkm_fifo *);
 int nv50_fifo_chid_ctor(struct nvkm_fifo *, int);
 extern const struct nvkm_runl_func nv50_runl;
+int nv50_runl_wait(struct nvkm_runl *);
 extern const struct nvkm_engn_func nv50_engn_sw;
 
 extern const struct nvkm_event_func g84_fifo_nonstall;
 int gk104_fifo_runl_ctor(struct nvkm_fifo *);
 void gk104_fifo_init_pbdmas(struct nvkm_fifo *, u32);
 irqreturn_t gk104_fifo_intr(struct nvkm_inth *);
+void gk104_fifo_intr_runlist(struct nvkm_fifo *);
 void gk104_fifo_intr_chsw(struct nvkm_fifo *);
 void gk104_fifo_intr_bind(struct nvkm_fifo *);
 extern const struct nvkm_fifo_func_mmu_fault gk104_fifo_mmu_fault;
 extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
 void gk104_fifo_recover_chan(struct nvkm_fifo *, int);
 int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+bool gk104_runl_pending(struct nvkm_runl *);
 extern const struct nvkm_runq_func gk104_runq;
 void gk104_runq_init(struct nvkm_runq *);
 bool gk104_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
 
        return NULL;
 }
 
+bool
+nvkm_runl_update_pending(struct nvkm_runl *runl)
+{
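+       /* Thin wrapper over the per-runlist pending() hook; wait paths such
+        * as nv50_runl_wait() poll this until the HW has consumed the last
+        * runlist submission.
+        */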
+       return runl->func->pending(runl);
+}
+
 void
 nvkm_runl_del(struct nvkm_runl *runl)
 {
 
 
 struct nvkm_runl {
        const struct nvkm_runl_func {
+               int (*wait)(struct nvkm_runl *);
+               bool (*pending)(struct nvkm_runl *);
        } *func;
        struct nvkm_fifo *fifo;
        int id;
 struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
                                enum nvkm_subdev_type, int inst);
 void nvkm_runl_del(struct nvkm_runl *);
+bool nvkm_runl_update_pending(struct nvkm_runl *);
 
 struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
 struct nvkm_chan *nvkm_runl_chan_get_inst(struct nvkm_runl *, u64 inst, unsigned long *irqflags);
 
 #define nvkm_runl_find_engn(engn,runl,cond) nvkm_list_find(engn, &(runl)->engns, head, (cond))
 
+#define nvkm_runl_first(fifo) list_first_entry(&(fifo)->runls, struct nvkm_runl, head)
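+/* NB: nvkm_runl_first() assumes the runlist list is non-empty; callers such
+ * as gf100_fifo_runlist_commit() are expected to run only after runlist
+ * construction.
+ */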
 #define nvkm_runl_foreach(runl,fifo) list_for_each_entry((runl), &(fifo)->runls, head)
+#define nvkm_runl_foreach_cond(runl,fifo,cond) nvkm_list_foreach(runl, &(fifo)->runls, head, (cond))
 #define nvkm_runl_foreach_engn(engn,runl) list_for_each_entry((engn), &(runl)->engns, head)
 #define nvkm_runl_foreach_engn_cond(engn,runl,cond) \
        nvkm_list_foreach(engn, &(runl)->engns, head, (cond))
 
 tu102_chan = {
 };
 
+static bool
+tu102_runl_pending(struct nvkm_runl *runl)
+{
+       struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
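+       /* Turing moved runlist submission to the 0x002b00 register block;
+        * bit 15 of 0x002b0c + (id * 0x10) appears to be set while the last
+        * submission is pending, which answers the old XXX about waiting.
+        */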
+       return nvkm_rd32(device, 0x002b0c + (runl->id * 0x10)) & 0x00008000;
+}
+
 static void
 tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
                          struct nvkm_memory *mem, int nr)
        nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
        nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
        nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
-
-       /*XXX: how to wait? can you even wait? */
 }
 
 static const struct gk104_fifo_runlist_func
 
 static const struct nvkm_runl_func
 tu102_runl = {
+       .wait = nv50_runl_wait,
+       .pending = tu102_runl_pending,
 };
 
 static const struct nvkm_enum
        }
 
        if (stat & 0x40000000) {
-               gk104_fifo_intr_runlist(gk104_fifo(fifo));
+               gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }