unsigned int batching;
        struct kyber_ctx_queue *kcqs;
        struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
-       wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+       struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
        struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
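For reference, struct sbq_wait is the sbitmap-aware wrapper around a plain wait
queue entry. The declarations below are paraphrased from include/linux/sbitmap.h
(comments are mine), so read them as a sketch rather than the verbatim header:

        struct sbq_wait {
                struct sbitmap_queue *sbq;      /* non-NULL while this waiter is
                                                   accounted in sbq->ws_active */
                struct wait_queue_entry wait;   /* embedded wait_queue_entry_t */
        };

        void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
                                    struct sbq_wait_state *ws,
                                    struct sbq_wait *sbq_wait);
        void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

Embedding the wait_queue_entry_t inside struct sbq_wait is what lets the rest of
the patch recover the wrapper from the bare entry with container_of().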
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
-               init_waitqueue_func_entry(&khd->domain_wait[i],
+               khd->domain_wait[i].sbq = NULL;
+               init_waitqueue_func_entry(&khd->domain_wait[i].wait,
                                          kyber_domain_wake);
-               khd->domain_wait[i].private = hctx;
-               INIT_LIST_HEAD(&khd->domain_wait[i].entry);
+               khd->domain_wait[i].wait.private = hctx;
+               INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
                atomic_set(&khd->wait_index[i], 0);
        }
 
                             flush_busy_kcq, &data);
 }
 
-static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
                             void *key)
 {
-       struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
+       struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
+       struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
 
-       list_del_init(&wait->entry);
+       sbitmap_del_wait_queue(wait);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
 }
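The waker still passes kyber_domain_wake() the embedded wait_queue_entry_t, so
the callback uses container_of() to get back to the enclosing struct sbq_wait
before tearing the wait down. sbitmap_del_wait_queue() then does both halves of
the cleanup: it unlinks the entry and drops the waiter from the sbitmap_queue's
accounting. A simplified sketch of the lib/sbitmap.c helper, not the verbatim
upstream body:

        void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
        {
                list_del_init(&sbq_wait->wait.entry);
                if (sbq_wait->sbq) {
                        /* one fewer active waiter on this sbitmap_queue */
                        atomic_dec(&sbq_wait->sbq->ws_active);
                        sbq_wait->sbq = NULL;
                }
        }

Open-coding list_del_init() here, as the old callback did, would leave
sbq->ws_active out of sync, and the sbitmap wake-up path uses that counter to
decide whether it needs to look at the wait queues at all.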
 {
        unsigned int sched_domain = khd->cur_domain;
        struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-       wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
+       struct sbq_wait *wait = &khd->domain_wait[sched_domain];
        struct sbq_wait_state *ws;
        int nr;
 
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
-       if (nr < 0 && list_empty_careful(&wait->entry)) {
+       if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
                khd->domain_ws[sched_domain] = ws;
-               add_wait_queue(&ws->wait, wait);
+               sbitmap_add_wait_queue(domain_tokens, ws, wait);
 
                /*
                 * Try again in case a token was freed before we got on the wait
         * between the !list_empty_careful() check and us grabbing the lock, but
         * list_del_init() is okay with that.
         */
-       if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+       if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
                ws = khd->domain_ws[sched_domain];
                spin_lock_irq(&ws->wait.lock);
-               list_del_init(&wait->entry);
+               sbitmap_del_wait_queue(wait);
                spin_unlock_irq(&ws->wait.lock);
        }
 
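Note that sbitmap_add_wait_queue() takes the sbitmap_queue (domain_tokens) as
well as the wait state, because adding a waiter is also where sbq->ws_active is
bumped; that is why kyber_init_hctx() above starts each domain_wait with
.sbq = NULL and lets the helper fill it in. Again a paraphrased sketch of the
lib/sbitmap.c add side, not the exact upstream body:

        void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
                                    struct sbq_wait_state *ws,
                                    struct sbq_wait *sbq_wait)
        {
                if (!sbq_wait->sbq) {
                        /* first time queued for this wait: account it */
                        sbq_wait->sbq = sbq;
                        atomic_inc(&sbq->ws_active);
                }
                add_wait_queue(&ws->wait, &sbq_wait->wait);
        }

The surrounding logic in kyber_get_domain_token() is unchanged: it still retries
the token grab after queueing and removes itself if it raced with a wake-up;
only the add/remove primitives are swapped for the accounting-aware helpers.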
 {                                                                      \
        struct blk_mq_hw_ctx *hctx = data;                              \
        struct kyber_hctx_data *khd = hctx->sched_data;                 \
-       wait_queue_entry_t *wait = &khd->domain_wait[domain];           \
+       wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;      \
                                                                        \
        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));       \
        return 0;                                                       \