if (nframes)
                queue_work(sdio->txrx_wq, &sdio->tx_work);
 
-       wake_up_process(sdio->kthread);
+       queue_work(sdio->txrx_wq, &sdio->work);
 }
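
The queue_work() calls in this patch target a work_struct that none of the visible hunks declare; the corresponding mt76.h change is not part of this excerpt. A minimal sketch of the field the patch presumably adds to struct mt76_sdio, shown next to the members the excerpt does reference (everything else omitted):

	struct mt76_sdio {
		struct workqueue_struct *txrx_wq;
		struct work_struct tx_work;
		struct work_struct rx_work;
		struct work_struct stat_work;

		/* assumed from this excerpt: the new tx/rx processing work,
		 * wired up below via INIT_WORK(&sdio->work, mt76s_txrx_work)
		 */
		struct work_struct work;

		/* ... remaining fields unchanged ... */
	};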
 
@@ ... @@ void mt7663s_rx_work(struct work_struct *work)

        if (intr.isr & WHIER_RX0_DONE_INT_EN) {
                ret = mt7663s_rx_run_queue(dev, 0, &intr);
                if (ret > 0) {
-                       wake_up_process(sdio->kthread);
+                       queue_work(sdio->txrx_wq, &sdio->work);
                        nframes += ret;
                }
        }
        if (intr.isr & WHIER_RX1_DONE_INT_EN) {
                ret = mt7663s_rx_run_queue(dev, 1, &intr);
                if (ret > 0) {
-                       wake_up_process(sdio->kthread);
+                       queue_work(sdio->txrx_wq, &sdio->work);
                        nframes += ret;
                }
        }
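
queue_work() has simpler semantics than the kthread wakeup it replaces: queueing an already-pending work item is a no-op, so back-to-back completions on the two rx queues coalesce into a single handler pass. A sketch of the property this hunk relies on:

	/* if the rx0 call queued sdio->work and it has not started running
	 * yet, the rx1 call returns false and queues nothing: one run of
	 * mt76s_txrx_work() drains both queues
	 */
	queue_work(sdio->txrx_wq, &sdio->work);		/* rx0 completion */
	queue_work(sdio->txrx_wq, &sdio->work);		/* rx1 completion */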
 
 
@@ ... @@
        cancel_work_sync(&sdio->tx_work);
        cancel_work_sync(&sdio->rx_work);
+       cancel_work_sync(&sdio->work);
        cancel_work_sync(&sdio->stat_work);
        clear_bit(MT76_READING_STATS, &dev->phy.state);
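
The cancellation order is deliberate: the tx and rx works re-arm sdio->work via queue_work() (see the hunks above), so they have to be flushed first; cancel_work_sync() then guarantees nothing can re-queue sdio->work behind our back. A commented restatement of the same sequence:

	cancel_work_sync(&sdio->tx_work);	/* may still queue sdio->work */
	cancel_work_sync(&sdio->rx_work);	/* likewise */
	cancel_work_sync(&sdio->work);		/* nothing re-arms it now */
	cancel_work_sync(&sdio->stat_work);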
 
@@ ... @@
        .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
 };
 
-static int mt76s_kthread_run(void *data)
+static void mt76s_txrx_work(struct work_struct *work)
 {
-       struct mt76_dev *dev = data;
-       struct mt76_phy *mphy = &dev->phy;
-
-       while (!kthread_should_stop()) {
-               int i, nframes = 0;
-
-               cond_resched();
-
-               /* rx processing */
-               local_bh_disable();
-               rcu_read_lock();
-
-               mt76_for_each_q_rx(dev, i)
-                       nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+       struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, work);
+       struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+       int i;
 
-               rcu_read_unlock();
-               local_bh_enable();
+       /* rx processing */
+       local_bh_disable();
+       rcu_read_lock();
 
-               /* tx processing */
-               for (i = 0; i < MT_TXQ_MCU_WA; i++)
-                       nframes += mt76s_process_tx_queue(dev, i);
+       mt76_for_each_q_rx(dev, i)
+               mt76s_process_rx_queue(dev, &dev->q_rx[i]);
 
-               if (dev->drv->tx_status_data &&
-                   !test_and_set_bit(MT76_READING_STATS, &mphy->state))
-                       queue_work(dev->wq, &dev->sdio.stat_work);
+       rcu_read_unlock();
+       local_bh_enable();
 
-               if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule();
-               }
-       }
+       /* tx processing */
+       for (i = 0; i < MT_TXQ_MCU_WA; i++)
+               mt76s_process_tx_queue(dev, i);
 
-       return 0;
+       if (dev->drv->tx_status_data &&
+           !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+               queue_work(dev->wq, &dev->sdio.stat_work);
 }
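
One detail worth keeping in mind: the handler now runs in plain kworker process context, but the mt76 rx path expects the BH-disabled, RCU-protected environment it would get from a NAPI poll, which is why the kthread's bracketing survives the conversion unchanged. An annotated restatement of the rx half (the comments are my reading, not part of the patch):

	local_bh_disable();	/* rx indication assumes softirq-like context */
	rcu_read_lock();	/* wcid/sta lookups on the rx path are RCU-protected */

	mt76_for_each_q_rx(dev, i)
		mt76s_process_rx_queue(dev, &dev->q_rx[i]);

	rcu_read_unlock();
	local_bh_enable();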
 
@@ ... @@ void mt76s_deinit(struct mt76_dev *dev)
        struct mt76_sdio *sdio = &dev->sdio;
        int i;
 
-       kthread_stop(sdio->kthread);
        mt76s_stop_txrx(dev);
-
        if (sdio->txrx_wq) {
                destroy_workqueue(sdio->txrx_wq);
                sdio->txrx_wq = NULL;
@@ ... @@
        if (!sdio->txrx_wq)
                return -ENOMEM;
 
-       sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
-       if (IS_ERR(sdio->kthread))
-               return PTR_ERR(sdio->kthread);
-
        INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+       INIT_WORK(&sdio->work, mt76s_txrx_work);
 
        mutex_init(&sdio->sched.lock);
        dev->queue_ops = &sdio_queue_ops;
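
For completeness: the NULL check on sdio->txrx_wq implies the workqueue is allocated just above the visible hunk. A sketch of the assumed init sequence; the workqueue name and flags here are guesses, only the check and the two INIT_WORK lines actually appear in the patch:

	/* assumed allocation, not shown in the excerpt */
	sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!sdio->txrx_wq)
		return -ENOMEM;

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
	INIT_WORK(&sdio->work, mt76s_txrx_work);

	mutex_init(&sdio->sched.lock);
	dev->queue_ops = &sdio_queue_ops;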