__skb_queue_tail(&dev->done, skb);
        if (dev->done.qlen == 1)
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        spin_unlock(&dev->done.lock);
        spin_unlock_irqrestore(&list->lock, flags);
        return old_state;
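
A note on the qlen check above: the BH work is queued only on the done
list's empty-to-non-empty transition. One run of the work drains the
whole list, so completions arriving while a run is already queued need
no extra kick. A minimal sketch of the same idiom, with hypothetical
demo_* names and assuming the queue and work were set up elsewhere via
skb_queue_head_init() and INIT_WORK():

	#include <linux/skbuff.h>
	#include <linux/workqueue.h>

	struct demo {
		struct sk_buff_head	done;		/* completed skbs */
		struct work_struct	bh_work;	/* drains @done in BH context */
	};

	static void demo_complete(struct demo *d, struct sk_buff *skb)
	{
		unsigned long flags;

		spin_lock_irqsave(&d->done.lock, flags);
		__skb_queue_tail(&d->done, skb);
		/* only the 0 -> 1 transition needs a kick; the run already
		 * queued picks up everything added after it
		 */
		if (d->done.qlen == 1)
			queue_work(system_bh_wq, &d->bh_work);
		spin_unlock_irqrestore(&d->done.lock, flags);
	}
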
                default:
                        netif_dbg(dev, rx_err, dev->net,
                                  "rx submit, %d\n", retval);
-                       tasklet_schedule (&dev->bh);
+                       queue_work(system_bh_wq, &dev->bh_work);
                        break;
                case 0:
                        __usbnet_queue_skb(&dev->rxq, skb, rx_start);
                num++;
        }
 
-       tasklet_schedule(&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
 
        netif_dbg(dev, rx_status, dev->net,
                  "paused rx queue disabled, %d skbs requeued\n", num);
 {
        if (netif_running(dev->net)) {
                (void) unlink_urbs (dev, &dev->rxq);
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        }
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
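
usbnet_unlink_rx_urbs() keeps its contract across the conversion: the
unlink completions land on dev->done, and the BH work both reaps them
and resubmits reads, so the single kick is enough. A typical caller,
sketched after the rx-resize path in usbnet_change_mtu():

	/* sketch: rx_urb_size was just grown, so force in-flight URBs
	 * to be relinked and resubmitted with the new size
	 */
	usbnet_pause_rx(dev);
	usbnet_unlink_rx_urbs(dev);
	usbnet_resume_rx(dev);
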
        /* deferred work (timer, softirq, task) must also stop */
        dev->flags = 0;
        timer_delete_sync(&dev->delay);
-       tasklet_kill(&dev->bh);
+       cancel_work_sync(&dev->bh_work);
        cancel_work_sync(&dev->kevent);
 
        /* We have cyclic dependencies. Those calls are needed
         * to break a cycle. We cannot fall into the gaps because
         * we have a flag
         */
-       tasklet_kill(&dev->bh);
+       cancel_work_sync(&dev->bh_work);
        timer_delete_sync(&dev->delay);
        cancel_work_sync(&dev->kevent);
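
Why cancel_work_sync() here rather than disable_work_sync():
cancel_work_sync() is the direct analogue of tasklet_kill(), waiting
out a running instance and clearing a pending one while leaving the
work queueable, so the plain queue_work() in usbnet_open() below keeps
working. The disable variant would need explicit balancing:

	/* Alternative pairing (not used here): disable_work_sync() nests
	 * a disable count, so each stop/open cycle would need
	 *
	 *	disable_work_sync(&dev->bh_work);		in usbnet_stop()
	 *	enable_and_queue_work(system_bh_wq, &dev->bh_work);	in usbnet_open()
	 *
	 * with exactly one disable per enable; an unbalanced count makes
	 * queue_work() return false and rx never restarts after ifup.
	 */
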
 
        clear_bit(EVENT_RX_KILL, &dev->flags);
 
        // delay posting reads until we're fully open
-       tasklet_schedule (&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
        if (info->manage_power) {
                retval = info->manage_power(dev, 1);
                if (retval < 0) {
                 */
        } else {
                /* submitting URBs for reading packets */
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        }
 
        /* hard_mtu or rx_urb_size may change during link change */
                } else {
                        clear_bit (EVENT_RX_HALT, &dev->flags);
                        if (!usbnet_going_away(dev))
-                               tasklet_schedule(&dev->bh);
+                               queue_work(system_bh_wq, &dev->bh_work);
                }
        }
 
-       /* tasklet could resubmit itself forever if memory is tight */
+       /* the BH work could requeue itself forever if memory is tight */
        if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
                struct urb      *urb = NULL;
                int resched = 1;
 fail_lowmem:
                        if (resched)
                                if (!usbnet_going_away(dev))
-                                       tasklet_schedule(&dev->bh);
+                                       queue_work(system_bh_wq, &dev->bh_work);
                }
        }
 
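
The EVENT_RX_MEMORY path is the loop the comment above warns about:
allocation failure defers back to the BH work, the BH work raises the
event again, and only the going-away check breaks the cycle. A sketch
of the guarded self-requeue, extending the hypothetical demo struct
with an unsigned long flags and a struct work_struct kevent:

	#include <linux/usb.h>
	#include <linux/workqueue.h>

	enum { DEMO_RX_MEMORY, DEMO_GOING_AWAY };	/* hypothetical flag bits */

	static void demo_kevent(struct work_struct *work)
	{
		struct demo *d = from_work(d, work, kevent);
		struct urb *urb;

		if (!test_bit(DEMO_RX_MEMORY, &d->flags))
			return;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			/* memory still tight: let the BH work retry later,
			 * unless the device is being torn down
			 */
			if (!test_bit(DEMO_GOING_AWAY, &d->flags))
				queue_work(system_bh_wq, &d->bh_work);
			return;
		}
		clear_bit(DEMO_RX_MEMORY, &d->flags);
		usb_free_urb(urb);	/* the real code would submit it */
	}
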
        struct usbnet           *dev = netdev_priv(net);
 
        unlink_urbs (dev, &dev->txq);
-       tasklet_schedule (&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
        /* this needs to be handled individually because the generic layer
         * doesn't know what is sufficient and could not restore private
         * information if a remedy of an unconditional reset were used.
 
 /*-------------------------------------------------------------------------*/
 
-// tasklet (work deferred from completions, in_irq) or timer
+// BH work (deferred from completions, in_irq) or timer
 
 static void usbnet_bh (struct timer_list *t)
 {
                                          "rxqlen %d --> %d\n",
                                          temp, dev->rxq.qlen);
                        if (dev->rxq.qlen < RX_QLEN(dev))
-                               tasklet_schedule (&dev->bh);
+                               queue_work(system_bh_wq, &dev->bh_work);
                }
                if (dev->txq.qlen < TX_QLEN (dev))
                        netif_wake_queue (dev->net);
        }
 }
 
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
 {
-       struct usbnet *dev = from_tasklet(dev, t, bh);
+       struct usbnet *dev = from_work(dev, work, bh_work);
 
        usbnet_bh(&dev->delay);
 }
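
The wrapper above is the heart of the conversion: usbnet_bh() keeps its
timer_list * signature so it can remain the timer callback (see
timer_setup() below), and the work path recovers the device from the
work pointer and re-enters through the embedded timer. from_work() is
just a container_of() wrapper, roughly:

	/* roughly how from_work() is defined in <linux/workqueue.h> */
	#define from_work(var, callback_work, work_fieldname) \
		container_of(callback_work, typeof(*var), work_fieldname)
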
        skb_queue_head_init (&dev->txq);
        skb_queue_head_init (&dev->done);
        skb_queue_head_init(&dev->rxq_pause);
-       tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+       INIT_WORK(&dev->bh_work, usbnet_bh_work);
        INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
        init_usb_anchor(&dev->deferred);
        timer_setup(&dev->delay, usbnet_bh, 0);
 
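
For completeness, the init hunk presupposes the matching field change
in include/linux/usb/usbnet.h, i.e. the embedded tasklet becoming a
plain work item, along these lines:

	-	struct tasklet_struct	bh;
	+	struct work_struct	bh_work;
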
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
-                       tasklet_schedule (&dev->bh);
+                       queue_work(system_bh_wq, &dev->bh_work);
                }
        }