        RTL8152_LINK_CHG,
        SELECTIVE_SUSPEND,
        PHY_RESET,
+       SCHEDULE_TASKLET,
 };
 
 /* Define these values to match your device */
                return;
 
        if (!skb_queue_empty(&tp->tx_queue))
-               schedule_delayed_work(&tp->schedule, 0);
+               tasklet_schedule(&tp->tl);
 }
 
 static void intr_callback(struct urb *urb)
        u8 *tx_data;
 
        __skb_queue_head_init(&skb_head);
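+       /* This path now runs from the tx tasklet, i.e. softirq context with
+        * BHs already disabled, so the plain spin_lock/netif_tx_lock
+        * variants below are sufficient.
+        */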
-       spin_lock_bh(&tx_queue->lock);
+       spin_lock(&tx_queue->lock);
        skb_queue_splice_init(tx_queue, &skb_head);
-       spin_unlock_bh(&tx_queue->lock);
+       spin_unlock(&tx_queue->lock);
 
        tx_data = agg->head;
        agg->skb_num = agg->skb_len = 0;
        }
 
        if (!skb_queue_empty(&skb_head)) {
-               spin_lock_bh(&tx_queue->lock);
+               spin_lock(&tx_queue->lock);
                skb_queue_splice(&skb_head, tx_queue);
-               spin_unlock_bh(&tx_queue->lock);
+               spin_unlock(&tx_queue->lock);
        }
 
-       netif_tx_lock_bh(tp->netdev);
+       netif_tx_lock(tp->netdev);
 
        if (netif_queue_stopped(tp->netdev) &&
            skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
                netif_wake_queue(tp->netdev);
 
-       netif_tx_unlock_bh(tp->netdev);
+       netif_tx_unlock(tp->netdev);
 
-       ret = usb_autopm_get_interface(tp->intf);
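+       /* Cannot sleep here: take the autopm reference asynchronously and
+        * submit the URB with GFP_ATOMIC below.
+        */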
+       ret = usb_autopm_get_interface_async(tp->intf);
        if (ret < 0)
                goto out_tx_fill;
 
                          agg->head, (int)(tx_data - (u8 *)agg->head),
                          (usb_complete_t)write_bulk_callback, agg);
 
-       ret = usb_submit_urb(agg->urb, GFP_KERNEL);
+       ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
        if (ret < 0)
-               usb_autopm_put_interface(tp->intf);
+               usb_autopm_put_interface_async(tp->intf);
 
 out_tx_fill:
        return ret;
                return;
 
        rx_bottom(tp);
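+       /* Drain the tx queue from the same tasklet that handles rx. */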
+       tx_bottom(tp);
 }
 
 static
 }
 
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
-                                           struct net_device *netdev)
+                                       struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
 
 
        skb_queue_tail(&tp->tx_queue, skb);
 
-       if (list_empty(&tp->tx_free) &&
-           skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+       if (!list_empty(&tp->tx_free)) {
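+               /* A tx buffer is free.  While selectively suspended, defer
+                * to the delayed work via SCHEDULE_TASKLET; otherwise kick
+                * the tx tasklet directly and refresh the autosuspend timer.
+                */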
+               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       set_bit(SCHEDULE_TASKLET, &tp->flags);
+                       schedule_delayed_work(&tp->schedule, 0);
+               } else {
+                       usb_mark_last_busy(tp->udev);
+                       tasklet_schedule(&tp->tl);
+               }
+       } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
                netif_stop_queue(netdev);
 
-       if (!list_empty(&tp->tx_free))
-               schedule_delayed_work(&tp->schedule, 0);
-
        return NETDEV_TX_OK;
 }
 
        if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
                _rtl8152_set_rx_mode(tp->netdev);
 
-       if (tp->speed & LINK_STATUS)
-               tx_bottom(tp);
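+       /* Transmits deferred by rtl8152_start_xmit during selective suspend
+        * are handed back to the tx tasklet once the link is up.
+        */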
+       if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+           (tp->speed & LINK_STATUS)) {
+               clear_bit(SCHEDULE_TASKLET, &tp->flags);
+               tasklet_schedule(&tp->tl);
+       }
 
        if (test_bit(PHY_RESET, &tp->flags))
                rtl_phy_reset(tp);