@@ ... @@ static void read_bulk_callback(struct urb *urb)
 {
        struct net_device *netdev;
-       unsigned long flags;
        int status = urb->status;
        struct rx_agg *agg;
        struct r8152 *tp;
@@ ... @@ static void read_bulk_callback(struct urb *urb)
                if (urb->actual_length < ETH_ZLEN)
                        break;
 
-               spin_lock_irqsave(&tp->rx_lock, flags);
+               spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               spin_unlock(&tp->rx_lock);
                tasklet_schedule(&tp->tl);
                return;
        case -ESHUTDOWN:
@@ ... @@ static void read_bulk_callback(struct urb *urb)
        if (result == -ENODEV) {
                netif_device_detach(tp->netdev);
        } else if (result) {
-               spin_lock_irqsave(&tp->rx_lock, flags);
+               spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               spin_unlock(&tp->rx_lock);
                tasklet_schedule(&tp->tl);
        }
 }
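
The rx_lock change above is the first instance of the pattern this patch applies
throughout: the URB completion handlers appear to run in interrupt context, where
the flags that spin_lock_irqsave() saves cannot change, so the save/restore is
pure overhead and plain spin_lock() is sufficient. A minimal sketch of the
equivalence under that assumption (complete_in_irq is a made-up name, not driver
code):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Entered with local IRQs already off: the irqsave form only saves
 * flags that cannot change, so the plain form below is equivalent. */
static void complete_in_irq(spinlock_t *lock, struct list_head *item,
			    struct list_head *done)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* before: redundant save */
	list_add_tail(item, done);
	spin_unlock_irqrestore(lock, flags);

	spin_lock(lock);			/* after: IRQs already off */
	list_add_tail(item, done);
	spin_unlock(lock);
}
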
@@ ... @@ static void write_bulk_callback(struct urb *urb)
 {
        struct net_device_stats *stats;
        struct net_device *netdev;
-       unsigned long flags;
        struct tx_agg *agg;
        struct r8152 *tp;
        int status = urb->status;
@@ ... @@ static void write_bulk_callback(struct urb *urb)
                stats->tx_bytes += agg->skb_len;
        }
 
-       spin_lock_irqsave(&tp->tx_lock, flags);
+       spin_lock(&tp->tx_lock);
        list_add_tail(&agg->list, &tp->tx_free);
-       spin_unlock_irqrestore(&tp->tx_lock, flags);
+       spin_unlock(&tp->tx_lock);
 
        usb_autopm_put_interface_async(tp->intf);
 
@@ ... @@
 static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 {
        struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-       unsigned long flags;
        int remain, ret;
        u8 *tx_data;
 
        __skb_queue_head_init(&skb_head);
-       spin_lock_irqsave(&tx_queue->lock, flags);
+       spin_lock_bh(&tx_queue->lock);
        skb_queue_splice_init(tx_queue, &skb_head);
-       spin_unlock_irqrestore(&tx_queue->lock, flags);
+       spin_unlock_bh(&tx_queue->lock);
 
        tx_data = agg->head;
        agg->skb_num = agg->skb_len = 0;
@@ ... @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
        }
 
        if (!skb_queue_empty(&skb_head)) {
-               spin_lock_irqsave(&tx_queue->lock, flags);
+               spin_lock_bh(&tx_queue->lock);
                skb_queue_splice(&skb_head, tx_queue);
-               spin_unlock_irqrestore(&tx_queue->lock, flags);
+               spin_unlock_bh(&tx_queue->lock);
        }
 
        netif_tx_lock_bh(tp->netdev);
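
The tx_queue->lock sites switch to the _bh variants instead of the plain ones,
which is the usual rule when a lock is shared with a tasklet: whoever takes it
outside softirq context must keep bottom halves disabled, or the tasklet could
run on the same CPU and spin forever on a lock that CPU already holds. A
self-contained sketch of the pairing (all demo_* names are invented):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Softirq side: a running tasklet can only be interrupted by a hard
 * IRQ, and no IRQ handler takes demo_lock, so the plain lock is safe. */
static void demo_tasklet_fn(unsigned long data)
{
	spin_lock(&demo_lock);
	/* ... consume queued work ... */
	spin_unlock(&demo_lock);
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

/* Non-softirq side: disable bottom halves so demo_tasklet cannot run
 * on this CPU while the lock is held. */
static void demo_queue_work(void)
{
	spin_lock_bh(&demo_lock);
	/* ... queue work, then tasklet_schedule(&demo_tasklet) ... */
	spin_unlock_bh(&demo_lock);
}
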
@@ ... @@
 {
        struct net_device_stats *stats = &tp->netdev->stats;
        struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-       unsigned long flags;
        struct sk_buff *skb;
 
        if (skb_queue_empty(tx_queue))
                return;
 
        __skb_queue_head_init(&skb_head);
-       spin_lock_irqsave(&tx_queue->lock, flags);
+       spin_lock_bh(&tx_queue->lock);
        skb_queue_splice_init(tx_queue, &skb_head);
-       spin_unlock_irqrestore(&tx_queue->lock, flags);
+       spin_unlock_bh(&tx_queue->lock);
 
        while ((skb = __skb_dequeue(&skb_head))) {
                dev_kfree_skb(skb);
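
Note that both tx-side hunks rely on the same splice-then-process idiom: the
queue lock is held only long enough to move every pending skb onto a private,
on-stack list in O(1), and the skbs are then walked and freed with no lock held
at all. A standalone sketch (drop_all is a made-up name, not driver code):

#include <linux/skbuff.h>

static void drop_all(struct sk_buff_head *queue)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	__skb_queue_head_init(&tmp);

	spin_lock_bh(&queue->lock);
	skb_queue_splice_init(queue, &tmp);	/* O(1) bulk move */
	spin_unlock_bh(&queue->lock);

	/* tmp is private to this stack frame; no locking needed. */
	while ((skb = __skb_dequeue(&tmp)))
		dev_kfree_skb(skb);
}
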