static void read_bulk_callback(struct urb *urb)
 {
        struct net_device *netdev;
-       unsigned long lockflags;
+       unsigned long flags;
        int status = urb->status;
        struct rx_agg *agg;
        struct r8152 *tp;
                if (urb->actual_length < ETH_ZLEN)
                        break;
 
-               spin_lock_irqsave(&tp->rx_lock, lockflags);
+               spin_lock_irqsave(&tp->rx_lock, flags);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
                tasklet_schedule(&tp->tl);
                return;
        case -ESHUTDOWN:
        if (result == -ENODEV) {
                netif_device_detach(tp->netdev);
        } else if (result) {
-               spin_lock_irqsave(&tp->rx_lock, lockflags);
+               spin_lock_irqsave(&tp->rx_lock, flags);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
                tasklet_schedule(&tp->tl);
        }
 }
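
The hunks above show the pattern this rename touches throughout the driver: read_bulk_callback() is a URB completion handler and may run in hard-IRQ context, so it takes rx_lock with the irqsave variant, moves the finished aggregation buffer onto rx_done, drops the lock, and defers the real work to a tasklet. A minimal self-contained sketch of that producer side, using made-up demo_* types in place of the driver's struct r8152 and struct rx_agg (initialisation of the lock, list and tasklet is omitted):

/*
 * Sketch only: demo_dev and demo_agg are placeholders, not the
 * driver's real structures.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_agg {
        struct list_head list;
};

struct demo_dev {
        spinlock_t rx_lock;             /* protects rx_done */
        struct list_head rx_done;       /* completed buffers */
        struct tasklet_struct tl;       /* bottom-half worker */
};

/* May be called in hard-IRQ context, hence the irqsave variant. */
static void demo_rx_complete(struct demo_dev *dev, struct demo_agg *agg)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);
        list_add_tail(&agg->list, &dev->rx_done);
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        tasklet_schedule(&dev->tl);     /* drain rx_done later */
}
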
 static void write_bulk_callback(struct urb *urb)
 {
        struct net_device_stats *stats;
-       unsigned long lockflags;
+       unsigned long flags;
        struct tx_agg *agg;
        struct r8152 *tp;
        int status = urb->status;
                stats->tx_bytes += agg->skb_len;
        }
 
-       spin_lock_irqsave(&tp->tx_lock, lockflags);
+       spin_lock_irqsave(&tp->tx_lock, flags);
        list_add_tail(&agg->list, &tp->tx_free);
-       spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
 
        if (!netif_carrier_ok(tp->netdev))
                return;
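
The only change in these write_bulk_callback() hunks is the variable name: `flags` is the name used almost universally in the kernel for the interrupt state that spin_lock_irqsave() saves, so `lockflags` stood out for no benefit. The canonical shape of the pattern, as a tiny standalone sketch (demo_lock and demo_counter are invented for illustration):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_counter;

/*
 * spin_lock_irqsave() stores the caller's interrupt state in `flags`,
 * which must be a local variable in each calling function;
 * spin_unlock_irqrestore() puts that state back.
 */
static void demo_touch(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_counter++;
        spin_unlock_irqrestore(&demo_lock, flags);
}
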
static void rx_bottom(struct r8152 *tp)
        struct net_device *netdev;
        struct rx_agg *agg;
        struct rx_desc *rx_desc;
-       unsigned long lockflags;
+       unsigned long flags;
        struct list_head *cursor, *next;
        struct sk_buff *skb;
        struct urb *urb;
 
        stats = rtl8152_get_stats(netdev);
 
-       spin_lock_irqsave(&tp->rx_lock, lockflags);
+       spin_lock_irqsave(&tp->rx_lock, flags);
        list_for_each_safe(cursor, next, &tp->rx_done) {
                list_del_init(cursor);
-               spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
 
                agg = list_entry(cursor, struct rx_agg, list);
                urb = agg->urb;
                if (urb->actual_length < ETH_ZLEN) {
                        ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
-                       spin_lock_irqsave(&tp->rx_lock, lockflags);
+                       spin_lock_irqsave(&tp->rx_lock, flags);
                        if (ret && ret != -ENODEV) {
                                list_add_tail(&agg->list, next);
                                tasklet_schedule(&tp->tl);
                }
 
                ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
-               spin_lock_irqsave(&tp->rx_lock, lockflags);
+               spin_lock_irqsave(&tp->rx_lock, flags);
                if (ret && ret != -ENODEV) {
                        list_add_tail(&agg->list, next);
                        tasklet_schedule(&tp->tl);
                }
        }
-       spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+       spin_unlock_irqrestore(&tp->rx_lock, flags);
 }
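
rx_bottom() drops rx_lock while each buffer is processed and retakes it before the next iteration; failed resubmissions are reinserted ahead of `next`, which was sampled under the lock, so ordering is preserved. When that reinsertion ordering is not needed, a common alternative is to detach the whole list onto a private head in a single critical section and walk it with no lock held. A sketch under the same placeholder demo_* types, not the driver's code:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_agg {
        struct list_head list;
};

struct demo_dev {
        spinlock_t rx_lock;
        struct list_head rx_done;
};

static void demo_process(struct demo_agg *agg)
{
        /* parse descriptors, hand skbs to the stack, resubmit, ... */
}

/* Drain rx_done in one locked splice, then walk it lock-free. */
static void demo_rx_bottom(struct demo_dev *dev)
{
        struct demo_agg *agg, *tmp;
        unsigned long flags;
        LIST_HEAD(todo);

        spin_lock_irqsave(&dev->rx_lock, flags);
        list_splice_init(&dev->rx_done, &todo);
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        list_for_each_entry_safe(agg, tmp, &todo, list) {
                list_del_init(&agg->list);
                demo_process(agg);
        }
}
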
 
 static void tx_bottom(struct r8152 *tp)
        struct net_device_stats *stats;
        struct net_device *netdev;
        struct tx_agg *agg;
-       unsigned long lockflags;
+       unsigned long flags;
        u32 remain, total;
        u8 *tx_data;
        int res;
 
 next_agg:
        agg = NULL;
-       spin_lock_irqsave(&tp->tx_lock, lockflags);
+       spin_lock_irqsave(&tp->tx_lock, flags);
        if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) {
                struct list_head *cursor;
 
                cursor = tp->tx_free.next;
                list_del_init(cursor);
                agg = list_entry(cursor, struct tx_agg, list);
        }
-       spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
 
        if (!agg)
                return;
                        netif_warn(tp, tx_err, netdev,
                                   "failed tx_urb %d\n", res);
                        stats->tx_dropped += agg->skb_num;
-                       spin_lock_irqsave(&tp->tx_lock, lockflags);
+                       spin_lock_irqsave(&tp->tx_lock, flags);
                        list_add_tail(&agg->list, &tp->tx_free);
-                       spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+                       spin_unlock_irqrestore(&tp->tx_lock, flags);
                }
                return;
        }
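
tx_bottom() open-codes the pop of the first free aggregation buffer (`cursor = tp->tx_free.next` followed by list_del_init()); with list_first_entry() the same dequeue reads a little more directly. A placeholder-typed sketch of that step, not the driver's actual helper:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_agg {
        struct list_head list;
};

struct demo_dev {
        spinlock_t tx_lock;
        struct list_head tx_free;
};

/* Pop the first free aggregation buffer, or return NULL. */
static struct demo_agg *demo_get_free_agg(struct demo_dev *dev)
{
        struct demo_agg *agg = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (!list_empty(&dev->tx_free)) {
                agg = list_first_entry(&dev->tx_free,
                                       struct demo_agg, list);
                list_del_init(&agg->list);
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return agg;
}
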
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
        struct net_device_stats *stats = rtl8152_get_stats(netdev);
-       unsigned long lockflags;
+       unsigned long flags;
        struct tx_agg *agg = NULL;
        struct tx_desc *tx_desc;
        unsigned int len;
 
        skb_tx_timestamp(skb);
 
-       spin_lock_irqsave(&tp->tx_lock, lockflags);
+       spin_lock_irqsave(&tp->tx_lock, flags);
        if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) {
                struct list_head *cursor;
 
                cursor = tp->tx_free.next;
                list_del_init(cursor);
                agg = list_entry(cursor, struct tx_agg, list);
        }
-       spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
 
        if (!agg) {
                skb_queue_tail(&tp->tx_queue, skb);
                        netif_warn(tp, tx_err, netdev,
                                   "failed tx_urb %d\n", res);
                        stats->tx_dropped++;
-                       spin_lock_irqsave(&tp->tx_lock, lockflags);
+                       spin_lock_irqsave(&tp->tx_lock, flags);
                        list_add_tail(&agg->list, &tp->tx_free);
-                       spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+                       spin_unlock_irqrestore(&tp->tx_lock, flags);
                }
        }
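
rtl8152_start_xmit() sends immediately only when a free aggregation buffer exists and tx_queue is empty; otherwise the skb is queued so tx_bottom() can drain everything in order. The check itself, sketched with the same placeholder demo_dev type (sk_buff_head is the real kernel type):

#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_dev {
        spinlock_t tx_lock;
        struct list_head tx_free;
        struct sk_buff_head tx_queue;
};

/*
 * True only when a buffer is free AND nothing is already queued;
 * transmitting around a non-empty queue would reorder packets.
 */
static bool demo_can_send_now(struct demo_dev *dev)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev->tx_lock, flags);
        ret = !list_empty(&dev->tx_free) &&
              skb_queue_empty(&dev->tx_queue);
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}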