static int  ali_ircc_net_close(struct net_device *dev);
 static int  ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
 
 /* SIR function */
 static int  ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
        dev->open            = ali_ircc_net_open;
        dev->stop            = ali_ircc_net_close;
        dev->do_ioctl        = ali_ircc_net_ioctl;
-       dev->get_stats       = ali_ircc_net_get_stats;
 
        err = register_netdev(dev);
        if (err) {
          * async_unwrap_char will deliver all found frames  
         */
        do {
-               async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
+               async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
                                  inb(iobase+UART_RX));
 
                /* Make sure we don't stay here too long */
                        netif_wake_queue(self->netdev); 
                }
                        
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
                
                /* Turn on receive interrupts */
                outb(UART_IER_RDI, iobase+UART_IER);
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
        self->tx_fifo.tail += skb->len;
 
-       self->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
 
        skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
                      skb->len);
        
        {
                IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
-               self->stats.tx_errors++;
-               self->stats.tx_fifo_errors++;           
+               self->netdev->stats.tx_errors++;
+               self->netdev->stats.tx_fifo_errors++;
        }
        else 
        {
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
        }
 
        /* Check if we need to change the speed */
                        IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
                        
                        /* Skip frame */
-                       self->stats.rx_errors++;
+                       self->netdev->stats.rx_errors++;
                        
                        self->rx_buff.data += len;
                        
                        if (status & LSR_FIFO_UR) 
                        {
-                               self->stats.rx_frame_errors++;
+                               self->netdev->stats.rx_frame_errors++;
                                IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
                        }       
                        if (status & LSR_FRAME_ERROR)
                        {
-                               self->stats.rx_frame_errors++;
+                               self->netdev->stats.rx_frame_errors++;
                                IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
                        }
                                                        
                        if (status & LSR_CRC_ERROR) 
                        {
-                               self->stats.rx_crc_errors++;
+                               self->netdev->stats.rx_crc_errors++;
                                IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
                        }
                        
                        if(self->rcvFramesOverflow)
                        {
-                               self->stats.rx_frame_errors++;
+                               self->netdev->stats.rx_frame_errors++;
                                IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
                        }
                        if(len == 0)
                        {
-                               self->stats.rx_frame_errors++;
+                               self->netdev->stats.rx_frame_errors++;
                                IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
                        }
                }        
                                IRDA_WARNING("%s(), memory squeeze, "
                                             "dropping frame.\n",
                                             __func__);
-                               self->stats.rx_dropped++;
+                               self->netdev->stats.rx_dropped++;
 
                                return FALSE;
                        }
 
                        /* Move to next frame */
                        self->rx_buff.data += len;
-                       self->stats.rx_bytes += len;
-                       self->stats.rx_packets++;
+                       self->netdev->stats.rx_bytes += len;
+                       self->netdev->stats.rx_packets++;
 
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
        self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 
                                           self->tx_buff.truesize);
        
-       self->stats.tx_bytes += self->tx_buff.len;
+       self->netdev->stats.tx_bytes += self->tx_buff.len;
 
        /* Turn on transmit finished interrupt. Will fire immediately!  */
        outb(UART_IER_THRI, iobase+UART_IER); 
        return status;
 }
 
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
-{
-       struct ali_ircc_cb *self = netdev_priv(dev);
-       
-       IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
-               
-       IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
-       
-       return &self->stats;
-}
-
 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct ali_ircc_cb *self = platform_get_drvdata(dev);
 
        struct tx_fifo tx_fifo;    /* Info about frames to be transmitted */
 
        struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-       struct net_device_stats stats;
        
        struct irlap_cb *irlap;    /* The link layer we are binded to */
        struct qos_info qos;       /* QoS capabilities for this device */
 
        iobuff_t rx_buff;
 
        struct net_device *netdev;
-       struct net_device_stats stats;
        
        struct timeval stamp;
        struct timeval now;
 
 static int au1k_irda_rx(struct net_device *);
 static void au1k_irda_interrupt(int, void *);
 static void au1k_tx_timeout(struct net_device *);
-static struct net_device_stats *au1k_irda_stats(struct net_device *);
 static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
 static int au1k_irda_set_speed(struct net_device *dev, int speed);
 
        dev->open = au1k_irda_start;
        dev->hard_start_xmit = au1k_irda_hard_xmit;
        dev->stop = au1k_irda_stop;
-       dev->get_stats = au1k_irda_stats;
        dev->do_ioctl = au1k_irda_ioctl;
        dev->tx_timeout = au1k_tx_timeout;
 
        return ret;
 }
 
-
-static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
-{
-       struct au1k_private *aup = netdev_priv(dev);
-       return &aup->stats;
-}
-
 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
 MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
 
 
 struct toshoboe_cb
 {
   struct net_device *netdev;    /* Yes! we are some kind of netdevice */
-  struct net_device_stats stats;
   struct tty_driver ttydev;
 
   struct irlap_cb *irlap;       /* The link layer we are binded to */
 
 static int irda_usb_net_close(struct net_device *dev);
 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void irda_usb_net_timeout(struct net_device *dev);
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
 
 /************************ TRANSMIT ROUTINES ************************/
 /*
        /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
        if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
                IRDA_WARNING("%s(), failed Tx URB\n", __func__);
-               self->stats.tx_errors++;
+               netdev->stats.tx_errors++;
                /* Let USB recover : We will catch that in the watchdog */
                /*netif_start_queue(netdev);*/
        } else {
                /* Increment packet stats */
-               self->stats.tx_packets++;
-                self->stats.tx_bytes += skb->len;
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
                
                netdev->trans_start = jiffies;
        }
                IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
 
                /* Increase error count */
-               self->stats.tx_errors++;
+               netdev->stats.tx_errors++;
 
 #ifdef IU_BUG_KICK_TIMEOUT
                /* Can't be a bad idea to reset the speed ;-) - Jean II */
        if (urb->status != 0) {
                switch (urb->status) {
                case -EILSEQ:
-                       self->stats.rx_crc_errors++;    
+                       self->netdev->stats.rx_crc_errors++;
                        /* Also precursor to a hot-unplug on UHCI. */
                        /* Fallthrough... */
                case -ECONNRESET:
                case -ETIME:
                        /* Usually precursor to a hot-unplug on OHCI. */
                default:
-                       self->stats.rx_errors++;
+                       self->netdev->stats.rx_errors++;
                        IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
                        break;
                }
                                       IRDA_SKB_MAX_MTU);
 
        if (!newskb)  {
-               self->stats.rx_dropped++;
+               self->netdev->stats.rx_dropped++;
                /* We could deliver the current skb, but this would stall
                 * the Rx path. Better drop the packet... Jean II */
                goto done;  
        netif_rx(dataskb);
 
        /* Keep stats up to date */
-       self->stats.rx_bytes += len;
-       self->stats.rx_packets++;
+       self->netdev->stats.rx_bytes += len;
+       self->netdev->stats.rx_packets++;
 
 done:
        /* Note : at this point, the URB we've just received (urb)
 }
 
 /*------------------------------------------------------------------*/
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
-{
-       struct irda_usb_cb *self = netdev_priv(dev);
-       return &self->stats;
-}
 
 /********************* IRDA CONFIG SUBROUTINES *********************/
 /*
        netdev->watchdog_timeo  = 250*HZ/1000;  /* 250 ms > USB timeout */
        netdev->open            = irda_usb_net_open;
        netdev->stop            = irda_usb_net_close;
-       netdev->get_stats       = irda_usb_net_get_stats;
        netdev->do_ioctl        = irda_usb_net_ioctl;
 
        return register_netdev(netdev);
 
        struct urb *speed_urb;          /* URB used to send speed commands */
        
        struct net_device *netdev;      /* Yes! we are some kind of netdev. */
-       struct net_device_stats stats;
        struct irlap_cb   *irlap;       /* The link layer we are binded to */
        struct qos_info qos;
        char *speed_buff;               /* Buffer for speed changes */
 
        struct usb_device *usbdev;      /* init: probe_irda */
        struct net_device *netdev;      /* network layer */
        struct irlap_cb   *irlap;       /* The link layer we are binded to */
-       struct net_device_stats stats;  /* network statistics */
+
        struct qos_info   qos;
 
        __u8              *in_buf;      /* receive buffer */
                case -EPIPE:
                        break;
                default:
-                       kingsun->stats.tx_errors++;
+                       netdev->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
-               kingsun->stats.tx_packets++;
-               kingsun->stats.tx_bytes += skb->len;
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
        }
 
        dev_kfree_skb(skb);
                if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
                        for (i = 1; i <= bytes[0]; i++) {
                                async_unwrap_char(kingsun->netdev,
-                                                 &kingsun->stats,
+                                                 &kingsun->netdev->stats,
                                                  &kingsun->rx_buff, bytes[i]);
                        }
                        do_gettimeofday(&kingsun->rx_time);
        return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *
-kingsun_net_get_stats(struct net_device *netdev)
-{
-       struct kingsun_cb *kingsun = netdev_priv(netdev);
-       return &kingsun->stats;
-}
-
 /*
  * This routine is called by the USB subsystem for each new device
        net->hard_start_xmit = kingsun_hard_xmit;
        net->open            = kingsun_net_open;
        net->stop            = kingsun_net_close;
-       net->get_stats       = kingsun_net_get_stats;
        net->do_ioctl        = kingsun_net_ioctl;
 
        ret = register_netdev(net);
 
        struct usb_device *usbdev;      /* init: probe_irda */
        struct net_device *netdev;      /* network layer */
        struct irlap_cb *irlap; /* The link layer we are binded to */
-       struct net_device_stats stats;  /* network statistics */
+
        struct qos_info qos;
 
        struct usb_ctrlrequest *tx_setuprequest;
                                case -EPIPE:
                                        break;
                                default:
-                                       kingsun->stats.tx_errors++;
+                                       netdev->stats.tx_errors++;
                                        netif_start_queue(netdev);
                                }
                        }
                case -EPIPE:
                        break;
                default:
-                       kingsun->stats.tx_errors++;
+                       netdev->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
-               kingsun->stats.tx_packets++;
-               kingsun->stats.tx_bytes += skb->len;
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
 
        }
 
                         */
                        if (kingsun->rx_variable_xormask != 0) {
                                async_unwrap_char(kingsun->netdev,
-                                                 &kingsun->stats,
+                                                 &kingsun->netdev->stats,
                                                  &kingsun->rx_unwrap_buff,
                                                  bytes[i]);
                        }
        return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev)
-{
-       struct ks959_cb *kingsun = netdev_priv(netdev);
-       return &kingsun->stats;
-}
-
 /*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
        net->hard_start_xmit = ks959_hard_xmit;
        net->open = ks959_net_open;
        net->stop = ks959_net_close;
-       net->get_stats = ks959_net_get_stats;
        net->do_ioctl = ks959_net_ioctl;
 
        ret = register_netdev(net);
 
        struct usb_device *usbdev;      /* init: probe_irda */
        struct net_device *netdev;      /* network layer */
        struct irlap_cb *irlap; /* The link layer we are binded to */
-       struct net_device_stats stats;  /* network statistics */
+
        struct qos_info qos;
 
        struct urb *tx_urb;
                                case -EPIPE:
                                        break;
                                default:
-                                       kingsun->stats.tx_errors++;
+                                       netdev->stats.tx_errors++;
                                        netif_start_queue(netdev);
                                }
                        }
                case -EPIPE:
                        break;
                default:
-                       kingsun->stats.tx_errors++;
+                       netdev->stats.tx_errors++;
                        netif_start_queue(netdev);
                }
        } else {
-               kingsun->stats.tx_packets++;
-               kingsun->stats.tx_bytes += skb->len;
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
 
        }
 
 static void ksdazzle_rcv_irq(struct urb *urb)
 {
        struct ksdazzle_cb *kingsun = urb->context;
+       struct net_device *netdev = kingsun->netdev;
 
        /* in process of stopping, just drop data */
-       if (!netif_running(kingsun->netdev)) {
+       if (!netif_running(netdev)) {
                kingsun->receiving = 0;
                return;
        }
                unsigned int i;
 
                for (i = 0; i < urb->actual_length; i++) {
-                       async_unwrap_char(kingsun->netdev, &kingsun->stats,
+                       async_unwrap_char(netdev, &netdev->stats,
                                          &kingsun->rx_unwrap_buff, bytes[i]);
                }
                kingsun->receiving =
        return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *ksdazzle_net_get_stats(struct net_device
-                                                      *netdev)
-{
-       struct ksdazzle_cb *kingsun = netdev_priv(netdev);
-       return &kingsun->stats;
-}
-
 /*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
        net->hard_start_xmit = ksdazzle_hard_xmit;
        net->open = ksdazzle_net_open;
        net->stop = ksdazzle_net_close;
-       net->get_stats = ksdazzle_net_get_stats;
        net->do_ioctl = ksdazzle_net_ioctl;
 
        ret = register_netdev(net);
 
        if(unlikely(new_len <= 0)) {
                IRDA_ERROR("%s short frame length %d\n",
                             mcs->netdev->name, new_len);
-               ++mcs->stats.rx_errors;
-               ++mcs->stats.rx_length_errors;
+               ++mcs->netdev->stats.rx_errors;
+               ++mcs->netdev->stats.rx_length_errors;
                return;
        }
        fcs = 0;
        if(fcs != GOOD_FCS) {
                IRDA_ERROR("crc error calc 0x%x len %d\n",
                           fcs, new_len);
-               mcs->stats.rx_errors++;
-               mcs->stats.rx_crc_errors++;
+               mcs->netdev->stats.rx_errors++;
+               mcs->netdev->stats.rx_crc_errors++;
                return;
        }
 
        skb = dev_alloc_skb(new_len + 1);
        if(unlikely(!skb)) {
-               ++mcs->stats.rx_dropped;
+               ++mcs->netdev->stats.rx_dropped;
                return;
        }
 
 
        netif_rx(skb);
 
-       mcs->stats.rx_packets++;
-       mcs->stats.rx_bytes += new_len;
+       mcs->netdev->stats.rx_packets++;
+       mcs->netdev->stats.rx_bytes += new_len;
 
        return;
 }
        if(unlikely(new_len <= 0)) {
                IRDA_ERROR("%s short frame length %d\n",
                           mcs->netdev->name, new_len);
-               ++mcs->stats.rx_errors;
-               ++mcs->stats.rx_length_errors;
+               ++mcs->netdev->stats.rx_errors;
+               ++mcs->netdev->stats.rx_length_errors;
                return;
        }
 
        fcs = ~(crc32_le(~0, buf, new_len));
        if(fcs != get_unaligned_le32(buf + new_len)) {
                IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
-               mcs->stats.rx_errors++;
-               mcs->stats.rx_crc_errors++;
+               mcs->netdev->stats.rx_errors++;
+               mcs->netdev->stats.rx_crc_errors++;
                return;
        }
 
        skb = dev_alloc_skb(new_len + 1);
        if(unlikely(!skb)) {
-               ++mcs->stats.rx_dropped;
+               ++mcs->netdev->stats.rx_dropped;
                return;
        }
 
 
        netif_rx(skb);
 
-       mcs->stats.rx_packets++;
-       mcs->stats.rx_bytes += new_len;
+       mcs->netdev->stats.rx_packets++;
+       mcs->netdev->stats.rx_bytes += new_len;
 
        return;
 }
                return ret;
 }
 
-
-/* Get device stats for /proc/net/dev and ifconfig */
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev)
-{
-       struct mcs_cb *mcs = netdev_priv(netdev);
-       return &mcs->stats;
-}
-
 /* Receive callback function.  */
 static void mcs_receive_irq(struct urb *urb)
 {
                 */
                /* SIR speed */
                if(mcs->speed < 576000) {
-                       async_unwrap_char(mcs->netdev, &mcs->stats,
+                       async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                  &mcs->rx_buff, 0xc0);
 
                        for (i = 0; i < urb->actual_length; i++)
-                               async_unwrap_char(mcs->netdev, &mcs->stats,
+                               async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                          &mcs->rx_buff, bytes[i]);
 
-                       async_unwrap_char(mcs->netdev, &mcs->stats,
+                       async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                  &mcs->rx_buff, 0xc1);
                }
                /* MIR speed */
                case -EPIPE:
                        break;
                default:
-                       mcs->stats.tx_errors++;
+                       mcs->netdev->stats.tx_errors++;
                        netif_start_queue(ndev);
                }
        } else {
-               mcs->stats.tx_packets++;
-               mcs->stats.tx_bytes += skb->len;
+               mcs->netdev->stats.tx_packets++;
+               mcs->netdev->stats.tx_bytes += skb->len;
        }
 
        dev_kfree_skb(skb);
        ndev->hard_start_xmit = mcs_hard_xmit;
        ndev->open = mcs_net_open;
        ndev->stop = mcs_net_close;
-       ndev->get_stats = mcs_net_get_stats;
        ndev->do_ioctl = mcs_net_ioctl;
 
        if (!intf->cur_altsetting)
 
        struct usb_device *usbdev;      /* init: probe_irda */
        struct net_device *netdev;      /* network layer */
        struct irlap_cb *irlap; /* The link layer we are binded to */
-       struct net_device_stats stats;  /* network statistics */
        struct qos_info qos;
        unsigned int speed;     /* Current speed */
        unsigned int new_speed; /* new speed */
 static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
 static int mcs_net_close(struct net_device *netdev);
 static int mcs_net_open(struct net_device *netdev);
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev);
 
 static void mcs_receive_irq(struct urb *urb);
 static void mcs_send_irq(struct urb *urb);
 
 static int  nsc_ircc_net_open(struct net_device *dev);
 static int  nsc_ircc_net_close(struct net_device *dev);
 static int  nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
 
 /* Globals */
 static int pnp_registered;
        dev->open            = nsc_ircc_net_open;
        dev->stop            = nsc_ircc_net_close;
        dev->do_ioctl        = nsc_ircc_net_ioctl;
-       dev->get_stats       = nsc_ircc_net_get_stats;
 
        err = register_netdev(dev);
        if (err) {
        self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 
                                           self->tx_buff.truesize);
 
-       self->stats.tx_bytes += self->tx_buff.len;
+       dev->stats.tx_bytes += self->tx_buff.len;
        
        /* Add interrupt on tx low level (will fire immediately) */
        switch_bank(iobase, BANK0);
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
        self->tx_fifo.tail += skb->len;
 
-       self->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
 
        skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
                      skb->len);
        
        /* Check for underrrun! */
        if (inb(iobase+ASCR) & ASCR_TXUR) {
-               self->stats.tx_errors++;
-               self->stats.tx_fifo_errors++;
+               self->netdev->stats.tx_errors++;
+               self->netdev->stats.tx_fifo_errors++;
                
                /* Clear bit, by writing 1 into it */
                outb(ASCR_TXUR, iobase+ASCR);
        } else {
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
        }
 
        /* Finished with this frame, so prepare for next */
                if (status & FRM_ST_ERR_MSK) {
                        if (status & FRM_ST_LOST_FR) {
                                /* Add number of lost frames to stats */
-                               self->stats.rx_errors += len;   
+                               self->netdev->stats.rx_errors += len;
                        } else {
                                /* Skip frame */
-                               self->stats.rx_errors++;
+                               self->netdev->stats.rx_errors++;
                                
                                self->rx_buff.data += len;
                        
                                if (status & FRM_ST_MAX_LEN)
-                                       self->stats.rx_length_errors++;
+                                       self->netdev->stats.rx_length_errors++;
                                
                                if (status & FRM_ST_PHY_ERR) 
-                                       self->stats.rx_frame_errors++;
+                                       self->netdev->stats.rx_frame_errors++;
                                
                                if (status & FRM_ST_BAD_CRC) 
-                                       self->stats.rx_crc_errors++;
+                                       self->netdev->stats.rx_crc_errors++;
                        }
                        /* The errors below can be reported in both cases */
                        if (status & FRM_ST_OVR1)
-                               self->stats.rx_fifo_errors++;                  
+                               self->netdev->stats.rx_fifo_errors++;
                        
                        if (status & FRM_ST_OVR2)
-                               self->stats.rx_fifo_errors++;
+                               self->netdev->stats.rx_fifo_errors++;
                } else {
                        /*  
                         * First we must make sure that the frame we
                                IRDA_WARNING("%s(), memory squeeze, "
                                             "dropping frame.\n",
                                             __func__);
-                               self->stats.rx_dropped++;
+                               self->netdev->stats.rx_dropped++;
 
                                /* Restore bank register */
                                outb(bank, iobase+BSR);
 
                        /* Move to next frame */
                        self->rx_buff.data += len;
-                       self->stats.rx_bytes += len;
-                       self->stats.rx_packets++;
+                       self->netdev->stats.rx_bytes += len;
+                       self->netdev->stats.rx_packets++;
 
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
        /*  Receive all characters in Rx FIFO */
        do {
                byte = inb(iobase+RXD);
-               async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
-                                 byte);
+               async_unwrap_char(self->netdev, &self->netdev->stats,
+                                 &self->rx_buff, byte);
        } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */      
 }
 
                        self->ier = IER_TXLDL_IE;
                else { 
 
-                       self->stats.tx_packets++;
+                       self->netdev->stats.tx_packets++;
                        netif_wake_queue(self->netdev);
                        self->ier = IER_TXEMP_IE;
                }
        return ret;
 }
 
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
-{
-       struct nsc_ircc_cb *self = netdev_priv(dev);
-       
-       return &self->stats;
-}
-
 static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct nsc_ircc_cb *self = platform_get_drvdata(dev);
 
        struct tx_fifo tx_fifo;    /* Info about frames to be transmitted */
 
        struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-       struct net_device_stats stats;
        
        struct irlap_cb *irlap;    /* The link layer we are binded to */
        struct qos_info qos;       /* QoS capabilities for this device */
 
        int                     txdma;
        int                     rxdma;
 
-       struct net_device_stats stats;
        struct irlap_cb         *irlap;
        struct qos_info         qos;
 
                        data = STRBR;
                        if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
                                printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
-                               si->stats.rx_errors++;
+                               dev->stats.rx_errors++;
                                if (lsr & LSR_FE)
-                                       si->stats.rx_frame_errors++;
+                                       dev->stats.rx_frame_errors++;
                                if (lsr & LSR_OE)
-                                       si->stats.rx_fifo_errors++;
+                                       dev->stats.rx_fifo_errors++;
                        } else {
-                               si->stats.rx_bytes++;
-                               async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+                               dev->stats.rx_bytes++;
+                               async_unwrap_char(dev, &dev->stats,
+                                                 &si->rx_buff, data);
                        }
                        lsr = STLSR;
                }
 
        case 0x0C: /* Character Timeout Indication */
                do  {
-                   si->stats.rx_bytes++;
-                   async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
+                   dev->stats.rx_bytes++;
+                   async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
                } while (STLSR & LSR_DR);
                si->last_oscr = OSCR;
                break;
                }
 
                if (si->tx_buff.len == 0) {
-                       si->stats.tx_packets++;
-                       si->stats.tx_bytes += si->tx_buff.data -
-                                             si->tx_buff.head;
+                       dev->stats.tx_packets++;
+                       dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
 
                         /* We need to ensure that the transmitter has finished. */
                        while ((STLSR & LSR_TEMT) == 0)
        DCSR(channel) = dcsr & ~DCSR_RUN;
 
        if (dcsr & DCSR_ENDINTR)  {
-               si->stats.tx_packets++;
-               si->stats.tx_bytes += si->dma_tx_buff_len;
+               dev->stats.tx_packets++;
+               dev->stats.tx_bytes += si->dma_tx_buff_len;
        } else {
-               si->stats.tx_errors++;
+               dev->stats.tx_errors++;
        }
 
        while (ICSR1 & ICSR1_TBY)
                data = ICDR;
 
                if (stat & (ICSR1_CRE | ICSR1_ROR)) {
-                       si->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (stat & ICSR1_CRE) {
                                printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
-                               si->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        }
                        if (stat & ICSR1_ROR) {
                                printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
-                               si->stats.rx_over_errors++;
+                               dev->stats.rx_over_errors++;
                        }
                } else  {
                        si->dma_rx_buff[len++] = data;
 
                if (icsr0 & ICSR0_FRE) {
                        printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
-                       si->stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        return;
                }
 
                skb = alloc_skb(len+1,GFP_ATOMIC);
                if (!skb)  {
                        printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
-                       si->stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        return;
                }
 
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
 
-               si->stats.rx_packets++;
-               si->stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
        }
 }
 
        if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
                if (icsr0 & ICSR0_FRE) {
                        printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
-                       si->stats.rx_frame_errors++;
+                       dev->stats.rx_frame_errors++;
                } else {
                        printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
-                       si->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                }
                ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
        }
        return ret;
 }
 
-static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
-{
-       struct pxa_irda *si = netdev_priv(dev);
-       return &si->stats;
-}
-
 static void pxa_irda_startup(struct pxa_irda *si)
 {
        /* Disable STUART interrupts */
        dev->open               = pxa_irda_start;
        dev->stop               = pxa_irda_stop;
        dev->do_ioctl           = pxa_irda_ioctl;
-       dev->get_stats          = pxa_irda_stats;
 
        irda_init_max_qos_capabilies(&si->qos);
 
 
        dma_regs_t              *txdma;
        dma_regs_t              *rxdma;
 
-       struct net_device_stats stats;
        struct device           *dev;
        struct irda_platform_data *pdata;
        struct irlap_cb         *irlap;
                data = Ser2UTDR;
 
                if (stat & (UTSR1_FRE | UTSR1_ROR)) {
-                       si->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (stat & UTSR1_FRE)
-                               si->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                        if (stat & UTSR1_ROR)
-                               si->stats.rx_fifo_errors++;
+                               dev->stats.rx_fifo_errors++;
                } else
-                       async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+                       async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
 
                status = Ser2UTSR0;
        }
                 * There are at least 4 bytes in the FIFO.  Read 3 bytes
                 * and leave the rest to the block below.
                 */
-               async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-               async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-               async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+               async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+               async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+               async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
        }
 
        if (status & (UTSR0_RFS | UTSR0_RID)) {
                 * Fifo contains more than 1 character.
                 */
                do {
-                       async_unwrap_char(dev, &si->stats, &si->rx_buff,
+                       async_unwrap_char(dev, &dev->stats, &si->rx_buff,
                                          Ser2UTDR);
                } while (Ser2UTSR1 & UTSR1_RNE);
 
                } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
 
                if (si->tx_buff.len == 0) {
-                       si->stats.tx_packets++;
-                       si->stats.tx_bytes += si->tx_buff.data -
+                       dev->stats.tx_packets++;
+                       dev->stats.tx_bytes += si->tx_buff.data -
                                              si->tx_buff.head;
 
                        /*
                data = Ser2HSDR;
 
                if (stat & (HSSR1_CRE | HSSR1_ROR)) {
-                       si->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (stat & HSSR1_CRE)
-                               si->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        if (stat & HSSR1_ROR)
-                               si->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                } else
                        skb->data[len++] = data;
 
                skb->dev = dev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
-               si->stats.rx_packets++;
-               si->stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
 
                /*
                 * Before we pass the buffer up, allocate a new one.
         * from the fifo.
         */
        if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
-               si->stats.rx_errors++;
+               dev->stats.rx_errors++;
 
                if (Ser2HSSR0 & HSSR0_FRE)
-                       si->stats.rx_frame_errors++;
+                       dev->stats.rx_frame_errors++;
 
                /*
                 * Clear out the DMA...
         */
        if (skb) {
                dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
-               si->stats.tx_packets ++;
-               si->stats.tx_bytes += skb->len;
+               dev->stats.tx_packets++;
+               dev->stats.tx_bytes += skb->len;
                dev_kfree_skb_irq(skb);
        }
 
        return ret;
 }
 
-static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
-{
-       struct sa1100_irda *si = netdev_priv(dev);
-       return &si->stats;
-}
-
 static int sa1100_irda_start(struct net_device *dev)
 {
        struct sa1100_irda *si = netdev_priv(dev);
        dev->open               = sa1100_irda_start;
        dev->stop               = sa1100_irda_stop;
        dev->do_ioctl           = sa1100_irda_ioctl;
-       dev->get_stats          = sa1100_irda_stats;
        dev->irq                = IRQ_Ser2ICP;
 
        irda_init_max_qos_capabilies(&si->qos);
 
 
 struct sir_dev {
        struct net_device *netdev;
-       struct net_device_stats stats;
 
        struct irlap_cb    *irlap;
 
 
                        if ((skb=dev->tx_skb) != NULL) {
                                dev->tx_skb = NULL;
                                dev_kfree_skb_any(skb);
-                               dev->stats.tx_errors++;               
-                               dev->stats.tx_dropped++;                      
+                               dev->netdev->stats.tx_errors++;
+                               dev->netdev->stats.tx_dropped++;
                        }
                        dev->tx_buff.len = 0;
                }
                
        if ((skb=dev->tx_skb) != NULL) {
                dev->tx_skb = NULL;
-               dev->stats.tx_packets++;                      
-               dev->stats.tx_bytes += skb->len;
+               dev->netdev->stats.tx_packets++;
+               dev->netdev->stats.tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }
 
                 * just update stats and set media busy
                 */
                irda_device_set_media_busy(dev->netdev, TRUE);
-               dev->stats.rx_dropped++;
+               dev->netdev->stats.rx_dropped++;
                IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
                return 0;
        }
        if (likely(atomic_read(&dev->enable_rx))) {
                while (count--)
                        /* Unwrap and destuff one byte */
-                       async_unwrap_char(dev->netdev, &dev->stats, 
+                       async_unwrap_char(dev->netdev, &dev->netdev->stats,
                                          &dev->rx_buff, *cp++);
        } else {
                while (count--) {
 
 /* callbacks from network layer */
 
-static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
-{
-       struct sir_dev *dev = netdev_priv(ndev);
-
-       return (dev) ? &dev->stats : NULL;
-}
-
 static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct sir_dev *dev = netdev_priv(ndev);
         */
        atomic_set(&dev->enable_rx, 0);
        if (unlikely(sirdev_is_receiving(dev)))
-               dev->stats.collisions++;
+               dev->netdev->stats.collisions++;
 
        actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 
                IRDA_ERROR("%s: drv->do_write failed (%d)\n",
                           __func__, actual);
                dev_kfree_skb_any(skb);
-               dev->stats.tx_errors++;               
-               dev->stats.tx_dropped++;                      
+               dev->netdev->stats.tx_errors++;
+               dev->netdev->stats.tx_dropped++;
                netif_wake_queue(ndev);
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);
        ndev->hard_start_xmit = sirdev_hard_xmit;
        ndev->open = sirdev_open;
        ndev->stop = sirdev_close;
-       ndev->get_stats = sirdev_get_stats;
        ndev->do_ioctl = sirdev_ioctl;
 
        if (register_netdev(ndev)) {
 
 /* Private data for each instance */
 struct smsc_ircc_cb {
        struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-       struct net_device_stats stats;
        struct irlap_cb    *irlap; /* The link layer we are binded to */
 
        chipio_t io;               /* IrDA controller information */
 #if SMSC_IRCC2_C_NET_TIMEOUT
 static void smsc_ircc_timeout(struct net_device *dev);
 #endif
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
 static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
 static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
 static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
        dev->open            = smsc_ircc_net_open;
        dev->stop            = smsc_ircc_net_close;
        dev->do_ioctl        = smsc_ircc_net_ioctl;
-       dev->get_stats       = smsc_ircc_net_get_stats;
 
        self = netdev_priv(dev);
        self->netdev = dev;
        return ret;
 }
 
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
-{
-       struct smsc_ircc_cb *self = netdev_priv(dev);
-
-       return &self->stats;
-}
-
 #if SMSC_IRCC2_C_NET_TIMEOUT
 /*
  * Function smsc_ircc_timeout (struct net_device *dev)
        self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                           self->tx_buff.truesize);
 
-       self->stats.tx_bytes += self->tx_buff.len;
+       dev->stats.tx_bytes += self->tx_buff.len;
 
        /* Turn on transmit finished interrupt. Will fire immediately!  */
        outb(UART_IER_THRI, self->io.sir_base + UART_IER);
        /* Check for underrun! */
        register_bank(iobase, 0);
        if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
-               self->stats.tx_errors++;
-               self->stats.tx_fifo_errors++;
+               self->netdev->stats.tx_errors++;
+               self->netdev->stats.tx_fifo_errors++;
 
                /* Reset error condition */
                register_bank(iobase, 0);
                outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
                outb(0x00, iobase + IRCC_MASTER);
        } else {
-               self->stats.tx_packets++;
-               self->stats.tx_bytes += self->tx_buff.len;
+               self->netdev->stats.tx_packets++;
+               self->netdev->stats.tx_bytes += self->tx_buff.len;
        }
 
        /* Check if it's time to change the speed */
 
        /* Look for errors */
        if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
-               self->stats.rx_errors++;
+               self->netdev->stats.rx_errors++;
                if (lsr & IRCC_LSR_FRAME_ERROR)
-                       self->stats.rx_frame_errors++;
+                       self->netdev->stats.rx_frame_errors++;
                if (lsr & IRCC_LSR_CRC_ERROR)
-                       self->stats.rx_crc_errors++;
+                       self->netdev->stats.rx_crc_errors++;
                if (lsr & IRCC_LSR_SIZE_ERROR)
-                       self->stats.rx_length_errors++;
+                       self->netdev->stats.rx_length_errors++;
                if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
-                       self->stats.rx_length_errors++;
+                       self->netdev->stats.rx_length_errors++;
                return;
        }
 
        skb_reserve(skb, 1);
 
        memcpy(skb_put(skb, len), self->rx_buff.data, len);
-       self->stats.rx_packets++;
-       self->stats.rx_bytes += len;
+       self->netdev->stats.rx_packets++;
+       self->netdev->stats.rx_bytes += len;
 
        skb->dev = self->netdev;
        skb_reset_mac_header(skb);
          * async_unwrap_char will deliver all found frames
         */
        do {
-               async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+               async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
                                  inb(iobase + UART_RX));
 
                /* Make sure we don't stay here to long */
                        /* Tell network layer that we want more frames */
                        netif_wake_queue(self->netdev);
                }
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
 
                if (self->io.speed <= 115200) {
                        /*
 
         struct usb_device *usbdev;      /* init: probe_irda */
         struct net_device *netdev;      /* network layer */
         struct irlap_cb   *irlap;       /* The link layer we are binded to */
-        struct net_device_stats stats; /* network statistics */
+
         struct qos_info   qos;
        unsigned          speed;        /* Current speed */
 
                pr_debug("%s: short frame len %d\n",
                         stir->netdev->name, len);
 
-               ++stir->stats.rx_errors;
-               ++stir->stats.rx_length_errors;
+               ++stir->netdev->stats.rx_errors;
+               ++stir->netdev->stats.rx_length_errors;
                return;
        }
 
        fcs = ~(crc32_le(~0, rx_buff->data, len));
        if (fcs != get_unaligned_le32(rx_buff->data + len)) {
                pr_debug("crc error calc 0x%x len %d\n", fcs, len);
-               stir->stats.rx_errors++;
-               stir->stats.rx_crc_errors++;
+               stir->netdev->stats.rx_errors++;
+               stir->netdev->stats.rx_crc_errors++;
                return;
        }
 
        if (len < IRDA_RX_COPY_THRESHOLD) {
                nskb = dev_alloc_skb(len + 1);
                if (unlikely(!nskb)) {
-                       ++stir->stats.rx_dropped;
+                       ++stir->netdev->stats.rx_dropped;
                        return;
                }
                skb_reserve(nskb, 1);
        } else {
                nskb = dev_alloc_skb(rx_buff->truesize);
                if (unlikely(!nskb)) {
-                       ++stir->stats.rx_dropped;
+                       ++stir->netdev->stats.rx_dropped;
                        return;
                }
                skb_reserve(nskb, 1);
 
        netif_rx(skb);
 
-       stir->stats.rx_packets++;
-       stir->stats.rx_bytes += len;
+       stir->netdev->stats.rx_packets++;
+       stir->netdev->stats.rx_bytes += len;
 
        rx_buff->data = rx_buff->head;
        rx_buff->len = 0;
                if (unlikely(rx_buff->len >= rx_buff->truesize)) {
                        pr_debug("%s: fir frame exceeds %d\n",
                                 stir->netdev->name, rx_buff->truesize);
-                       ++stir->stats.rx_over_errors;
+                       ++stir->netdev->stats.rx_over_errors;
                        goto error_recovery;
                }
 
                continue;
 
        frame_error:
-               ++stir->stats.rx_frame_errors;
+               ++stir->netdev->stats.rx_frame_errors;
 
        error_recovery:
-               ++stir->stats.rx_errors;
+               ++stir->netdev->stats.rx_errors;
                rx_buff->state = OUTSIDE_FRAME;
                rx_buff->in_frame = FALSE;
        }
        int i;
 
        for (i = 0; i < len; i++)
-               async_unwrap_char(stir->netdev, &stir->stats,
+               async_unwrap_char(stir->netdev, &stir->netdev->stats,
                                  &stir->rx_buff, bytes[i]);
 }
 
        usb_kill_urb(stir->rx_urb);
 
        if (stir->rx_buff.in_frame) 
-               stir->stats.collisions++;
+               stir->netdev->stats.collisions++;
 }
 /*
  * Wrap data in socket buffer and send it.
        if (!first_frame)
                fifo_txwait(stir, wraplen);
 
-       stir->stats.tx_packets++;
-       stir->stats.tx_bytes += skb->len;
+       stir->netdev->stats.tx_packets++;
+       stir->netdev->stats.tx_bytes += skb->len;
        stir->netdev->trans_start = jiffies;
        pr_debug("send %d (%d)\n", skb->len, wraplen);
 
        if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
                         stir->io_buf, wraplen,
                         NULL, TRANSMIT_TIMEOUT))
-               stir->stats.tx_errors++;
+               stir->netdev->stats.tx_errors++;
 }
 
 /*
        return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
-{
-       struct stir_cb *stir = netdev_priv(netdev);
-       return &stir->stats;
-}
-
 /*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
        net->hard_start_xmit = stir_hard_xmit;
        net->open            = stir_net_open;
        net->stop            = stir_net_close;
-       net->get_stats       = stir_net_get_stats;
        net->do_ioctl        = stir_net_ioctl;
 
        ret = register_netdev(net);
 
 static int via_ircc_net_close(struct net_device *dev);
 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd);
-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-                                                      *dev);
 static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id);
 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
        dev->open = via_ircc_net_open;
        dev->stop = via_ircc_net_close;
        dev->do_ioctl = via_ircc_net_ioctl;
-       dev->get_stats = via_ircc_net_get_stats;
 
        err = register_netdev(dev);
        if (err)
            async_wrap_skb(skb, self->tx_buff.data,
                           self->tx_buff.truesize);
 
-       self->stats.tx_bytes += self->tx_buff.len;
+       dev->stats.tx_bytes += self->tx_buff.len;
        /* Send this frame with old speed */
        SetBaudRate(iobase, self->io.speed);
        SetPulseWidth(iobase, 12);
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 
        self->tx_fifo.tail += skb->len;
-       self->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
        skb_copy_from_linear_data(skb,
                      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
        self->tx_fifo.len++;
        /* Clear bit, by writing 1 into it */
        Tx_status = GetTXStatus(iobase);
        if (Tx_status & 0x08) {
-               self->stats.tx_errors++;
-               self->stats.tx_fifo_errors++;
+               self->netdev->stats.tx_errors++;
+               self->netdev->stats.tx_fifo_errors++;
                hwreset(self);
 // how to clear underrrun ?
        } else {
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
                ResetChip(iobase, 3);
                ResetChip(iobase, 4);
        }
                }
                // Move to next frame 
                self->rx_buff.data += len;
-               self->stats.rx_bytes += len;
-               self->stats.rx_packets++;
+               self->netdev->stats.rx_bytes += len;
+               self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                 */
                if ((skb == NULL) || (skb->data == NULL)
                    || (self->rx_buff.data == NULL) || (len < 6)) {
-                       self->stats.rx_dropped++;
+                       self->netdev->stats.rx_dropped++;
                        return TRUE;
                }
                skb_reserve(skb, 1);
 
                // Move to next frame 
                self->rx_buff.data += len;
-               self->stats.rx_bytes += len;
-               self->stats.rx_packets++;
+               self->netdev->stats.rx_bytes += len;
+               self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
        IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
 
        if ((len - 4) < 2) {
-               self->stats.rx_dropped++;
+               self->netdev->stats.rx_dropped++;
                return FALSE;
        }
 
        skb = dev_alloc_skb(len + 1);
        if (skb == NULL) {
-               self->stats.rx_dropped++;
+               self->netdev->stats.rx_dropped++;
                return FALSE;
        }
        skb_reserve(skb, 1);
                st_fifo->tail = 0;
        // Move to next frame 
        self->rx_buff.data += len;
-       self->stats.rx_bytes += len;
-       self->stats.rx_packets++;
+       self->netdev->stats.rx_bytes += len;
+       self->netdev->stats.rx_packets++;
        skb->dev = self->netdev;
        skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
                         */
                        if ((skb == NULL) || (skb->data == NULL)
                            || (self->rx_buff.data == NULL) || (len < 6)) {
-                               self->stats.rx_dropped++;
+                               self->netdev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, 1);
 
                        // Move to next frame 
                        self->rx_buff.data += len;
-                       self->stats.rx_bytes += len;
-                       self->stats.rx_packets++;
+                       self->netdev->stats.rx_bytes += len;
+                       self->netdev->stats.rx_packets++;
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
 
        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
-       self->stats.rx_packets = 0;
+       dev->stats.rx_packets = 0;
        IRDA_ASSERT(self != NULL, return 0;);
        iobase = self->io.fir_base;
        if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
        return ret;
 }
 
-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-                                                      *dev)
-{
-       struct via_ircc_cb *self = netdev_priv(dev);
-
-       return &self->stats;
-}
-
 MODULE_AUTHOR("VIA Technologies,inc");
 MODULE_DESCRIPTION("VIA IrDA Device Driver");
 MODULE_LICENSE("GPL");
 
        struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
 
        struct net_device *netdev;      /* Yes! we are some kind of netdevice */
-       struct net_device_stats stats;
 
        struct irlap_cb *irlap; /* The link layer we are bound to */
        struct qos_info qos;    /* QoS capabilities for this device */
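
With via_ircc_net_get_stats() and the private stats copy in struct via_ircc_cb both removed, the driver no longer installs any get_stats handler; for this kernel generation the networking core reports the counters embedded in struct net_device when a driver does not provide its own. A minimal sketch of the resulting accounting pattern, using hypothetical names (my_xmit_complete, hw_error, len), not code taken from the driver:

/* Illustrative only: "my_xmit_complete", "hw_error" and "len" are
 * hypothetical names, not via-ircc code.
 */
static void my_xmit_complete(struct net_device *dev, bool hw_error, int len)
{
	if (hw_error) {
		dev->stats.tx_errors++;
		dev->stats.tx_fifo_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	}
	/* No dev->get_stats assignment is needed: the core falls back to
	 * &dev->stats for drivers that do not install a handler.
	 */
}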
 
                now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);    
 
        seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
-               idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
-               idev->stats.rx_dropped);
+               ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
+               ndev->stats.rx_dropped);
        seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
-               idev->stats.rx_over_errors, idev->stats.rx_length_errors,
-               idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
+               ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
+               ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
        seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
-               idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
-               idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
+               ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
+               ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
 
 }
                
 
                if (ret < 0) {
                        ret = -ret;
-                       idev->stats.rx_errors++;
+                       ndev->stats.rx_errors++;
                        if (ret & VLSI_RX_DROP)  
-                               idev->stats.rx_dropped++;
+                               ndev->stats.rx_dropped++;
                        if (ret & VLSI_RX_OVER)  
-                               idev->stats.rx_over_errors++;
+                               ndev->stats.rx_over_errors++;
                        if (ret & VLSI_RX_LENGTH)  
-                               idev->stats.rx_length_errors++;
+                               ndev->stats.rx_length_errors++;
                        if (ret & VLSI_RX_FRAME)  
-                               idev->stats.rx_frame_errors++;
+                               ndev->stats.rx_frame_errors++;
                        if (ret & VLSI_RX_CRC)  
-                               idev->stats.rx_crc_errors++;
+                               ndev->stats.rx_crc_errors++;
                }
                else if (ret > 0) {
-                       idev->stats.rx_packets++;
-                       idev->stats.rx_bytes += ret;
+                       ndev->stats.rx_packets++;
+                       ndev->stats.rx_bytes += ret;
                }
        }
 
 
 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
 {
+       struct net_device *ndev = pci_get_drvdata(idev->pdev);
        struct vlsi_ring *r = idev->rx_ring;
        struct ring_descr *rd;
        int ret;
 
                if (ret < 0) {
                        ret = -ret;
-                       idev->stats.rx_errors++;
+                       ndev->stats.rx_errors++;
                        if (ret & VLSI_RX_DROP)  
-                               idev->stats.rx_dropped++;
+                               ndev->stats.rx_dropped++;
                        if (ret & VLSI_RX_OVER)  
-                               idev->stats.rx_over_errors++;
+                               ndev->stats.rx_over_errors++;
                        if (ret & VLSI_RX_LENGTH)  
-                               idev->stats.rx_length_errors++;
+                               ndev->stats.rx_length_errors++;
                        if (ret & VLSI_RX_FRAME)  
-                               idev->stats.rx_frame_errors++;
+                               ndev->stats.rx_frame_errors++;
                        if (ret & VLSI_RX_CRC)  
-                               idev->stats.rx_crc_errors++;
+                               ndev->stats.rx_crc_errors++;
                }
                else if (ret > 0) {
-                       idev->stats.rx_packets++;
-                       idev->stats.rx_bytes += ret;
+                       ndev->stats.rx_packets++;
+                       ndev->stats.rx_bytes += ret;
                }
        }
 }
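
vlsi_unarm_rx() and vlsi_unarm_tx() are handed only the private vlsi_irda_dev_t, so the added declarations recover the net_device through the PCI driver data before touching the embedded counters. This presumes the probe path stored the net_device with pci_set_drvdata(); a hedged sketch of that lookup, written as a hypothetical helper rather than the driver's actual code:

/* Hypothetical helper: reach the embedded counters from the private
 * state alone, assuming pci_set_drvdata(pdev, ndev) was done at probe.
 */
static struct net_device_stats *vlsi_dev_stats(vlsi_irda_dev_t *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);

	return &ndev->stats;
}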
 drop:
        IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
        dev_kfree_skb_any(skb);
-       idev->stats.tx_errors++;
-       idev->stats.tx_dropped++;
+       ndev->stats.tx_errors++;
+       ndev->stats.tx_dropped++;
        /* Don't even think about returning NET_XMIT_DROP (=1) here!
         * In fact any retval!=0 causes the packet scheduler to requeue the
         * packet for later retry of transmission - which isn't exactly
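
The comment above (truncated by the hunk context) is the reason the drop path both frees the skb and still returns success: any non-zero return value makes the packet scheduler requeue a buffer that has already been freed. A sketch of the convention with the converted counters, using a hypothetical my_xmit() rather than vlsi_hard_start_xmit():

static int my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (skb->len == 0) {		/* stand-in for a real sanity check */
		dev_kfree_skb_any(skb);	/* the skb is consumed here ... */
		ndev->stats.tx_errors++;
		ndev->stats.tx_dropped++;
		return 0;		/* ... so never NET_XMIT_DROP */
	}
	/* normal transmit path elided */
	return 0;
}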
 
                if (ret < 0) {
                        ret = -ret;
-                       idev->stats.tx_errors++;
+                       ndev->stats.tx_errors++;
                        if (ret & VLSI_TX_DROP)
-                               idev->stats.tx_dropped++;
+                               ndev->stats.tx_dropped++;
                        if (ret & VLSI_TX_FIFO)
-                               idev->stats.tx_fifo_errors++;
+                               ndev->stats.tx_fifo_errors++;
                }
                else if (ret > 0){
-                       idev->stats.tx_packets++;
-                       idev->stats.tx_bytes += ret;
+                       ndev->stats.tx_packets++;
+                       ndev->stats.tx_bytes += ret;
                }
        }
 
 
 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 {
+       struct net_device *ndev = pci_get_drvdata(idev->pdev);
        struct vlsi_ring *r = idev->tx_ring;
        struct ring_descr *rd;
        int ret;
 
                if (ret < 0) {
                        ret = -ret;
-                       idev->stats.tx_errors++;
+                       ndev->stats.tx_errors++;
                        if (ret & VLSI_TX_DROP)
-                               idev->stats.tx_dropped++;
+                               ndev->stats.tx_dropped++;
                        if (ret & VLSI_TX_FIFO)
-                               idev->stats.tx_fifo_errors++;
+                               ndev->stats.tx_fifo_errors++;
                }
                else if (ret > 0){
-                       idev->stats.tx_packets++;
-                       idev->stats.tx_bytes += ret;
+                       ndev->stats.tx_packets++;
+                       ndev->stats.tx_bytes += ret;
                }
        }
 
 
 /**************************************************************/
 
-static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
-{
-       vlsi_irda_dev_t *idev = netdev_priv(ndev);
-
-       return &idev->stats;
-}
-
 static void vlsi_tx_timeout(struct net_device *ndev)
 {
        vlsi_irda_dev_t *idev = netdev_priv(ndev);
  
        ndev->open            = vlsi_open;
        ndev->stop            = vlsi_close;
-       ndev->get_stats       = vlsi_get_stats;
        ndev->hard_start_xmit = vlsi_hard_start_xmit;
        ndev->do_ioctl        = vlsi_ioctl;
        ndev->tx_timeout      = vlsi_tx_timeout;
 
 
 typedef struct vlsi_irda_dev {
        struct pci_dev          *pdev;
-       struct net_device_stats stats;
 
        struct irlap_cb         *irlap;
 
 
 static int  w83977af_net_open(struct net_device *dev);
 static int  w83977af_net_close(struct net_device *dev);
 static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);
 
 /*
  * Function w83977af_init ()
        dev->open            = w83977af_net_open;
        dev->stop            = w83977af_net_close;
        dev->do_ioctl        = w83977af_net_ioctl;
-       dev->get_stats       = w83977af_net_get_stats;
 
        err = register_netdev(dev);
        if (err) {
        if (inb(iobase+AUDR) & AUDR_UNDR) {
                IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
                
-               self->stats.tx_errors++;
-               self->stats.tx_fifo_errors++;
+               self->netdev->stats.tx_errors++;
+               self->netdev->stats.tx_fifo_errors++;
 
                /* Clear bit, by writing 1 to it */
                outb(AUDR_UNDR, iobase+AUDR);
        } else
-               self->stats.tx_packets++;
+               self->netdev->stats.tx_packets++;
 
        
        if (self->new_speed) {
                if (status & FS_FO_ERR_MSK) {
                        if (status & FS_FO_LST_FR) {
                                /* Add number of lost frames to stats */
-                               self->stats.rx_errors += len;   
+                               self->netdev->stats.rx_errors += len;
                        } else {
                                /* Skip frame */
-                               self->stats.rx_errors++;
+                               self->netdev->stats.rx_errors++;
                                
                                self->rx_buff.data += len;
                                
                                if (status & FS_FO_MX_LEX)
-                                       self->stats.rx_length_errors++;
+                                       self->netdev->stats.rx_length_errors++;
                                
                                if (status & FS_FO_PHY_ERR) 
-                                       self->stats.rx_frame_errors++;
+                                       self->netdev->stats.rx_frame_errors++;
                                
                                if (status & FS_FO_CRC_ERR) 
-                                       self->stats.rx_crc_errors++;
+                                       self->netdev->stats.rx_crc_errors++;
                        }
                        /* The errors below can be reported in both cases */
                        if (status & FS_FO_RX_OV)
-                               self->stats.rx_fifo_errors++;
+                               self->netdev->stats.rx_fifo_errors++;
                        
                        if (status & FS_FO_FSF_OV)
-                               self->stats.rx_fifo_errors++;
+                               self->netdev->stats.rx_fifo_errors++;
                        
                } else {
                        /* Check if we have transferred all data to memory */
 
                        /* Move to next frame */
                        self->rx_buff.data += len;
-                       self->stats.rx_packets++;
+                       self->netdev->stats.rx_packets++;
                        
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
        /*  Receive all characters in Rx FIFO */
        do {
                byte = inb(iobase+RBR);
-               async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
+               async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
                                  byte);
        } while (inb(iobase+USR) & USR_RDR); /* Data available */       
 }
                        outb(AUDR_SFEND, iobase+AUDR);
                        outb(set, iobase+SSR); 
 
-                       self->stats.tx_packets++;
+                       self->netdev->stats.tx_packets++;
 
                        /* Feed me more packets */
                        netif_wake_queue(self->netdev);
        return ret;
 }
 
-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
-{
-       struct w83977af_ir *self = netdev_priv(dev);
-       
-       return &self->stats;
-}
-
 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
 MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
 MODULE_LICENSE("GPL");
 
        int tx_len;          /* Number of frames in tx_buff */
 
        struct net_device *netdev; /* Yes! we are some kind of netdevice */
-       struct net_device_stats stats;
        
        struct irlap_cb    *irlap; /* The link layer we are bound to */
        struct qos_info     qos;   /* QoS capabilities for this device */