        u32 skb_length;
        u32 pkt_sz;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+       struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
 
 /* We will need at most two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 
 drop:
        if (ret == 0) {
-               net->stats.tx_bytes += skb_length;
-               net->stats.tx_packets++;
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->packets++;
+               tx_stats->bytes += skb_length;
+               u64_stats_update_end(&tx_stats->syncp);
        } else {
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
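The counters above feed a new per-CPU structure whose definition is not part of this excerpt (in the full patch it would live alongside struct net_device_context in hyperv_net.h). Reconstructed from the accessors used here, a sketch of the two additions looks like this; the __percpu annotation is inferred from the this_cpu_ptr()/per_cpu_ptr() calls, not quoted from the patch:

	struct netvsc_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;	/* guards torn 64-bit reads on 32-bit hosts */
	};

	struct net_device_context {
		/* ... existing fields elided ... */
		struct netvsc_stats __percpu *tx_stats;
		struct netvsc_stats __percpu *rx_stats;
	};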
                                struct ndis_tcp_ip_checksum_info *csum_info)
 {
        struct net_device *net;
+       struct net_device_context *net_device_ctx;
        struct sk_buff *skb;
+       struct netvsc_stats *rx_stats;
 
        net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
        if (!net || net->reg_state != NETREG_REGISTERED) {
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }
+       net_device_ctx = netdev_priv(net);
+       rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
 
        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        skb_record_rx_queue(skb, packet->channel->
                            offermsg.offer.sub_channel_index);
 
-       net->stats.rx_packets++;
-       net->stats.rx_bytes += packet->total_data_buflen;
+       u64_stats_update_begin(&rx_stats->syncp);
+       rx_stats->packets++;
+       rx_stats->bytes += packet->total_data_buflen;
+       u64_stats_update_end(&rx_stats->syncp);
 
        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
        return 0;
 }
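The RX path repeats the same begin/update/end sequence as the TX path. A hypothetical helper (netvsc_stats_add is an invented name, not in the patch) makes the writer-side pattern explicit:

	static inline void netvsc_stats_add(struct netvsc_stats *stats, u64 bytes)
	{
		/* Writer side of the u64_stats seqcount: on 32-bit kernels this
		 * bumps a sequence counter so readers can detect a torn 64-bit
		 * update; on 64-bit kernels the begin/end calls compile away.
		 * Safe without extra locking because each update runs on the
		 * CPU that owns the per-CPU counters.
		 */
		u64_stats_update_begin(&stats->syncp);
		stats->packets++;
		stats->bytes += bytes;
		u64_stats_update_end(&stats->syncp);
	}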
 
+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+                                                   struct rtnl_link_stats64 *t)
+{
+       struct net_device_context *ndev_ctx = netdev_priv(net);
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+                                                           cpu);
+               struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+                                                           cpu);
+               u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+               t->tx_bytes     += tx_bytes;
+               t->tx_packets   += tx_packets;
+               t->rx_bytes     += rx_bytes;
+               t->rx_packets   += rx_packets;
+       }
+
+       t->tx_dropped   = net->stats.tx_dropped;
+       t->tx_errors    = net->stats.tx_errors;
+
+       t->rx_dropped   = net->stats.rx_dropped;
+       t->rx_errors    = net->stats.rx_errors;
+
+       return t;
+}
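Note the asymmetry: the per-CPU sums accumulate with += while the dropped/error fields are plain assignments. This relies on the core zeroing the buffer before calling into the driver; paraphrasing dev_get_stats() from net/core/dev.c of this era (a sketch, fallback branches elided):

	struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *storage)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_get_stats64) {
			memset(storage, 0, sizeof(*storage));	/* zeroed for the driver */
			ops->ndo_get_stats64(dev, storage);
		}
		/* ... ndo_get_stats / dev->stats fallbacks elided ... */
		return storage;
	}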
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
 {
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
        .ndo_select_queue =             netvsc_select_queue,
+       .ndo_get_stats64 =              netvsc_get_stats64,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller =          netvsc_poll_controller,
 #endif
                netdev_notify_peers(net);
 }
 
+static void netvsc_free_netdev(struct net_device *netdev)
+{
+       struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+       free_percpu(net_device_ctx->tx_stats);
+       free_percpu(net_device_ctx->rx_stats);
+       free_netdev(netdev);
+}
 
 static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);
 
+       net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+       if (!net_device_ctx->tx_stats) {
+               free_netdev(net);
+               return -ENOMEM;
+       }
+       net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+       if (!net_device_ctx->rx_stats) {
+               free_percpu(net_device_ctx->tx_stats);
+               free_netdev(net);
+               return -ENOMEM;
+       }
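netdev_alloc_pcpu_stats() from <linux/netdevice.h> both allocates the per-CPU area and seeds each CPU's seqcount; roughly (a sketch of the real macro, details elided):

	#define netdev_alloc_pcpu_stats(type)				\
	({								\
		typeof(type) __percpu *pcpu_stats = alloc_percpu(type);	\
		if (pcpu_stats) {					\
			int __cpu;					\
			for_each_possible_cpu(__cpu)			\
				u64_stats_init(				\
					&per_cpu_ptr(pcpu_stats, __cpu)->syncp); \
		}							\
		pcpu_stats;						\
	})

The macro hard-codes the member name syncp, which is why the sync field in struct netvsc_stats has to carry that exact name.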
+
        hv_set_drvdata(dev, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-               free_netdev(net);
+               netvsc_free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                rndis_filter_device_remove(dev);
-               free_netdev(net);
+               netvsc_free_netdev(net);
        } else {
                schedule_delayed_work(&net_device_ctx->dwork, 0);
        }
         */
        rndis_filter_device_remove(dev);
 
-       free_netdev(net);
+       netvsc_free_netdev(net);
        return 0;
 }