#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
 
+/* Expand to a gem_statistic initializer that sets only the ethtool name
+ * string; per-queue counters are maintained in software, so no other
+ * gem_statistic fields are needed.
+ */
+#define QUEUE_STAT_TITLE(title) {      \
+       .stat_string = title,                   \
+}
+
+/* Per-queue software statistics. Every field must be unsigned long:
+ * gem_update_stats() walks this struct as a flat array of unsigned long,
+ * starting at 'first' (which the union aliases onto the first counter),
+ * for QUEUE_STATS_LEN elements. Keep field order in sync with
+ * queue_statistics[] below.
+ */
+struct queue_stats {
+       union {
+               unsigned long first;
+               unsigned long rx_packets;
+       };
+       unsigned long rx_bytes;
+       unsigned long rx_dropped;
+       unsigned long tx_packets;
+       unsigned long tx_bytes;
+       unsigned long tx_dropped;
+};
+
+/* ethtool names for struct queue_stats fields, in declaration order */
+static const struct gem_statistic queue_statistics[] = {
+               QUEUE_STAT_TITLE("rx_packets"),
+               QUEUE_STAT_TITLE("rx_bytes"),
+               QUEUE_STAT_TITLE("rx_dropped"),
+               QUEUE_STAT_TITLE("tx_packets"),
+               QUEUE_STAT_TITLE("tx_bytes"),
+               QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
 struct macb;
 struct macb_queue;
 
        struct sk_buff          **rx_skbuff;
        void                    *rx_buffers;
        struct napi_struct      napi;
+       struct queue_stats stats;
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
        struct work_struct      tx_ts_task;
        int skb_length;                         /* saved skb length for pci_unmap_single */
        unsigned int            max_tx_length;
 
-       u64                     ethtool_stats[GEM_STATS_LEN];
+       u64                     ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
 
        unsigned int            rx_frm_len_mask;
        unsigned int            jumbo_max_len;
 
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
                                bp->dev->stats.tx_packets++;
+                               queue->stats.tx_packets++;
                                bp->dev->stats.tx_bytes += skb->len;
+                               queue->stats.tx_bytes += skb->len;
                        }
                } else {
                        /* "Buffers exhausted mid-frame" errors may only happen
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
                                bp->dev->stats.tx_packets++;
+                               queue->stats.tx_packets++;
                                bp->dev->stats.tx_bytes += skb->len;
+                               queue->stats.tx_bytes += skb->len;
                        }
 
                        /* Now we can safely release resources */
                        netdev_err(bp->dev,
                                   "not whole frame pointed by descriptor\n");
                        bp->dev->stats.rx_dropped++;
+                       queue->stats.rx_dropped++;
                        break;
                }
                skb = queue->rx_skbuff[entry];
                        netdev_err(bp->dev,
                                   "inconsistent Rx descriptor chain\n");
                        bp->dev->stats.rx_dropped++;
+                       queue->stats.rx_dropped++;
                        break;
                }
                /* now everything is ready for receiving packet */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                bp->dev->stats.rx_packets++;
+               queue->stats.rx_packets++;
                bp->dev->stats.rx_bytes += skb->len;
+               queue->stats.rx_bytes += skb->len;
 
                gem_ptp_do_rxstamp(bp, skb, desc);
 
 
 static void gem_update_stats(struct macb *bp)
 {
-       unsigned int i;
+       struct macb_queue *queue;
+       unsigned int i, q, idx;
+       unsigned long *stat;
+
        u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 
        for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
                        *(++p) += val;
                }
        }
+
+       idx = GEM_STATS_LEN;
+       for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+               for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+                       bp->ethtool_stats[idx++] = *stat;
 }
 
 static struct net_device_stats *gem_get_stats(struct macb *bp)
 
        bp = netdev_priv(dev);
        gem_update_stats(bp);
-       memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+       memcpy(data, &bp->ethtool_stats, sizeof(u64)
+                       * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
 }
 
static int gem_get_sset_count(struct net_device *dev, int sset)
{
+       struct macb *bp = netdev_priv(dev);
+
        switch (sset) {
        case ETH_SS_STATS:
-               return GEM_STATS_LEN;
+               /* MAC-level stats plus per-queue stats, counted only for
+                * the queues actually in use on this device.
+                */
+               return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
 
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
+       char stat_string[ETH_GSTRING_LEN];
+       struct macb *bp = netdev_priv(dev);
+       struct macb_queue *queue;
        unsigned int i;
+       unsigned int q;
 
        switch (sset) {
        case ETH_SS_STATS:
                for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
                        memcpy(p, gem_statistics[i].stat_string,
                               ETH_GSTRING_LEN);
+
+               /* Append a "q<N>_<stat>" name for each per-queue counter.
+                * %u (not %d): q is unsigned int, and a mismatched format
+                * specifier is a -Wformat warning / undefined behavior.
+                */
+               for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+                       for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+                               snprintf(stat_string, ETH_GSTRING_LEN, "q%u_%s",
+                                               q, queue_statistics[i].stat_string);
+                               memcpy(p, stat_string, ETH_GSTRING_LEN);
+                       }
+               }
                break;
        }
 }