]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
ixgbe: upgrade to 3.10.16.
authorJoe Jin <joe.jin@oracle.com>
Tue, 28 Aug 2012 05:58:38 +0000 (13:58 +0800)
committerJoe Jin <joe.jin@oracle.com>
Tue, 28 Aug 2012 07:20:55 +0000 (15:20 +0800)
Signed-off-by: Joe Jin <joe.jin@oracle.com>
39 files changed:
drivers/net/ixgbe/Makefile
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_82598.h [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_82599.h [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_api.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_api.h [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_common.h
drivers/net/ixgbe/ixgbe_dcb.c
drivers/net/ixgbe/ixgbe_dcb.h
drivers/net/ixgbe/ixgbe_dcb_82598.c
drivers/net/ixgbe/ixgbe_dcb_82598.h
drivers/net/ixgbe/ixgbe_dcb_82599.c
drivers/net/ixgbe/ixgbe_dcb_82599.h
drivers/net/ixgbe/ixgbe_dcb_nl.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_fcoe.h
drivers/net/ixgbe/ixgbe_lib.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_mbx.c
drivers/net/ixgbe/ixgbe_mbx.h
drivers/net/ixgbe/ixgbe_osdep.h [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_param.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_phy.c
drivers/net/ixgbe/ixgbe_phy.h
drivers/net/ixgbe/ixgbe_procfs.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_ptp.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_sriov.c
drivers/net/ixgbe/ixgbe_sriov.h
drivers/net/ixgbe/ixgbe_sysfs.c [new file with mode: 0644]
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbe/ixgbe_x540.c
drivers/net/ixgbe/ixgbe_x540.h [new file with mode: 0644]
drivers/net/ixgbe/kcompat.c [new file with mode: 0644]
drivers/net/ixgbe/kcompat.h [new file with mode: 0644]
drivers/net/ixgbe/kcompat_ethtool.c [new file with mode: 0644]

index 7a16177a12a5c15b298ecd4b0caf19005bd397f2..61cbf27beb85078912ff617c210962a0fee0576d 100644 (file)
@@ -20,7 +20,6 @@
 # the file called "COPYING".
 #
 # Contact Information:
-# Linux NICS <linux.nics@intel.com>
 # e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 #
@@ -34,7 +33,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_api.o ixgbe_param.o \
+              ixgbe_lib.o kcompat.o ixgbe_ptp.o ixgbe_sysfs.o ixgbe_procfs.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
index 4af4bc22dd1fa1d7a4d72a849ac3aba10d670dde..d5c26019927d299374f8215330d7e3e8d644d661 100644 (file)
 #ifndef _IXGBE_H_
 #define _IXGBE_H_
 
-#include <linux/bitops.h>
-#include <linux/types.h>
+#ifndef IXGBE_NO_LRO
+#include <net/tcp.h>
+#endif
+
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#ifdef HAVE_IRQ_AFFINITY_HINT
 #include <linux/cpumask.h>
-#include <linux/aer.h>
-#include <linux/if_vlan.h>
+#endif /* HAVE_IRQ_AFFINITY_HINT */
+#include <linux/vmalloc.h>
 
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IXGBE_DCA
+#include <linux/dca.h>
+#endif
 #include "ixgbe_dcb.h"
+
+#include "kcompat.h"
+
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 #define IXGBE_FCOE
 #include "ixgbe_fcoe.h"
 #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
-#ifdef CONFIG_IXGBE_DCA
-#include <linux/dca.h>
-#endif
 
-/* common prefix used by pr_<> macros */
-#undef pr_fmt
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "ixgbe_api.h"
+
+#define PFX "ixgbe: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+       ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+       printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+               __func__ , ## args)))
+
 
 /* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD                  512
-#define IXGBE_DEFAULT_TX_WORK              256
-#define IXGBE_MAX_TXD                     4096
-#define IXGBE_MIN_TXD                       64
+#define IXGBE_DEFAULT_TXD              512
+#define IXGBE_DEFAULT_TX_WORK          256
+#define IXGBE_MAX_TXD                  4096
+#define IXGBE_MIN_TXD                  64
+
+#define IXGBE_DEFAULT_RXD              512
+#define IXGBE_DEFAULT_RX_WORK          256
+#define IXGBE_MAX_RXD                  4096
+#define IXGBE_MIN_RXD                  64
 
-#define IXGBE_DEFAULT_RXD                  512
-#define IXGBE_MAX_RXD                     4096
-#define IXGBE_MIN_RXD                       64
 
 /* flow control */
-#define IXGBE_MIN_FCRTL                           0x40
+#define IXGBE_MIN_FCRTL                        0x40
 #define IXGBE_MAX_FCRTL                        0x7FF80
-#define IXGBE_MIN_FCRTH                          0x600
+#define IXGBE_MIN_FCRTH                        0x600
 #define IXGBE_MAX_FCRTH                        0x7FFF0
-#define IXGBE_DEFAULT_FCPAUSE           0xFFFF
-#define IXGBE_MIN_FCPAUSE                    0
-#define IXGBE_MAX_FCPAUSE               0xFFFF
+#define IXGBE_DEFAULT_FCPAUSE          0xFFFF
+#define IXGBE_MIN_FCPAUSE              0
+#define IXGBE_MAX_FCPAUSE              0xFFFF
 
 /* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512   512    /* Used for packet split */
-#define IXGBE_RXBUFFER_2K   2048
-#define IXGBE_RXBUFFER_3K   3072
-#define IXGBE_RXBUFFER_4K   4096
-#define IXGBE_RXBUFFER_7K   7168
-#define IXGBE_RXBUFFER_8K   8192
-#define IXGBE_RXBUFFER_15K  15360
-#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
+#define IXGBE_RXBUFFER_256       256  /* Used for skb receive header */
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#define IXGBE_RXBUFFER_1536    1536
+#define IXGBE_RXBUFFER_2K      2048
+#define IXGBE_RXBUFFER_3K      3072
+#define IXGBE_RXBUFFER_4K      4096
+#define IXGBE_RXBUFFER_7K      7168
+#define IXGBE_RXBUFFER_8K      8192
+#define IXGBE_RXBUFFER_15K     15360
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
+#define IXGBE_MAX_RXBUFFER     16384  /* largest size for single descriptor */
 
 /*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
  */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE      IXGBE_RXBUFFER_256
 
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define MAXIMUM_ETHERNET_VLAN_SIZE     (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE  16      /* Must be power of 2 */
 #define IXGBE_TX_FLAGS_FCOE            (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_FSO             (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW            (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE  (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_TSTAMP          (u32)(1 << 8)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0xe0000000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
 #define IXGBE_TX_FLAGS_VLAN_SHIFT      16
 
-#define IXGBE_MAX_VF_MC_ENTRIES         30
-#define IXGBE_MAX_VF_FUNCTIONS          64
-#define IXGBE_MAX_VFTA_ENTRIES          128
-#define MAX_EMULATION_MAC_ADDRS         16
-#define IXGBE_MAX_PF_MACVLANS           15
-#define VMDQ_P(p)   ((p) + adapter->num_vfs)
-#define IXGBE_82599_VF_DEVICE_ID        0x10ED
-#define IXGBE_X540_VF_DEVICE_ID         0x1515
+#define IXGBE_MAX_RX_DESC_POLL         10
+
+#define IXGBE_MAX_VF_MC_ENTRIES                30
+#define IXGBE_MAX_VF_FUNCTIONS         64
+#define IXGBE_MAX_VFTA_ENTRIES         128
+#define MAX_EMULATION_MAC_ADDRS                16
+#define IXGBE_MAX_PF_MACVLANS          15
+#define IXGBE_82599_VF_DEVICE_ID       0x10ED
+#define IXGBE_X540_VF_DEVICE_ID                0x1515
+
+#ifdef CONFIG_PCI_IOV
+#define VMDQ_P(p)      ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
+#else
+#define VMDQ_P(p)      (p)
+#endif
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)    \
+       {                                                       \
+               u32 current_counter = IXGBE_READ_REG(hw, reg);  \
+               if (current_counter < last_counter)             \
+                       counter += 0x100000000LL;               \
+               last_counter = current_counter;                 \
+               counter &= 0xFFFFFFFF00000000LL;                \
+               counter |= current_counter;                     \
+       }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+       {                                                                \
+               u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
+               u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
+               u64 current_counter = (current_counter_msb << 32) |      \
+                       current_counter_lsb;                             \
+               if (current_counter < last_counter)                      \
+                       counter += 0x1000000000LL;                       \
+               last_counter = current_counter;                          \
+               counter &= 0xFFFFFFF000000000LL;                         \
+               counter |= current_counter;                              \
+       }
+
+struct vf_stats {
+       u64 gprc;
+       u64 gorc;
+       u64 gptc;
+       u64 gotc;
+       u64 mprc;
+};
 
 struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
@@ -124,28 +188,65 @@ struct vf_data_storage {
        u16 default_vf_vlan_id;
        u16 vlans_enabled;
        bool clear_to_send;
+       struct vf_stats vfstats;
+       struct vf_stats last_vfstats;
+       struct vf_stats saved_rst_vfstats;
        bool pf_set_mac;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
-       struct pci_dev *vfdev;
+       u16 vlan_count;
+       u8 spoofchk_enabled;
 };
 
 struct vf_macvlans {
        struct list_head l;
        int vf;
-       int rar_entry;
        bool free;
        bool is_macvlan;
        u8 vf_macvlan[ETH_ALEN];
 };
 
+#ifndef IXGBE_NO_LRO
+#define IXGBE_LRO_MAX          32      /*Maximum number of LRO descriptors*/
+#define IXGBE_LRO_GLOBAL       10
+
+struct ixgbe_lro_stats {
+       u32 flushed;
+       u32 coal;
+};
+
+/*
+ * ixgbe_lro_header - header format to be aggregated by LRO
+ * @iph: IP header without options
+ * @tcp: TCP header
+ * @ts:  Optional TCP timestamp data in TCP options
+ *
+ * This structure relies on the check above that verifies that the header
+ * is IPv4 and does not contain any options.
+ */
+struct ixgbe_lrohdr {
+       struct iphdr iph;
+       struct tcphdr th;
+       __be32 ts[0];
+};
+
+struct ixgbe_lro_list {
+       struct sk_buff_head active;
+       struct ixgbe_lro_stats stats;
+};
+
+#endif /* IXGBE_NO_LRO */
 #define IXGBE_MAX_TXD_PWR      14
 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define TXD_USE_COUNT(S)       DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED    ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED    4
+#endif
 
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
@@ -155,17 +256,19 @@ struct ixgbe_tx_buffer {
        struct sk_buff *skb;
        unsigned int bytecount;
        unsigned short gso_segs;
-       dma_addr_t dma;
-       unsigned int length;
+       __be16 protocol;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
 };
 
 struct ixgbe_rx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        struct page *page;
-       dma_addr_t page_dma;
        unsigned int page_offset;
+#endif
 };
 
 struct ixgbe_queue_stats {
@@ -188,47 +291,56 @@ struct ixgbe_rx_queue_stats {
        u64 csum_err;
 };
 
-enum ixbge_ring_state_t {
+enum ixgbe_ring_state_t {
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
-       __IXGBE_RX_PS_ENABLED,
        __IXGBE_RX_RSC_ENABLED,
+#ifndef HAVE_NDO_SET_FEATURES
+       __IXGBE_RX_CSUM_ENABLED,
+#endif
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+#ifdef IXGBE_FCOE
+       __IXGBE_RX_FCOE,
+#endif
 };
 
-#define ring_is_ps_enabled(ring) \
-       test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-       set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-       clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
 #define check_for_tx_hang(ring) \
        test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
        set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define clear_check_for_tx_hang(ring) \
        clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#ifndef IXGBE_NO_HW_RSC
 #define ring_is_rsc_enabled(ring) \
        test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#else
+#define ring_is_rsc_enabled(ring)      false
+#endif
 #define set_ring_rsc_enabled(ring) \
        set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 #define clear_ring_rsc_enabled(ring) \
        clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+
+
 struct ixgbe_ring {
        struct ixgbe_ring *next;        /* pointer to next ring in q_vector */
+       struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
+       struct net_device *netdev;      /* netdev ring belongs to */
+       struct device *dev;             /* device for DMA mapping */
        void *desc;                     /* descriptor ring memory */
-       struct device *dev;             /* device for DMA mapping */
-       struct net_device *netdev;      /* netdev ring belongs to */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
        unsigned long state;
        u8 __iomem *tail;
+       dma_addr_t dma;                 /* phys. address of descriptor ring */
+       unsigned int size;              /* length in bytes */
 
        u16 count;                      /* amount of descriptors */
-       u16 rx_buf_len;
 
        u8 queue_index; /* needed for multiqueue queue management */
        u8 reg_idx;                     /* holds the special value that gets
@@ -236,22 +348,27 @@ struct ixgbe_ring {
                                         * associated with this ring, which is
                                         * different for DCB and RSS modes
                                         */
-       u8 atr_sample_rate;
-       u8 atr_count;
-
        u16 next_to_use;
        u16 next_to_clean;
 
+       union {
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               u16 rx_buf_len;
+#else
+               u16 next_to_alloc;
+#endif
+               struct {
+                       u8 atr_sample_rate;
+                       u8 atr_count;
+               };
+       };
+
        u8 dcb_tc;
        struct ixgbe_queue_stats stats;
-       struct u64_stats_sync syncp;
        union {
                struct ixgbe_tx_queue_stats tx_stats;
                struct ixgbe_rx_queue_stats rx_stats;
        };
-       unsigned int size;              /* length in bytes */
-       dma_addr_t dma;                 /* phys. address of descriptor ring */
-       struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -262,26 +379,50 @@ enum ixgbe_ring_f_enum {
 #ifdef IXGBE_FCOE
        RING_F_FCOE,
 #endif /* IXGBE_FCOE */
-
-       RING_F_ARRAY_SIZE      /* must be last in enum set */
+       RING_F_ARRAY_SIZE  /* must be last in enum set */
 };
 
-#define IXGBE_MAX_RSS_INDICES  16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
+#define IXGBE_MAX_DCB_INDICES  8
+#define IXGBE_MAX_RSS_INDICES  16
+#define IXGBE_MAX_VMDQ_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
-#define IXGBE_MAX_FCOE_INDICES  8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES  (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES  (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
 #else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_RX_QUEUES  IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES  IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
-       int indices;
-       int mask;
-} ____cacheline_internodealigned_in_smp;
+       u16 limit;      /* upper limit on feature indices */
+       u16 indices;    /* current value of indices */
+       u16 mask;       /* Mask used for feature to ring mapping */
+       u16 offset;     /* offset to start of feature */
+};
 
+#define IXGBE_82599_VMDQ_8Q_MASK 0x78
+#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
+#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
+
+#endif
 struct ixgbe_ring_container {
        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
        unsigned int total_bytes;       /* total bytes processed this int */
@@ -295,28 +436,33 @@ struct ixgbe_ring_container {
 #define ixgbe_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
-                              ? 8 : 1)
-#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+#define MAX_RX_PACKET_BUFFERS  ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                                ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS  MAX_RX_PACKET_BUFFERS
 
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbe_q_vector {
        struct ixgbe_adapter *adapter;
-#ifdef CONFIG_IXGBE_DCA
-       int cpu;            /* CPU for DCA */
-#endif
-       u16 v_idx;              /* index of q_vector within array, also used for
-                                * finding the bit in EICR and friends that
-                                * represents the vector for this ring */
-       u16 itr;                /* Interrupt throttle rate written to EITR */
+       int cpu;        /* CPU for DCA */
+       u16 v_idx;      /* index of q_vector within array, also used for
+                        * finding the bit in EICR and friends that
+                        * represents the vector for this ring */
+       u16 itr;        /* Interrupt throttle rate written to EITR */
        struct ixgbe_ring_container rx, tx;
 
        struct napi_struct napi;
+#ifndef HAVE_NETDEV_NAPI_LIST
+       struct net_device poll_dev;
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
        cpumask_t affinity_mask;
+#endif
+#ifndef IXGBE_NO_LRO
+       struct ixgbe_lro_list lrolist;   /* LRO list for queue vector*/
+#endif
        int numa_node;
-       struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];
 
        /* for dynamic allocation of rings associated with this q_vector */
@@ -330,6 +476,7 @@ struct ixgbe_q_vector {
 #define IXGBE_MIN_RSC_ITR      24
 #define IXGBE_100K_ITR         40
 #define IXGBE_20K_ITR          200
+#define IXGBE_16K_ITR          248
 #define IXGBE_10K_ITR          400
 #define IXGBE_8K_ITR           500
 
@@ -340,6 +487,7 @@ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
 }
 
+/* ixgbe_desc_unused - calculate if we have unused descriptors */
 static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 {
        u16 ntc = ring->next_to_clean;
@@ -348,39 +496,67 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
-#define IXGBE_RX_DESC(R, i)        \
+#define IXGBE_RX_DESC(R, i)    \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_DESC(R, i)        \
+#define IXGBE_TX_DESC(R, i)    \
        (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_CTXTDESC(R, i)            \
+#define IXGBE_TX_CTXTDESC(R, i)        \
        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
-#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE     16128
 #ifdef IXGBE_FCOE
-/* Use 3K as the baby jumbo frame size for FCoE */
-#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
+/* use 3K as the baby jumbo frame size for FCoE */
+#define IXGBE_FCOE_JUMBO_FRAME_SIZE    3072
 #endif /* IXGBE_FCOE */
 
-#define OTHER_VECTOR 1
-#define NON_Q_VECTORS (OTHER_VECTOR)
+#define TCP_TIMER_VECTOR       0
+#define OTHER_VECTOR   1
+#define NON_Q_VECTORS  (OTHER_VECTOR + TCP_TIMER_VECTOR)
 
-#define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
-#define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
+#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
 
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
-#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
+struct ixgbe_mac_addr {
+       u8 addr[ETH_ALEN];
+       u16 queue;
+       u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT                0x1
+#define IXGBE_MAC_STATE_MODIFIED       0x2
+#define IXGBE_MAC_STATE_IN_USE         0x4
+
+#ifdef IXGBE_PROCFS
+struct ixgbe_therm_proc_data {
+       struct ixgbe_hw *hw;
+       struct ixgbe_thermal_diode_data *sensor_data;
+};
 
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#endif /* IXGBE_PROCFS */
+
+/*
+ * Only for array allocations in our adapter struct.  On 82598, there will be
+ * unused entries in the array, but that's not a big deal.  Also, in 82599,
+ * we can actually assign 64 queue vectors based on our extended-extended
+ * interrupt registers.  This is different than 82598, which is limited to 16.
+ */
+#define MAX_MSIX_Q_VECTORS     IXGBE_MAX_MSIX_Q_VECTORS_82599
+#define MAX_MSIX_COUNT         IXGBE_MAX_MSIX_VECTORS_82599
+
+#define MIN_MSIX_Q_VECTORS     1
+#define MIN_MSIX_COUNT         (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
 /* default to trying for four seconds */
-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 
 /* board specific private data structure */
 struct ixgbe_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+#ifdef HAVE_VLAN_RX_REGISTER
+       struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */
+#else
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+#endif /* NETIF_F_HW_VLAN_TX */
        /* OS defined structs */
        struct net_device *netdev;
        struct pci_dev *pdev;
@@ -391,44 +567,59 @@ struct ixgbe_adapter {
         * thus the additional *_CAPABLE flags.
         */
        u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
-#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 23)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 25)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 27)
-#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 29)
+#define IXGBE_FLAG_MSI_CAPABLE                 (u32)(1 << 0)
+#define IXGBE_FLAG_MSI_ENABLED                 (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_CAPABLE                        (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_ENABLED                        (u32)(1 << 3)
+#ifndef IXGBE_NO_LLI
+#define IXGBE_FLAG_LLI_PUSH                    (u32)(1 << 4)
+#endif
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 5)
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IXGBE_FLAG_DCA_ENABLED                 (u32)(1 << 6)
+#define IXGBE_FLAG_DCA_CAPABLE                 (u32)(1 << 7)
+#define IXGBE_FLAG_DCA_ENABLED_DATA            (u32)(1 << 8)
+#else
+#define IXGBE_FLAG_DCA_ENABLED                 (u32)0
+#define IXGBE_FLAG_DCA_CAPABLE                 (u32)0
+#define IXGBE_FLAG_DCA_ENABLED_DATA             (u32)0
+#endif
+#define IXGBE_FLAG_MQ_CAPABLE                  (u32)(1 << 9)
+#define IXGBE_FLAG_DCB_ENABLED                 (u32)(1 << 10)
+#define IXGBE_FLAG_VMDQ_ENABLED                        (u32)(1 << 11)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE            (u32)(1 << 12)
+#define IXGBE_FLAG_NEED_LINK_UPDATE            (u32)(1 << 13)
+#define IXGBE_FLAG_NEED_LINK_CONFIG            (u32)(1 << 14)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE           (u32)(1 << 15)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE                (u32)(1 << 16)
+#ifdef IXGBE_FCOE
+#define IXGBE_FLAG_FCOE_CAPABLE                        (u32)(1 << 17)
+#define IXGBE_FLAG_FCOE_ENABLED                        (u32)(1 << 18)
+#endif /* IXGBE_FCOE */
+#define IXGBE_FLAG_SRIOV_CAPABLE               (u32)(1 << 19)
+#define IXGBE_FLAG_SRIOV_ENABLED               (u32)(1 << 20)
+#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE    (u32)(1 << 21)
+#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE       (u32)(1 << 22)
+#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE     (u32)(1 << 23)
 
        u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
+#ifndef IXGBE_NO_HW_RSC
+#define IXGBE_FLAG2_RSC_CAPABLE                        (u32)(1 << 0)
+#define IXGBE_FLAG2_RSC_ENABLED                        (u32)(1 << 1)
+#else
+#define IXGBE_FLAG2_RSC_CAPABLE                        0
+#define IXGBE_FLAG2_RSC_ENABLED                        0
+#endif
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE                (u32)(1 << 3)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT          (u32)(1 << 4)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP             (u32)(1 << 5)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET            (u32)(1 << 6)
+#define IXGBE_FLAG2_RESET_REQUESTED            (u32)(1 << 7)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT       (u32)(1 << 8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 9)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 10)
+#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED      (u32)(1 << 11)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 12)
 
        /* Tx fast path data */
        int num_tx_queues;
@@ -438,6 +629,7 @@ struct ixgbe_adapter {
        /* Rx fast path data */
        int num_rx_queues;
        u16 rx_itr_setting;
+       u16 rx_work_limit;
 
        /* TX */
        struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -460,29 +652,49 @@ struct ixgbe_adapter {
 
        struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
-       /* DCB parameters */
+#ifdef HAVE_DCBNL_IEEE
        struct ieee_pfc *ixgbe_ieee_pfc;
        struct ieee_ets *ixgbe_ieee_ets;
+#endif
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
        u8 dcbx_cap;
+#ifndef HAVE_MQPRIO
+       u8 tc;
+#endif
        enum ixgbe_fc_mode last_lfc_mode;
 
-       int num_msix_vectors;
-       int max_msix_q_vectors;         /* true count of q_vectors for device */
+       int num_q_vectors;      /* current number of q_vectors for device */
+       int max_q_vectors;      /* upper limit of q_vectors for device */
        struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
        struct msix_entry *msix_entries;
 
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+       struct net_device_stats net_stats;
+#endif
+#ifndef IXGBE_NO_LRO
+       struct ixgbe_lro_stats lro_stats;
+#endif
+
+#ifdef ETHTOOL_TEST
        u32 test_icr;
        struct ixgbe_ring test_tx_ring;
        struct ixgbe_ring test_rx_ring;
+#endif
 
        /* structs defined in ixgbe_hw.h */
        struct ixgbe_hw hw;
        u16 msg_enable;
        struct ixgbe_hw_stats stats;
-
+#ifndef IXGBE_NO_LLI
+       u32 lli_port;
+       u32 lli_size;
+       u32 lli_etype;
+       u32 lli_vlan_pri;
+#endif /* IXGBE_NO_LLI */
+
+       u32 *config_space;
        u64 tx_busy;
        unsigned int tx_ring_count;
        unsigned int rx_ring_count;
@@ -509,28 +721,40 @@ struct ixgbe_adapter {
 
        u16 bd_number;
 
-       u16 eeprom_verh;
-       u16 eeprom_verl;
+       char eeprom_id[32];
        u16 eeprom_cap;
-
+       bool netdev_registered;
        u32 interrupt_event;
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
        u32 led_reg;
+#endif
 
-       /* SR-IOV */
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
        unsigned int num_vfs;
        struct vf_data_storage *vfinfo;
        int vf_rate_link_speed;
        struct vf_macvlans vf_mvs;
        struct vf_macvlans *mv_list;
-       bool antispoofing_enabled;
-
+#ifdef CONFIG_PCI_IOV
        u32 timer_event_accumulator;
        u32 vferr_refcount;
+#endif
+       struct ixgbe_mac_addr *mac_table;
+#ifdef IXGBE_SYSFS
+       struct kobject *info_kobj;
+       struct kobject *therm_kobj[IXGBE_MAX_SENSORS];
+#else /* IXGBE_SYSFS */
+#ifdef IXGBE_PROCFS
+       struct proc_dir_entry *eth_dir;
+       struct proc_dir_entry *info_dir;
+       struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS];
+       struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
+#endif /* IXGBE_PROCFS */
+#endif /* IXGBE_SYSFS */
 };
 
 struct ixgbe_fdir_filter {
-       struct hlist_node fdir_node;
+       struct  hlist_node fdir_node;
        union ixgbe_atr_input filter;
        u16 sw_idx;
        u16 action;
@@ -545,32 +769,54 @@ enum ixgbe_state_t {
 };
 
 struct ixgbe_cb {
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        union {                         /* Union defining head/tail partner */
                struct sk_buff *head;
                struct sk_buff *tail;
        };
+#endif
        dma_addr_t dma;
-       u16 append_cnt;
-       bool delay_unmap;
+#ifndef IXGBE_NO_LRO
+       __be32  tsecr;                  /* timestamp echo response */
+       u32     tsval;                  /* timestamp value in host order */
+       u32     next_seq;               /* next expected sequence number */
+       u16     free;                   /* 65521 minus total size */
+       u16     mss;                    /* size of data portion of packet */
+#endif /* IXGBE_NO_LRO */
+#ifdef HAVE_VLAN_RX_REGISTER
+       u16     vid;                    /* VLAN tag */
+#endif
+       u16     append_cnt;             /* number of skb's appended */
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       bool    page_released;
+#endif
 };
 #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
 
-enum ixgbe_boards {
-       board_82598,
-       board_82599,
-       board_X540,
-};
+/* ESX ixgbe CIM IOCTL definition */
 
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-#ifdef CONFIG_IXGBE_DCB
-extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                              struct ixgbe_dcb_config *dst_dcb_cfg,
-                              int tc_max);
-#endif
+#ifdef IXGBE_SYSFS
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+#endif /* IXGBE_SYSFS */
+#ifdef IXGBE_PROCFS
+void ixgbe_procfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_procfs_init(struct ixgbe_adapter *adapter);
+int ixgbe_procfs_topdir_init(void);
+void ixgbe_procfs_topdir_exit(void);
+#endif /* IXGBE_PROCFS */
+
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max);
+
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
+
+/* needed by ixgbe_main.c */
+extern int ixgbe_validate_mac_addr(u8 *mc_addr);
+extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
+extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
 
+/* needed by ixgbe_ethtool.c */
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
 
@@ -583,63 +829,98 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
 extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
+                                   struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
+                                   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
 extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
                                         struct ixgbe_adapter *,
                                         struct ixgbe_ring *);
 extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
-                                             struct ixgbe_tx_buffer *);
+                                            struct ixgbe_tx_buffer *);
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_hash_dword input,
-                                                union ixgbe_atr_hash_dword common,
-                                                 u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-                                                union ixgbe_atr_input *mask);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+                                  struct ixgbe_ring *);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                              struct ixgbe_ring *);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+                                  struct ixgbe_ring *);
+extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
+extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
-                     u32 tx_flags, u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+                    u8 *hdr_len);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                          union ixgbe_adv_rx_desc *rx_desc,
                          struct sk_buff *skb);
 extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                              struct scatterlist *sgl, unsigned int sgc);
+                             struct scatterlist *sgl, unsigned int sgc);
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                                 struct scatterlist *sgl, unsigned int sgc);
+#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
-#ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+#else
+int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter);
+void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter);
+#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_OPS_GETAPP
+extern u8 ixgbe_fcoe_getapp(struct net_device *netdev);
+#endif /* HAVE_DCBNL_OPS_GETAPP */
 extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
-#endif /* CONFIG_IXGBE_DCB */
+#endif /* CONFIG_DCB */
+extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
 extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+#endif
 #endif /* IXGBE_FCOE */
 
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
+
+extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+                              u16 subdevice_id);
+extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
+extern int ixgbe_get_settings(struct net_device *netdev,
+                             struct ethtool_cmd *ecmd);
+extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
+                                   struct net_device *netdev, int vfn);
+extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+                               u8 *addr, u16 queue);
+extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+                               u8 *addr, u16 queue);
+extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void ixgbe_vlan_mode(struct net_device *, u32);
+#endif
+
+
+extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif /* _IXGBE_H_ */
index 271fc28fb064f974c436c246df0dbeb1334d3613..24015844d5296a76fe1d47470a77fef2bfbc5fa9 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_82598_MAX_TX_QUEUES 32
-#define IXGBE_82598_MAX_RX_QUEUES 64
-#define IXGBE_82598_RAR_ENTRIES   16
-#define IXGBE_82598_MC_TBL_SIZE  128
-#define IXGBE_82598_VFT_TBL_SIZE 128
-#define IXGBE_82598_RX_PB_SIZE  512
-
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                            ixgbe_link_speed *speed,
+                                            bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                     bool autoneg_wait_to_complete);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed, bool *link_up,
+                                     bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed,
+                                     bool autoneg,
+                                     bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg,
-                                         bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                       u8 *eeprom_data);
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                 u32 headroom, int strategy);
 
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -56,9 +64,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
  *  increase the value to either 10ms to 250ms for capability version 1 config,
  *  or 16ms to 55ms for version 2.
  **/
-static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
        u16 pcie_devctl2;
 
@@ -80,11 +87,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
         * directly in order to set the completion timeout value for
         * 16ms to 55ms
         */
-       pci_read_config_word(adapter->pdev,
-                            IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+       pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
        pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
-       pci_write_config_word(adapter->pdev,
-                             IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+       IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
 out:
        /* disable completion timeout resend */
        gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -92,43 +97,66 @@ out:
 }
 
 /**
- *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
+ *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
  *  @hw: pointer to hardware structure
  *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
+ *  Initialize the function pointers and assign the MAC type for 82598.
+ *  Does not touch the hardware.
  **/
-static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
-       struct ixgbe_adapter *adapter = hw->back;
-       u16 msix_count;
-       pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
-                            &msix_count);
-       msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-       /* MSI-X count is zero-based in HW, so increment to give proper value */
-       msix_count++;
-
-       return msix_count;
-}
-
-/**
- */
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val;
+
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+       /* PHY */
+       phy->ops.init = &ixgbe_init_phy_ops_82598;
+
+       /* MAC */
+       mac->ops.start_hw = &ixgbe_start_hw_82598;
+       mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+       mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+       mac->ops.get_supported_physical_layer =
+                               &ixgbe_get_supported_physical_layer_82598;
+       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+       mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+       mac->ops.set_vlvf = NULL;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+       /* Flow Control */
+       mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+
+       mac->mcft_size          = 128;
+       mac->vft_size           = 128;
+       mac->num_rar_entries    = 16;
+       mac->rx_pb_size         = 512;
+       mac->max_tx_queues      = 32;
+       mac->max_rx_queues      = 64;
+       mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_generic(hw);
+
+       /* SFP+ Module */
+       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
+       /* Link */
+       mac->ops.check_link = &ixgbe_check_mac_link_82598;
+       mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+       mac->ops.flap_tx_laser = NULL;
+       mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = NULL;
 
-       /* Call PHY identify routine to get the phy type */
-       ixgbe_identify_phy_generic(hw);
-
-       mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
-       mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
-       mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
-       mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
-       mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
-
-       return 0;
+       return ret_val;
 }
 
 /**
@@ -136,11 +164,11 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *
  *  Initialize any function pointers that were not able to be
- *  set during get_invariants because the PHY/SFP type was
+ *  set during init_shared_code because the PHY/SFP type was
  *  not known.  Perform the SFP init if necessary.
  *
  **/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
@@ -154,7 +182,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
                mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
                mac->ops.get_link_capabilities =
-                       &ixgbe_get_copper_link_capabilities_generic;
+                               &ixgbe_get_copper_link_capabilities_generic;
        }
 
        switch (hw->phy.type) {
@@ -162,7 +190,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
+                                       &ixgbe_get_phy_firmware_version_tnx;
                break;
        case ixgbe_phy_nl:
                phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -178,8 +206,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
                /* Check to see if SFP+ module is supported */
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-                                                           &list_offset,
-                                                           &data_offset);
+                                                             &list_offset,
+                                                             &data_offset);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
                        goto out;
@@ -201,7 +229,7 @@ out:
  *  Disables relaxed ordering Then set pcie completion timeout
  *
  **/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
        u32 regval;
        u32 i;
@@ -225,8 +253,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
 
-       hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
-
        /* set the completion timeout for interface */
        if (ret_val == 0)
                ixgbe_set_pcie_completion_timeout(hw);
@@ -243,8 +269,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+                                            ixgbe_link_speed *speed,
+                                            bool *autoneg)
 {
        s32 status = 0;
        u32 autoc = 0;
@@ -347,24 +373,39 @@ out:
 /**
  *  ixgbe_fc_enable_82598 - Enable flow control
  *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Enable flow control according to the current settings.
  **/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
+       u32 fcrtl, fcrth;
        u32 link_speed = 0;
+       int i;
        bool link_up;
 
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc)
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
+       }
+
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
 
-#endif /* CONFIG_DCB */
        /*
         * On 82598 having Rx FC on causes resets while doing 1G
         * so if it's on turn it off once we know link_speed. For
@@ -386,9 +427,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        }
 
        /* Negotiate the fc mode to use */
-       ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
-               goto out;
+       ixgbe_fc_autoneg(hw);
 
        /* Disable any previous flow control settings */
        fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -405,9 +444,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *     we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
@@ -440,11 +476,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
                fctrl_reg |= IXGBE_FCTRL_RFCE;
                rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
                break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
@@ -458,28 +489,27 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
-       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-               reg = hw->fc.low_water << 6;
-               if (hw->fc.send_xon)
-                       reg |= IXGBE_FCRTL_XONE;
-
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
-
-               reg = hw->fc.high_water[packetbuf_num] << 6;
-               reg |= IXGBE_FCRTH_FCEN;
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+                       fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+               } else {
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+               }
 
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
        }
 
        /* Configure pause time (2 TCs per register) */
-       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-       if ((packetbuf_num & 1) == 0)
-               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-       else
-               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+       reg = hw->fc.pause_time * 0x00010001;
+       for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+       /* Configure flow control refresh threshold value */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
        return ret_val;
@@ -493,7 +523,7 @@ out:
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+                                     bool autoneg_wait_to_complete)
 {
        u32 autoc_reg;
        u32 links_reg;
@@ -548,10 +578,11 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
 
        for (timeout = 0;
             timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
-               hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
 
-               if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
-                   (an_reg & MDIO_STAT1_LSTATUS))
+               if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+                   (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
                        break;
 
                msleep(100);
@@ -575,24 +606,24 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
  *  Reads the links register to determine if link is up and the current speed
  **/
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed, bool *link_up,
-                                      bool link_up_wait_to_complete)
+                                     ixgbe_link_speed *speed, bool *link_up,
+                                     bool link_up_wait_to_complete)
 {
        u32 links_reg;
        u32 i;
        u16 link_reg, adapt_comp_reg;
 
        /*
-        * SERDES PHY requires us to read link status from register 0xC79F.
-        * Bit 0 set indicates link is up/ready; clear indicates link down.
-        * 0xC00C is read to check that the XAUI lanes are active.  Bit 0
-        * clear indicates active; set indicates inactive.
+        * SERDES PHY requires us to read link status from undocumented
+        * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
+        * indicates link down.  0xC00C is read to check that the XAUI lanes
+        * are active.  Bit 0 clear indicates active; set indicates inactive.
         */
        if (hw->phy.type == ixgbe_phy_nl) {
-               hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
-               hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
-               hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
-                                    &adapt_comp_reg);
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+               hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+                                    &adapt_comp_reg);
                if (link_up_wait_to_complete) {
                        for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                                if ((link_reg & 1) &&
@@ -604,11 +635,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                }
                                msleep(100);
                                hw->phy.ops.read_reg(hw, 0xC79F,
-                                                    MDIO_MMD_PMAPMD,
-                                                    &link_reg);
+                                                    IXGBE_TWINAX_DEV,
+                                                    &link_reg);
                                hw->phy.ops.read_reg(hw, 0xC00C,
-                                                    MDIO_MMD_PMAPMD,
-                                                    &adapt_comp_reg);
+                                                    IXGBE_TWINAX_DEV,
+                                                    &adapt_comp_reg);
                        }
                } else {
                        if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -657,23 +688,23 @@ out:
  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: true if auto-negotiation enabled
+ *  @autoneg: true if autonegotiation enabled
  *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
-                                           ixgbe_link_speed speed, bool autoneg,
-                                           bool autoneg_wait_to_complete)
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
 {
-       s32              status            = 0;
+       s32 status = 0;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-       u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32              autoc             = curr_autoc;
-       u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;
+       u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc = curr_autoc;
+       u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
 
        /* Check to see if speed passed in is supported. */
-       ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
+       ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
        speed &= link_capabilities;
 
        if (speed == IXGBE_LINK_SPEED_UNKNOWN)
@@ -681,7 +712,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 
        /* Set KX4/KX support according to speed requested */
        else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
-                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
                autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
                if (speed & IXGBE_LINK_SPEED_10GB_FULL)
                        autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -715,15 +746,15 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg,
-                                               bool autoneg_wait_to_complete)
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete)
 {
        s32 status;
 
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-                                             autoneg_wait_to_complete);
+                                             autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -762,28 +793,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
        if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
                /* Enable Tx Atlas so packets can be transmitted again */
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                             analog_val);
+                                             analog_val);
        }
 
        /* Reset PHY */
@@ -860,7 +891,7 @@ mac_reset_top:
        hw->mac.ops.init_rx_addrs(hw);
 
 reset_hw_out:
-       if (phy_status)
+       if (phy_status != 0)
                status = phy_status;
 
        return status;
@@ -872,7 +903,7 @@ reset_hw_out:
  *  @rar: receive address register index to associate with a VMDq index
  *  @vmdq: VMDq set index
  **/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
@@ -926,8 +957,8 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
  *
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                               bool vlan_on)
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                        bool vlan_on)
 {
        u32 regindex;
        u32 bitindex;
@@ -982,7 +1013,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
        for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
                for (offset = 0; offset < hw->mac.vft_size; offset++)
                        IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-                                       0);
+                                       0);
 
        return 0;
 }
@@ -995,12 +1026,12 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
  *
  *  Performs read operation to Atlas analog register specified.
  **/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
 {
        u32  atlas_ctl;
 
        IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
        IXGBE_WRITE_FLUSH(hw);
        udelay(10);
        atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1017,7 +1048,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
  *
  *  Performs write operation to Atlas analog register specified.
  **/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 {
        u32  atlas_ctl;
 
@@ -1037,8 +1068,8 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
  *
  *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
  **/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                      u8 *eeprom_data)
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                               u8 *eeprom_data)
 {
        s32 status = 0;
        u16 sfp_addr = 0;
@@ -1048,27 +1079,27 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
 
        if (hw->phy.type == ixgbe_phy_nl) {
                /*
-                * phy SDA/SCL registers are at addresses 0xC30A to
-                * 0xC30D.  These registers are used to talk to the SFP+
+                * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+                * 0xC30D. These registers are used to talk to the SFP+
                 * module's EEPROM through the SDA/SCL (I2C) interface.
                 */
                sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
                sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
                hw->phy.ops.write_reg(hw,
-                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-                                     MDIO_MMD_PMAPMD,
-                                     sfp_addr);
+                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     sfp_addr);
 
                /* Poll status */
                for (i = 0; i < 100; i++) {
                        hw->phy.ops.read_reg(hw,
-                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-                                            MDIO_MMD_PMAPMD,
-                                            &sfp_stat);
+                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+                                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                            &sfp_stat);
                        sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
                        if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
                                break;
-                       usleep_range(10000, 20000);
+                       msleep(10);
                }
 
                if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
@@ -1079,7 +1110,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
 
                /* Read data */
                hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-                                    MDIO_MMD_PMAPMD, &sfp_data);
+                                    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
 
                *eeprom_data = (u8)(sfp_data >> 8);
        } else {
@@ -1097,7 +1128,7 @@ out:
  *
  *  Determines physical layer capabilities of the current configuration.
  **/
-static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 {
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -1112,13 +1143,13 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
        switch (hw->phy.type) {
        case ixgbe_phy_tn:
        case ixgbe_phy_cu_unknown:
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
-               MDIO_MMD_PMAPMD, &ext_ability);
-               if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
        default:
@@ -1199,7 +1230,7 @@ out:
  *  Calls common function and corrects issue with some single port devices
  *  that enable LAN1 but not LAN0.
  **/
-static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
 {
        struct ixgbe_bus_info *bus = &hw->bus;
        u16 pci_gen = 0;
@@ -1224,17 +1255,17 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers.
- */
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
-                                 int strategy)
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                 u32 headroom, int strategy)
 {
        u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
-       u8  i = 0;
+       u8 i = 0;
 
        if (!num_pb)
                return;
@@ -1263,74 +1294,3 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
 
        return;
 }
-
-static struct ixgbe_mac_operations mac_ops_82598 = {
-       .init_hw                = &ixgbe_init_hw_generic,
-       .reset_hw               = &ixgbe_reset_hw_82598,
-       .start_hw               = &ixgbe_start_hw_82598,
-       .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
-       .get_media_type         = &ixgbe_get_media_type_82598,
-       .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
-       .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
-       .get_mac_addr           = &ixgbe_get_mac_addr_generic,
-       .stop_adapter           = &ixgbe_stop_adapter_generic,
-       .get_bus_info           = &ixgbe_get_bus_info_generic,
-       .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie_82598,
-       .read_analog_reg8       = &ixgbe_read_analog_reg8_82598,
-       .write_analog_reg8      = &ixgbe_write_analog_reg8_82598,
-       .setup_link             = &ixgbe_setup_mac_link_82598,
-       .set_rxpba              = &ixgbe_set_rxpba_82598,
-       .check_link             = &ixgbe_check_mac_link_82598,
-       .get_link_capabilities  = &ixgbe_get_link_capabilities_82598,
-       .led_on                 = &ixgbe_led_on_generic,
-       .led_off                = &ixgbe_led_off_generic,
-       .blink_led_start        = &ixgbe_blink_led_start_generic,
-       .blink_led_stop         = &ixgbe_blink_led_stop_generic,
-       .set_rar                = &ixgbe_set_rar_generic,
-       .clear_rar              = &ixgbe_clear_rar_generic,
-       .set_vmdq               = &ixgbe_set_vmdq_82598,
-       .clear_vmdq             = &ixgbe_clear_vmdq_82598,
-       .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
-       .enable_mc              = &ixgbe_enable_mc_generic,
-       .disable_mc             = &ixgbe_disable_mc_generic,
-       .clear_vfta             = &ixgbe_clear_vfta_82598,
-       .set_vfta               = &ixgbe_set_vfta_82598,
-       .fc_enable              = &ixgbe_fc_enable_82598,
-       .set_fw_drv_ver         = NULL,
-       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
-       .release_swfw_sync      = &ixgbe_release_swfw_sync,
-};
-
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
-       .init_params            = &ixgbe_init_eeprom_params_generic,
-       .read                   = &ixgbe_read_eerd_generic,
-       .write                  = &ixgbe_write_eeprom_generic,
-       .write_buffer           = &ixgbe_write_eeprom_buffer_bit_bang_generic,
-       .read_buffer            = &ixgbe_read_eerd_buffer_generic,
-       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
-       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
-       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
-};
-
-static struct ixgbe_phy_operations phy_ops_82598 = {
-       .identify               = &ixgbe_identify_phy_generic,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
-       .init                   = &ixgbe_init_phy_ops_82598,
-       .reset                  = &ixgbe_reset_phy_generic,
-       .read_reg               = &ixgbe_read_phy_reg_generic,
-       .write_reg              = &ixgbe_write_phy_reg_generic,
-       .setup_link             = &ixgbe_setup_phy_link_generic,
-       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
-       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_82598,
-       .check_overtemp   = &ixgbe_tn_check_overtemp,
-};
-
-struct ixgbe_info ixgbe_82598_info = {
-       .mac                    = ixgbe_mac_82598EB,
-       .get_invariants         = &ixgbe_get_invariants_82598,
-       .mac_ops                = &mac_ops_82598,
-       .eeprom_ops             = &eeprom_ops_82598,
-       .phy_ops                = &phy_ops_82598,
-};
-
diff --git a/drivers/net/ixgbe/ixgbe_82598.h b/drivers/net/ixgbe/ixgbe_82598.h
new file mode 100644 (file)
index 0000000..c6abb02
--- /dev/null
@@ -0,0 +1,44 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                               u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82598_H_ */
index 9c14685358eb08283c7fafd7f80e78926a5c70db..678aac465534b6a4b8670e13ee4663a93fd13ad5 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
 #include "ixgbe_phy.h"
-#include "ixgbe_mbx.h"
-
-#define IXGBE_82599_MAX_TX_QUEUES 128
-#define IXGBE_82599_MAX_RX_QUEUES 128
-#define IXGBE_82599_RAR_ENTRIES   128
-#define IXGBE_82599_MC_TBL_SIZE   128
-#define IXGBE_82599_VFT_TBL_SIZE  128
-#define IXGBE_82599_RX_PB_SIZE   512
-
-static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                                ixgbe_link_speed speed,
-                                                bool autoneg,
-                                                bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                           ixgbe_link_speed speed,
-                                           bool autoneg,
-                                           bool autoneg_wait_to_complete);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                                     bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                               ixgbe_link_speed speed,
-                               bool autoneg,
-                               bool autoneg_wait_to_complete);
+
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg,
-                                         bool autoneg_wait_to_complete);
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+                                  u16 offset, u16 *data);
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+                                         u16 words, u16 *data);
 
-static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
        /* enable the laser control functions for SFP+ fiber */
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
                mac->ops.disable_tx_laser =
-                                      &ixgbe_disable_tx_laser_multispeed_fiber;
+                                      &ixgbe_disable_tx_laser_multispeed_fiber;
                mac->ops.enable_tx_laser =
-                                       &ixgbe_enable_tx_laser_multispeed_fiber;
+                                       &ixgbe_enable_tx_laser_multispeed_fiber;
                mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+
        } else {
                mac->ops.disable_tx_laser = NULL;
                mac->ops.enable_tx_laser = NULL;
@@ -85,18 +63,65 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
                /* Set up dual speed SFP+ support */
                mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
        } else {
-               if ((mac->ops.get_media_type(hw) ==
-                    ixgbe_media_type_backplane) &&
-                   (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
-                    hw->phy.smart_speed == ixgbe_smart_speed_on) &&
-                    !ixgbe_verify_lesm_fw_enabled_82599(hw))
+               if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+                    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+                     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+                     !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
                        mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
-               else
+               } else {
                        mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+               }
+       }
+}
+
+/**
+ *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val = 0;
+
+       /* Identify the PHY or SFP module */
+       ret_val = phy->ops.identify(hw);
+       if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto init_phy_ops_out;
+
+       /* Setup function pointers based on detected SFP module and speeds */
+       ixgbe_init_mac_link_ops_82599(hw);
+       if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+               hw->phy.ops.reset = NULL;
+
+       /* If copper media, overwrite with copper function pointers */
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+               mac->ops.get_link_capabilities =
+                                 &ixgbe_get_copper_link_capabilities_generic;
+       }
+
+       /* Set necessary function pointers based on phy type */
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+               phy->ops.get_firmware_version =
+                            &ixgbe_get_phy_firmware_version_tnx;
+               break;
+       default:
+               break;
        }
+init_phy_ops_out:
+       return ret_val;
 }
 
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
        u32 reg_anlp1 = 0;
@@ -109,13 +134,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                hw->phy.ops.reset = NULL;
 
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                             &data_offset);
+                                                             &data_offset);
                if (ret_val != 0)
                        goto setup_sfp_out;
 
                /* PHY config will finish before releasing the semaphore */
                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
+                                                       IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SWFW_SYNC;
                        goto setup_sfp_out;
@@ -130,21 +155,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
                /* Release the semaphore */
                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-               /*
-                * Delay obtaining semaphore again to allow FW access,
-                * semaphore_delay is in ms usleep_range needs us.
-                */
-               usleep_range(hw->eeprom.semaphore_delay * 1000,
-                            hw->eeprom.semaphore_delay * 2000);
+               /* Delay obtaining semaphore again to allow FW access */
+               msleep(hw->eeprom.semaphore_delay);
 
                /* Now restart DSP by setting Restart_AN and clearing LMS */
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
-                               IXGBE_AUTOC_AN_RESTART));
+                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+                               IXGBE_AUTOC_AN_RESTART));
 
                /* Wait for AN to leave state 0 */
                for (i = 0; i < 10; i++) {
-                       usleep_range(4000, 8000);
+                       msleep(4);
                        reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
                        if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
                                break;
@@ -157,69 +178,97 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
                /* Restart DSP by setting Restart_AN and return to SFI mode */
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
-                               IXGBE_AUTOC_AN_RESTART));
+                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+                               IXGBE_AUTOC_AN_RESTART));
        }
 
 setup_sfp_out:
        return ret_val;
 }
 
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
-{
-       struct ixgbe_mac_info *mac = &hw->mac;
-
-       ixgbe_init_mac_link_ops_82599(hw);
-
-       mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
-       mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
-       mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
-       mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
-       mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
-
-       return 0;
-}
-
 /**
- *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
  *  @hw: pointer to hardware structure
  *
- *  Initialize any function pointers that were not able to be
- *  set during get_invariants because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- *
+ *  Initialize the function pointers and assign the MAC type for 82599.
+ *  Does not touch the hardware.
  **/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val = 0;
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       s32 ret_val;
+
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+       /* PHY */
+       phy->ops.identify = &ixgbe_identify_phy_82599;
+       phy->ops.init = &ixgbe_init_phy_ops_82599;
+
+       /* MAC */
+       mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+       mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+       mac->ops.get_supported_physical_layer =
+                                   &ixgbe_get_supported_physical_layer_82599;
+       mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+       mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
+       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
+       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
+       mac->ops.start_hw = &ixgbe_start_hw_82599;
+       mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+       mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+       mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+       mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+       mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+       mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+       mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+       mac->rar_highwater = 1;
+       mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+       mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+       mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+       mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+       mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+       mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+       /* Link */
+       mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
+       mac->ops.check_link = &ixgbe_check_mac_link_generic;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+       ixgbe_init_mac_link_ops_82599(hw);
 
-       /* Identify the PHY or SFP module */
-       ret_val = phy->ops.identify(hw);
+       mac->mcft_size          = 128;
+       mac->vft_size           = 128;
+       mac->num_rar_entries    = 128;
+       mac->rx_pb_size         = 512;
+       mac->max_tx_queues      = 128;
+       mac->max_rx_queues      = 128;
+       mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_generic(hw);
 
-       /* Setup function pointers based on detected SFP module and speeds */
-       ixgbe_init_mac_link_ops_82599(hw);
+       mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+                                  IXGBE_FWSM_MODE_MASK) ? true : false;
 
-       /* If copper media, overwrite with copper function pointers */
-       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-               mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
-               mac->ops.get_link_capabilities =
-                       &ixgbe_get_copper_link_capabilities_generic;
-       }
+       hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
 
-       /* Set necessary function pointers based on phy type */
-       switch (hw->phy.type) {
-       case ixgbe_phy_tn:
-               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
-               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
-               break;
-       default:
-               break;
-       }
+       /* EEPROM */
+       eeprom->ops.read = &ixgbe_read_eeprom_82599;
+       eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+       mac->ops.get_thermal_sensor_data =
+                                        &ixgbe_get_thermal_sensor_data_generic;
+       mac->ops.init_thermal_sensor_thresh =
+                                     &ixgbe_init_thermal_sensor_thresh_generic;
 
        return ret_val;
 }
@@ -232,16 +281,18 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
  *
  *  Determines the link capabilities by reading the AUTOC register.
  **/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *negotiation)
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed,
+                                     bool *negotiation)
 {
        s32 status = 0;
        u32 autoc = 0;
 
-       /* Determine 1G link capabilities off of SFP+ type */
+       /* Check if 1G SFP module. */
        if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-           hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+           hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+           hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+           hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                *negotiation = true;
                goto out;
@@ -249,8 +300,8 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 
        /*
         * Determine link capabilities based on the stored value of AUTOC,
-        * which represents EEPROM defaults.  If AUTOC value has not been
-        * stored, use the current register value.
+        * which represents EEPROM defaults.  If AUTOC value has not
+        * been stored, use the current register values.
         */
        if (hw->mac.orig_link_settings_stored)
                autoc = hw->mac.orig_autoc;
@@ -314,7 +365,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 
        if (hw->phy.multispeed_fiber) {
                *speed |= IXGBE_LINK_SPEED_10GB_FULL |
-                         IXGBE_LINK_SPEED_1GB_FULL;
+                         IXGBE_LINK_SPEED_1GB_FULL;
                *negotiation = true;
        }
 
@@ -328,7 +379,7 @@ out:
  *
  *  Returns the media type (fiber, copper, backplane)
  **/
-static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 {
        enum ixgbe_media_type media_type;
 
@@ -385,8 +436,8 @@ out:
  *  Configures link settings based on values in the ixgbe_hw struct.
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                               bool autoneg_wait_to_complete)
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                              bool autoneg_wait_to_complete)
 {
        u32 autoc_reg;
        u32 links_reg;
@@ -434,7 +485,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
  *  PHY states.  This includes selectively shutting down the Tx
  *  laser on the PHY, effectively halting physical link.
  **/
-static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 
@@ -453,7 +504,7 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *  PHY states.  This includes selectively turning on the Tx
  *  laser on the PHY, effectively starting physical link.
  **/
-static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 
@@ -476,7 +527,7 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *  end.  This is consistent with true clause 37 autoneg, which also
  *  involves a loss of signal.
  **/
-static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        if (hw->mac.autotry_restart) {
                ixgbe_disable_tx_laser_multispeed_fiber(hw);
@@ -494,10 +545,9 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                          ixgbe_link_speed speed,
-                                          bool autoneg,
-                                          bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                    ixgbe_link_speed speed, bool autoneg,
+                                    bool autoneg_wait_to_complete)
 {
        s32 status = 0;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -509,8 +559,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
        bool negotiation;
 
        /* Mask off requested but non-supported speeds */
-       status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
-                                                  &negotiation);
+       status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
        if (status != 0)
                return status;
 
@@ -525,8 +574,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-                                               false);
+               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
                if (status != 0)
                        return status;
 
@@ -549,7 +597,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        return status;
 
                /* Flap the tx laser if it has not already been done */
-               hw->mac.ops.flap_tx_laser(hw);
+               ixgbe_flap_tx_laser(hw);
 
                /*
                 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
@@ -561,8 +609,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        msleep(100);
 
                        /* If we have link, just jump out */
-                       status = hw->mac.ops.check_link(hw, &link_speed,
-                                                       &link_up, false);
+                       status = ixgbe_check_link(hw, &link_speed,
+                                                 &link_up, false);
                        if (status != 0)
                                return status;
 
@@ -577,8 +625,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-                                               false);
+               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
                if (status != 0)
                        return status;
 
@@ -602,14 +649,13 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        return status;
 
                /* Flap the tx laser if it has not already been done */
-               hw->mac.ops.flap_tx_laser(hw);
+               ixgbe_flap_tx_laser(hw);
 
                /* Wait for the link partner to also set speed */
                msleep(100);
 
                /* If we have link, just jump out */
-               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-                                               false);
+               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
                if (status != 0)
                        return status;
 
@@ -624,9 +670,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
         */
        if (speedcnt > 1)
                status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-                                                              highest_link_speed,
-                                                              autoneg,
-                                                              autoneg_wait_to_complete);
+                       highest_link_speed, autoneg, autoneg_wait_to_complete);
 
 out:
        /* Set autoneg_advertised value based on input link speed */
@@ -650,9 +694,9 @@ out:
  *
  *  Implements the Intel SmartSpeed algorithm.
  **/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                    ixgbe_link_speed speed, bool autoneg,
-                                    bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                   ixgbe_link_speed speed, bool autoneg,
+                                   bool autoneg_wait_to_complete)
 {
        s32 status = 0;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -694,11 +738,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                 * Table 9 in the AN MAS.
                 */
                for (i = 0; i < 5; i++) {
-                       mdelay(100);
+                       msleep(100);
 
                        /* If we have link, just jump out */
-                       status = hw->mac.ops.check_link(hw, &link_speed,
-                                                       &link_up, false);
+                       status = ixgbe_check_link(hw, &link_speed, &link_up,
+                                                 false);
                        if (status != 0)
                                goto out;
 
@@ -729,11 +773,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
         * connect attempts as defined in the AN MAS table 73-7.
         */
        for (i = 0; i < 6; i++) {
-               mdelay(100);
+               msleep(100);
 
                /* If we have link, just jump out */
-               status = hw->mac.ops.check_link(hw, &link_speed,
-                                               &link_up, false);
+               status = ixgbe_check_link(hw, &link_speed, &link_up, false);
                if (status != 0)
                        goto out;
 
@@ -748,8 +791,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
 out:
        if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-               hw_dbg(hw, "Smartspeed has downgraded the link speed from "
-                      "the maximum advertised\n");
+               hw_dbg(hw, "Smartspeed has downgraded the link speed "
+               "from the maximum advertised\n");
        return status;
 }
 
@@ -762,9 +805,9 @@ out:
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                               ixgbe_link_speed speed, bool autoneg,
-                               bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+                              ixgbe_link_speed speed, bool autoneg,
+                              bool autoneg_wait_to_complete)
 {
        s32 status = 0;
        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -779,8 +822,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
 
        /* Check to see if speed passed in is supported. */
-       status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
-                                                  &autoneg);
+       status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
        if (status != 0)
                goto out;
 
@@ -811,8 +853,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                        autoc |= IXGBE_AUTOC_KX_SUPP;
        } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-                  (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-                   link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+                  (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+                   link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
                /* Switch from 1G SFI to 10G SFI if requested */
                if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
                    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -820,7 +862,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                        autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
                }
        } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-                  (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+                  (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
                /* Switch from 10G SFI to 1G SFI if requested */
                if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
                    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -852,9 +894,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                }
                                if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
                                        status =
-                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-                                       hw_dbg(hw, "Autoneg did not "
-                                              "complete.\n");
+                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                                       hw_dbg(hw, "Autoneg did not complete.\n");
                                }
                        }
                }
@@ -877,15 +918,15 @@ out:
  *  Restarts link on PHY and MAC based on settings passed in.
  **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg,
-                                         bool autoneg_wait_to_complete)
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete)
 {
        s32 status;
 
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-                                             autoneg_wait_to_complete);
+                                             autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 
@@ -900,7 +941,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
  *  reset.
  **/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
        ixgbe_link_speed link_speed;
        s32 status;
@@ -938,7 +979,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 
 mac_reset_top:
        /*
-        * Issue global reset to the MAC. Needs to be SW reset if link is up.
+        * Issue global reset to the MAC.  Needs to be SW reset if link is up.
         * If link reset is used when link is up, it might reset the PHY when
         * mng is using it.  If link is down or the flag to force full link
         * reset is set, then perform link reset.
@@ -993,13 +1034,13 @@ mac_reset_top:
        } else {
                if (autoc != hw->mac.orig_autoc)
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-                                       IXGBE_AUTOC_AN_RESTART));
+                                       IXGBE_AUTOC_AN_RESTART));
 
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
                        autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
                        autoc2 |= (hw->mac.orig_autoc2 &
-                                  IXGBE_AUTOC2_UPPER_MASK);
+                                  IXGBE_AUTOC2_UPPER_MASK);
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
                }
        }
@@ -1021,7 +1062,10 @@ mac_reset_top:
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* Save the SAN MAC RAR index */
+               hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
 
                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
@@ -1029,7 +1073,7 @@ mac_reset_top:
 
        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-                                      &hw->mac.wwpn_prefix);
+                                  &hw->mac.wwpn_prefix);
 
 reset_hw_out:
        return status;
@@ -1057,7 +1101,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
        }
        if (i >= IXGBE_FDIRCMD_CMD_POLL) {
                hw_dbg(hw, "Flow Director previous command isn't complete, "
-                      "aborting table re-initialization.\n");
+                        "aborting table re-initialization.\n");
                return IXGBE_ERR_FDIR_REINIT_FAILED;
        }
 
@@ -1071,12 +1115,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
         * - write 0 to bit 8 of FDIRCMD register
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-                        IXGBE_FDIRCMD_CLEARHT));
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+                        IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-                        ~IXGBE_FDIRCMD_CLEARHT));
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+                        ~IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        /*
         * Clear FDIR Hash register to clear any leftover hashes
@@ -1091,7 +1135,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
        /* Poll init-done after we write FDIRCTRL register */
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
+                                  IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                udelay(10);
        }
@@ -1140,9 +1184,9 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
        IXGBE_WRITE_FLUSH(hw);
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
+                                  IXGBE_FDIRCTRL_INIT_DONE)
                        break;
-               usleep_range(1000, 2000);
+               msleep(1);
        }
 
        if (i >= IXGBE_FDIR_INIT_DONE_POLL)
@@ -1153,7 +1197,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
  *  @hw: pointer to hardware structure
  *  @fdirctrl: value to write to flow director control register, initially
- *             contains just the value of the Rx packet buffer allocation
+ *          contains just the value of the Rx packet buffer allocation
  **/
 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 {
@@ -1177,7 +1221,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
  *  @hw: pointer to hardware structure
  *  @fdirctrl: value to write to flow director control register, initially
- *             contains just the value of the Rx packet buffer allocation
+ *          contains just the value of the Rx packet buffer allocation
  **/
 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 {
@@ -1237,17 +1281,17 @@ do { \
  *  defines, and computing two keys at once since the hashed dword stream
  *  will be the same for both keys.
  **/
-static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
-                                           union ixgbe_atr_hash_dword common)
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+                                    union ixgbe_atr_hash_dword common)
 {
        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
        u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
 
        /* record the flow_vm_vlan bits as they are a key part to the hash */
-       flow_vm_vlan = ntohl(input.dword);
+       flow_vm_vlan = IXGBE_NTOHL(input.dword);
 
        /* generate common hash dword */
-       hi_hash_dword = ntohl(common.dword);
+       hi_hash_dword = IXGBE_NTOHL(common.dword);
 
        /* low dword is word swapped version of common */
        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1301,9 +1345,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
  *  @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_hash_dword input,
-                                          union ixgbe_atr_hash_dword common,
-                                          u8 queue)
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue)
 {
        u64  fdirhashcmd;
        u32  fdircmd;
@@ -1327,7 +1371,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 
        /* configure FDIRCMD register */
        fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
@@ -1385,10 +1429,10 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
        input->dword_stream[10] &= input_mask->dword_stream[10];
 
        /* record the flow_vm_vlan bits as they are a key part to the hash */
-       flow_vm_vlan = ntohl(input->dword_stream[0]);
+       flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
 
        /* generate common hash dword */
-       hi_hash_dword = ntohl(input->dword_stream[1] ^
+       hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
                                    input->dword_stream[2] ^
                                    input->dword_stream[3] ^
                                    input->dword_stream[4] ^
@@ -1450,9 +1494,9 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
  **/
 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 {
-       u32 mask = ntohs(input_mask->formatted.dst_port);
+       u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
        mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
-       mask |= ntohs(input_mask->formatted.src_port);
+       mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
        mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
@@ -1471,10 +1515,10 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
         (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
 
 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
-       IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+       IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
 
 #define IXGBE_STORE_AS_BE16(_value) \
-       ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+       IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
 
 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
                                    union ixgbe_atr_input *input_mask)
@@ -1523,7 +1567,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
                return IXGBE_ERR_CONFIG;
        }
 
-       switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
+       switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
        case 0x0000:
                /* mask VLAN ID, fall through to mask VLAN priority */
                fdirm |= IXGBE_FDIRM_VLANID;
@@ -1593,15 +1637,15 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
 
        /* record source and destination port (little-endian)*/
-       fdirport = ntohs(input->formatted.dst_port);
+       fdirport = IXGBE_NTOHS(input->formatted.dst_port);
        fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-       fdirport |= ntohs(input->formatted.src_port);
+       fdirport |= IXGBE_NTOHS(input->formatted.src_port);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
 
        /* record vlan (little-endian) and flex_bytes(big-endian) */
        fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
        fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
-       fdirvlan |= ntohs(input->formatted.vlan_id);
+       fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
 
        /* configure FDIRHASH register */
@@ -1672,6 +1716,64 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
        return err;
 }
 
+/**
+ *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream
+ *  @input_mask: mask for the input bitstream
+ *  @soft_id: software index for the filters
+ *  @queue: queue index to direct traffic to
+ *
+ *  Note that the caller to this function must lock before calling, since the
+ *  hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                       union ixgbe_atr_input *input,
+                                       union ixgbe_atr_input *input_mask,
+                                       u16 soft_id, u8 queue)
+{
+       s32 err = IXGBE_ERR_CONFIG;
+
+       /*
+        * Check flow_type formatting, and bail out before we touch the hardware
+        * if there's a configuration issue
+        */
+       switch (input->formatted.flow_type) {
+       case IXGBE_ATR_FLOW_TYPE_IPV4:
+               input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+               if (input->formatted.dst_port || input->formatted.src_port) {
+                       hw_dbg(hw, " Error on src/dst port\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+               break;
+       case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+               if (input->formatted.dst_port || input->formatted.src_port) {
+                       hw_dbg(hw, " Error on src/dst port\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+       case IXGBE_ATR_FLOW_TYPE_TCPV4:
+       case IXGBE_ATR_FLOW_TYPE_UDPV4:
+               input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                                 IXGBE_ATR_L4TYPE_MASK;
+               break;
+       default:
+               hw_dbg(hw, " Error on flow type input\n");
+               return err;
+       }
+
+       /* program input mask into the HW */
+       err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
+       if (err)
+               return err;
+
+       /* apply mask and compute/store hash */
+       ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+       /* program filters to filter memory */
+       return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+                                                    soft_id, queue);
+}
+
 /**
  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  *  @hw: pointer to hardware structure
@@ -1680,12 +1782,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
  *
  *  Performs read operation to Omer analog register specified.
  **/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
 {
        u32  core_ctl;
 
        IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
-                       (reg << 8));
+                       (reg << 8));
        IXGBE_WRITE_FLUSH(hw);
        udelay(10);
        core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
@@ -1702,7 +1804,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
  *
  *  Performs write operation to Omer analog register specified.
  **/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
 {
        u32  core_ctl;
 
@@ -1722,7 +1824,7 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
  *  and the generation start_hw function.
  *  Then performs revision-specific operations, if any.
  **/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
 
@@ -1736,7 +1838,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 
        /* We need to run link autotry after the driver loads */
        hw->mac.autotry_restart = true;
-       hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
 
        if (ret_val == 0)
                ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -1752,7 +1853,7 @@ out:
  *  If PHY already detected, maintains current PHY type in hw struct,
  *  otherwise executes the PHY detection routine.
  **/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 
@@ -1763,7 +1864,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
                        goto out;
                else
-                       status = ixgbe_identify_sfp_module_generic(hw);
+                       status = ixgbe_identify_module_generic(hw);
        }
 
        /* Set PHY type none if no PHY detected */
@@ -1786,7 +1887,7 @@ out:
  *
  *  Determines physical layer capabilities of the current configuration.
  **/
-static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 {
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -1803,13 +1904,13 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
        switch (hw->phy.type) {
        case ixgbe_phy_tn:
        case ixgbe_phy_cu_unknown:
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                                        &ext_ability);
-               if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
        default:
@@ -1889,6 +1990,8 @@ sfp_check:
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
                else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+               else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
                break;
        default:
                break;
@@ -1905,19 +2008,21 @@ out:
  *
  *  Enables the Rx DMA unit for 82599
  **/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 {
+
        /*
         * Workaround for 82599 silicon errata when enabling the Rx datapath.
         * If traffic is incoming before we enable the Rx unit, it could hang
         * the Rx DMA unit.  Therefore, make sure the security engine is
         * completely disabled prior to enabling the Rx unit.
         */
-       hw->mac.ops.disable_rx_buff(hw);
+
+       hw->mac.ops.disable_sec_rx_path(hw);
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
 
-       hw->mac.ops.enable_rx_buff(hw);
+       hw->mac.ops.enable_sec_rx_path(hw);
 
        return 0;
 }
@@ -1952,16 +2057,15 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
 
        /* get the offset to the Pass Through Patch Configuration block */
        hw->eeprom.ops.read(hw, (fw_offset +
-                                IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-                                &fw_ptp_cfg_offset);
+                                IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+                                &fw_ptp_cfg_offset);
 
        if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
                goto fw_version_out;
 
        /* get the firmware version */
        hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-                                IXGBE_FW_PATCH_VERSION_4),
-                                &fw_version);
+                           IXGBE_FW_PATCH_VERSION_4), &fw_version);
 
        if (fw_version > 0x5)
                status = 0;
@@ -1977,7 +2081,7 @@ fw_version_out:
  *  Returns true if the LESM FW module is present and enabled. Otherwise
  *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
  **/
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
 {
        bool lesm_enabled = false;
        u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2074,86 +2178,4 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
        return ret_val;
 }
 
-static struct ixgbe_mac_operations mac_ops_82599 = {
-       .init_hw                = &ixgbe_init_hw_generic,
-       .reset_hw               = &ixgbe_reset_hw_82599,
-       .start_hw               = &ixgbe_start_hw_82599,
-       .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
-       .get_media_type         = &ixgbe_get_media_type_82599,
-       .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
-       .enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
-       .disable_rx_buff        = &ixgbe_disable_rx_buff_generic,
-       .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
-       .get_mac_addr           = &ixgbe_get_mac_addr_generic,
-       .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
-       .get_device_caps        = &ixgbe_get_device_caps_generic,
-       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
-       .stop_adapter           = &ixgbe_stop_adapter_generic,
-       .get_bus_info           = &ixgbe_get_bus_info_generic,
-       .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
-       .read_analog_reg8       = &ixgbe_read_analog_reg8_82599,
-       .write_analog_reg8      = &ixgbe_write_analog_reg8_82599,
-       .setup_link             = &ixgbe_setup_mac_link_82599,
-       .set_rxpba              = &ixgbe_set_rxpba_generic,
-       .check_link             = &ixgbe_check_mac_link_generic,
-       .get_link_capabilities  = &ixgbe_get_link_capabilities_82599,
-       .led_on                 = &ixgbe_led_on_generic,
-       .led_off                = &ixgbe_led_off_generic,
-       .blink_led_start        = &ixgbe_blink_led_start_generic,
-       .blink_led_stop         = &ixgbe_blink_led_stop_generic,
-       .set_rar                = &ixgbe_set_rar_generic,
-       .clear_rar              = &ixgbe_clear_rar_generic,
-       .set_vmdq               = &ixgbe_set_vmdq_generic,
-       .clear_vmdq             = &ixgbe_clear_vmdq_generic,
-       .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
-       .enable_mc              = &ixgbe_enable_mc_generic,
-       .disable_mc             = &ixgbe_disable_mc_generic,
-       .clear_vfta             = &ixgbe_clear_vfta_generic,
-       .set_vfta               = &ixgbe_set_vfta_generic,
-       .fc_enable              = &ixgbe_fc_enable_generic,
-       .set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
-       .init_uta_tables        = &ixgbe_init_uta_tables_generic,
-       .setup_sfp              = &ixgbe_setup_sfp_modules_82599,
-       .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
-       .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
-       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
-       .release_swfw_sync      = &ixgbe_release_swfw_sync,
-
-};
-
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
-       .init_params            = &ixgbe_init_eeprom_params_generic,
-       .read                   = &ixgbe_read_eeprom_82599,
-       .read_buffer            = &ixgbe_read_eeprom_buffer_82599,
-       .write                  = &ixgbe_write_eeprom_generic,
-       .write_buffer           = &ixgbe_write_eeprom_buffer_bit_bang_generic,
-       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
-       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
-       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
-};
-
-static struct ixgbe_phy_operations phy_ops_82599 = {
-       .identify               = &ixgbe_identify_phy_82599,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
-       .init                   = &ixgbe_init_phy_ops_82599,
-       .reset                  = &ixgbe_reset_phy_generic,
-       .read_reg               = &ixgbe_read_phy_reg_generic,
-       .write_reg              = &ixgbe_write_phy_reg_generic,
-       .setup_link             = &ixgbe_setup_phy_link_generic,
-       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
-       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
-       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
-       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
-       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
-       .check_overtemp         = &ixgbe_tn_check_overtemp,
-};
-
-struct ixgbe_info ixgbe_82599_info = {
-       .mac                    = ixgbe_mac_82599EB,
-       .get_invariants         = &ixgbe_get_invariants_82599,
-       .mac_ops                = &mac_ops_82599,
-       .eeprom_ops             = &eeprom_ops_82599,
-       .phy_ops                = &phy_ops_82599,
-       .mbx_ops                = &mbx_ops_generic,
-};
+
diff --git a/drivers/net/ixgbe/ixgbe_82599.h b/drivers/net/ixgbe/ixgbe_82599.h
new file mode 100644 (file)
index 0000000..02be92a
--- /dev/null
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                         ixgbe_link_speed speed, bool autoneg,
+                                         bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                   ixgbe_link_speed speed, bool autoneg,
+                                   bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                              bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                              bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82599_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_api.c b/drivers/net/ixgbe/ixgbe_api.c
new file mode 100644 (file)
index 0000000..4e3804e
--- /dev/null
@@ -0,0 +1,1158 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+/**
+ *  ixgbe_init_shared_code - Initialize the shared code
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign function pointers and assign the MAC type and PHY code.
+ *  Does not touch the hardware. This function must be called prior to any
+ *  other function in the shared code. The ixgbe_hw structure should be
+ *  memset to 0 prior to calling this function.  The following fields in
+ *  hw structure should be filled in prior to calling this function:
+ *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ *  subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+       s32 status;
+
+       /*
+        * Set the mac type
+        */
+       ixgbe_set_mac_type(hw);
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               status = ixgbe_init_ops_82598(hw);
+               break;
+       case ixgbe_mac_82599EB:
+               status = ixgbe_init_ops_82599(hw);
+               break;
+       case ixgbe_mac_X540:
+               status = ixgbe_init_ops_X540(hw);
+               break;
+       default:
+               status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+               break;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+       s32 ret_val = 0;
+
+       if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_82598:
+               case IXGBE_DEV_ID_82598_BX:
+               case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+               case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+               case IXGBE_DEV_ID_82598AT:
+               case IXGBE_DEV_ID_82598AT2:
+               case IXGBE_DEV_ID_82598EB_CX4:
+               case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+               case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+               case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+               case IXGBE_DEV_ID_82598EB_XF_LR:
+               case IXGBE_DEV_ID_82598EB_SFP_LOM:
+                       hw->mac.type = ixgbe_mac_82598EB;
+                       break;
+               case IXGBE_DEV_ID_82599_KX4:
+               case IXGBE_DEV_ID_82599_KX4_MEZZ:
+               case IXGBE_DEV_ID_82599_XAUI_LOM:
+               case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               case IXGBE_DEV_ID_82599_KR:
+               case IXGBE_DEV_ID_82599_SFP:
+               case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+               case IXGBE_DEV_ID_82599_SFP_FCOE:
+               case IXGBE_DEV_ID_82599_SFP_EM:
+               case IXGBE_DEV_ID_82599_SFP_SF2:
+               case IXGBE_DEV_ID_82599_SFP_SF_QP:
+               case IXGBE_DEV_ID_82599EN_SFP:
+               case IXGBE_DEV_ID_82599_CX4:
+               case IXGBE_DEV_ID_82599_LS:
+               case IXGBE_DEV_ID_82599_T3_LOM:
+                       hw->mac.type = ixgbe_mac_82599EB;
+                       break;
+               case IXGBE_DEV_ID_X540T:
+                       hw->mac.type = ixgbe_mac_X540;
+                       break;
+               default:
+                       ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+                       break;
+               }
+       } else {
+               ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
+                 hw->mac.type, ret_val);
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_hw - Initialize the hardware
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_reset_hw - Performs a hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_start_hw - Prepares hardware for Rx/Tx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type,
+ *  clears all on chip counters, initializes receive address registers,
+ *  multicast table, VLAN filter table, calls routine to setup link and
+ *  flow control settings, and leaves transmit and receive units disabled
+ *  and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs - Clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware
+ *  Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_media_type - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+                              ixgbe_media_type_unknown);
+}
+
+/**
+ *  ixgbe_get_mac_addr - Get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from the first Receive Address Register
+ *  (RAR0) A reset of the adapter must have been performed prior to calling
+ *  this function in order for the MAC address to have been loaded from the
+ *  EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+                              (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_san_mac_addr - Get SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_san_mac_addr - Write a SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Writes A SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_device_caps - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word for device capabilities
+ *
+ *  Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+                              (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                        u16 *wwpn_prefix)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+                              (hw, wwnn_prefix, wwpn_prefix),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_fcoe_boot_status -  Get FCOE boot status from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
+ *
+ *  This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+                              (hw, bs),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_bus_info - Set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_of_tx_queues - Get Tx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+       return hw->mac.max_tx_queues;
+}
+
+/**
+ *  ixgbe_get_num_of_rx_queues - Get Rx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+       return hw->mac.max_rx_queues;
+}
+
+/**
+ *  ixgbe_stop_adapter - Disable Rx/Tx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+       return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ *  ixgbe_identify_phy - Get PHY type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+                                        IXGBE_NOT_IMPLEMENTED);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_reset_phy - Perform a PHY reset
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               if (ixgbe_identify_phy(hw) != 0)
+                       status = IXGBE_ERR_PHY;
+       }
+
+       if (status == 0) {
+               status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+                                        IXGBE_NOT_IMPLEMENTED);
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version -
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+       s32 status = 0;
+
+       status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+                                (hw, firmware_version),
+                                IXGBE_NOT_IMPLEMENTED);
+       return status;
+}
+
+/**
+ *  ixgbe_read_phy_reg - Read PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @phy_data: Pointer to read data from PHY register
+ *
+ *  Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                      u16 *phy_data)
+{
+       if (hw->phy.id == 0)
+               ixgbe_identify_phy(hw);
+
+       return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+                              device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_phy_reg - Write PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @phy_data: Data to write to the PHY register
+ *
+ *  Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                       u16 phy_data)
+{
+       if (hw->phy.id == 0)
+               ixgbe_identify_phy(hw);
+
+       return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+                              device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link - Restart PHY autoneg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restart autonegotiation and PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_check_phy_link - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads a PHY register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                        bool *link_up)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+                              link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link_speed - Set auto advertise
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *
+ *  Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                              bool autoneg,
+                              bool autoneg_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+                              autoneg, autoneg_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_check_link - Get link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                    bool *link_up, bool link_up_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+                              link_up, link_up_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_tx_laser - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  If the driver needs to disable the laser on SFI optics.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.disable_tx_laser)
+               hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_enable_tx_laser - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  If the driver needs to enable the laser on SFI optics.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.enable_tx_laser)
+               hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support then
+ *  flap the tx laser to alert the link partner to start autotry
+ *  process on its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.flap_tx_laser)
+               hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_setup_link - Set link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *
+ *  Configures link settings.  Restarts the link.
+ *  Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                    bool autoneg,
+                    bool autoneg_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+                              autoneg, autoneg_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_link_capabilities - Returns link capabilities
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                               bool *autoneg)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+                              speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_on - Turn on LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ *
+ *  Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_off - Turn off LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ *
+ *  Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_start - Blink LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ *
+ *  Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_stop - Stop blinking LEDs
+ *  @hw: pointer to hardware structure
+ *
+ *  Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ *  ixgbe_write_eeprom - Write word to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word to be written to the EEPROM
+ *
+ *  Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *  @words: number of words
+ *
+ *  Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+                              (hw, offset, words, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom - Read word from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit value from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit word(s) from EEPROM
+ *  @words: number of words
+ *
+ *  Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                            u16 words, u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+                              (hw, offset, words, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+                              (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_insert_mac_addr - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the rar that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+                              (hw, addr, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_rar - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set"
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                 u32 enable_addr)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+                              enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_rar - Clear Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to associate with VMDq index
+ *  @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ *  ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ *  @hw: pointer to hardware structure
+ *  @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+                              (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to disassociate with VMDq index
+ *  @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_rx_addrs - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ *  @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+       return hw->mac.num_rar_entries;
+}
+
+/**
+ *  ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new secondary (unicast) addresses
+ *  @addr_count: number of addresses
+ *  @func: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list. Clears the secondary addrs from
+ *  receive address registers. Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+                             u32 addr_count, ixgbe_mc_addr_itr func)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+                              addr_list, addr_count, func),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @func: iterator function to walk the multicast address list
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                             u32 mc_addr_count, ixgbe_mc_addr_itr func,
+                             bool clear)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+                              mc_addr_list, mc_addr_count, func, clear),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_mc - Enable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_mc - Disable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vfta - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+                              vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vlvf - Set VLAN Pool Filter
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ *                 should be changed
+ *
+ *  Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+                   bool *vfta_changed)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+                              vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_fc_enable - Enable flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+                        u8 ver)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+                              build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ *  ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+                               IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+                               IXGBE_NOT_IMPLEMENTED);
+}
+/**
+ *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation of the analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+                              val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+                              val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the Unicast Table Arrays to zero on device load.  This
+ *  is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                       u8 *data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface
+ *  at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                        u8 data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to write
+ *  @eeprom_data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+                          u8 byte_offset, u8 eeprom_data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+                              (hw, byte_offset, eeprom_data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+                             (hw, byte_offset, eeprom_data),
+                             IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+                              (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ *  ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ *  @hw: pointer to hardware structure
+ *  @regval: bitfield to write to the Rx DMA register
+ *
+ *  Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+                              (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_sec_rx_path - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+                               (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_sec_rx_path - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+                               (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+                              (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+       if (hw->mac.ops.release_swfw_sync)
+               hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_api.h b/drivers/net/ixgbe/ixgbe_api.h
new file mode 100644 (file)
index 0000000..a6ab30d
--- /dev/null
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                      u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                       u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+                        ixgbe_link_speed *speed,
+                        bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+                              ixgbe_link_speed speed,
+                              bool autoneg,
+                              bool autoneg_wait_to_complete);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                    bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                    bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                               bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                             u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                            u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                 u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+                             u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                             u32 mc_addr_count, ixgbe_mc_addr_itr func,
+                             bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+                  u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                  bool vlan_on, bool *vfta_changed);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+                        u8 ver);
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+                                  u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                       union ixgbe_atr_input *input,
+                                       union ixgbe_atr_input *mask,
+                                       u16 soft_id,
+                                       u8 queue);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+                                    union ixgbe_atr_hash_dword common);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                       u8 *data);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                        u8 data);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                        u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+
+#endif /* _IXGBE_API_H_ */
index c7956141cdebdd9f86edf63254a347dd114df47a..8646bbd49f217b1d87b9aea159402b90790e0673 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-
-#include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_api.h"
 
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -40,28 +35,265 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-                                        u16 count);
+                                       u16 count);
 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
-                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+                                        u16 *san_mac_offset);
 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                                             u16 words, u16 *data);
 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
-                                            u16 words, u16 *data);
+                                             u16 words, u16 *data);
 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
                                                 u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_ops_generic - Inits function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the EEPROM, MAC, LED, addressing, flow-control and link
+ *  function pointers to their generic (MAC-family independent)
+ *  implementations.  Members set to NULL here have no generic version and
+ *  must be filled in by the device-specific init code (82598/82599/X540)
+ *  before they are called.  Always returns 0.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       /* EEPROM */
+       eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+       /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+       if (eec & IXGBE_EEC_PRES) {
+               eeprom->ops.read = &ixgbe_read_eerd_generic;
+               eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+       } else {
+               eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+               eeprom->ops.read_buffer =
+                                &ixgbe_read_eeprom_buffer_bit_bang_generic;
+       }
+       /* Writes always go through the bit-bang path */
+       eeprom->ops.write = &ixgbe_write_eeprom_generic;
+       eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+       eeprom->ops.validate_checksum =
+                                     &ixgbe_validate_eeprom_checksum_generic;
+       eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+       eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+
+       /* MAC */
+       mac->ops.init_hw = &ixgbe_init_hw_generic;
+       mac->ops.reset_hw = NULL;
+       mac->ops.start_hw = &ixgbe_start_hw_generic;
+       mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+       mac->ops.get_media_type = NULL;
+       mac->ops.get_supported_physical_layer = NULL;
+       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
+       mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+       mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+       mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+       mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
+       mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
+
+       /* LEDs */
+       mac->ops.led_on = &ixgbe_led_on_generic;
+       mac->ops.led_off = &ixgbe_led_off_generic;
+       mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
+       mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_rar = &ixgbe_set_rar_generic;
+       mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+       mac->ops.insert_mac_addr = NULL;
+       mac->ops.set_vmdq = NULL;
+       mac->ops.clear_vmdq = NULL;
+       mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+       mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+       mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+       mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+       mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+       mac->ops.clear_vfta = NULL;
+       mac->ops.set_vfta = NULL;
+       mac->ops.set_vlvf = NULL;
+       mac->ops.init_uta_tables = NULL;
+
+       /* Flow Control */
+       mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+
+       /* Link - no generic implementation; device-specific init fills these */
+       mac->ops.get_link_capabilities = NULL;
+       mac->ops.setup_link = NULL;
+       mac->ops.check_link = NULL;
+
+       return 0;
+}
+
+/**
+ *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ *  control
+ *  @hw: pointer to hardware structure
+ *
+ *  Several PHYs do not support autonegotiated flow control.  This function
+ *  checks the device id to see if the associated PHY supports it.
+ *
+ *  Returns 0 when autoneg flow control is supported (X540T and 82599 T3
+ *  LOM), IXGBE_ERR_FC_NOT_SUPPORTED for every other device id.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X540T:
+               return 0;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               return 0;
+       default:
+               return IXGBE_ERR_FC_NOT_SUPPORTED;
+       }
+}
+
+/**
+ *  ixgbe_setup_fc - Set up flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Called at init time to set up flow control.  Programs the 1G (PCS1GANA)
+ *  and 10G (AUTOC) advertisement registers, or the copper PHY's autoneg
+ *  advertisement, according to hw->fc.requested_mode.
+ *
+ *  Returns 0 on success, IXGBE_ERR_INVALID_LINK_SETTINGS when rx_pause is
+ *  requested in strict IEEE mode, or IXGBE_ERR_CONFIG for an unknown mode.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+       s32 ret_val = 0;
+       u32 reg = 0, reg_bp = 0;
+       u16 reg_cu = 0;
+
+       /*
+        * Validate the requested mode.  Strict IEEE mode does not allow
+        * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+        */
+       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+               hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * 10gig parts do not have a word in the EEPROM to determine the
+        * default flow control setting, so we explicitly set it to full.
+        */
+       if (hw->fc.requested_mode == ixgbe_fc_default)
+               hw->fc.requested_mode = ixgbe_fc_full;
+
+       /*
+        * Set up the 1G and 10G flow control advertisement registers so the
+        * HW will be able to do fc autoneg once the cable is plugged in.  If
+        * we link at 10G, the 1G advertisement is harmless and vice versa.
+        */
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+       case ixgbe_media_type_backplane:
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               break;
+       case ixgbe_media_type_copper:
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+               break;
+       default:
+               break;
+       }
+
+       /*
+        * The possible values of fc.requested_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *    we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.requested_mode) {
+       case ixgbe_fc_none:
+               /* Flow control completely disabled by software override. */
+               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+                                   IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+               reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+               if (hw->phy.media_type == ixgbe_media_type_backplane) {
+                       reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+                       reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+                       reg_cu |= IXGBE_TAF_ASM_PAUSE;
+                       reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+               }
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE, as such we fall
+                * through to the fc_full statement.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+                                 IXGBE_AUTOC_ASM_PAUSE;
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+               break;
+       default:
+               hw_dbg(hw, "Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       if (hw->mac.type != ixgbe_mac_X540) {
+               /*
+                * Enable auto-negotiation between the MAC & PHY;
+                * the MAC will advertise clause 37 flow control.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+               /* Disable AN timeout */
+               if (hw->fc.strict_ieee)
+                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+               hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+       }
+
+       /*
+        * AUTOC restart handles negotiation of 1G and 10G on backplane
+        * and copper. There is no need to set the PCS1GCTL register.
+        *
+        */
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+                   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+               hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+       }
+
+       /* NOTE(review): at this point reg holds PCS1GLCTL, not AUTOC -
+        * the AUTOC value is in reg_bp; debug string may be misleading */
+       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+       return ret_val;
+}
 
 /**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -74,13 +306,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
  **/
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
+       s32 ret_val;
        u32 ctrl_ext;
 
        /* Set the media type */
        hw->phy.media_type = hw->mac.ops.get_media_type(hw);
 
-       /* Identify the PHY */
-       hw->phy.ops.identify(hw);
+       /* PHY ops initialization must be done in reset_hw() */
 
        /* Clear the VLAN filter table */
        hw->mac.ops.clear_vfta(hw);
@@ -95,12 +327,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 
        /* Setup flow control */
-       ixgbe_setup_fc(hw, 0);
+       ret_val = ixgbe_setup_fc(hw);
+       if (ret_val != 0)
+               goto out;
 
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
@@ -265,11 +500,15 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
 
        if (hw->mac.type == ixgbe_mac_X540) {
                if (hw->phy.id == 0)
-                       hw->phy.ops.identify(hw);
-               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
-               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
-               hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
-               hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
+                       ixgbe_identify_phy(hw);
+               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+                                    IXGBE_MDIO_PCS_DEV_TYPE, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+                                    IXGBE_MDIO_PCS_DEV_TYPE, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+                                    IXGBE_MDIO_PCS_DEV_TYPE, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+                                    IXGBE_MDIO_PCS_DEV_TYPE, &i);
        }
 
        return 0;
@@ -284,7 +523,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
  *  Reads the part number string from the EEPROM.
  **/
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-                                  u32 pba_num_size)
+                                 u32 pba_num_size)
 {
        s32 ret_val;
        u16 data;
@@ -419,15 +658,13 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
  **/
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        struct ixgbe_mac_info *mac = &hw->mac;
        u16 link_status;
 
        hw->bus.type = ixgbe_bus_type_pci_express;
 
        /* Get the negotiated link width and speed from PCI config space */
-       pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
-                            &link_status);
+       link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
 
        switch (link_status & IXGBE_PCI_LINK_WIDTH) {
        case IXGBE_PCI_LINK_WIDTH_1:
@@ -454,6 +691,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
        case IXGBE_PCI_LINK_SPEED_5000:
                hw->bus.speed = ixgbe_bus_speed_5000;
                break;
+       case IXGBE_PCI_LINK_SPEED_8000:
+               hw->bus.speed = ixgbe_bus_speed_8000;
+               break;
        default:
                hw->bus.speed = ixgbe_bus_speed_unknown;
                break;
@@ -529,7 +769,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 
        /* flush all queues disables */
        IXGBE_WRITE_FLUSH(hw);
-       usleep_range(1000, 2000);
+       msleep(2);
 
        /*
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
@@ -610,7 +850,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
                        eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                            IXGBE_EEC_SIZE_SHIFT);
                        eeprom->word_size = 1 << (eeprom_size +
-                                                 IXGBE_EEPROM_WORD_SIZE_SHIFT);
+                                            IXGBE_EEPROM_WORD_SIZE_SHIFT);
                }
 
                if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -629,7 +869,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
  *  @hw: pointer to hardware structure
  *  @offset: offset within the EEPROM to write
- *  @words: number of words
+ *  @words: number of word(s)
  *  @data: 16 bit word(s) to write to EEPROM
  *
  *  Reads 16 bit word(s) from EEPROM through bit-bang method
@@ -667,7 +907,7 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
         */
        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
-                        IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+                       IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
                status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
                                                            count, &data[i]);
 
@@ -714,8 +954,8 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 
                        /*  Send the WRITE ENABLE command (8 bit opcode )  */
                        ixgbe_shift_out_eeprom_bits(hw,
-                                                 IXGBE_EEPROM_WREN_OPCODE_SPI,
-                                                 IXGBE_EEPROM_OPCODE_BITS);
+                                                  IXGBE_EEPROM_WREN_OPCODE_SPI,
+                                                  IXGBE_EEPROM_OPCODE_BITS);
 
                        ixgbe_standby_eeprom(hw);
 
@@ -751,7 +991,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
                        } while (++i < words);
 
                        ixgbe_standby_eeprom(hw);
-                       usleep_range(10000, 20000);
+                       msleep(10);
                }
                /* Done with writing - release the EEPROM */
                ixgbe_release_eeprom(hw);
@@ -790,8 +1030,8 @@ out:
  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
  *  @hw: pointer to hardware structure
  *  @offset: offset within the EEPROM to be read
- *  @words: number of word(s)
  *  @data: read 16 bit words(s) from EEPROM
+ *  @words: number of word(s)
  *
  *  Reads 16 bit word(s) from EEPROM through bit-bang method
  **/
@@ -820,7 +1060,7 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
         */
        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
-                        IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+                       IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
 
                status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
                                                           count, &data[i]);
@@ -999,7 +1239,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
 
        hw_dbg(hw, "Detected EEPROM page size = %d words.",
-              hw->eeprom.word_page_size);
+                 hw->eeprom.word_page_size);
 out:
        return status;
 }
@@ -1021,7 +1261,7 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
  *  @hw: pointer to hardware structure
  *  @offset: offset of  word in the EEPROM to write
- *  @words: number of words
+ *  @words: number of word(s)
  *  @data: word(s) write to the EEPROM
  *
  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
@@ -1047,8 +1287,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
 
        for (i = 0; i < words; i++) {
                eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
-                      (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
-                      IXGBE_EEPROM_RW_REG_START;
+                       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+                       IXGBE_EEPROM_RW_REG_START;
 
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status != 0) {
@@ -1090,7 +1330,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
  *  read or write is done respectively.
  **/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 {
        u32 i;
        u32 reg;
@@ -1124,7 +1364,8 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
        u32 eec;
        u32 i;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+           != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -1192,7 +1433,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 
        if (i == timeout) {
                hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
-                      "not granted.\n");
+                        "not granted.\n");
                /*
                 * this release is particularly important because our attempts
                 * above to get the semaphore may have succeeded, and if there
@@ -1238,13 +1479,13 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 */
                if (i >= timeout) {
                        hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
-                              "not granted.\n");
+                                "not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
        } else {
                hw_dbg(hw, "Software semaphore SMBI between device drivers "
-                      "not granted.\n");
+                        "not granted.\n");
        }
 
        return status;
@@ -1286,14 +1527,14 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
         */
        for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
                ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
-                                           IXGBE_EEPROM_OPCODE_BITS);
+                                           IXGBE_EEPROM_OPCODE_BITS);
                spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
                if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
                        break;
 
                udelay(5);
                ixgbe_standby_eeprom(hw);
-       }
+       };
 
        /*
         * On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -1335,7 +1576,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
  *  @count: number of bits to shift out
  **/
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-                                        u16 count)
+                                       u16 count)
 {
        u32 eec;
        u32 mask;
@@ -1375,7 +1616,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
                 * EEPROM
                 */
                mask = mask >> 1;
-       }
+       };
 
        /* We leave the "DI" bit set to "0" when we leave this routine. */
        eec &= ~IXGBE_EEC_DI;
@@ -1478,12 +1719,8 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 
-       /*
-        * Delay before attempt to obtain semaphore again to allow FW
-        * access. semaphore_delay is in ms we need us for usleep_range
-        */
-       usleep_range(hw->eeprom.semaphore_delay * 1000,
-                    hw->eeprom.semaphore_delay * 2000);
+       /* Delay before attempt to obtain semaphore again to allow FW access */
+       msleep(hw->eeprom.semaphore_delay);
 }
 
 /**
@@ -1539,7 +1776,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
  *  caller does not need checksum_val, the value can be NULL.
  **/
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-                                           u16 *checksum_val)
+                                          u16 *checksum_val)
 {
        s32 status;
        u16 checksum;
@@ -1612,16 +1849,19 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
        s32 status = 0;
 
        /* Make sure it is not a multicast address */
-       if (IXGBE_IS_MULTICAST(mac_addr))
+       if (IXGBE_IS_MULTICAST(mac_addr)) {
+               hw_dbg(hw, "MAC address is multicast\n");
                status = IXGBE_ERR_INVALID_MAC_ADDR;
        /* Not a broadcast address */
-       else if (IXGBE_IS_BROADCAST(mac_addr))
+       } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+               hw_dbg(hw, "MAC address is broadcast\n");
                status = IXGBE_ERR_INVALID_MAC_ADDR;
        /* Reject the zero address */
-       else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-                mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+       } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+                  mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+               hw_dbg(hw, "MAC address is all zeros\n");
                status = IXGBE_ERR_INVALID_MAC_ADDR;
-
+       }
        return status;
 }
 
@@ -1636,7 +1876,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
  *  Puts an ethernet address into a receive address register.
  **/
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-                          u32 enable_addr)
+                         u32 enable_addr)
 {
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
@@ -1734,15 +1974,23 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
                /* Get the MAC address from the RAR0 for later reference */
                hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 
-               hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
+               hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+                         hw->mac.addr[0], hw->mac.addr[1],
+                         hw->mac.addr[2]);
+               hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+                         hw->mac.addr[4], hw->mac.addr[5]);
        } else {
                /* Setup the receive address. */
                hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
-               hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
+               hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
+                         hw->mac.addr[0], hw->mac.addr[1],
+                         hw->mac.addr[2]);
+               hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+                         hw->mac.addr[4], hw->mac.addr[5]);
 
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
-               /*  clear VMDq pool/queue selection for RAR 0 */
+               /* clear VMDq pool/queue selection for RAR 0 */
                hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
        }
        hw->addr_ctrl.overflow_promisc = 0;
@@ -1764,9 +2012,107 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        for (i = 0; i < hw->mac.mcft_size; i++)
                IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
 
-       if (hw->mac.ops.init_uta_tables)
-               hw->mac.ops.init_uta_tables(hw);
+       ixgbe_init_uta_tables(hw);
+
+       return 0;
+}
+
+/**
+ *  ixgbe_add_uc_addr - Adds a secondary unicast address.
+ *  @hw: pointer to hardware structure
+ *  @addr: new address
+ *  @vmdq: VMDq pool index passed through to the set_rar op
+ *
+ *  Adds it to unused receive address register or goes into promiscuous mode.
+ *  When all RARs are in use, no register is written; the overflow_promisc
+ *  counter is bumped so the caller can enable promiscuous mode instead.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       u32 rar_entries = hw->mac.num_rar_entries;
+       u32 rar;
+
+       hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+       /*
+        * Place this address in the RAR if there is room,
+        * else put the controller into promiscuous mode
+        */
+       if (hw->addr_ctrl.rar_used_count < rar_entries) {
+               /* next free RAR slot; RAR[0] holds the primary MAC address */
+               rar = hw->addr_ctrl.rar_used_count;
+               hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+               hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
+               hw->addr_ctrl.rar_used_count++;
+       } else {
+               hw->addr_ctrl.overflow_promisc++;
+       }
+
+       hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new addresses
+ *  @addr_count: number of addresses
+ *  @next: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list.  Clears the secondary addrs from
+ *  receive address registers.  Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ *  Drivers using secondary unicast addresses must set user_set_promisc when
+ *  manually putting the device into promiscuous mode.
+ *
+ *  Always returns 0.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                     u32 addr_count, ixgbe_mc_addr_itr next)
+{
+       u8 *addr;
+       u32 i;
+       u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+       u32 uc_addr_in_use;
+       u32 fctrl;
+       u32 vmdq;
+
+       /*
+        * Clear accounting of old secondary address list,
+        * don't count RAR[0]
+        */
+       /* NOTE(review): assumes rar_used_count >= 1 (RAR[0] always used);
+        * if it were 0 this u32 subtraction would wrap - confirm callers
+        * only run this after init_rx_addrs has set RAR[0] */
+       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+       hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+       hw->addr_ctrl.overflow_promisc = 0;
+
+       /* Zero out the other receive addresses */
+       hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+       for (i = 0; i < uc_addr_in_use; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+       }
+
+       /* Add the new addresses */
+       for (i = 0; i < addr_count; i++) {
+               hw_dbg(hw, " Adding the secondary addresses:\n");
+               addr = next(hw, &addr_list, &vmdq);
+               ixgbe_add_uc_addr(hw, addr, vmdq);
+       }
+
+       if (hw->addr_ctrl.overflow_promisc) {
+               /* enable promisc if not already in overflow or set by user */
+               if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+                       hw_dbg(hw, " Entering address overflow promisc mode\n");
+                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       fctrl |= IXGBE_FCTRL_UPE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+               }
+       } else {
+               /* only disable if set by overflow, not by user */
+               if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+                       hw_dbg(hw, " Leaving address overflow promisc mode\n");
+                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       fctrl &= ~IXGBE_FCTRL_UPE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+               }
+       }
 
+       hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
+       return 0;
 }
 
@@ -1816,7 +2162,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
  *
  *  Sets the bit-vector in the multicast table.
  **/
-static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 {
        u32 vector;
        u32 vector_bit;
@@ -1844,34 +2190,38 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 /**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
- *  @netdev: pointer to net device structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @next: iterator function to walk the multicast address list
+ *  @clear: flag, when set clears the table beforehand
  *
- *  The given list replaces any existing list. Clears the MC addrs from receive
- *  address registers and the multicast table. Uses unused receive address
- *  registers for the first multicast addresses, and hashes the rest into the
- *  multicast table.
+ *  When the clear flag is set, the given list replaces any existing list.
+ *  Hashes the given addresses into the multicast table.
  **/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                     u32 mc_addr_count, ixgbe_mc_addr_itr next,
+                                     bool clear)
 {
-       struct netdev_hw_addr *ha;
        u32 i;
+       u32 vmdq;
 
        /*
         * Set the new number of MC addresses that we are being requested to
         * use.
         */
-       hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+       hw->addr_ctrl.num_mc_addrs = mc_addr_count;
        hw->addr_ctrl.mta_in_use = 0;
 
        /* Clear mta_shadow */
-       hw_dbg(hw, " Clearing MTA\n");
-       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+       if (clear) {
+               hw_dbg(hw, " Clearing MTA\n");
+               memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+       }
 
-       /* Update mta shadow */
-       netdev_for_each_mc_addr(ha, netdev) {
+       /* Update mta_shadow */
+       for (i = 0; i < mc_addr_count; i++) {
                hw_dbg(hw, " Adding the multicast addresses:\n");
-               ixgbe_set_mta(hw, ha->addr);
+               ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
        }
 
        /* Enable mta */
@@ -1881,7 +2231,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
 
        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
-                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 
        hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
        return 0;
@@ -1899,7 +2249,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
-                               hw->mac.mc_filter_type);
+                               hw->mac.mc_filter_type);
 
        return 0;
 }
@@ -1923,30 +2273,42 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 /**
  *  ixgbe_fc_enable_generic - Enable flow control
  *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Enable flow control according to the current settings.
  **/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
        u32 mflcn_reg, fccfg_reg;
        u32 reg;
        u32 fcrtl, fcrth;
+       int i;
 
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc)
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
+       }
+
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
 
-#endif /* CONFIG_DCB */
        /* Negotiate the fc mode to use */
-       ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
-               goto out;
+       ixgbe_fc_autoneg(hw);
 
        /* Disable any previous flow control settings */
        mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-       mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
+       mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
 
        fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
        fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
@@ -1959,9 +2321,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
@@ -1994,11 +2353,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
                mflcn_reg |= IXGBE_MFLCN_RFCE;
                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
                break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
@@ -2011,100 +2365,87 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-       fcrtl = hw->fc.low_water << 10;
 
-       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-               fcrth = hw->fc.high_water[packetbuf_num] << 10;
-               fcrth |= IXGBE_FCRTH_FCEN;
-               if (hw->fc.send_xon)
-                       fcrtl |= IXGBE_FCRTL_XONE;
-       } else {
-               /*
-                * If Tx flow control is disabled, set our high water mark
-                * to Rx FIFO size minus 32 in order prevent Tx switch
-                * loopback from stalling on DMA.
-                */
-               fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32;
-       }
+       /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+                       fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+               } else {
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+                       /*
+                        * In order to prevent Tx hangs when the internal Tx
+                        * switch is enabled we must set the high water mark
+                        * to the maximum FCRTH value.  This allows the Tx
+                        * switch to function even under heavy Rx workloads.
+                        */
+                       fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+               }
 
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+       }
 
        /* Configure pause time (2 TCs per register) */
-       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-       if ((packetbuf_num & 1) == 0)
-               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-       else
-               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+       reg = hw->fc.pause_time * 0x00010001;
+       for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+       /* Configure flow control refresh threshold value */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
        return ret_val;
 }
 
 /**
- *  ixgbe_fc_autoneg - Configure flow control
+ *  ixgbe_negotiate_fc - Negotiate flow control
  *  @hw: pointer to hardware structure
+ *  @adv_reg: flow control advertised settings
+ *  @lp_reg: link partner's flow control settings
+ *  @adv_sym: symmetric pause bit in advertisement
+ *  @adv_asm: asymmetric pause bit in advertisement
+ *  @lp_sym: symmetric pause bit in link partner advertisement
+ *  @lp_asm: asymmetric pause bit in link partner advertisement
  *
- *  Compares our advertised flow control capabilities to those advertised by
- *  our link partner, and determines the proper flow control mode to use.
+ *  Find the intersection between advertised settings and link partner's
+ *  advertised settings
  **/
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
 {
-       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-       ixgbe_link_speed speed;
-       bool link_up;
-
-       if (hw->fc.disable_fc_autoneg)
-               goto out;
-
-       /*
-        * AN should have completed when the cable was plugged in.
-        * Look for reasons to bail out.  Bail out if:
-        * - FC autoneg is disabled, or if
-        * - link is not up.
-        *
-        * Since we're being called from an LSC, link is already known to be up.
-        * So use link_up_wait_to_complete=false.
-        */
-       hw->mac.ops.check_link(hw, &speed, &link_up, false);
-       if (!link_up) {
-               ret_val = IXGBE_ERR_FLOW_CONTROL;
-               goto out;
-       }
-
-       switch (hw->phy.media_type) {
-       /* Autoneg flow control on fiber adapters */
-       case ixgbe_media_type_fiber:
-               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
-                       ret_val = ixgbe_fc_autoneg_fiber(hw);
-               break;
-
-       /* Autoneg flow control on backplane adapters */
-       case ixgbe_media_type_backplane:
-               ret_val = ixgbe_fc_autoneg_backplane(hw);
-               break;
-
-       /* Autoneg flow control on copper adapters */
-       case ixgbe_media_type_copper:
-               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
-                       ret_val = ixgbe_fc_autoneg_copper(hw);
-               break;
-
-       default:
-               break;
-       }
+       if ((!(adv_reg)) ||  (!(lp_reg)))
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
-out:
-       if (ret_val == 0) {
-               hw->fc.fc_was_autonegged = true;
+       if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+               /*
+                * Now we need to check if the user selected Rx ONLY
+                * of pause frames.  In this case, we had to advertise
+                * FULL flow control because we could not advertise RX
+                * ONLY. Hence, we must now check to see if we need to
+                * turn OFF the TRANSMISSION of PAUSE frames.
+                */
+               if (hw->fc.requested_mode == ixgbe_fc_full) {
+                       hw->fc.current_mode = ixgbe_fc_full;
+                       hw_dbg(hw, "Flow Control = FULL.\n");
+               } else {
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
+               }
+       } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
+               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+       } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
+               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
        } else {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw_dbg(hw, "Flow Control = NONE.\n");
        }
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -2116,7 +2457,7 @@ out:
 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 {
        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       s32 ret_val;
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
 
        /*
         * On multispeed fiber at 1g, bail out if
@@ -2126,19 +2467,17 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 
        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
        if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-           (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+           (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
                goto out;
-       }
 
        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
 
        ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
-                              pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
-                              IXGBE_PCS1GANA_ASM_PAUSE,
-                              IXGBE_PCS1GANA_SYM_PAUSE,
-                              IXGBE_PCS1GANA_ASM_PAUSE);
+                                     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+                                     IXGBE_PCS1GANA_ASM_PAUSE,
+                                     IXGBE_PCS1GANA_SYM_PAUSE,
+                                     IXGBE_PCS1GANA_ASM_PAUSE);
 
 out:
        return ret_val;
@@ -2153,7 +2492,7 @@ out:
 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
 {
        u32 links2, anlp1_reg, autoc_reg, links;
-       s32 ret_val;
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
 
        /*
         * On backplane, bail out if
@@ -2161,21 +2500,13 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
         * - we are 82599 and link partner is not AN enabled
         */
        links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
-               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
                goto out;
-       }
 
        if (hw->mac.type == ixgbe_mac_82599EB) {
                links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
                        goto out;
-               }
        }
        /*
         * Read the 10g AN autoc and LP ability registers and resolve
@@ -2203,11 +2534,11 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
        u16 technology_ability_reg = 0;
        u16 lp_technology_ability_reg = 0;
 
-       hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-                            MDIO_MMD_AN,
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                             &technology_ability_reg);
-       hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
-                            MDIO_MMD_AN,
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                             &lp_technology_ability_reg);
 
        return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
@@ -2217,241 +2548,60 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_negotiate_fc - Negotiate flow control
- *  @hw: pointer to hardware structure
- *  @adv_reg: flow control advertised settings
- *  @lp_reg: link partner's flow control settings
- *  @adv_sym: symmetric pause bit in advertisement
- *  @adv_asm: asymmetric pause bit in advertisement
- *  @lp_sym: symmetric pause bit in link partner advertisement
- *  @lp_asm: asymmetric pause bit in link partner advertisement
- *
- *  Find the intersection between advertised settings and link partner's
- *  advertised settings
- **/
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
-                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
-{
-       if ((!(adv_reg)) ||  (!(lp_reg)))
-               return IXGBE_ERR_FC_NOT_NEGOTIATED;
-
-       if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
-               /*
-                * Now we need to check if the user selected Rx ONLY
-                * of pause frames.  In this case, we had to advertise
-                * FULL flow control because we could not advertise RX
-                * ONLY. Hence, we must now check to see if we need to
-                * turn OFF the TRANSMISSION of PAUSE frames.
-                */
-               if (hw->fc.requested_mode == ixgbe_fc_full) {
-                       hw->fc.current_mode = ixgbe_fc_full;
-                       hw_dbg(hw, "Flow Control = FULL.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
-               }
-       } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-                  (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-               hw->fc.current_mode = ixgbe_fc_tx_pause;
-               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-       } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-                  !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-               hw->fc.current_mode = ixgbe_fc_rx_pause;
-               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-       } else {
-               hw->fc.current_mode = ixgbe_fc_none;
-               hw_dbg(hw, "Flow Control = NONE.\n");
-       }
-       return 0;
-}
-
-/**
- *  ixgbe_setup_fc - Set up flow control
+ *  ixgbe_fc_autoneg - Configure flow control
  *  @hw: pointer to hardware structure
  *
- *  Called at init time to set up flow control.
+ *  Compares our advertised flow control capabilities to those advertised by
+ *  our link partner, and determines the proper flow control mode to use.
  **/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
-       u32 reg = 0, reg_bp = 0;
-       u16 reg_cu = 0;
-
-#ifdef CONFIG_DCB
-       if (hw->fc.requested_mode == ixgbe_fc_pfc) {
-               hw->fc.current_mode = hw->fc.requested_mode;
-               goto out;
-       }
-
-#endif /* CONFIG_DCB */
-       /* Validate the packetbuf configuration */
-       if (packetbuf_num < 0 || packetbuf_num > 7) {
-               hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
-                      "is 0-7\n", packetbuf_num);
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       ixgbe_link_speed speed;
+       bool link_up;
 
        /*
-        * Validate the water mark configuration.  Zero water marks are invalid
-        * because it causes the controller to just blast out fc packets.
+        * AN should have completed when the cable was plugged in.
+        * Look for reasons to bail out.  Bail out if:
+        * - FC autoneg is disabled, or if
+        * - link is not up.
         */
-       if (!hw->fc.low_water ||
-           !hw->fc.high_water[packetbuf_num] ||
-           !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+       if (hw->fc.disable_fc_autoneg)
                goto out;
-       }
 
-       /*
-        * Validate the requested mode.  Strict IEEE mode does not allow
-        * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
-        */
-       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-               hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
-                      "IEEE mode\n");
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+       hw->mac.ops.check_link(hw, &speed, &link_up, false);
+       if (!link_up)
                goto out;
-       }
-
-       /*
-        * 10gig parts do not have a word in the EEPROM to determine the
-        * default flow control setting, so we explicitly set it to full.
-        */
-       if (hw->fc.requested_mode == ixgbe_fc_default)
-               hw->fc.requested_mode = ixgbe_fc_full;
-
-       /*
-        * Set up the 1G and 10G flow control advertisement registers so the
-        * HW will be able to do fc autoneg once the cable is plugged in.  If
-        * we link at 10G, the 1G advertisement is harmless and vice versa.
-        */
 
        switch (hw->phy.media_type) {
+       /* Autoneg flow control on fiber adapters */
        case ixgbe_media_type_fiber:
+               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+                       ret_val = ixgbe_fc_autoneg_fiber(hw);
+               break;
+
+       /* Autoneg flow control on backplane adapters */
        case ixgbe_media_type_backplane:
-               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               ret_val = ixgbe_fc_autoneg_backplane(hw);
                break;
 
+       /* Autoneg flow control on copper adapters */
        case ixgbe_media_type_copper:
-               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-                                       MDIO_MMD_AN, &reg_cu);
+               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+                       ret_val = ixgbe_fc_autoneg_copper(hw);
                break;
 
        default:
-               ;
-       }
-
-       /*
-        * The possible values of fc.requested_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-        * 4: Priority Flow Control is enabled.
-#endif
-        * other: Invalid.
-        */
-       switch (hw->fc.requested_mode) {
-       case ixgbe_fc_none:
-               /* Flow control completely disabled by software override. */
-               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               if (hw->phy.media_type == ixgbe_media_type_backplane)
-                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
-                                   IXGBE_AUTOC_ASM_PAUSE);
-               else if (hw->phy.media_type == ixgbe_media_type_copper)
-                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               if (hw->phy.media_type == ixgbe_media_type_backplane)
-                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-                                  IXGBE_AUTOC_ASM_PAUSE);
-               else if (hw->phy.media_type == ixgbe_media_type_copper)
-                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
-               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
-               if (hw->phy.media_type == ixgbe_media_type_backplane) {
-                       reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
-                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
-               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
-                       reg_cu |= (IXGBE_TAF_ASM_PAUSE);
-                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
-               }
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-               if (hw->phy.media_type == ixgbe_media_type_backplane)
-                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-                                  IXGBE_AUTOC_ASM_PAUSE);
-               else if (hw->phy.media_type == ixgbe_media_type_copper)
-                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
                break;
        }
 
-       if (hw->mac.type != ixgbe_mac_X540) {
-               /*
-                * Enable auto-negotiation between the MAC & PHY;
-                * the MAC will advertise clause 37 flow control.
-                */
-               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-               /* Disable AN timeout */
-               if (hw->fc.strict_ieee)
-                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
-               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-               hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-       }
-
-       /*
-        * AUTOC restart handles negotiation of 1G and 10G on backplane
-        * and copper. There is no need to set the PCS1GCTL register.
-        *
-        */
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               reg_bp |= IXGBE_AUTOC_AN_RESTART;
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-                   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
-               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
-                                     MDIO_MMD_AN, reg_cu);
-       }
-
-       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
-       return ret_val;
+       if (ret_val == 0) {
+               hw->fc.fc_was_autonegged = true;
+       } else {
+               hw->fc.fc_was_autonegged = false;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
 }
 
 /**
@@ -2463,17 +2613,15 @@ out:
  *  bit hasn't caused the master requests to be disabled, else 0
  *  is returned signifying master requests disabled.
  **/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        s32 status = 0;
        u32 i;
-       u16 value;
 
        /* Always set this bit to ensure any future transactions are blocked */
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
 
-       /* Exit if master requests are blocked */
+       /* Exit if master requests are blocked */
        if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
                goto out;
 
@@ -2501,9 +2649,8 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
         */
        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
                udelay(100);
-               pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
-                                                        &value);
-               if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+               if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
+                   IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        goto out;
        }
 
@@ -2546,7 +2693,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
                 * thread currently using resource (swmask)
                 */
                ixgbe_release_eeprom_semaphore(hw);
-               usleep_range(5000, 10000);
+               msleep(5);
                timeout--;
        }
 
@@ -2585,15 +2732,16 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 }
 
 /**
- *  ixgbe_disable_rx_buff_generic - Stops the receive data path
+ *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
  *  @hw: pointer to hardware structure
  *
- *  Stops the receive data path and waits for the HW to internally
- *  empty the Rx security block.
+ *  Stops the receive data path and waits for the HW to internally empty
+ *  the Rx security block
  **/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
 {
 #define IXGBE_MAX_SECRX_POLL 40
+
        int i;
        int secrxreg;
 
@@ -2606,25 +2754,24 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
                        break;
                else
                        /* Use interrupt-safe sleep just in case */
-                       udelay(10);
+                       udelay(1000);
        }
 
        /* For informational purposes only */
        if (i >= IXGBE_MAX_SECRX_POLL)
                hw_dbg(hw, "Rx unit being enabled before security "
-                      "path fully disabled.  Continuing with init.\n");
+                        "path fully disabled.  Continuing with init.\n");
 
        return 0;
-
 }
 
 /**
- *  ixgbe_enable_rx_buff - Enables the receive data path
+ *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
  *  @hw: pointer to hardware structure
  *
- *  Enables the receive data path
+ *  Enables the receive data path.
  **/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
 {
        int secrxreg;
 
@@ -2673,7 +2820,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
                autoc_reg |= IXGBE_AUTOC_FLU;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
                IXGBE_WRITE_FLUSH(hw);
-               usleep_range(10000, 20000);
+               msleep(10);
        }
 
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -2717,7 +2864,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
  *  get and set mac_addr routines.
  **/
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
-                                        u16 *san_mac_offset)
+                                        u16 *san_mac_offset)
 {
        /*
         * First read the EEPROM pointer to see if the MAC addresses are
@@ -2764,7 +2911,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
        hw->mac.ops.set_lan_id(hw);
        /* apply the port offset to the address offset */
        (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
        for (i = 0; i < 3; i++) {
                hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
                san_mac_addr[i * 2] = (u8)(san_mac_data);
@@ -2776,6 +2923,44 @@ san_mac_addr_out:
        return 0;
 }
 
+/**
+ *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Write a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       s32 status = 0;
+       u16 san_mac_data, san_mac_offset;
+       u8 i;
+
+       /* Look for SAN mac address pointer.  If not defined, return */
+       ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+               status = IXGBE_ERR_NO_SAN_ADDR_PTR;
+               goto san_mac_addr_out;
+       }
+
+       /* Make sure we know which port we need to write */
+       hw->mac.ops.set_lan_id(hw);
+       /* Apply the port offset to the address offset */
+       (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+       for (i = 0; i < 3; i++) {
+               san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+               san_mac_data |= (u16)(san_mac_addr[i * 2]);
+               hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+               san_mac_offset++;
+       }
+
+san_mac_addr_out:
+       return status;
+}
+
 /**
  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
  *  @hw: pointer to hardware structure
@@ -2783,20 +2968,105 @@ san_mac_addr_out:
  *  Read PCIe configuration space, and get the MSI-X vector count from
  *  the capabilities table.
  **/
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
-       u16 msix_count;
-       pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
-                            &msix_count);
+       u16 msix_count = 1;
+       u16 max_msix_count;
+       u16 pcie_offset;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+               max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+               max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+               break;
+       default:
+               return msix_count;
+       }
+
+       msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
        msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
 
-       /* MSI-X count is zero-based in HW, so increment to give proper value */
+       /* MSI-X count is zero-based in HW */
        msix_count++;
 
+       if (msix_count > max_msix_count)
+               msix_count = max_msix_count;
+
        return msix_count;
 }
 
+/**
+ *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the rar that it is aleady in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+       u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+       u32 rar;
+       u32 rar_low, rar_high;
+       u32 addr_low, addr_high;
+
+       /* swap bytes for HW little endian */
+       addr_low  = addr[0] | (addr[1] << 8)
+                           | (addr[2] << 16)
+                           | (addr[3] << 24);
+       addr_high = addr[4] | (addr[5] << 8);
+
+       /*
+        * Either find the mac_id in rar or find the first empty space.
+        * rar_highwater points to just after the highest currently used
+        * rar in order to shorten the search.  It grows when we add a new
+        * rar to the top.
+        */
+       for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+               if (((IXGBE_RAH_AV & rar_high) == 0)
+                   && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+                       first_empty_rar = rar;
+               } else if ((rar_high & 0xFFFF) == addr_high) {
+                       rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+                       if (rar_low == addr_low)
+                               break;    /* found it already in the rars */
+               }
+       }
+
+       if (rar < hw->mac.rar_highwater) {
+               /* already there so just add to the pool bits */
+               ixgbe_set_vmdq(hw, rar, vmdq);
+       } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+               /* stick it into first empty RAR slot we found */
+               rar = first_empty_rar;
+               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+       } else if (rar == hw->mac.rar_highwater) {
+               /* add it to the top of the list and inc the highwater mark */
+               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+               hw->mac.rar_highwater++;
+       } else if (rar >= hw->mac.num_rar_entries) {
+               return IXGBE_ERR_INVALID_MAC_ADDR;
+       }
+
+       /*
+        * If we found rar[0], make sure the default pool bit (we use pool 0)
+        * remains cleared to be sure default pool packets will get delivered
+        */
+       if (rar == 0)
+               ixgbe_clear_vmdq(hw, rar, 0);
+
+       return rar;
+}
+
 /**
  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
  *  @hw: pointer to hardware struct
@@ -2873,6 +3143,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        return 0;
 }
 
+/**
+ *  This function should only be involved in the IOV mode.
+ *  In IOV mode, Default pool is next pool after the number of
+ *  VFs advertized and not 0.
+ *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+       u32 rar = hw->mac.san_mac_rar_index;
+
+       if (vmdq < 32) {
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+       }
+
+       return 0;
+}
+
 /**
  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
  *  @hw: pointer to hardware structure
@@ -2881,6 +3176,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
 {
        int i;
 
+       hw_dbg(hw, " Clearing UTA\n");
+
        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 
@@ -2895,7 +3192,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
  *  return the VLVF index where this VLAN id should be placed
  *
  **/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
 {
        u32 bits = 0;
        u32 first_empty_slot = 0;
@@ -2944,14 +3241,13 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                           bool vlan_on)
+                          bool vlan_on)
 {
        s32 regindex;
        u32 bitindex;
        u32 vfta;
-       u32 bits;
-       u32 vt;
        u32 targetbit;
+       s32 ret_val = 0;
        bool vfta_changed = false;
 
        if (vlan > 4095)
@@ -2987,7 +3283,39 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
        }
 
        /* Part 2
-        * If VT Mode is set
+        * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+        */
+       ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+                                        &vfta_changed);
+       if (ret_val != 0)
+               return ret_val;
+
+       if (vfta_changed)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+       return 0;
+}
+
+/**
+ *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ *                 should be changed
+ *
+ *  Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                           bool vlan_on, bool *vfta_changed)
+{
+       u32 vt;
+
+       if (vlan > 4095)
+               return IXGBE_ERR_PARAM;
+
+       /* If VT Mode is set
         *   Either vlan_on
         *     make sure the vlan is in VLVF
         *     set the vind bit in the matching VLVFB
@@ -2997,6 +3325,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
        vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (vt & IXGBE_VT_CTL_VT_ENABLE) {
                s32 vlvf_index;
+               u32 bits;
 
                vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
                if (vlvf_index < 0)
@@ -3006,39 +3335,39 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                        /* set the pool bit */
                        if (vind < 32) {
                                bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
+                                               IXGBE_VLVFB(vlvf_index * 2));
                                bits |= (1 << vind);
                                IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2),
+                                               IXGBE_VLVFB(vlvf_index * 2),
                                                bits);
                        } else {
                                bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
-                               bits |= (1 << (vind-32));
+                                       IXGBE_VLVFB((vlvf_index * 2) + 1));
+                               bits |= (1 << (vind - 32));
                                IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1),
-                                               bits);
+                                       IXGBE_VLVFB((vlvf_index * 2) + 1),
+                                       bits);
                        }
                } else {
                        /* clear the pool bit */
                        if (vind < 32) {
                                bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
+                                               IXGBE_VLVFB(vlvf_index * 2));
                                bits &= ~(1 << vind);
                                IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2),
+                                               IXGBE_VLVFB(vlvf_index * 2),
                                                bits);
                                bits |= IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
+                                       IXGBE_VLVFB((vlvf_index * 2) + 1));
                        } else {
                                bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
-                               bits &= ~(1 << (vind-32));
+                                       IXGBE_VLVFB((vlvf_index * 2) + 1));
+                               bits &= ~(1 << (vind - 32));
                                IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1),
-                                               bits);
+                                       IXGBE_VLVFB((vlvf_index * 2) + 1),
+                                       bits);
                                bits |= IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
+                                               IXGBE_VLVFB(vlvf_index * 2));
                        }
                }
 
@@ -3060,20 +3389,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                if (bits) {
                        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
                                        (IXGBE_VLVF_VIEN | vlan));
-                       if (!vlan_on) {
+                       if ((!vlan_on) && (vfta_changed != NULL)) {
                                /* someone wants to clear the vfta entry
                                 * but some pools/VFs are still using it.
                                 * Ignore it. */
-                               vfta_changed = false;
+                               *vfta_changed = false;
                        }
-               }
-               else
+               } else
                        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
        }
 
-       if (vfta_changed)
-               IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
        return 0;
 }
 
@@ -3092,8 +3417,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
 
        for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
                IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
        }
 
        return 0;
@@ -3121,7 +3446,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 
        if (links_orig != links_reg) {
                hw_dbg(hw, "LINKS changed from %08X to %08X\n",
-                      links_orig, links_reg);
+                         links_orig, links_reg);
        }
 
        if (link_up_wait_to_complete) {
@@ -3158,7 +3483,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 }
 
 /**
- *  ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ *  ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
  *  the EEPROM
  *  @hw: pointer to hardware structure
  *  @wwnn_prefix: the alternative WWNN prefix
@@ -3168,7 +3493,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  *  block to check the support for the alternative WWNN/WWPN prefix support.
  **/
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                        u16 *wwpn_prefix)
+                                u16 *wwpn_prefix)
 {
        u16 offset, caps;
        u16 alt_san_mac_blk_offset;
@@ -3179,7 +3504,7 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 
        /* check if alternative SAN MAC is supported */
        hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-                           &alt_san_mac_blk_offset);
+                           &alt_san_mac_blk_offset);
 
        if ((alt_san_mac_blk_offset == 0) ||
            (alt_san_mac_blk_offset == 0xFFFF))
@@ -3203,25 +3528,50 @@ wwn_prefix_out:
 }
 
 /**
- *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- *  control
+ *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
  *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
  *
- *  There are several phys that do not support autoneg flow control. This
- *  function check the device id to see if the associated phy supports
- *  autoneg flow control.
+ *  This function will read the FCOE boot status from the iSCSI FCOE block
  **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
 {
+       u16 offset, caps, flags;
+       s32 status;
 
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_X540T:
-               return 0;
-       case IXGBE_DEV_ID_82599_T3_LOM:
-               return 0;
-       default:
-               return IXGBE_ERR_FC_NOT_SUPPORTED;
-       }
+       /* clear output first */
+       *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+       /* check if FCOE IBA block is present */
+       offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+       status = hw->eeprom.ops.read(hw, offset, &caps);
+       if (status != 0)
+               goto out;
+
+       if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+               goto out;
+
+       /* check if iSCSI FCOE block is populated */
+       status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+       if (status != 0)
+               goto out;
+
+       if ((offset == 0) || (offset == 0xFFFF))
+               goto out;
+
+       /* read fcoe flags in iSCSI FCOE block */
+       offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+       status = hw->eeprom.ops.read(hw, offset, &flags);
+       if (status != 0)
+               goto out;
+
+       if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+               *bs = ixgbe_fcoe_bootstatus_enabled;
+       else
+               *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+       return status;
 }
 
 /**
@@ -3248,20 +3598,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
         * PFVFSPOOF register array is size 8 with 8 bits assigned to
         * MAC anti-spoof enables in each register array element.
         */
-       for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+       for (j = 0; j < pf_target_reg; j++)
                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
 
-       /* If not enabling anti-spoofing then done */
-       if (!enable)
-               return;
-
        /*
         * The PF should be allowed to spoof so that it can support
-        * emulation mode NICs.  Reset the bit assigned to the PF
+        * emulation mode NICs.  Do not set the bits assigned to the PF
+        */
+       pfvfspoof &= (1 << pf_target_shift) - 1;
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+       /*
+        * Remaining pools belong to the PF so they do not need to have
+        * anti-spoofing enabled.
         */
-       pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
-       pfvfspoof ^= (1 << pf_target_shift);
-       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+       for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
 }
 
 /**
@@ -3303,72 +3655,6 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
        return 0;
 }
 
-/**
- * ixgbe_set_rxpba_generic - Initialize RX packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
-                            int num_pb,
-                            u32 headroom,
-                            int strategy)
-{
-       u32 pbsize = hw->mac.rx_pb_size;
-       int i = 0;
-       u32 rxpktsize, txpktsize, txpbthresh;
-
-       /* Reserve headroom */
-       pbsize -= headroom;
-
-       if (!num_pb)
-               num_pb = 1;
-
-       /* Divide remaining packet buffer space amongst the number
-        * of packet buffers requested using supplied strategy.
-        */
-       switch (strategy) {
-       case (PBA_STRATEGY_WEIGHTED):
-               /* pba_80_48 strategy weight first half of packet buffer with
-                * 5/8 of the packet buffer space.
-                */
-               rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
-               pbsize -= rxpktsize * (num_pb / 2);
-               rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
-               for (; i < (num_pb / 2); i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-               /* Fall through to configure remaining packet buffers */
-       case (PBA_STRATEGY_EQUAL):
-               /* Divide the remaining Rx packet buffer evenly among the TCs */
-               rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
-               for (; i < num_pb; i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-               break;
-       default:
-               break;
-       }
-
-       /*
-        * Setup Tx packet buffer and threshold equally for all TCs
-        * TXPBTHRESH register is set in K so divide by 1024 and subtract
-        * 10 since the largest packet we support is just over 9K.
-        */
-       txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
-       txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
-       for (i = 0; i < num_pb; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
-               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
-       }
-
-       /* Clear unused TCs, if any, to zero buffer size*/
-       for (; i < IXGBE_MAX_PB; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
-       }
-}
-
 /**
  *  ixgbe_calculate_checksum - Calculate checksum for buffer
  *  @buffer: pointer to EEPROM
@@ -3383,7 +3669,6 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
 
        if (!buffer)
                return 0;
-
        for (i = 0; i < length; i++)
                sum += buffer[i];
 
@@ -3394,7 +3679,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
  *  ixgbe_host_interface_command - Issue command to manageability block
  *  @hw: pointer to the HW structure
  *  @buffer: contains the command to write and where the return status will
- *           be placed
+ *   be placed
  *  @length: length of buffer, must be multiple of 4 bytes
  *
  *  Communicates with the manageability block.  On success return 0
@@ -3433,7 +3718,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
         */
        for (i = 0; i < dword_len; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-                                     i, cpu_to_le32(buffer[i]));
+                                     i, IXGBE_CPU_TO_LE32(buffer[i]));
 
        /* Setting this bit tells the ARC that a new command is pending. */
        IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3442,7 +3727,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
                hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
                if (!(hicr & IXGBE_HICR_C))
                        break;
-               usleep_range(1000, 2000);
+               msleep(1);
        }
 
        /* Check command successful completion. */
@@ -3459,7 +3744,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* first pull in the header so we know the buffer length */
        for (bi = 0; bi < dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-               le32_to_cpus(&buffer[bi]);
+               IXGBE_LE32_TO_CPUS(&buffer[bi]);
        }
 
        /* If there is any thing in data position pull it in */
@@ -3479,7 +3764,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* Pull in the rest of the buffer (bi is where we left off)*/
        for (; bi <= dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-               le32_to_cpus(&buffer[bi]);
+               IXGBE_LE32_TO_CPUS(&buffer[bi]);
        }
 
 out:
@@ -3506,7 +3791,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
        int i;
        s32 ret_val = 0;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+           != 0) {
                ret_val = IXGBE_ERR_SWFW_SYNC;
                goto out;
        }
@@ -3545,6 +3831,65 @@ out:
        return ret_val;
 }
 
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+                            int strategy)
+{
+       u32 pbsize = hw->mac.rx_pb_size;
+       int i = 0;
+       u32 rxpktsize, txpktsize, txpbthresh;
+
+       /* Reserve headroom */
+       pbsize -= headroom;
+
+       if (!num_pb)
+               num_pb = 1;
+
+       /* Divide remaining packet buffer space amongst the number of packet
+        * buffers requested using supplied strategy.
+        */
+       switch (strategy) {
+       case PBA_STRATEGY_WEIGHTED:
+               /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
+                * buffer with 5/8 of the packet buffer space.
+                */
+               rxpktsize = (pbsize * 5) / (num_pb * 4);
+               pbsize -= rxpktsize * (num_pb / 2);
+               rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+               for (; i < (num_pb / 2); i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               /* Fall through to configure remaining packet buffers */
+       case PBA_STRATEGY_EQUAL:
+               rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+               for (; i < num_pb; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               break;
+       default:
+               break;
+       }
+
+       /* Only support an equally distributed Tx packet buffer strategy. */
+       txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+       txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+       for (i = 0; i < num_pb; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+       }
+
+       /* Clear unused TCs, if any, to zero buffer size*/
+       for (; i < IXGBE_MAX_PB; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+       }
+}
+
 /**
  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
  * @hw: pointer to the hardware structure
@@ -3585,3 +3930,153 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 }
+
+static const u8 ixgbe_emc_temp_data[4] = {
+       IXGBE_EMC_INTERNAL_DATA,
+       IXGBE_EMC_DIODE1_DATA,
+       IXGBE_EMC_DIODE2_DATA,
+       IXGBE_EMC_DIODE3_DATA
+};
+static const u8 ixgbe_emc_therm_limit[4] = {
+       IXGBE_EMC_INTERNAL_THERM_LIMIT,
+       IXGBE_EMC_DIODE1_THERM_LIMIT,
+       IXGBE_EMC_DIODE2_THERM_LIMIT,
+       IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ *  ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *  @data: pointer to the thermal sensor data structure
+ *
+ *  Returns the thermal sensor data structure
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+       u16 ets_offset;
+       u16 ets_cfg;
+       u16 ets_sensor;
+       u8  num_sensors;
+       u8  sensor_index;
+       u8  sensor_location;
+       u8  i;
+       struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+       /* Only support thermal sensors attached to 82599 physical port 0 */
+       if ((hw->mac.type != ixgbe_mac_82599EB) ||
+           (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+               status = IXGBE_NOT_IMPLEMENTED;
+               goto out;
+       }
+
+       status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+       if (status)
+               goto out;
+
+       if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
+               status = IXGBE_NOT_IMPLEMENTED;
+               goto out;
+       }
+
+       status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+       if (status)
+               goto out;
+
+       if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+               != IXGBE_ETS_TYPE_EMC) {
+               status = IXGBE_NOT_IMPLEMENTED;
+               goto out;
+       }
+
+       num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+       if (num_sensors > IXGBE_MAX_SENSORS)
+               num_sensors = IXGBE_MAX_SENSORS;
+
+       for (i = 0; i < num_sensors; i++) {
+               status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+                                            &ets_sensor);
+               if (status)
+                       goto out;
+
+               sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+                               IXGBE_ETS_DATA_INDEX_SHIFT);
+               sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+                                  IXGBE_ETS_DATA_LOC_SHIFT);
+
+               if (sensor_location != 0) {
+                       status = hw->phy.ops.read_i2c_byte(hw,
+                                       ixgbe_emc_temp_data[sensor_index],
+                                       IXGBE_I2C_THERMAL_SENSOR_ADDR,
+                                       &data->sensor[i].temp);
+                       if (status)
+                               goto out;
+               }
+       }
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Inits the thermal sensor thresholds according to the NVM map
+ *  and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+       u16 ets_offset;
+       u16 ets_cfg;
+       u16 ets_sensor;
+       u8  low_thresh_delta;
+       u8  num_sensors;
+       u8  sensor_index;
+       u8  sensor_location;
+       u8  therm_limit;
+       u8  i;
+       struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+       memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+       /* Only support thermal sensors attached to 82599 physical port 0 */
+       if ((hw->mac.type != ixgbe_mac_82599EB) ||
+           (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+               return IXGBE_NOT_IMPLEMENTED;
+
+       hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+       if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+               return IXGBE_NOT_IMPLEMENTED;
+
+       hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+       if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+               != IXGBE_ETS_TYPE_EMC)
+               return IXGBE_NOT_IMPLEMENTED;
+
+       low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+                            IXGBE_ETS_LTHRES_DELTA_SHIFT);
+       num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+
+       for (i = 0; i < num_sensors; i++) {
+               hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+               sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+                               IXGBE_ETS_DATA_INDEX_SHIFT);
+               sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+                                  IXGBE_ETS_DATA_LOC_SHIFT);
+               therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+               hw->phy.ops.write_i2c_byte(hw,
+                       ixgbe_emc_therm_limit[sensor_index],
+                       IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+               if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
+                       data->sensor[i].location = sensor_location;
+                       data->sensor[i].caution_thresh = therm_limit;
+                       data->sensor[i].max_op_thresh = therm_limit -
+                                                       low_thresh_delta;
+               }
+       }
+       return status;
+}
+
index 204f06235b455ce8a0d0c803471001f9c6a986f9..9bd6f534bfc2cc5079a69fbb1752c1334c6828be 100644 (file)
 #define _IXGBE_COMMON_H_
 
 #include "ixgbe_type.h"
-#include "ixgbe.h"
 
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-                                  u32 pba_num_size);
+                                 u32 pba_num_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -58,90 +58,83 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                    u16 words, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-                                       u16 *data);
+                                      u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data);
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-                                           u16 *checksum_val);
+                                          u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-                          u32 enable_addr);
+                         u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                     u32 mc_addr_count,
+                                     ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                     u32 addr_count, ixgbe_mc_addr_itr func);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
-                           u32 vind, bool vlan_on);
+                        u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                          bool vlan_on, bool *vfta_changed);
 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
-                                 ixgbe_link_speed *speed,
-                                 bool *link_up, bool link_up_wait_to_complete);
+                              ixgbe_link_speed *speed,
+                              bool *link_up, bool link_up_wait_to_complete);
+
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                 u16 *wwpn_prefix);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+                                u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+                            int strategy);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                 u8 build, u8 ver);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
-                            u32 headroom, int strategy);
-
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
-    writel((u32) (val >> 32), (addr + 4));
-#endif
-
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
-    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
-    readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-
-#define hw_dbg(hw, format, arg...) \
-       netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
-#define e_dev_info(format, arg...) \
-       dev_info(&adapter->pdev->dev, format, ## arg)
-#define e_dev_warn(format, arg...) \
-       dev_warn(&adapter->pdev->dev, format, ## arg)
-#define e_dev_err(format, arg...) \
-       dev_err(&adapter->pdev->dev, format, ## arg)
-#define e_dev_notice(format, arg...) \
-       dev_notice(&adapter->pdev->dev, format, ## arg)
-#define e_info(msglvl, format, arg...) \
-       netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_err(msglvl, format, arg...) \
-       netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_warn(msglvl, format, arg...) \
-       netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_crit(msglvl, format, arg...) \
-       netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR  0xF8
+#define IXGBE_EMC_INTERNAL_DATA                0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA          0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT   0x19
+#define IXGBE_EMC_DIODE2_DATA          0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT   0x1A
+#define IXGBE_EMC_DIODE3_DATA          0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT   0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
 #endif /* IXGBE_COMMON */
index 8bfaaee5ac5b58964a70e54f39c7e21d041178b6..f515fd25495ad0304ebb6f55d2fa146a8b88cec5 100644 (file)
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
 
-#include "ixgbe.h"
 #include "ixgbe_type.h"
 #include "ixgbe_dcb.h"
 #include "ixgbe_dcb_82598.h"
 #include "ixgbe_dcb_82599.h"
 
 /**
- * ixgbe_ieee_credits - This calculates the ieee traffic class
+ * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class
  * credits from the configured bandwidth percentages. Credits
  * are the smallest unit programmable into the underlying
  * hardware. The IEEE 802.1Qaz specification do not use bandwidth
  * groups so this is much simplified from the CEE case.
  */
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
-                             __u16 *max, int max_frame)
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+                                  int max_frame_size)
 {
        int min_percent = 100;
        int min_credit, multiplier;
        int i;
 
-       min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
-                       DCB_CREDIT_QUANTUM;
+       min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+                       IXGBE_DCB_CREDIT_QUANTUM;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                if (bw[i] < min_percent && bw[i])
                        min_percent = bw[i];
        }
@@ -58,53 +56,54 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
        multiplier = (min_credit / min_percent) + 1;
 
        /* Find out the hw credits for each TC */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
 
                if (val < min_credit)
                        val = min_credit;
-               refill[i] = val;
+               refill[i] = (u16)val;
 
-               max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
+               max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
        }
+
        return 0;
 }
 
 /**
- * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
  * @ixgbe_dcb_config: Struct containing DCB settings.
  * @direction: Configuring either Tx or Rx.
  *
  * This function calculates the credits allocated to each traffic class.
  * It should be called only after the rules are checked by
- * ixgbe_dcb_check_config().
+ * ixgbe_dcb_check_config_cee().
  */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
                                   struct ixgbe_dcb_config *dcb_config,
-                                  int max_frame, u8 direction)
+                                  u32 max_frame_size, u8 direction)
 {
-       struct tc_bw_alloc *p;
-       int min_credit;
-       int min_multiplier;
-       int min_percent = 100;
-       s32 ret_val = 0;
+       struct ixgbe_dcb_tc_path *p;
+       u32 min_multiplier      = 0;
+       u16 min_percent         = 100;
+       s32 ret_val =           0;
        /* Initialization values default for Tx settings */
-       u32 credit_refill       = 0;
-       u32 credit_max          = 0;
-       u16 link_percentage     = 0;
-       u8  bw_percent          = 0;
+       u32 min_credit          = 0;
+       u32 credit_refill       = 0;
+       u32 credit_max          = 0;
+       u16 link_percentage     = 0;
+       u8  bw_percent          = 0;
        u8  i;
 
        if (dcb_config == NULL) {
-               ret_val = DCB_ERR_CONFIG;
+               ret_val = IXGBE_ERR_CONFIG;
                goto out;
        }
 
-       min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
-                       DCB_CREDIT_QUANTUM;
+       min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+                    IXGBE_DCB_CREDIT_QUANTUM;
 
        /* Find smallest link percentage */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                p = &dcb_config->tc_config[i].path[direction];
                bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
                link_percentage = p->bwg_percent;
@@ -126,7 +125,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
        min_multiplier = (min_credit / min_percent) + 1;
 
        /* Find out the link percentage for each TC first */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                p = &dcb_config->tc_config[i].path[direction];
                bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
 
@@ -141,11 +140,11 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
 
                /* Calculate credit refill ratio using multiplier */
                credit_refill = min(link_percentage * min_multiplier,
-                                   MAX_CREDIT_REFILL);
+                                   (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
                p->data_credits_refill = (u16)credit_refill;
 
                /* Calculate maximum credit for the TC */
-               credit_max = (link_percentage * MAX_CREDIT) / 100;
+               credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
 
                /*
                 * Adjustment based on rule checking, if the percentage
@@ -155,20 +154,20 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
                if (credit_max && (credit_max < min_credit))
                        credit_max = min_credit;
 
-               if (direction == DCB_TX_CONFIG) {
+               if (direction == IXGBE_DCB_TX_CONFIG) {
                        /*
                         * Adjustment based on rule checking, if the
                         * percentage of a TC is too small, the maximum
                         * credit may not be enough to send out a TSO
                         * packet in descriptor plane arbitration.
                         */
-                       if ((hw->mac.type == ixgbe_mac_82598EB) &&
-                           credit_max &&
-                           (credit_max < MINIMUM_CREDIT_FOR_TSO))
-                               credit_max = MINIMUM_CREDIT_FOR_TSO;
+                       if (credit_max && (credit_max <
+                           IXGBE_DCB_MIN_TSO_CREDIT)
+                           && (hw->mac.type == ixgbe_mac_82598EB))
+                               credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
 
                        dcb_config->tc_config[i].desc_credits_max =
-                               (u16)credit_max;
+                                                               (u16)credit_max;
                }
 
                p->data_credits_max = (u16)credit_max;
@@ -178,106 +177,318 @@ out:
        return ret_val;
 }
 
-void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+/**
+ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info which is stored per
+ * traffic class into a 8bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated before calling this routine to use the current up-to-tc maps.
+ */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
 {
-       int i;
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       int up;
 
-       *pfc_en = 0;
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
+       /*
+        * If the TC for this user priority has PFC enabled then set the
+        * matching bit in 'pfc_up' to reflect that PFC is enabled.
+        */
+       for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
+               if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
+                       *pfc_up |= 1 << up;
+       }
 }
 
-void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
                             u16 *refill)
 {
-       struct tc_bw_alloc *p;
-       int i;
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       int tc;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &cfg->tc_config[i].path[direction];
-               refill[i] = p->data_credits_refill;
-       }
+       for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+               refill[tc] = tc_config[tc].path[direction].data_credits_refill;
 }
 
-void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
 {
-       int i;
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       int tc;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               max[i] = cfg->tc_config[i].desc_credits_max;
+       for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+               max[tc] = tc_config[tc].desc_credits_max;
 }
 
-void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
                            u8 *bwgid)
 {
-       struct tc_bw_alloc *p;
-       int i;
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       int tc;
+
+       for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+               bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
+                          u8 *tsa)
+{
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       int tc;
+
+       for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+               tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+       struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       u8 prio_mask = 1 << up;
+       u8 tc = cfg->num_tcs.pg_tcs;
+
+       /* If tc is 0 then DCB is likely not enabled or supported */
+       if (!tc)
+               goto out;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &cfg->tc_config[i].path[direction];
-               bwgid[i] = p->bwg_id;
+       /*
+        * Test from maximum TC to 1 and report the first match we find.  If
+        * we find no match we can assume that the TC is 0 since the TC must
+        * be set for all user priorities
+        */
+       for (tc--; tc; tc--) {
+               if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+                       break;
        }
+out:
+       return tc;
 }
 
-void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
-                           u8 *ptype)
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
+                             u8 *map)
 {
-       struct tc_bw_alloc *p;
-       int i;
+       u8 up;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &cfg->tc_config[i].path[direction];
-               ptype[i] = p->prio_type;
+       for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
+               map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
+/**
+ * ixgbe_dcb_check_config_cee - Checks DCB rules for DCB settings.
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
+ *    Group must total 100.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ *    and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ *    with zero bandwidth.
+ */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
+{
+       struct ixgbe_dcb_tc_path *p;
+       s32 ret_val = 0;
+       u8 i, j, bw = 0, bw_id;
+       u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
+       bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
+
+       memset(bw_sum, 0, sizeof(bw_sum));
+       memset(link_strict, 0, sizeof(link_strict));
+
+       /* First Tx, then Rx */
+       for (i = 0; i < 2; i++) {
+               /* Check each traffic class for rule violation */
+               for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+                       p = &dcb_config->tc_config[j].path[i];
+
+                       bw = p->bwg_percent;
+                       bw_id = p->bwg_id;
+
+                       if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
+                               ret_val = IXGBE_ERR_CONFIG;
+                               goto err_config;
+                       }
+                       if (p->tsa == ixgbe_dcb_tsa_strict) {
+                               link_strict[i][bw_id] = true;
+                               /* Link strict should have zero bandwidth */
+                               if (bw) {
+                                       ret_val = IXGBE_ERR_CONFIG;
+                                       goto err_config;
+                               }
+                       } else if (!bw) {
+                               /*
+                                * Traffic classes without link strict
+                                * should have non-zero bandwidth.
+                                */
+                               ret_val = IXGBE_ERR_CONFIG;
+                               goto err_config;
+                       }
+                       bw_sum[i][bw_id] += bw;
+               }
+
+               bw = 0;
+
+               /* Check each bandwidth group for rule violation */
+               for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
+                       bw += dcb_config->bw_percentage[i][j];
+                       /*
+                        * Sum of bandwidth percentages of all traffic classes
+                        * within a Bandwidth Group must total 100 except for
+                        * link strict group (zero bandwidth).
+                        */
+                       if (link_strict[i][j]) {
+                               if (bw_sum[i][j]) {
+                                       /*
+                                        * Link strict group should have zero
+                                        * bandwidth.
+                                        */
+                                       ret_val = IXGBE_ERR_CONFIG;
+                                       goto err_config;
+                               }
+                       } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
+                                  bw_sum[i][j] != 0) {
+                               ret_val = IXGBE_ERR_CONFIG;
+                               goto err_config;
+                       }
+               }
+
+               if (bw != IXGBE_DCB_BW_PERCENT) {
+                       ret_val = IXGBE_ERR_CONFIG;
+                       goto err_config;
+               }
        }
+
+err_config:
+       hw_dbg(hw, "DCB error code %d while checking %s settings.\n",
+                 ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
+
+       return ret_val;
 }
 
-void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
+/**
+ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count:  Number of traffic classes to gather statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+                          u8 tc_count)
 {
-       int i, up;
-       unsigned long bitmap;
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
-               for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
-                       map[up] = i;
+/**
+ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count:  Number of elements in bwg_array.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+                           u8 tc_count)
+{
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
+               break;
+       default:
+               break;
        }
+       return ret;
 }
 
 /**
- * ixgbe_dcb_hw_config - Config and enable DCB
+ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
  * @hw: pointer to hardware structure
  * @dcb_config: pointer to ixgbe_dcb_config structure
  *
- * Configure dcb settings and enable dcb mode.
+ * Configure Rx Data Arbiter and credits for each traffic class.
  */
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
-                        struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
+                               struct ixgbe_dcb_config *dcb_config)
 {
-       s32 ret = 0;
-       u8 pfc_en;
-       u8 ptype[MAX_TRAFFIC_CLASS];
-       u8 bwgid[MAX_TRAFFIC_CLASS];
-       u8 prio_tc[MAX_TRAFFIC_CLASS];
-       u16 refill[MAX_TRAFFIC_CLASS];
-       u16 max[MAX_TRAFFIC_CLASS];
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]     = { 0 };
+       u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]   = { 0 };
+       u8 map[IXGBE_DCB_MAX_USER_PRIORITY]     = { 0 };
+       u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+       u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]    = { 0 };
+
+       ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max_cee(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+       ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
 
-       /* Unpack CEE standard containers */
-       ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
-       ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
-       ixgbe_dcb_unpack_max(dcb_config, max);
-       ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
-       ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
-       ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc);
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
+                                                       tsa, map);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
+                                    struct ixgbe_dcb_config *dcb_config)
+{
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+       ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max_cee(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
-                                               bwgid, ptype);
+               ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+                                                            bwgid, tsa);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
-                                               bwgid, ptype, prio_tc);
+               ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+                                                            bwgid, tsa);
                break;
        default:
                break;
@@ -285,10 +496,62 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
        return ret;
 }
 
-/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
+                                    struct ixgbe_dcb_config *dcb_config)
+{
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+       u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+       ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max_cee(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+       ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+                                                            bwgid, tsa);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+                                                            bwgid, tsa,
+                                                            map);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
+                        struct ixgbe_dcb_config *dcb_config)
 {
-       int ret = -EINVAL;
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       u8 pfc_en;
+       u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+
+       ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+       ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -296,7 +559,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
+               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
                break;
        default:
                break;
@@ -304,60 +567,119 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
        return ret;
 }
 
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+/**
+ * ixgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers, all queues belonging to same traffic
+ * class uses a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
 {
-       __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
-       __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
-       int i;
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_tc_stats_82598(hw);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
 
-       /* naively give each TC a bwg to map onto CEE hardware */
-       __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+/**
+ * ixgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
+                       struct ixgbe_dcb_config *dcb_config)
+{
+       s32 ret = IXGBE_NOT_IMPLEMENTED;
+       u8 pfc_en;
+       u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+       u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
 
-       /* Map TSA onto CEE prio type */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               switch (ets->tc_tsa[i]) {
-               case IEEE_8021QAZ_TSA_STRICT:
-                       prio_type[i] = 2;
-                       break;
-               case IEEE_8021QAZ_TSA_ETS:
-                       prio_type[i] = 0;
-                       break;
-               default:
-                       /* Hardware only supports priority strict or
-                        * ETS transmission selection algorithms if
-                        * we receive some other value from dcbnl
-                        * throw an error
-                        */
-                       return -EINVAL;
-               }
+       /* Unpack CEE standard containers */
+       ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max_cee(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+       ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
+                                               refill, max, bwgid, tsa);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_dcb_config_82599(hw, dcb_config);
+               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
+                                               refill, max, bwgid,
+                                               tsa, map);
+
+               ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+               break;
+       default:
+               break;
        }
 
-       ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
-       return ixgbe_dcb_hw_ets_config(hw, refill, max,
-                                      bwg_id, prio_type, ets->prio_tc);
+       if (!ret && dcb_config->pfc_mode_enable) {
+               ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+               ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+       }
+
+       return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+       int ret = IXGBE_ERR_PARAM;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+               break;
+       default:
+               break;
+       }
+       return ret;
 }
 
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
-                           u16 *refill, u16 *max, u8 *bwg_id,
-                           u8 *prio_type, u8 *prio_tc)
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+                           u8 *bwg_id, u8 *tsa, u8 *map)
 {
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
-                                                       prio_type);
-               ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
-                                                            bwg_id, prio_type);
-               ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
-                                                            bwg_id, prio_type);
+               ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+               ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+                                                      tsa);
+               ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+                                                      tsa);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
-                                                 bwg_id, prio_type, prio_tc);
-               ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
-                                                      bwg_id, prio_type);
+               ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+                                                 tsa, map);
+               ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+                                                      tsa);
                ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
-                                                      prio_type, prio_tc);
+                                                      tsa, map);
                break;
        default:
                break;
index 24333b7181665132d49f754aa46ce0a179add250..ae6bd69bf185fcefe2accc8f672da4567a76dad3 100644 (file)
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
-#ifndef _DCB_CONFIG_H_
-#define _DCB_CONFIG_H_
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
 
-#include <linux/dcbnl.h>
 #include "ixgbe_type.h"
 
-/* DCB data structures */
-
-#define IXGBE_MAX_PACKET_BUFFERS 8
-#define MAX_USER_PRIORITY        8
-#define MAX_BW_GROUP             8
-#define BW_PERCENT               100
-
-#define DCB_TX_CONFIG            0
-#define DCB_RX_CONFIG            1
-
-/* DCB error Codes */
-#define DCB_SUCCESS              0
-#define DCB_ERR_CONFIG           -1
-#define DCB_ERR_PARAM            -2
-
-/* Transmit and receive Errors */
-/* Error in bandwidth group allocation */
-#define DCB_ERR_BW_GROUP        -3
-/* Error in traffic class bandwidth allocation */
-#define DCB_ERR_TC_BW           -4
-/* Traffic class has both link strict and group strict enabled */
-#define DCB_ERR_LS_GS           -5
-/* Link strict traffic class has non zero bandwidth */
-#define DCB_ERR_LS_BW_NONZERO   -6
-/* Link strict bandwidth group has non zero bandwidth */
-#define DCB_ERR_LS_BWG_NONZERO  -7
-/*  Traffic class has zero bandwidth */
-#define DCB_ERR_TC_BW_ZERO      -8
-
-#define DCB_NOT_IMPLEMENTED      0x7FFFFFFF
-
-struct dcb_pfc_tc_debug {
-       u8  tc;
-       u8  pause_status;
-       u64 pause_quanta;
-};
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM       64
+#define IXGBE_DCB_MAX_CREDIT_REFILL    200   /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE         (32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT           (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
 
-enum strict_prio_type {
-       prio_none = 0,
-       prio_group,
-       prio_link
-};
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT       \
+       ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
 
-/* DCB capability definitions */
-#define IXGBE_DCB_PG_SUPPORT        0x00000001
-#define IXGBE_DCB_PFC_SUPPORT       0x00000002
-#define IXGBE_DCB_BCN_SUPPORT       0x00000004
-#define IXGBE_DCB_UP2TC_SUPPORT     0x00000008
-#define IXGBE_DCB_GSP_SUPPORT       0x00000010
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY    8
+#define IXGBE_DCB_MAX_BW_GROUP         8
+#define IXGBE_DCB_BW_PERCENT           100
 
-#define IXGBE_DCB_8_TC_SUPPORT      0x80
+#define IXGBE_DCB_TX_CONFIG            0
+#define IXGBE_DCB_RX_CONFIG            1
 
-struct dcb_support {
-       /* DCB capabilities */
-       u32 capabilities;
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT   0x00000001
+#define IXGBE_DCB_PFC_SUPPORT  0x00000002
+#define IXGBE_DCB_BCN_SUPPORT  0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT        0x00000008
+#define IXGBE_DCB_GSP_SUPPORT  0x00000010
+
+struct ixgbe_dcb_support {
+       u32 capabilities; /* DCB capabilities */
 
        /* Each bit represents a number of TCs configurable in the hw.
-        * If 8 traffic classes can be configured, the value is 0x80.
-        */
-       u8  traffic_classes;
-       u8  pfc_traffic_classes;
+        * If 8 traffic classes can be configured, the value is 0x80. */
+       u8 traffic_classes;
+       u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+       ixgbe_dcb_tsa_ets = 0,
+       ixgbe_dcb_tsa_group_strict_cee,
+       ixgbe_dcb_tsa_strict
 };
 
 /* Traffic class bandwidth allocation per direction */
-struct tc_bw_alloc {
-       u8 bwg_id;                /* Bandwidth Group (BWG) ID */
-       u8 bwg_percent;           /* % of BWG's bandwidth */
-       u8 link_percent;          /* % of link bandwidth */
-       u8 up_to_tc_bitmap;       /* User Priority to Traffic Class mapping */
-       u16 data_credits_refill;  /* Credit refill amount in 64B granularity */
-       u16 data_credits_max;     /* Max credits for a configured packet buffer
-                                  * in 64B granularity.*/
-       enum strict_prio_type prio_type; /* Link or Group Strict Priority */
+struct ixgbe_dcb_tc_path {
+       u8 bwg_id; /* Bandwidth Group (BWG) ID */
+       u8 bwg_percent; /* % of BWG's bandwidth */
+       u8 link_percent; /* % of link bandwidth */
+       u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+       u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+       u16 data_credits_max; /* Max credits for a configured packet buffer
+                              * in 64B granularity.*/
+       enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
 };
 
-enum dcb_pfc_type {
-       pfc_disabled = 0,
-       pfc_enabled_full,
-       pfc_enabled_tx,
-       pfc_enabled_rx
+enum ixgbe_dcb_pfc {
+       ixgbe_dcb_pfc_disabled = 0,
+       ixgbe_dcb_pfc_enabled,
+       ixgbe_dcb_pfc_enabled_txonly,
+       ixgbe_dcb_pfc_enabled_rxonly
 };
 
 /* Traffic class configuration */
-struct tc_configuration {
-       struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
-       enum dcb_pfc_type  dcb_pfc; /* Class based flow control setting */
+struct ixgbe_dcb_tc_config {
+       struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+       enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
 
        u16 desc_credits_max; /* For Tx Descriptor arbitration */
        u8 tc; /* Traffic class (TC) */
 };
 
-struct dcb_num_tcs {
+enum ixgbe_dcb_pba {
+       /* PBA[0-7] each use 64KB FIFO */
+       ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+       /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+       ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
        u8 pg_tcs;
        u8 pfc_tcs;
 };
 
 struct ixgbe_dcb_config {
-       struct dcb_support support;
-       struct dcb_num_tcs num_tcs;
-       struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
-       u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
-       bool   pfc_mode_enable;
-
-       u32  dcb_cfg_version; /* Not used...OS-specific? */
-       u32  link_speed; /* For bandwidth allocation validation purpose */
+       struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+       struct ixgbe_dcb_support support;
+       struct ixgbe_dcb_num_tcs num_tcs;
+       u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+       bool pfc_mode_enable;
+       bool round_robin_enable;
+
+       enum ixgbe_dcb_pba rx_pba_cfg;
+
+       u32 dcb_cfg_version; /* Not used...OS-specific? */
+       u32 link_speed; /* For bandwidth allocation validation purpose */
+       bool vt_mode;
 };
 
 /* DCB driver APIs */
-void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
-void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
-void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
-void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
 
 /* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
-                                  struct ixgbe_dcb_config *, int, u8);
-
-/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
-                           u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-
-/* DCB definitions for credit calculation */
-#define DCB_CREDIT_QUANTUM     64   /* DCB Quantum */
-#define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
-#define DCB_MAX_TSO_SIZE        (32*1024) /* MAX TSO packet size supported in DCB mode */
-#define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
-#define MAX_CREDIT              4095 /* Maximum credit supported: 256KB * 1204 / 64B */
-
-#endif /* _DCB_CONFIG_H */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+                                      struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+                                        struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+                                        struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+                                   struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */
index d3695edfcb8b7ce6f10bfd5fcf722d55235714cd..f40689c54c41bf3f6a449165f0a3a9a869f33300 100644 (file)
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
-#include "ixgbe.h"
+
 #include "ixgbe_type.h"
 #include "ixgbe_dcb.h"
 #include "ixgbe_dcb_82598.h"
 
+/**
+ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: number of traffic classes to gather statistics for
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
+                                struct ixgbe_hw_stats *stats,
+                                u8 tc_count)
+{
+       int tc;
+
+       if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+               return IXGBE_ERR_PARAM;
+       /* Statistics pertaining to each traffic class */
+       for (tc = 0; tc < tc_count; tc++) {
+               /* Transmitted Packets */
+               stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+               /* Transmitted Bytes */
+               stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
+               /* Received Packets */
+               stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+               /* Received Bytes */
+               stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
+
+#if 0
+               /* Can we get rid of these??  Consequently, getting rid
+                * of the tc_stats structure.
+                */
+               tc_stats_array[up]->in_overflow_discards = 0;
+               tc_stats_array[up]->out_overflow_discards = 0;
+#endif
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: number of traffic classes to gather PFC statistics for
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
+                                 struct ixgbe_hw_stats *stats,
+                                 u8 tc_count)
+{
+       int tc;
+
+       if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+               return IXGBE_ERR_PARAM;
+       for (tc = 0; tc < tc_count; tc++) {
+               /* Priority XOFF Transmitted */
+               stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+               /* Priority XOFF Received */
+               stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
+       }
+
+       return 0;
+}
+
 /**
  * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
  * @hw: pointer to hardware structure
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-                                       u16 *refill,
-                                       u16 *max,
-                                       u8 *prio_type)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+                                     u16 *max, u8 *tsa)
 {
-       u32    reg           = 0;
-       u32    credit_refill = 0;
-       u32    credit_max    = 0;
-       u8     i             = 0;
+       u32 reg = 0;
+       u32 credit_refill = 0;
+       u32 credit_max = 0;
+       u8 i = 0;
 
        reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
        IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
@@ -62,13 +124,13 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
-               credit_max    = max[i];
+               credit_max = max[i];
 
                reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RT2CR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -96,37 +158,34 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
 s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type)
+                                          u16 *refill, u16 *max, u8 *bwg_id,
+                                          u8 *tsa)
 {
-       u32    reg, max_credits;
-       u8     i;
+       u32 reg, max_credits;
+       u8 i;
 
        reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 
        /* Enable arbiter */
        reg &= ~IXGBE_DPMCS_ARBDIS;
-       /* Enable DFP and Recycle mode */
-       reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
        reg |= IXGBE_DPMCS_TSOEF;
+
        /* Configure Max TSO packet size 34KB including payload and headers */
        reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
 
        IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
 
-               if (prio_type[i] == prio_group)
+               if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_TDTQ2TCCR_GSP;
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_TDTQ2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -143,10 +202,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
 s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type)
+                                          u16 *refill, u16 *max, u8 *bwg_id,
+                                          u8 *tsa)
 {
        u32 reg;
        u8 i;
@@ -160,15 +217,15 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
        IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
 
-               if (prio_type[i] == prio_group)
+               if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_TDPT2TCCR_GSP;
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_TDPT2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -191,54 +248,46 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
  */
 s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
-       u32 reg;
-       u8  i;
-
-       if (pfc_en) {
-               /* Enable Transmit Priority Flow Control */
-               reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-               reg &= ~IXGBE_RMCS_TFCE_802_3X;
-               /* correct the reporting of our flow control status */
-               reg |= IXGBE_RMCS_TFCE_PRIORITY;
-               IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
-
-               /* Enable Receive Priority Flow Control */
-               reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-               reg &= ~IXGBE_FCTRL_RFCE;
-               reg |= IXGBE_FCTRL_RPFCE;
-               IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
-
-               /* Configure pause time */
-               for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
-
-               /* Configure flow control refresh threshold value */
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
-       }
+       u32 fcrtl, reg;
+       u8 i;
 
-       /*
-        * Configure flow control thresholds and enable priority flow control
-        * for each traffic class.
-        */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               int enabled = pfc_en & (1 << i);
+       /* Enable Transmit Priority Flow Control */
+       reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+       reg &= ~IXGBE_RMCS_TFCE_802_3X;
+       reg |= IXGBE_RMCS_TFCE_PRIORITY;
+       IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
 
-               reg = hw->fc.low_water << 10;
+       /* Enable Receive Priority Flow Control */
+       reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
 
-               if (enabled == pfc_enabled_tx ||
-                   enabled == pfc_enabled_full)
-                       reg |= IXGBE_FCRTL_XONE;
+       if (pfc_en)
+               reg |= IXGBE_FCTRL_RPFCE;
 
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-               reg = hw->fc.high_water[i] << 10;
-               if (enabled == pfc_enabled_tx ||
-                   enabled == pfc_enabled_full)
-                       reg |= IXGBE_FCRTH_FCEN;
+       /* Configure PFC Tx thresholds per TC */
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if (!(pfc_en & (1 << i))) {
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+                       continue;
+               }
 
+               fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+               reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
        }
 
+       /* Configure pause time */
+       reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+       for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+       /* Configure flow control refresh threshold value */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
        return 0;
 }
 
@@ -249,11 +298,11 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
  * Configure queue statistics registers, all queues belonging to same traffic
  * class uses a single set of queue statistics counters.
  */
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
 {
        u32 reg = 0;
-       u8  i   = 0;
-       u8  j   = 0;
+       u8 i = 0;
+       u8 j = 0;
 
        /* Receive Queues stats setting -  8 queues per statistics reg */
        for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
@@ -264,7 +313,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
                reg |= ((0x1010101) * j);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
        }
-       /* Transmit Queues stats setting -  4 queues per statistics reg */
+       /* Transmit Queues stats setting -  4 queues per statistics reg*/
        for (i = 0; i < 8; i++) {
                reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
                reg |= ((0x1010101) * i);
@@ -281,16 +330,17 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
  *
  * Configure dcb settings and enable dcb mode.
  */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
-                             u16 *max, u8 *bwg_id, u8 *prio_type)
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
+                             u16 *refill, u16 *max, u8 *bwg_id,
+                             u8 *tsa)
 {
-       ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
-       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
-                                              bwg_id, prio_type);
-       ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
-                                              bwg_id, prio_type);
-       ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+       ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+                                              tsa);
+       ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+                                              tsa);
        ixgbe_dcb_config_tc_stats_82598(hw);
 
+
        return 0;
 }
index ba835708fcace01dec3e34d3bb09b43ffe5376fc..7f0697aaeee69dec470f5eb79edbaa316f9cc35d 100644 (file)
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
-#ifndef _DCB_82598_CONFIG_H_
-#define _DCB_82598_CONFIG_H_
+#ifndef _IXGBE_DCB_82598_H_
+#define _IXGBE_DCB_82598_H_
 
 /* DCB register definitions */
 
-#define IXGBE_DPMCS_MTSOS_SHIFT 16
-#define IXGBE_DPMCS_TDPAC       0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
-#define IXGBE_DPMCS_TRM         0x00000010 /* Transmit Recycle Mode */
-#define IXGBE_DPMCS_ARBDIS      0x00000040 /* DCB arbiter disable */
-#define IXGBE_DPMCS_TSOEF       0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
-
-#define IXGBE_RUPPBMR_MQA       0x80000000 /* Enable UP to queue mapping */
-
-#define IXGBE_RT2CR_MCL_SHIFT   12 /* Offset to Max Credit Limit setting */
-#define IXGBE_RT2CR_LSP         0x80000000 /* LSP enable bit */
-
-#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet buffers enable */
-#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores (RSS) enable */
-
-#define IXGBE_TDTQ2TCCR_MCL_SHIFT   12
-#define IXGBE_TDTQ2TCCR_BWG_SHIFT   9
-#define IXGBE_TDTQ2TCCR_GSP     0x40000000
-#define IXGBE_TDTQ2TCCR_LSP     0x80000000
-
-#define IXGBE_TDPT2TCCR_MCL_SHIFT   12
-#define IXGBE_TDPT2TCCR_BWG_SHIFT   9
-#define IXGBE_TDPT2TCCR_GSP     0x40000000
-#define IXGBE_TDPT2TCCR_LSP     0x80000000
-
-#define IXGBE_PDPMCS_TPPAC      0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
-#define IXGBE_PDPMCS_ARBDIS     0x00000040 /* Arbiter disable */
-#define IXGBE_PDPMCS_TRM        0x00000100 /* Transmit Recycle Mode enable */
-
-#define IXGBE_DTXCTL_ENDBUBD    0x00000004 /* Enable DBU buffer division */
-
-#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */
-
-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
-
-/* DCB hardware-specific driver APIs */
-
-/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
-
-/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-                                       u16 *refill,
-                                       u16 *max,
-                                       u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
-                             u16 *max, u8 *bwg_id, u8 *prio_type);
-
-#endif /* _DCB_82598_CONFIG_H */
+#define IXGBE_DPMCS_MTSOS_SHIFT        16
+#define IXGBE_DPMCS_TDPAC      0x00000001 /* 0 Round Robin,
+                                           * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM                0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS     0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF      0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA      0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT  12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP                0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN    0x00000010 /* DMA config for multiple packet
+                                           * buffers enable */
+#define IXGBE_RDRXCTL_MCEN     0x00000040 /* DMA config for multiple cores
+                                           * (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT      12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT      9
+#define IXGBE_TDTQ2TCCR_GSP    0x40000000
+#define IXGBE_TDTQ2TCCR_LSP    0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT      12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT      9
+#define IXGBE_TDPT2TCCR_GSP    0x40000000
+#define IXGBE_TDPT2TCCR_LSP    0x80000000
+
+#define IXGBE_PDPMCS_TPPAC     0x00000020 /* 0 Round Robin,
+                                           * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS    0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM       0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD   0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB    0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB    0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB    0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB    0x00014000 /* 80KB Packet Buffer */
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
+                                struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
+                                 struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+                                          u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+                                          u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
+#endif /* _IXGBE_DCB_82598_H_ */
index 888a419dc3d9736ed8c1abfe9ea26e94a4e1c91a..c92462c3310faff775acc48f395c932025ad7256 100644 (file)
 
 *******************************************************************************/
 
-#include "ixgbe.h"
+
 #include "ixgbe_type.h"
 #include "ixgbe_dcb.h"
 #include "ixgbe_dcb_82599.h"
 
+/**
+ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: number of traffic classes to gather statistics for
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
+                                struct ixgbe_hw_stats *stats,
+                                u8 tc_count)
+{
+       int tc;
+
+       if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+               return IXGBE_ERR_PARAM;
+       /* Statistics pertaining to each traffic class */
+       for (tc = 0; tc < tc_count; tc++) {
+               /* Transmitted Packets */
+               stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+               /* Transmitted Bytes (read low first to prevent missed carry) */
+               stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
+               stats->qbtc[tc] +=
+                       (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
+               /* Received Packets */
+               stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+               /* Received Bytes (read low first to prevent missed carry) */
+               stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
+               stats->qbrc[tc] +=
+                       (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
+
+               /* Received Dropped Packet */
+               stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: number of traffic classes to gather statistics for
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
+                                 struct ixgbe_hw_stats *stats,
+                                 u8 tc_count)
+{
+       int tc;
+
+       if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+               return IXGBE_ERR_PARAM;
+       for (tc = 0; tc < tc_count; tc++) {
+               /* Priority XOFF Transmitted */
+               stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+               /* Priority XOFF Received */
+               stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
+       }
+
+       return 0;
+}
+
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
- * @refill: refill credits index by traffic class
- * @max: max credits index by traffic class
- * @bwg_id: bandwidth grouping indexed by traffic class
- * @prio_type: priority type indexed by traffic class
+ * @refill/@max/@bwg_id/@tsa/@map: per-traffic-class arbiter configuration
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                     u16 *refill,
-                                     u16 *max,
-                                     u8 *bwg_id,
-                                     u8 *prio_type,
-                                     u8 *prio_tc)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+                                     u16 *max, u8 *bwg_id, u8 *tsa,
+                                     u8 *map)
 {
-       u32    reg           = 0;
-       u32    credit_refill = 0;
-       u32    credit_max    = 0;
-       u8     i             = 0;
+       u32 reg = 0;
+       u32 credit_refill = 0;
+       u32 credit_max = 0;
+       u8  i = 0;
 
        /*
         * Disable the arbiter before changing parameters
@@ -59,21 +117,27 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
 
-       /* Map all traffic classes to their UP */
+       /*
+        * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+        * bits set for the UPs that need to be mapped to that TC.
+        * e.g if priorities 6 and 7 are to be mapped to a TC then the
+        * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+        */
        reg = 0;
-       for (i = 0; i < MAX_USER_PRIORITY; i++)
-               reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+       for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+               reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+
        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
-               credit_max    = max[i];
+               credit_max = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
 
                reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTRPT4C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -92,21 +156,15 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @refill: refill credits index by traffic class
- * @max: max credits index by traffic class
- * @bwg_id: bandwidth grouping indexed by traffic class
- * @prio_type: priority type indexed by traffic class
+ * @refill/@max/@bwg_id/@tsa: per-traffic-class arbiter configuration
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                          u16 *refill,
-                                          u16 *max,
-                                          u8 *bwg_id,
-                                          u8 *prio_type)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+                                          u16 *max, u8 *bwg_id, u8 *tsa)
 {
-       u32    reg, max_credits;
-       u8     i;
+       u32 reg, max_credits;
+       u8  i;
 
        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
@@ -115,16 +173,16 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
        }
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
 
-               if (prio_type[i] == prio_group)
+               if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTDT2C_GSP;
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTDT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -143,19 +201,13 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
  * @hw: pointer to hardware structure
- * @refill: refill credits index by traffic class
- * @max: max credits index by traffic class
- * @bwg_id: bandwidth grouping indexed by traffic class
- * @prio_type: priority type indexed by traffic class
+ * @refill/@max/@bwg_id/@tsa/@map: per-traffic-class arbiter configuration
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                          u16 *refill,
-                                          u16 *max,
-                                          u8 *bwg_id,
-                                          u8 *prio_type,
-                                          u8 *prio_tc)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+                                          u16 *max, u8 *bwg_id, u8 *tsa,
+                                          u8 *map)
 {
        u32 reg;
        u8 i;
@@ -169,22 +221,28 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
 
-       /* Map all traffic classes to their UP */
+       /*
+        * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+        * bits set for the UPs that need to be mapped to that TC.
+        * e.g if priorities 6 and 7 are to be mapped to a TC then the
+        * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+        */
        reg = 0;
-       for (i = 0; i < MAX_USER_PRIORITY; i++)
-               reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+       for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+               reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+
        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
 
        /* Configure traffic class credits and priority */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
 
-               if (prio_type[i] == prio_group)
+               if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTPT2C_GSP;
 
-               if (prio_type[i] == prio_link)
+               if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTPT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -205,145 +263,287 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
  * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
  * @hw: pointer to hardware structure
  * @pfc_en: enabled pfc bitmask
- * @prio_tc: priority to tc assignments indexed by priority
+ * @map: priority to tc assignments indexed by priority
  *
  * Configure Priority Flow Control (PFC) for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
 {
-       u32 i, j, reg;
+       u32 i, j, fcrtl, reg;
        u8 max_tc = 0;
 
-       for (i = 0; i < MAX_USER_PRIORITY; i++)
-               if (prio_tc[i] > max_tc)
-                       max_tc = prio_tc[i];
+       /* Enable Transmit Priority Flow Control */
+       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+       /* Enable Receive Priority Flow Control */
+       reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       reg |= IXGBE_MFLCN_DPF;
+
+       /*
+        * X540 supports per TC Rx priority flow control.  So
+        * clear all TCs and only enable those that should be
+        * enabled.
+        */
+       reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+       if (hw->mac.type == ixgbe_mac_X540)
+               reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+       if (pfc_en)
+               reg |= IXGBE_MFLCN_RPFCE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+       for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
+               if (map[i] > max_tc)
+                       max_tc = map[i];
+       }
+
 
        /* Configure PFC Tx thresholds per TC */
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i <= max_tc; i++) {
                int enabled = 0;
 
-               if (i > max_tc) {
-                       reg = 0;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
-                       continue;
-               }
-
-               for (j = 0; j < MAX_USER_PRIORITY; j++) {
-                       if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+               for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
+                       if ((map[j] == i) && (pfc_en & (1 << j))) {
                                enabled = 1;
                                break;
                        }
                }
 
-               reg = hw->fc.low_water << 10;
-
-               if (enabled)
-                       reg |= IXGBE_FCRTL_XONE;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
+               if (enabled) {
+                       reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+               } else {
+                       reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+               }
 
-               reg = hw->fc.high_water[i] << 10;
-               if (enabled)
-                       reg |= IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }
 
-       if (pfc_en) {
-               /* Configure pause time (2 TCs per register) */
-               reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-               for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+       for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+       }
 
-               /* Configure flow control refresh threshold value */
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+       /* Configure pause time (2 TCs per register) */
+       reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+       for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
+       /* Configure flow control refresh threshold value */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
-               reg = IXGBE_FCCFG_TFCE_PRIORITY;
-               IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
-               /*
-                * Enable Receive PFC
-                * 82599 will always honor XOFF frames we receive when
-                * we are in PFC mode however X540 only honors enabled
-                * traffic classes.
-                */
-               reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-               reg &= ~IXGBE_MFLCN_RFCE;
-               reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+       return 0;
+}
 
-               if (hw->mac.type == ixgbe_mac_X540) {
-                       reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-                       reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
-               }
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers, all queues belonging to same traffic
+ * class uses a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
+                                   struct ixgbe_dcb_config *dcb_config)
+{
+       u32 reg = 0;
+       u8  i   = 0;
+       u8 tc_count = 8;
+       bool vt_mode = false;
+
+       if (dcb_config != NULL) {
+               tc_count = dcb_config->num_tcs.pg_tcs;
+               vt_mode = dcb_config->vt_mode;
+       }
 
-               IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+       if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+               return IXGBE_ERR_PARAM;
 
-       } else {
-               /* X540 devices have a RX bit that should be cleared
-                * if PFC is disabled on all TCs but PFC features is
-                * enabled.
+       if (tc_count == 8 && vt_mode == false) {
+               /*
+                * Receive Queues stats setting
+                * 32 RQSMR registers, each configuring 4 queues.
+                *
+                * Set all 16 queues of each TC to the same stat
+                * with TC 'n' going to stat 'n'.
                 */
-               if (hw->mac.type == ixgbe_mac_X540) {
-                       reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                       reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-                       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+               for (i = 0; i < 32; i++) {
+                       reg = 0x01010101 * (i / 4);
+                       IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
-
-               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-                       hw->mac.ops.fc_enable(hw, i);
+               /*
+                * Transmit Queues stats setting
+                * 32 TQSM registers, each controlling 4 queues.
+                *
+                * Set all queues of each TC to the same stat
+                * with TC 'n' going to stat 'n'.
+                * Tx queues are allocated non-uniformly to TCs:
+                * 32, 32, 16, 16, 8, 8, 8, 8.
+                */
+               for (i = 0; i < 32; i++) {
+                       if (i < 8)
+                               reg = 0x00000000;
+                       else if (i < 16)
+                               reg = 0x01010101;
+                       else if (i < 20)
+                               reg = 0x02020202;
+                       else if (i < 24)
+                               reg = 0x03030303;
+                       else if (i < 26)
+                               reg = 0x04040404;
+                       else if (i < 28)
+                               reg = 0x05050505;
+                       else if (i < 30)
+                               reg = 0x06060606;
+                       else
+                               reg = 0x07070707;
+                       IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+               }
+       } else if (tc_count == 4 && vt_mode == false) {
+               /*
+                * Receive Queues stats setting
+                * 32 RQSMR registers, each configuring 4 queues.
+                *
+                * Set all 16 queues of each TC to the same stat
+                * with TC 'n' going to stat 'n'.
+                */
+               for (i = 0; i < 32; i++) {
+                       if (i % 8 > 3)
+                               /* In 4 TC mode, odd 16-queue ranges are
+                                * not used.
+                                */
+                               continue;
+                       reg = 0x01010101 * (i / 8);
+                       IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+               }
+               /*
+                * Transmit Queues stats setting
+                * 32 TQSM registers, each controlling 4 queues.
+                *
+                * Set all queues of each TC to the same stat
+                * with TC 'n' going to stat 'n'.
+                * Tx queues are allocated non-uniformly to TCs:
+                * 64, 32, 16, 16.
+                */
+               for (i = 0; i < 32; i++) {
+                       if (i < 16)
+                               reg = 0x00000000;
+                       else if (i < 24)
+                               reg = 0x01010101;
+                       else if (i < 28)
+                               reg = 0x02020202;
+                       else
+                               reg = 0x03030303;
+                       IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+               }
+       } else if (tc_count == 4 && vt_mode == true) {
+               /*
+                * Receive Queues stats setting
+                * 32 RQSMR registers, each configuring 4 queues.
+                *
+                * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+                * pool. Set all 32 queues of each TC across pools to the same
+                * stat with TC 'n' going to stat 'n'.
+                */
+               for (i = 0; i < 32; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
+               /*
+                * Transmit Queues stats setting
+                * 32 TQSM registers, each controlling 4 queues.
+                *
+                * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+                * pool. Set all 32 queues of each TC across pools to the same
+                * stat with TC 'n' going to stat 'n'.
+                */
+               for (i = 0; i < 32; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
        }
 
        return 0;
 }
 
 /**
- * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * ixgbe_dcb_config_82599 - Configure general DCB parameters
  * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
  *
- * Configure queue statistics registers, all queues belonging to same traffic
- * class uses a single set of queue statistics counters.
+ * Configure general DCB parameters.
  */
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
+                          struct ixgbe_dcb_config *dcb_config)
 {
-       u32 reg = 0;
-       u8  i   = 0;
+       u32 reg;
+       u32 q;
 
-       /*
-        * Receive Queues stats setting
-        * 32 RQSMR registers, each configuring 4 queues.
-        * Set all 16 queues of each TC to the same stat
-        * with TC 'n' going to stat 'n'.
-        */
-       for (i = 0; i < 32; i++) {
-               reg = 0x01010101 * (i / 4);
-               IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+       /* Disable the Tx desc arbiter so that MTQC can be changed */
+       reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+       reg |= IXGBE_RTTDCS_ARBDIS;
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       if (dcb_config->num_tcs.pg_tcs == 8) {
+               /* Enable DCB for Rx with 8 TCs */
+               switch (reg & IXGBE_MRQC_MRQE_MASK) {
+               case 0:
+               case IXGBE_MRQC_RT4TCEN:
+                       /* RSS disabled cases */
+                       reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                             IXGBE_MRQC_RT8TCEN;
+                       break;
+               case IXGBE_MRQC_RSSEN:
+               case IXGBE_MRQC_RTRSS4TCEN:
+                       /* RSS enabled cases */
+                       reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                             IXGBE_MRQC_RTRSS8TCEN;
+                       break;
+               default:
+                       /*
+                        * Unsupported value, assume stale data,
+                        * overwrite no RSS
+                        */
+                       reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                             IXGBE_MRQC_RT8TCEN;
+               }
        }
-       /*
-        * Transmit Queues stats setting
-        * 32 TQSM registers, each controlling 4 queues.
-        * Set all queues of each TC to the same stat
-        * with TC 'n' going to stat 'n'.
-        * Tx queues are allocated non-uniformly to TCs:
-        * 32, 32, 16, 16, 8, 8, 8, 8.
-        */
-       for (i = 0; i < 32; i++) {
-               if (i < 8)
-                       reg = 0x00000000;
-               else if (i < 16)
-                       reg = 0x01010101;
-               else if (i < 20)
-                       reg = 0x02020202;
-               else if (i < 24)
-                       reg = 0x03030303;
-               else if (i < 26)
-                       reg = 0x04040404;
-               else if (i < 28)
-                       reg = 0x05050505;
-               else if (i < 30)
-                       reg = 0x06060606;
+       if (dcb_config->num_tcs.pg_tcs == 4) {
+               /* We support both VT-on and VT-off with 4 TCs. */
+               if (dcb_config->vt_mode)
+                       reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                             IXGBE_MRQC_VMDQRT4TCEN;
                else
-                       reg = 0x07070707;
-               IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+                       reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                             IXGBE_MRQC_RTRSS4TCEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+       /* Enable DCB for Tx with 8 TCs */
+       if (dcb_config->num_tcs.pg_tcs == 8)
+               reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+       else {
+               /* We support both VT-on and VT-off with 4 TCs. */
+               reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+               if (dcb_config->vt_mode)
+                       reg |= IXGBE_MTQC_VT_ENA;
        }
+       IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+       /* Disable drop for all queues */
+       for (q = 0; q < 128; q++)
+               IXGBE_WRITE_REG(hw, IXGBE_QDE,
+                               (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+       /* Enable the Tx desc arbiter */
+       reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+       reg &= ~IXGBE_RTTDCS_ARBDIS;
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+       /* Enable Security TX Buffer IFG for DCB */
+       reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+       reg |= IXGBE_SECTX_DCB;
+       IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
 
        return 0;
 }
@@ -351,25 +551,21 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @refill: refill credits index by traffic class
- * @max: max credits index by traffic class
- * @bwg_id: bandwidth grouping indexed by traffic class
- * @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @dcb_config: pointer to ixgbe_dcb_config structure
  *
  * Configure dcb settings and enable dcb mode.
  */
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
-                             u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
+                             u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+                             u8 *map)
 {
-       ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
-                                         prio_type, prio_tc);
-       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
-                                              bwg_id, prio_type);
-       ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
-                                              bwg_id, prio_type, prio_tc);
-       ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
-       ixgbe_dcb_config_tc_stats_82599(hw);
+
+       ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
+                                         map);
+       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+                                              tsa);
+       ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+                                              tsa, map);
 
        return 0;
 }
index 4dec47faeb00566beb5dabe3715f0304d1a7d3d1..e9f5d5147d84c2b50a61c1715409975580a309e7 100644 (file)
 
 *******************************************************************************/
 
-#ifndef _DCB_82599_CONFIG_H_
-#define _DCB_82599_CONFIG_H_
+#ifndef _IXGBE_DCB_82599_H_
+#define _IXGBE_DCB_82599_H_
 
 /* DCB register definitions */
-#define IXGBE_RTTDCS_TDPAC      0x00000001 /* 0 Round Robin,
-                                            * 1 WSP - Weighted Strict Priority
-                                            */
-#define IXGBE_RTTDCS_VMPAC      0x00000002 /* 0 Round Robin,
-                                            * 1 WRR - Weighted Round Robin
-                                            */
-#define IXGBE_RTTDCS_TDRM       0x00000010 /* Transmit Recycle Mode */
-#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTDCS_BDPM       0x00400000 /* Bypass Data Pipe - must clear! */
-#define IXGBE_RTTDCS_BPBFSM     0x00800000 /* Bypass PB Free Space - must
-                                             * clear!
-                                             */
-#define IXGBE_RTTDCS_SPEED_CHG  0x80000000 /* Link speed change */
+#define IXGBE_RTTDCS_TDPAC     0x00000001 /* 0 Round Robin,
+                                           * 1 WSP - Weighted Strict Priority
+                                           */
+#define IXGBE_RTTDCS_VMPAC     0x00000002 /* 0 Round Robin,
+                                           * 1 WRR - Weighted Round Robin
+                                           */
+#define IXGBE_RTTDCS_TDRM      0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_BDPM      0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM    0x00800000 /* Bypass PB Free Space - must
+                                            * clear!
+                                            */
+#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
 
 /* Receive UP2TC mapping */
-#define IXGBE_RTRUP2TC_UP_SHIFT 3
+#define IXGBE_RTRUP2TC_UP_SHIFT        3
 /* Transmit UP2TC mapping */
-#define IXGBE_RTTUP2TC_UP_SHIFT 3
+#define IXGBE_RTTUP2TC_UP_SHIFT        3
 
-#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
-#define IXGBE_RTRPT4C_BWG_SHIFT 9  /* Offset to BWG index */
-#define IXGBE_RTRPT4C_GSP       0x40000000 /* GSP enable bit */
-#define IXGBE_RTRPT4C_LSP       0x80000000 /* LSP enable bit */
+#define IXGBE_RTRPT4C_MCL_SHIFT        12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT        9  /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP      0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP      0x80000000 /* LSP enable bit */
 
-#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet
-                                            * buffers enable
-                                            */
-#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores
-                                            * (RSS) enable
-                                            */
+#define IXGBE_RDRXCTL_MPBEN    0x00000010 /* DMA config for multiple packet
+                                           * buffers enable
+                                           */
+#define IXGBE_RDRXCTL_MCEN     0x00000040 /* DMA config for multiple cores
+                                           * (RSS) enable
+                                           */
 
 /* RTRPCS Bit Masks */
-#define IXGBE_RTRPCS_RRM        0x00000002 /* Receive Recycle Mode enable */
+#define IXGBE_RTRPCS_RRM       0x00000002 /* Receive Recycle Mode enable */
 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RTRPCS_RAC        0x00000004
-#define IXGBE_RTRPCS_ARBDIS     0x00000040 /* Arbitration disable bit */
+#define IXGBE_RTRPCS_RAC       0x00000004
+#define IXGBE_RTRPCS_ARBDIS    0x00000040 /* Arbitration disable bit */
 
 /* RTTDT2C Bit Masks */
-#define IXGBE_RTTDT2C_MCL_SHIFT 12
-#define IXGBE_RTTDT2C_BWG_SHIFT 9
-#define IXGBE_RTTDT2C_GSP       0x40000000
-#define IXGBE_RTTDT2C_LSP       0x80000000
+#define IXGBE_RTTDT2C_MCL_SHIFT        12
+#define IXGBE_RTTDT2C_BWG_SHIFT        9
+#define IXGBE_RTTDT2C_GSP      0x40000000
+#define IXGBE_RTTDT2C_LSP      0x80000000
 
-#define IXGBE_RTTPT2C_MCL_SHIFT 12
-#define IXGBE_RTTPT2C_BWG_SHIFT 9
-#define IXGBE_RTTPT2C_GSP       0x40000000
-#define IXGBE_RTTPT2C_LSP       0x80000000
+#define IXGBE_RTTPT2C_MCL_SHIFT        12
+#define IXGBE_RTTPT2C_BWG_SHIFT        9
+#define IXGBE_RTTPT2C_GSP      0x40000000
+#define IXGBE_RTTPT2C_LSP      0x80000000
 
 /* RTTPCS Bit Masks */
-#define IXGBE_RTTPCS_TPPAC      0x00000020 /* 0 Round Robin,
-                                            * 1 SP - Strict Priority
-                                            */
-#define IXGBE_RTTPCS_ARBDIS     0x00000040 /* Arbiter disable */
-#define IXGBE_RTTPCS_TPRM       0x00000100 /* Transmit Recycle Mode enable */
-#define IXGBE_RTTPCS_ARBD_SHIFT 22
-#define IXGBE_RTTPCS_ARBD_DCB   0x4        /* Arbitration delay in DCB mode */
+#define IXGBE_RTTPCS_TPPAC     0x00000020 /* 0 Round Robin,
+                                           * 1 SP - Strict Priority
+                                           */
+#define IXGBE_RTTPCS_ARBDIS    0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM      0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT        22
+#define IXGBE_RTTPCS_ARBD_DCB  0x4 /* Arbitration delay in DCB mode */
 
-/* SECTXMINIFG DCB */
-#define IXGBE_SECTX_DCB                0x00001F00 /* DCB TX Buffer IFG */
+#define IXGBE_TXPBTHRESH_DCB   0xA /* THRESH value for DCB mode */
 
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB                0x00001F00 /* DCB TX Buffer SEC IFG */
 
-/* DCB hardware-specific driver APIs */
 
-/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+/* DCB driver APIs */
 
-/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                       u16 *refill,
-                                       u16 *max,
-                                       u8 *bwg_id,
-                                       u8 *prio_type,
-                                       u8 *prio_tc);
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
 
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type);
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
+                                   struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
+                                struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
+                                 struct ixgbe_hw_stats *, u8);
 
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                               u16 *refill,
-                                               u16 *max,
-                                               u8 *bwg_id,
-                                               u8 *prio_type,
-                                               u8 *prio_tc);
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+                                          u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+                                          u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
+                                     u8 *, u8 *);
 
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
-                             u16 *max, u8 *bwg_id, u8 *prio_type,
-                             u8 *prio_tc);
+/* DCB initialization */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
+                          struct ixgbe_dcb_config *);
 
-#endif /* _DCB_82599_CONFIG_H */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
+                             u8 *, u8 *);
+#endif /* _IXGBE_DCB_82599_H_ */
index 3e3569b78bc1d74e1955fb482c735ef077b878b3..8fb3aa87fc75e7bb3a453864e3c70d42881af469 100644 (file)
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
 #include "ixgbe.h"
+
+#ifdef CONFIG_DCB
 #include <linux/dcbnl.h>
 #include "ixgbe_dcb_82598.h"
 #include "ixgbe_dcb_82599.h"
 #define BIT_PG_RX      0x04
 #define BIT_PG_TX      0x08
 #define BIT_APP_UPCHG  0x10
-#define BIT_LINKSPEED   0x80
+#define BIT_RESETLINK  0x40
+#define BIT_LINKSPEED  0x80
 
 /* Responses for the DCB_C_SET_ALL command */
-#define DCB_HW_CHG_RST  0  /* DCB configuration changed with reset */
-#define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
-#define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
+#define DCB_HW_CHG_RST 0  /* DCB configuration changed with reset */
+#define DCB_NO_HW_CHG  1  /* DCB configuration did not change */
+#define DCB_HW_CHG     2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                       struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
+int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 {
-       struct tc_configuration *src_tc_cfg = NULL;
-       struct tc_configuration *dst_tc_cfg = NULL;
-       int i;
+       struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+       struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
+       struct ixgbe_dcb_tc_config *src = NULL;
+       struct ixgbe_dcb_tc_config *dst = NULL;
+       int i, j;
+       int tx = IXGBE_DCB_TX_CONFIG;
+       int rx = IXGBE_DCB_RX_CONFIG;
+       int changes = 0;
 
-       if (!src_dcb_cfg || !dst_dcb_cfg)
-               return -EINVAL;
+#ifdef IXGBE_FCOE
+       if (adapter->fcoe.up_set != adapter->fcoe.up)
+               changes |= BIT_APP_UPCHG;
 
+#endif /* IXGBE_FCOE */
        for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
-               src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
-               dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+               src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
+               dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
+               if (dst->path[tx].tsa != src->path[tx].tsa) {
+                       dst->path[tx].tsa = src->path[tx].tsa;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
+               if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
+                       dst->path[tx].bwg_id = src->path[tx].bwg_id;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
+               if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
+                       dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
+               if (dst->path[tx].up_to_tc_bitmap !=
+                   src->path[tx].up_to_tc_bitmap) {
+                       dst->path[tx].up_to_tc_bitmap =
+                               src->path[tx].up_to_tc_bitmap;
+                       changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
+               if (dst->path[rx].tsa != src->path[rx].tsa) {
+                       dst->path[rx].tsa = src->path[rx].tsa;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
+               if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
+                       dst->path[rx].bwg_id = src->path[rx].bwg_id;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
+               if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
+                       dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
+               if (dst->path[rx].up_to_tc_bitmap !=
+                   src->path[rx].up_to_tc_bitmap) {
+                       dst->path[rx].up_to_tc_bitmap =
+                               src->path[rx].up_to_tc_bitmap;
+                       changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
+               }
        }
 
        for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
-               dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-               dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+               j = i - DCB_PG_ATTR_BW_ID_0;
+
+               if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
+                       dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
+                       changes |= BIT_PG_TX;
+               }
+               if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
+                       dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
+                       changes |= BIT_PG_RX;
+               }
        }
 
        for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
-               dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
-                       src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+               j = i - DCB_PFC_UP_ATTR_0;
+               if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) {
+                       dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc;
+                       changes |= BIT_PFC;
+               }
        }
 
-       dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+       if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
+               dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
+               changes |= BIT_PFC;
+       }
 
-       return 0;
+       return changes;
 }
 
 static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -111,34 +149,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
 
 static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 {
-       int err = 0;
-       u8 prio_tc[MAX_USER_PRIORITY] = {0};
-       int i;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int err = 0;
 
        /* Fail command if not in CEE mode */
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;
 
        /* verify there is something to do, if not then exit */
-       if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+       if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
                goto out;
 
-       if (state > 0) {
-               err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
-               ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
-       } else {
-               err = ixgbe_setup_tc(netdev, 0);
-       }
-
-       if (err)
-               goto out;
-
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
-
+       err = ixgbe_setup_tc(netdev,
+                            state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
 out:
-       return err ? 1 : 0;
+       return !!err;
 }
 
 static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -164,13 +189,13 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                         u8 prio, u8 bwg_id, u8 bw_pct,
-                                         u8 up_map)
+                                        u8 prio, u8 bwg_id, u8 bw_pct,
+                                        u8 up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
+               adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
        if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
@@ -179,42 +204,24 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
        if (up_map != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
                        up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
-               adapter->dcb_set_bitmap |= BIT_PG_TX;
-
-       if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                          u8 bw_pct)
+                                         u8 bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[0][bwg_id])
-               adapter->dcb_set_bitmap |= BIT_PG_TX;
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                         u8 prio, u8 bwg_id, u8 bw_pct,
-                                         u8 up_map)
+                                        u8 prio, u8 bwg_id, u8 bw_pct,
+                                        u8 up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
-               adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
+               adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
        if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
@@ -223,48 +230,30 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
        if (up_map != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
                        up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
-
-       if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                          u8 bw_pct)
+                                         u8 bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[1][bwg_id])
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                         u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                         u8 *up_map)
+                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
+                                        u8 *up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
+       *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa;
        *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
        *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
        *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                          u8 *bw_pct)
+                                         u8 *bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -272,44 +261,41 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                         u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                         u8 *up_map)
+                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
+                                        u8 *up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
+       *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa;
        *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
        *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
        *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                          u8 *bw_pct)
+                                         u8 *bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
 }
 
-static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
-                                    u8 setting)
+static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up);
 
-       adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
-       if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-           adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
-               adapter->dcb_set_bitmap |= BIT_PFC;
+       adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc;
+       if (adapter->temp_dcb_cfg.tc_config[tc].pfc !=
+           adapter->dcb_cfg.tc_config[tc].pfc)
                adapter->temp_dcb_cfg.pfc_mode_enable = true;
-       }
 }
 
-static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
-                                    u8 *setting)
+static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
+       u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up);
+       *pfc = adapter->dcb_cfg.tc_config[tc].pfc;
 }
 
 #ifdef IXGBE_FCOE
@@ -321,141 +307,106 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
                usleep_range(1000, 2000);
 
        if (netif_running(dev))
+#ifdef HAVE_NET_DEVICE_OPS
                dev->netdev_ops->ndo_stop(dev);
+#else
+               dev->stop(dev);
+#endif
 
        ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
 
        if (netif_running(dev))
+#ifdef HAVE_NET_DEVICE_OPS
                dev->netdev_ops->ndo_open(dev);
+#else
+               dev->open(dev);
+#endif
 
        clear_bit(__IXGBE_RESETTING, &adapter->state);
 }
-#endif
 
+#endif
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int ret, i;
-#ifdef IXGBE_FCOE
-       struct dcb_app app = {
-                             .selector = DCB_APP_IDTYPE_ETHTYPE,
-                             .protocol = ETH_P_FCOE,
-                            };
-       u8 up;
-
-       /* In IEEE mode, use the IEEE Ethertype selector value */
-       if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
-               app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-               up = dcb_ieee_getapp_mask(netdev, &app);
-       } else {
-               up = dcb_getapp(netdev, &app);
-       }
-#endif
+       struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       int ret = DCB_NO_HW_CHG;
+       u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
 
        /* Fail command if not in CEE mode */
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return 1;
+               return ret;
 
-       ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-                                MAX_TRAFFIC_CLASS);
-       if (ret)
-               return DCB_NO_HW_CHG;
-
-       if (adapter->dcb_cfg.pfc_mode_enable) {
-               switch (adapter->hw.mac.type) {
-               case ixgbe_mac_82599EB:
-               case ixgbe_mac_X540:
-                       if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
-                               adapter->last_lfc_mode =
-                                                 adapter->hw.fc.current_mode;
-                       break;
-               default:
-                       break;
-               }
-               adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
-       } else {
-               switch (adapter->hw.mac.type) {
-               case ixgbe_mac_82598EB:
-                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
-                       break;
-               case ixgbe_mac_82599EB:
-               case ixgbe_mac_X540:
-                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-                       break;
-               default:
-                       break;
-               }
-       }
+       adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
+                                                     IXGBE_DCB_MAX_TRAFFIC_CLASS);
+       if (!adapter->dcb_set_bitmap)
+               return ret;
 
-       if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
-               u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
-               u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+       ixgbe_dcb_unpack_map_cee(dcb_cfg, IXGBE_DCB_TX_CONFIG, prio_tc);
+
+       if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) {
                /* Priority to TC mapping in CEE case default to 1:1 */
-               u8 prio_tc[MAX_USER_PRIORITY];
                int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef HAVE_MQPRIO
+               int i;
+#endif
 
 #ifdef IXGBE_FCOE
                if (adapter->netdev->features & NETIF_F_FCOE_MTU)
                        max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-                                              max_frame, DCB_TX_CONFIG);
-               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-                                              max_frame, DCB_RX_CONFIG);
+               ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame,
+                                                  IXGBE_DCB_TX_CONFIG);
 
-               ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
-                                       DCB_TX_CONFIG, refill);
-               ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
-               ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
-                                      DCB_TX_CONFIG, bwg_id);
-               ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
-                                     DCB_TX_CONFIG, prio_type);
-               ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-                                    DCB_TX_CONFIG, prio_tc);
+               ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame,
+                                                  IXGBE_DCB_RX_CONFIG);
 
-               ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
-                                       bwg_id, prio_type, prio_tc);
+               ixgbe_dcb_hw_config_cee(hw, dcb_cfg);
 
+#ifdef HAVE_MQPRIO
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+#endif /* HAVE_MQPRIO */
+               ret = DCB_HW_CHG_RST;
        }
 
        if (adapter->dcb_set_bitmap & BIT_PFC) {
-               u8 pfc_en;
-               u8 prio_tc[MAX_USER_PRIORITY];
-
-               ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-                                    DCB_TX_CONFIG, prio_tc);
-               ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
-               ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-               ret = DCB_HW_CHG;
+               if (dcb_cfg->pfc_mode_enable) {
+                       u8 pfc_en;
+                       ixgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en);
+                       ixgbe_dcb_config_pfc(hw, pfc_en, prio_tc);
+               } else {
+                       hw->mac.ops.fc_enable(hw);
+               }
+               ixgbe_set_rx_drop_en(adapter);
+               if (ret != DCB_HW_CHG_RST)
+                       ret = DCB_HW_CHG;
        }
 
-       if (adapter->dcb_cfg.pfc_mode_enable)
-               adapter->hw.fc.current_mode = ixgbe_fc_pfc;
-
 #ifdef IXGBE_FCOE
        /* Reprogam FCoE hardware offloads when the traffic class
         * FCoE is using changes. This happens if the APP info
         * changes or the up2tc mapping is updated.
         */
-       if ((up && !(up & (1 << adapter->fcoe.up))) ||
-           (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
-               adapter->fcoe.up = ffs(up) - 1;
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+               adapter->fcoe.up_set = adapter->fcoe.up;
                ixgbe_dcbnl_devreset(netdev);
                ret = DCB_HW_CHG_RST;
        }
-#endif
 
+#endif /* IXGBE_FCOE */
        adapter->dcb_set_bitmap = 0x00;
        return ret;
 }
 
 static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
 {
+#ifdef HAVE_DCBNL_IEEE
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#endif
 
        switch (capid) {
        case DCB_CAP_ATTR_PG:
@@ -479,9 +430,11 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
        case DCB_CAP_ATTR_BCN:
                *cap = false;
                break;
+#ifdef HAVE_DCBNL_IEEE
        case DCB_CAP_ATTR_DCBX:
                *cap = adapter->dcbx_cap;
                break;
+#endif
        default:
                *cap = false;
                break;
@@ -490,7 +443,11 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
        return 0;
 }
 
+#ifdef NUMTCS_RETURNS_U8
 static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+#else
+static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+#endif
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u8 rval = 0;
@@ -514,9 +471,32 @@ static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
        return rval;
 }
 
+#ifdef NUMTCS_RETURNS_U8
 static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+#else
+static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+#endif
 {
-       return -EINVAL;
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u8 rval = 0;
+
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               switch (tcid) {
+               case DCB_NUMTCS_ATTR_PG:
+                       adapter->dcb_cfg.num_tcs.pg_tcs = num;
+                       break;
+               case DCB_NUMTCS_ATTR_PFC:
+                       adapter->dcb_cfg.num_tcs.pfc_tcs = num;
+                       break;
+               default:
+                       rval = -EINVAL;
+                       break;
+               }
+       } else {
+               rval = -EINVAL;
+       }
+
+       return rval;
 }
 
 static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
@@ -531,11 +511,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.pfc_mode_enable = state;
-       if (adapter->temp_dcb_cfg.pfc_mode_enable !=
-               adapter->dcb_cfg.pfc_mode_enable)
-               adapter->dcb_set_bitmap |= BIT_PFC;
+       return;
 }
 
+#ifdef HAVE_DCBNL_OPS_GETAPP
 /**
  * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority
  * @netdev : the corresponding netdev
@@ -548,30 +527,86 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  */
 static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u8 rval = 0;
+#ifdef HAVE_DCBNL_IEEE
        struct dcb_app app = {
-                               .selector = idtype,
-                               .protocol = id,
-                            };
+               .selector = idtype,
+               .protocol = id,
+       };
 
-       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return 0;
+       rval = dcb_getapp(netdev, &app);
+#endif
+
+       switch (idtype) {
+       case DCB_APP_IDTYPE_ETHTYPE:
+#ifdef IXGBE_FCOE
+               if (id == ETH_P_FCOE)
+                       rval = ixgbe_fcoe_getapp(netdev);
+#endif
+               break;
+       case DCB_APP_IDTYPE_PORTNUM:
+               break;
+       default:
+               break;
+       }
+
+       return rval;
+}
+
+/**
+ * ixgbe_dcbnl_setapp - set the DCBX application user priority
+ * @netdev : the corresponding netdev
+ * @idtype : identifies the id as ether type or TCP/UDP port number
+ * @id: id is either ether type or TCP/UDP port number
+ * @up: the 802.1p user priority bitmap
+ *
+ * Returns : 0 on success or 1 on error
+ */
+static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
+                            u8 idtype, u16 id, u8 up)
+{
+       int err = 0;
+#ifdef HAVE_DCBNL_IEEE
+       struct dcb_app app;
 
-       return dcb_getapp(netdev, &app);
+       app.selector = idtype;
+       app.protocol = id;
+       app.priority = up;
+       err = dcb_setapp(netdev, &app);
+#endif
+
+       switch (idtype) {
+       case DCB_APP_IDTYPE_ETHTYPE:
+#ifdef IXGBE_FCOE
+               if (id == ETH_P_FCOE) {
+                       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+                       adapter->fcoe.up = ffs(up) - 1;
+               }
+#endif
+               break;
+       case DCB_APP_IDTYPE_PORTNUM:
+               break;
+       default:
+               break;
+       }
+
+       return err;
 }
+#endif /* HAVE_DCBNL_OPS_GETAPP */
 
+#ifdef HAVE_DCBNL_IEEE
 static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
                                   struct ieee_ets *ets)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
 
-       ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
-
        /* No IEEE PFC settings available */
        if (!my_ets)
-               return 0;
+               return -EINVAL;
 
+       ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
        ets->cbs = my_ets->cbs;
        memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
        memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
@@ -617,9 +652,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
        if (err)
                goto err_out;
 
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
-
        err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
 err_out:
        return err;
@@ -632,29 +664,31 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
        struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
        int i;
 
-       pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
-
        /* No IEEE PFC settings available */
        if (!my_pfc)
-               return 0;
+               return -EINVAL;
 
+       pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
        pfc->pfc_en = my_pfc->pfc_en;
        pfc->mbc = my_pfc->mbc;
        pfc->delay = my_pfc->delay;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                pfc->requests[i] = adapter->stats.pxoffrxc[i];
                pfc->indications[i] = adapter->stats.pxofftxc[i];
        }
 
        return 0;
 }
+       int err;
 
 static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
                                   struct ieee_pfc *pfc)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
        u8 *prio_tc;
+       int err;
 
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
                return -EINVAL;
@@ -668,7 +702,17 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 
        prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
        memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
-       return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
+
+
+       /* Enable link flow control parameters if PFC is disabled */
+       if (pfc->pfc_en)
+               err = ixgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc);
+       else
+               err = hw->mac.ops.fc_enable(hw);
+
+       ixgbe_set_rx_drop_en(adapter);
+
+       return err;
 }
 
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
@@ -691,19 +735,41 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                        return err;
 
                adapter->fcoe.up = app->priority;
+               adapter->fcoe.up_set = adapter->fcoe.up;
+               ixgbe_dcbnl_devreset(dev);
+       }
+#endif
+       return 0;
+}
 
-               if (netif_running(dev))
-                       dev->netdev_ops->ndo_stop(dev);
+#ifdef HAVE_DCBNL_IEEE_DELAPP
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+                                  struct dcb_app *app)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       int err;
 
-               ixgbe_clear_interrupt_scheme(adapter);
-               ixgbe_init_interrupt_scheme(adapter);
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+       if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+           app->protocol == ETH_P_FCOE) {
+               u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+               if (app_mask & (1 << adapter->fcoe.up))
+                       return err;
 
-               if (netif_running(dev))
-                       dev->netdev_ops->ndo_open(dev);
+               adapter->fcoe.up = app_mask ?
+                                  ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP;
+               ixgbe_dcbnl_devreset(dev);
        }
 #endif
        return err;
 }
+#endif /* HAVE_DCBNL_IEEE_DELAPP */
 
 static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
 {
@@ -716,7 +782,6 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ieee_ets ets = {0};
        struct ieee_pfc pfc = {0};
-       int err = 0;
 
        /* no support for LLD_MANAGED modes or CEE+IEEE */
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -737,7 +802,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                ixgbe_dcbnl_ieee_setets(dev, &ets);
                ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
        } else if (mode & DCB_CAP_DCBX_VER_CEE) {
-               u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+               u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG);
 
                adapter->dcb_set_bitmap |= mask;
                ixgbe_dcbnl_set_all(dev);
@@ -747,18 +812,25 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                 */
                ixgbe_dcbnl_ieee_setets(dev, &ets);
                ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
-               err = ixgbe_setup_tc(dev, 0);
+               ixgbe_setup_tc(dev, 0);
        }
 
-       return err ? 1 : 0;
+       return 0;
 }
 
-const struct dcbnl_rtnl_ops dcbnl_ops = {
+#endif
+
+struct dcbnl_rtnl_ops dcbnl_ops = {
+#ifdef HAVE_DCBNL_IEEE
        .ieee_getets    = ixgbe_dcbnl_ieee_getets,
        .ieee_setets    = ixgbe_dcbnl_ieee_setets,
        .ieee_getpfc    = ixgbe_dcbnl_ieee_getpfc,
        .ieee_setpfc    = ixgbe_dcbnl_ieee_setpfc,
        .ieee_setapp    = ixgbe_dcbnl_ieee_setapp,
+#ifdef HAVE_DCBNL_IEEE_DELAPP
+       .ieee_delapp    = ixgbe_dcbnl_ieee_delapp,
+#endif
+#endif
        .getstate       = ixgbe_dcbnl_get_state,
        .setstate       = ixgbe_dcbnl_set_state,
        .getpermhwaddr  = ixgbe_dcbnl_get_perm_hw_addr,
@@ -778,7 +850,14 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
        .setnumtcs      = ixgbe_dcbnl_setnumtcs,
        .getpfcstate    = ixgbe_dcbnl_getpfcstate,
        .setpfcstate    = ixgbe_dcbnl_setpfcstate,
+#ifdef HAVE_DCBNL_OPS_GETAPP
        .getapp         = ixgbe_dcbnl_getapp,
+       .setapp         = ixgbe_dcbnl_setapp,
+#endif
+#ifdef HAVE_DCBNL_IEEE
        .getdcbx        = ixgbe_dcbnl_getdcbx,
        .setdcbx        = ixgbe_dcbnl_setdcbx,
+#endif
 };
+
+#endif
index 61593776984e36ba1f2244987f1c3fc0d9abed3e..124ade95486457e4e5535075386a1ccd1ac96654 100644 (file)
 
 #include <linux/types.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
-#include <linux/uaccess.h>
+#include <linux/highmem.h>
+#ifdef SIOCETHTOOL
+#include <asm/uaccess.h>
 
 #include "ixgbe.h"
 
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
 
 #define IXGBE_ALL_RAR_ENTRIES 16
 
-enum {NETDEV_STATS, IXGBE_STATS};
-
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
 struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
-       int type;
        int sizeof_stat;
        int stat_offset;
 };
 
-#define IXGBE_STAT(m)          IXGBE_STATS, \
-                               sizeof(((struct ixgbe_adapter *)0)->m), \
-                               offsetof(struct ixgbe_adapter, m)
-#define IXGBE_NETDEV_STAT(m)   NETDEV_STATS, \
-                               sizeof(((struct rtnl_link_stats64 *)0)->m), \
-                               offsetof(struct rtnl_link_stats64, m)
-
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-       {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
-       {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
-       {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
-       {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
-       {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
-       {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
-       {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
-       {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
-       {"lsc_int", IXGBE_STAT(lsc_int)},
-       {"tx_busy", IXGBE_STAT(tx_busy)},
-       {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
-       {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
-       {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
-       {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
-       {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
-       {"multicast", IXGBE_NETDEV_STAT(multicast)},
-       {"broadcast", IXGBE_STAT(stats.bprc)},
-       {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
-       {"collisions", IXGBE_NETDEV_STAT(collisions)},
-       {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
-       {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
-       {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
-       {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
-       {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
-       {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
-       {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
-       {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
-       {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
-       {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
-       {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
-       {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
-       {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
-       {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
-       {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
-       {"tx_restart_queue", IXGBE_STAT(restart_queue)},
-       {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
-       {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
-       {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
-       {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
-       {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
-       {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
-       {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
-       {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
-       {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
-       {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
-       {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
-       {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
-       {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
-       {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
+#define IXGBE_NETDEV_STAT(_net_stat) { \
+       .stat_string = #_net_stat, \
+       .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+       .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = {
+       IXGBE_NETDEV_STAT(rx_packets),
+       IXGBE_NETDEV_STAT(tx_packets),
+       IXGBE_NETDEV_STAT(rx_bytes),
+       IXGBE_NETDEV_STAT(tx_bytes),
+       IXGBE_NETDEV_STAT(rx_errors),
+       IXGBE_NETDEV_STAT(tx_errors),
+       IXGBE_NETDEV_STAT(rx_dropped),
+       IXGBE_NETDEV_STAT(tx_dropped),
+       IXGBE_NETDEV_STAT(multicast),
+       IXGBE_NETDEV_STAT(collisions),
+       IXGBE_NETDEV_STAT(rx_over_errors),
+       IXGBE_NETDEV_STAT(rx_crc_errors),
+       IXGBE_NETDEV_STAT(rx_frame_errors),
+       IXGBE_NETDEV_STAT(rx_fifo_errors),
+       IXGBE_NETDEV_STAT(rx_missed_errors),
+       IXGBE_NETDEV_STAT(tx_aborted_errors),
+       IXGBE_NETDEV_STAT(tx_carrier_errors),
+       IXGBE_NETDEV_STAT(tx_fifo_errors),
+       IXGBE_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define IXGBE_STAT(_name, _stat) { \
+       .stat_string = _name, \
+       .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \
+       .stat_offset = offsetof(struct ixgbe_adapter, _stat) \
+}
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+       IXGBE_STAT("rx_pkts_nic", stats.gprc),
+       IXGBE_STAT("tx_pkts_nic", stats.gptc),
+       IXGBE_STAT("rx_bytes_nic", stats.gorc),
+       IXGBE_STAT("tx_bytes_nic", stats.gotc),
+       IXGBE_STAT("lsc_int", lsc_int),
+       IXGBE_STAT("tx_busy", tx_busy),
+       IXGBE_STAT("non_eop_descs", non_eop_descs),
+       IXGBE_STAT("broadcast", stats.bprc),
+       IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) ,
+       IXGBE_STAT("tx_timeout_count", tx_timeout_count),
+       IXGBE_STAT("tx_restart_queue", restart_queue),
+       IXGBE_STAT("rx_long_length_errors", stats.roc),
+       IXGBE_STAT("rx_short_length_errors", stats.ruc),
+       IXGBE_STAT("tx_flow_control_xon", stats.lxontxc),
+       IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
+       IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
+       IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
+       IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+       IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+       IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+#ifndef IXGBE_NO_LRO
+       IXGBE_STAT("lro_aggregated", lro_stats.coal),
+       IXGBE_STAT("lro_flushed", lro_stats.flushed),
+#endif /* IXGBE_NO_LRO */
+       IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
+       IXGBE_STAT("hw_rsc_aggregated", rsc_total_count),
+       IXGBE_STAT("hw_rsc_flushed", rsc_total_flush),
+#ifdef HAVE_TX_MQ
+       IXGBE_STAT("fdir_match", stats.fdirmatch),
+       IXGBE_STAT("fdir_miss", stats.fdirmiss),
+       IXGBE_STAT("fdir_overflow", fdir_overflow),
+#endif /* HAVE_TX_MQ */
 #ifdef IXGBE_FCOE
-       {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
-       {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
-       {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
-       {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
-       {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
-       {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
-       {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
-       {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
+       IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc),
+       IXGBE_STAT("fcoe_last_errors", stats.fclast),
+       IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc),
+       IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc),
+       IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc),
+       IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp),
+       IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff),
+       IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc),
+       IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc),
 #endif /* IXGBE_FCOE */
+       IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+       IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+       IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
+       IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
 };
 
-/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
- * we set the num_rx_queues to evaluate to num_tx_queues. This is
- * used because we do not have a good way to get the max number of
- * rx queues with CONFIG_RPS disabled.
- */
-#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
-
-#define IXGBE_QUEUE_STATS_LEN ( \
-       (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
-       (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_QUEUE_STATS_LEN \
+       ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
+        ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+         (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats)
 #define IXGBE_PB_STATS_LEN ( \
-                       (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
-                        sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
-                        sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
-                        sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
-                       / sizeof(u64))
+               (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
+                IXGBE_FLAG_DCB_ENABLED) ? \
+                (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+                 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+                 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+                 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+                / sizeof(u64) : 0)
+#define IXGBE_VF_STATS_LEN \
+       ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
+         (sizeof(struct vf_stats) / sizeof(u64)))
 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
-                         IXGBE_PB_STATS_LEN + \
-                         IXGBE_QUEUE_STATS_LEN)
+                        IXGBE_NETDEV_STATS_LEN + \
+                        IXGBE_PB_STATS_LEN + \
+                        IXGBE_QUEUE_STATS_LEN + \
+                        IXGBE_VF_STATS_LEN)
 
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)", "Eeprom test    (offline)",
        "Interrupt test (offline)", "Loopback test  (offline)",
        "Link test   (on/offline)"
 };
-#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
 
-static int ixgbe_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+int ixgbe_get_settings(struct net_device *netdev,
+                      struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -161,8 +184,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
            (hw->phy.multispeed_fiber)) {
                ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                   SUPPORTED_Autoneg);
-
+                                   SUPPORTED_Autoneg);
                switch (hw->mac.type) {
                case ixgbe_mac_X540:
                        ecmd->supported |= SUPPORTED_100baseT_Full;
@@ -211,8 +233,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
                                             ADVERTISED_FIBRE);
                        ecmd->port = PORT_FIBRE;
                        ecmd->autoneg = AUTONEG_DISABLE;
-               } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
-                          (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+               } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
+                         || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
                        ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                            SUPPORTED_Autoneg |
                                            SUPPORTED_FIBRE);
@@ -232,11 +254,12 @@ static int ixgbe_get_settings(struct net_device *netdev,
        } else {
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising = (ADVERTISED_10000baseT_Full |
-                                    ADVERTISED_FIBRE);
+                                    ADVERTISED_FIBRE);
                ecmd->port = PORT_FIBRE;
                ecmd->autoneg = AUTONEG_DISABLE;
        }
 
+#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
        /* Get PHY type */
        switch (adapter->hw.phy.type) {
        case ixgbe_phy_tn:
@@ -276,7 +299,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
                        ecmd->port = PORT_TP;
                        ecmd->supported = SUPPORTED_TP;
                        ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_TP);
+                               ADVERTISED_TP);
+                       break;
+               case ixgbe_sfp_type_1g_sx_core0:
+               case ixgbe_sfp_type_1g_sx_core1:
+                       ecmd->port = PORT_FIBRE;
+                       ecmd->supported = SUPPORTED_FIBRE;
+                       ecmd->advertising = (ADVERTISED_1000baseT_Full |
+                               ADVERTISED_FIBRE);
                        break;
                case ixgbe_sfp_type_unknown:
                default:
@@ -294,25 +324,36 @@ static int ixgbe_get_settings(struct net_device *netdev,
                ecmd->port = PORT_OTHER;
                break;
        }
+#endif
+
+       if (!in_interrupt()) {
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+       } else {
+               /*
+                * this case is a special workaround for RHEL5 bonding
+                * that calls this routine from interrupt context
+                */
+               link_speed = adapter->link_speed;
+               link_up = adapter->link_up;
+       }
 
-       hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
        if (link_up) {
                switch (link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_10000);
+                       ecmd->speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
+                       ecmd->speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_100);
+                       ecmd->speed = SPEED_100;
                        break;
                default:
                        break;
                }
                ecmd->duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(ecmd, -1);
+               ecmd->speed = -1;
                ecmd->duplex = -1;
        }
 
@@ -320,7 +361,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
 }
 
 static int ixgbe_set_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -359,20 +400,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
                        e_info(probe, "setup link failed with code %d\n", err);
                        hw->mac.ops.setup_link(hw, old, true, true);
                }
-       } else {
-               /* in this case we currently only support 10Gb/FULL */
-               u32 speed = ethtool_cmd_speed(ecmd);
-               if ((ecmd->autoneg == AUTONEG_ENABLE) ||
-                   (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
-                   (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
-                       return -EINVAL;
        }
-
        return err;
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
-                                 struct ethtool_pauseparam *pause)
+                                struct ethtool_pauseparam *pause)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -389,49 +422,31 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        } else if (hw->fc.current_mode == ixgbe_fc_full) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
-#ifdef CONFIG_DCB
-       } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
-               pause->rx_pause = 0;
-               pause->tx_pause = 0;
-#endif
        }
 }
 
 static int ixgbe_set_pauseparam(struct net_device *netdev,
-                                struct ethtool_pauseparam *pause)
+                               struct ethtool_pauseparam *pause)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_fc_info fc;
+       struct ixgbe_fc_info fc = hw->fc;
 
-#ifdef CONFIG_DCB
-       if (adapter->dcb_cfg.pfc_mode_enable ||
-               ((hw->mac.type == ixgbe_mac_82598EB) &&
-               (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+       /* 82598 does no support link flow control with DCB enabled */
+       if ((hw->mac.type == ixgbe_mac_82598EB) &&
+           (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
                return -EINVAL;
 
-#endif
-       fc = hw->fc;
-
-       if (pause->autoneg != AUTONEG_ENABLE)
-               fc.disable_fc_autoneg = true;
-       else
-               fc.disable_fc_autoneg = false;
+       fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
 
        if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
                fc.requested_mode = ixgbe_fc_full;
-       else if (pause->rx_pause && !pause->tx_pause)
+       else if (pause->rx_pause)
                fc.requested_mode = ixgbe_fc_rx_pause;
-       else if (!pause->rx_pause && pause->tx_pause)
+       else if (pause->tx_pause)
                fc.requested_mode = ixgbe_fc_tx_pause;
-       else if (!pause->rx_pause && !pause->tx_pause)
-               fc.requested_mode = ixgbe_fc_none;
        else
-               return -EINVAL;
-
-#ifdef CONFIG_DCB
-       adapter->last_lfc_mode = fc.requested_mode;
-#endif
+               fc.requested_mode = ixgbe_fc_none;
 
        /* if the thing changed then we'll update and use new autoneg */
        if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
@@ -463,10 +478,11 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
        return IXGBE_REGS_LEN * sizeof(u32);
 }
 
-#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
+#define IXGBE_GET_STAT(_A_, _R_)       (_A_->stats._R_)
+
 
-static void ixgbe_get_regs(struct net_device *netdev,
-                           struct ethtool_regs *regs, void *p)
+static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+                          void *p)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -529,8 +545,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
                        break;
                case ixgbe_mac_82599EB:
                case ixgbe_mac_X540:
-                       regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
-                       regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+                       regs_buff[35 + i] = IXGBE_READ_REG(hw,
+                                                         IXGBE_FCRTL_82599(i));
+                       regs_buff[43 + i] = IXGBE_READ_REG(hw,
+                                                         IXGBE_FCRTH_82599(i));
                        break;
                default:
                        break;
@@ -782,7 +800,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
 }
 
 static int ixgbe_get_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -804,8 +822,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
        if (!eeprom_buff)
                return -ENOMEM;
 
-       ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
-                                            eeprom_buff);
+       ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
+                                          eeprom_buff);
 
        /* Device's eeprom is always little-endian, word addressable */
        for (i = 0; i < eeprom_len; i++)
@@ -848,18 +866,18 @@ static int ixgbe_set_eeprom(struct net_device *netdev,
                 * need read/modify/write of first changed EEPROM word
                 * only the second byte of the word is being modified
                 */
-               ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
+               ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
                if (ret_val)
                        goto err;
 
                ptr++;
        }
-       if ((eeprom->offset + eeprom->len) & 1) {
+       if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
                /*
                 * need read/modify/write of last changed EEPROM word
                 * only the first byte of the word is being modified
                 */
-               ret_val = hw->eeprom.ops.read(hw, last_word,
+               ret_val = ixgbe_read_eeprom(hw, last_word,
                                          &eeprom_buff[last_word - first_word]);
                if (ret_val)
                        goto err;
@@ -874,13 +892,13 @@ static int ixgbe_set_eeprom(struct net_device *netdev,
        for (i = 0; i < last_word - first_word + 1; i++)
                cpu_to_le16s(&eeprom_buff[i]);
 
-       ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
-                                             last_word - first_word + 1,
-                                             eeprom_buff);
+       ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
+                                           last_word - first_word + 1,
+                                           eeprom_buff);
 
        /* Update the checksum */
        if (ret_val == 0)
-               hw->eeprom.ops.update_checksum(hw);
+               ixgbe_update_eeprom_checksum(hw);
 
 err:
        kfree(eeprom_buff);
@@ -888,23 +906,16 @@ err:
 }
 
 static void ixgbe_get_drvinfo(struct net_device *netdev,
-                              struct ethtool_drvinfo *drvinfo)
+                             struct ethtool_drvinfo *drvinfo)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
-       u32 nvm_track_id;
 
        strncpy(drvinfo->driver, ixgbe_driver_name,
-               sizeof(drvinfo->driver) - 1);
+               sizeof(drvinfo->driver) - 1);
        strncpy(drvinfo->version, ixgbe_driver_version,
-               sizeof(drvinfo->version) - 1);
-
-       nvm_track_id = (adapter->eeprom_verh << 16) |
-                       adapter->eeprom_verl;
-       snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
-                nvm_track_id);
+               sizeof(drvinfo->version) - 1);
 
-       strncpy(drvinfo->fw_version, firmware_version,
+       strncpy(drvinfo->fw_version, adapter->eeprom_id,
                sizeof(drvinfo->fw_version) - 1);
        strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info) - 1);
@@ -914,43 +925,43 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
-                                struct ethtool_ringparam *ring)
+                               struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
-       struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
        ring->rx_max_pending = IXGBE_MAX_RXD;
        ring->tx_max_pending = IXGBE_MAX_TXD;
-       ring->rx_pending = rx_ring->count;
-       ring->tx_pending = tx_ring->count;
+       ring->rx_mini_max_pending = 0;
+       ring->rx_jumbo_max_pending = 0;
+       ring->rx_pending = adapter->rx_ring_count;
+       ring->tx_pending = adapter->tx_ring_count;
+       ring->rx_mini_pending = 0;
+       ring->rx_jumbo_pending = 0;
 }
 
 static int ixgbe_set_ringparam(struct net_device *netdev,
-                               struct ethtool_ringparam *ring)
+                              struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
-       int i, err = 0;
+       struct ixgbe_ring *tx_ring = NULL, *rx_ring = NULL;
        u32 new_rx_count, new_tx_count;
-       bool need_update = false;
+       int i, err = 0;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
-       new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
-       new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-       new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
-       new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+       new_tx_count = clamp_t(u32, ring->tx_pending,
+                              IXGBE_MIN_TXD, IXGBE_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-       if ((new_tx_count == adapter->tx_ring[0]->count) &&
-           (new_rx_count == adapter->rx_ring[0]->count)) {
-               /* nothing to do */
+       new_rx_count = clamp_t(u32, ring->rx_pending,
+                              IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+       new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+       /* if nothing to do, return success */
+       if ((new_tx_count == adapter->tx_ring_count) &&
+           (new_rx_count == adapter->rx_ring_count))
                return 0;
-       }
 
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
@@ -965,86 +976,109 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                goto clear_reset;
        }
 
-       temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
-       if (!temp_tx_ring) {
-               err = -ENOMEM;
-               goto clear_reset;
-       }
-
+       /* alloc updated Tx resources */
        if (new_tx_count != adapter->tx_ring_count) {
+               tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+               if (!tx_ring) {
+                       err = -ENOMEM;
+                       goto clear_reset;
+               }
+
                for (i = 0; i < adapter->num_tx_queues; i++) {
-                       memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
-                              sizeof(struct ixgbe_ring));
-                       temp_tx_ring[i].count = new_tx_count;
-                       err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+                       /* clone ring and setup updated count */
+                       tx_ring[i] = *adapter->tx_ring[i];
+                       tx_ring[i].count = new_tx_count;
+                       err = ixgbe_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
-                                       ixgbe_free_tx_resources(&temp_tx_ring[i]);
+                                       ixgbe_free_tx_resources(&tx_ring[i]);
                                }
+
+                               vfree(tx_ring);
+                               tx_ring = NULL;
+
                                goto clear_reset;
                        }
                }
-               need_update = true;
-       }
-
-       temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
-       if (!temp_rx_ring) {
-               err = -ENOMEM;
-               goto err_setup;
        }
 
+       /* alloc updated Rx resources */
        if (new_rx_count != adapter->rx_ring_count) {
+               rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+               if (!rx_ring) {
+                       err = -ENOMEM;
+                       goto clear_reset;
+               }
+
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
-                              sizeof(struct ixgbe_ring));
-                       temp_rx_ring[i].count = new_rx_count;
-                       err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+                       /* clone ring and setup updated count */
+                       rx_ring[i] = *adapter->rx_ring[i];
+                       rx_ring[i].count = new_rx_count;
+                       err = ixgbe_setup_rx_resources(&rx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
-                                       ixgbe_free_rx_resources(&temp_rx_ring[i]);
+                                       ixgbe_free_rx_resources(&rx_ring[i]);
                                }
-                               goto err_setup;
+
+                               vfree(rx_ring);
+                               rx_ring = NULL;
+
+                               goto clear_reset;
                        }
                }
-               need_update = true;
        }
 
-       /* if rings need to be updated, here's the place to do it in one shot */
-       if (need_update) {
-               ixgbe_down(adapter);
+       /* bring interface down to prepare for update */
+       ixgbe_down(adapter);
 
-               /* tx */
-               if (new_tx_count != adapter->tx_ring_count) {
-                       for (i = 0; i < adapter->num_tx_queues; i++) {
-                               ixgbe_free_tx_resources(adapter->tx_ring[i]);
-                               memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
-                                      sizeof(struct ixgbe_ring));
-                       }
-                       adapter->tx_ring_count = new_tx_count;
+       /* Tx */
+       if (tx_ring) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       ixgbe_free_tx_resources(adapter->tx_ring[i]);
+                       *adapter->tx_ring[i] = tx_ring[i];
                }
+               adapter->tx_ring_count = new_tx_count;
 
-               /* rx */
-               if (new_rx_count != adapter->rx_ring_count) {
-                       for (i = 0; i < adapter->num_rx_queues; i++) {
-                               ixgbe_free_rx_resources(adapter->rx_ring[i]);
-                               memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
-                                      sizeof(struct ixgbe_ring));
-                       }
-                       adapter->rx_ring_count = new_rx_count;
+               vfree(tx_ring);
+               tx_ring = NULL;
+       }
+
+       /* Rx */
+       if (rx_ring) {
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
+                       *adapter->rx_ring[i] = rx_ring[i];
                }
-               ixgbe_up(adapter);
+               adapter->rx_ring_count = new_rx_count;
+
+               vfree(rx_ring);
+               rx_ring = NULL;
        }
 
-       vfree(temp_rx_ring);
-err_setup:
-       vfree(temp_tx_ring);
+       /* restore interface using new values */
+       ixgbe_up(adapter);
+
 clear_reset:
+       /* free Tx resources if an Rx error was encountered */
+       if (tx_ring) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       ixgbe_free_tx_resources(&tx_ring[i]);
+               vfree(tx_ring);
+       }
+
        clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
 }
 
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int ixgbe_get_stats_count(struct net_device *netdev)
+{
+       return IXGBE_STATS_LEN;
+}
+
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
@@ -1057,80 +1091,71 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
-                                    struct ethtool_stats *stats, u64 *data)
+                                   struct ethtool_stats *stats, u64 *data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct rtnl_link_stats64 temp;
-       const struct rtnl_link_stats64 *net_stats;
-       unsigned int start;
-       struct ixgbe_ring *ring;
-       int i, j;
-       char *p = NULL;
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+       struct net_device_stats *net_stats = &netdev->stats;
+#else
+       struct net_device_stats *net_stats = &adapter->net_stats;
+#endif
+       u64 *queue_stat;
+       int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
+       int i, j, k;
+       char *p;
 
        ixgbe_update_stats(adapter);
-       net_stats = dev_get_stats(netdev, &temp);
-       for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-               switch (ixgbe_gstrings_stats[i].type) {
-               case NETDEV_STATS:
-                       p = (char *) net_stats +
-                                       ixgbe_gstrings_stats[i].stat_offset;
-                       break;
-               case IXGBE_STATS:
-                       p = (char *) adapter +
-                                       ixgbe_gstrings_stats[i].stat_offset;
-                       break;
-               }
 
-               data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
-                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
+               p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset;
+               data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+       for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) {
+               p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset;
+               data[i] = (ixgbe_gstrings_stats[j].sizeof_stat ==
+                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+       for (j = 0; j < adapter->num_tx_queues; j++) {
+               queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
        }
-       for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
-               ring = adapter->tx_ring[j];
-               if (!ring) {
-                       data[i] = 0;
-                       data[i+1] = 0;
-                       i += 2;
-                       continue;
+       for (j = 0; j < adapter->num_rx_queues; j++) {
+               queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
+       }
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+                       data[i++] = adapter->stats.pxontxc[j];
+                       data[i++] = adapter->stats.pxofftxc[j];
                }
-
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       data[i]   = ring->stats.packets;
-                       data[i+1] = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               i += 2;
-       }
-       for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
-               ring = adapter->rx_ring[j];
-               if (!ring) {
-                       data[i] = 0;
-                       data[i+1] = 0;
-                       i += 2;
-                       continue;
+               for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+                       data[i++] = adapter->stats.pxonrxc[j];
+                       data[i++] = adapter->stats.pxoffrxc[j];
                }
-
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       data[i]   = ring->stats.packets;
-                       data[i+1] = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               i += 2;
-       }
-
-       for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
-               data[i++] = adapter->stats.pxontxc[j];
-               data[i++] = adapter->stats.pxofftxc[j];
        }
-       for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
-               data[i++] = adapter->stats.pxonrxc[j];
-               data[i++] = adapter->stats.pxoffrxc[j];
+       stat_count = sizeof(struct vf_stats) / sizeof(u64);
+       for (j = 0; j < adapter->num_vfs; j++) {
+               queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] += queue_stat[k];
+               i += k;
        }
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
-                              u8 *data)
+                             u8 *data)
 {
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
        char *p = (char *)data;
        int i;
 
@@ -1140,33 +1165,52 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
+               for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
+                       memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
                for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
                        memcpy(p, ixgbe_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
-               for (i = 0; i < netdev->num_tx_queues; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
-                       sprintf(p, "tx_pb_%u_pxon", i);
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+                               sprintf(p, "tx_pb_%u_pxon", i);
+                               p += ETH_GSTRING_LEN;
+                               sprintf(p, "tx_pb_%u_pxoff", i);
+                               p += ETH_GSTRING_LEN;
+                       }
+                       for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+                               sprintf(p, "rx_pb_%u_pxon", i);
+                               p += ETH_GSTRING_LEN;
+                               sprintf(p, "rx_pb_%u_pxoff", i);
+                               p += ETH_GSTRING_LEN;
+                       }
+               }
+               for (i = 0; i < adapter->num_vfs; i++) {
+                       sprintf(p, "VF %d Rx Packets", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_pb_%u_pxoff", i);
+                       sprintf(p, "VF %d Rx Bytes", i);
                        p += ETH_GSTRING_LEN;
-               }
-               for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
-                       sprintf(p, "rx_pb_%u_pxon", i);
+                       sprintf(p, "VF %d Tx Packets", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_pb_%u_pxoff", i);
+                       sprintf(p, "VF %d Tx Bytes", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "VF %d MC Packets", i);
                        p += ETH_GSTRING_LEN;
                }
                /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
@@ -1216,7 +1260,7 @@ struct ixgbe_reg_test {
 #define TABLE64_TEST_HI        6
 
 /* default 82599 register test */
-static const struct ixgbe_reg_test reg_test_82599[] = {
+static struct ixgbe_reg_test reg_test_82599[] = {
        { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1240,7 +1284,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = {
 };
 
 /* default 82598 register test */
-static const struct ixgbe_reg_test reg_test_82598[] = {
+static struct ixgbe_reg_test reg_test_82598[] = {
        { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1267,66 +1311,46 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
        { 0, 0, 0, 0 }
 };
 
-static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
-                            u32 mask, u32 write)
-{
-       u32 pat, val, before;
-       static const u32 test_pattern[] = {
-               0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-
-       for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
-               before = readl(adapter->hw.hw_addr + reg);
-               writel((test_pattern[pat] & write),
-                      (adapter->hw.hw_addr + reg));
-               val = readl(adapter->hw.hw_addr + reg);
-               if (val != (test_pattern[pat] & write & mask)) {
-                       e_err(drv, "pattern test reg %04X failed: got "
-                             "0x%08X expected 0x%08X\n",
-                             reg, val, (test_pattern[pat] & write & mask));
-                       *data = reg;
-                       writel(before, adapter->hw.hw_addr + reg);
-                       return 1;
-               }
-               writel(before, adapter->hw.hw_addr + reg);
-       }
-       return 0;
+#define REG_PATTERN_TEST(R, M, W)                                            \
+{                                                                            \
+       u32 pat, val, before;                                                 \
+       const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+       for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+               before = readl(adapter->hw.hw_addr + R);                      \
+               writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+               val = readl(adapter->hw.hw_addr + R);                         \
+               if (val != (_test[pat] & W & M)) {                            \
+                       e_err(drv, "pattern test reg %04X failed: got "       \
+                             "0x%08X expected 0x%08X\n",                     \
+                               R, val, (_test[pat] & W & M));                \
+                       *data = R;                                            \
+                       writel(before, adapter->hw.hw_addr + R);              \
+                       return 1;                                             \
+               }                                                             \
+               writel(before, adapter->hw.hw_addr + R);                      \
+       }                                                                     \
 }
 
-static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
-                             u32 mask, u32 write)
-{
-       u32 val, before;
-       before = readl(adapter->hw.hw_addr + reg);
-       writel((write & mask), (adapter->hw.hw_addr + reg));
-       val = readl(adapter->hw.hw_addr + reg);
-       if ((write & mask) != (val & mask)) {
-               e_err(drv, "set/check reg %04X test failed: got 0x%08X "
-                     "expected 0x%08X\n", reg, (val & mask), (write & mask));
-               *data = reg;
-               writel(before, (adapter->hw.hw_addr + reg));
-               return 1;
-       }
-       writel(before, (adapter->hw.hw_addr + reg));
-       return 0;
+#define REG_SET_AND_CHECK(R, M, W)                                           \
+{                                                                            \
+       u32 val, before;                                                      \
+       before = readl(adapter->hw.hw_addr + R);                              \
+       writel((W & M), (adapter->hw.hw_addr + R));                           \
+       val = readl(adapter->hw.hw_addr + R);                                 \
+       if ((W & M) != (val & M)) {                                           \
+               e_err(drv, "set/check reg %04X test failed: got 0x%08X "      \
+                     "expected 0x%08X\n", R, (val & M), (W & M));            \
+               *data = R;                                                    \
+               writel(before, (adapter->hw.hw_addr + R));                    \
+               return 1;                                                     \
+       }                                                                     \
+       writel(before, (adapter->hw.hw_addr + R));                            \
 }
 
-#define REG_PATTERN_TEST(reg, mask, write)                                   \
-       do {                                                                  \
-               if (reg_pattern_test(adapter, data, reg, mask, write))        \
-                       return 1;                                             \
-       } while (0)                                                           \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write)                                  \
-       do {                                                                  \
-               if (reg_set_and_check(adapter, data, reg, mask, write))       \
-                       return 1;                                             \
-       } while (0)                                                           \
-
 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 {
-       const struct ixgbe_reg_test *test;
-       u32 value, before, after;
+       struct ixgbe_reg_test *test;
+       u32 value, status_before, status_after;
        u32 i, toggle;
 
        switch (adapter->hw.mac.type) {
@@ -1351,18 +1375,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
         * tests.  Some bits are read-only, some toggle, and some
         * are writeable on newer MACs.
         */
-       before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+       status_before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
        value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
-       after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
-       if (value != after) {
-               e_err(drv, "failed STATUS register test got: 0x%08X "
-                     "expected: 0x%08X\n", after, value);
+       status_after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+       if (value != status_after) {
+               e_err(drv, "failed STATUS register test got: "
+                     "0x%08X expected: 0x%08X\n", status_after, value);
                *data = 1;
                return 1;
        }
        /* restore previous status */
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, status_before);
 
        /*
         * Perform the remainder of the register test, looping through
@@ -1373,13 +1397,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
                        switch (test->test_type) {
                        case PATTERN_TEST:
                                REG_PATTERN_TEST(test->reg + (i * 0x40),
-                                                test->mask,
-                                                test->write);
+                                               test->mask,
+                                               test->write);
                                break;
                        case SET_READ_TEST:
                                REG_SET_AND_CHECK(test->reg + (i * 0x40),
-                                                 test->mask,
-                                                 test->write);
+                                               test->mask,
+                                               test->write);
                                break;
                        case WRITE_NO_TEST:
                                writel(test->write,
@@ -1388,18 +1412,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
                                break;
                        case TABLE32_TEST:
                                REG_PATTERN_TEST(test->reg + (i * 4),
-                                                test->mask,
-                                                test->write);
+                                               test->mask,
+                                               test->write);
                                break;
                        case TABLE64_TEST_LO:
                                REG_PATTERN_TEST(test->reg + (i * 8),
-                                                test->mask,
-                                                test->write);
+                                               test->mask,
+                                               test->write);
                                break;
                        case TABLE64_TEST_HI:
                                REG_PATTERN_TEST((test->reg + 4) + (i * 8),
-                                                test->mask,
-                                                test->write);
+                                               test->mask,
+                                               test->write);
                                break;
                        }
                }
@@ -1412,8 +1436,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 
 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-       if (hw->eeprom.ops.validate_checksum(hw, NULL))
+       if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
                *data = 1;
        else
                *data = 0;
@@ -1444,21 +1467,21 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                return 0;
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                shared_int = false;
-               if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
+               if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
                                netdev)) {
                        *data = 1;
                        return -1;
                }
-       } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
-                               netdev->name, netdev)) {
+       } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+                               netdev->name, netdev)) {
                shared_int = false;
-       } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
-                              netdev->name, netdev)) {
+       } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+                              netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
-       e_info(hw, "testing %s interrupt\n", shared_int ?
-              "shared" : "unshared");
+       e_info(hw, "testing %s interrupt\n",
+              (shared_int ? "shared" : "unshared"));
 
        /* Disable all the interrupts */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1480,9 +1503,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);
 
@@ -1504,7 +1527,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                IXGBE_WRITE_FLUSH(&adapter->hw);
                usleep_range(10000, 20000);
 
-               if (!(adapter->test_icr &mask)) {
+               if (!(adapter->test_icr & mask)) {
                        *data = 4;
                        break;
                }
@@ -1519,9 +1542,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);
 
@@ -1559,9 +1582,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
        ixgbe_disable_rx_queue(adapter, rx_ring);
 
        /* now Tx */
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
-       reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), 0);
 
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
@@ -1591,7 +1612,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        /* Setup Tx descriptor ring and Tx buffers */
        tx_ring->count = IXGBE_DEFAULT_TXD;
        tx_ring->queue_index = 0;
-       tx_ring->dev = &adapter->pdev->dev;
+       tx_ring->dev = pci_dev_to_dev(adapter->pdev);
        tx_ring->netdev = adapter->netdev;
        tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
 
@@ -1615,10 +1636,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        /* Setup Rx Descriptor ring and Rx buffers */
        rx_ring->count = IXGBE_DEFAULT_RXD;
        rx_ring->queue_index = 0;
-       rx_ring->dev = &adapter->pdev->dev;
+       rx_ring->dev = pci_dev_to_dev(adapter->pdev);
        rx_ring->netdev = adapter->netdev;
        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
+#endif
 
        err = ixgbe_setup_rx_resources(rx_ring);
        if (err) {
@@ -1674,21 +1697,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB) {
                u8 atlas;
 
-               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
-               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
 
-               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
-               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
 
-               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
-               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
 
-               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
-               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
        }
 
        return 0;
@@ -1721,13 +1744,21 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
 
        frame_size >>= 1;
 
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        data = rx_buffer->skb->data;
+#else
+       data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+#endif
 
        if (data[3] != 0xFF ||
            data[frame_size + 10] != 0xBE ||
            data[frame_size + 12] != 0xAF)
                match = false;
 
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       kunmap(rx_buffer->page);
+
+#endif
        return match;
 }
 
@@ -1738,6 +1769,11 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer;
        struct ixgbe_tx_buffer *tx_buffer;
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       const int bufsz = rx_ring->rx_buf_len;
+#else
+       const int bufsz = ixgbe_rx_bufsz(rx_ring);
+#endif
        u16 rx_ntc, tx_ntc, count = 0;
 
        /* initialize next to clean and descriptor values */
@@ -1746,23 +1782,28 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 
        while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
+               /* unmap buffer on Tx side */
+               tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
                /* check Rx buffer */
                rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
 
-               /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-               dma_unmap_single(rx_ring->dev,
-                                rx_buffer->dma,
-                                rx_ring->rx_buf_len,
-                                DMA_FROM_DEVICE);
-               rx_buffer->dma = 0;
+               /* sync Rx buffer for CPU read */
+               dma_sync_single_for_cpu(rx_ring->dev,
+                                       rx_buffer->dma,
+                                       bufsz,
+                                       DMA_FROM_DEVICE);
 
                /* verify contents of skb */
                if (ixgbe_check_lbtest_frame(rx_buffer, size))
                        count++;
 
-               /* unmap buffer on Tx side */
-               tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+               /* sync Rx buffer for device write */
+               dma_sync_single_for_device(rx_ring->dev,
+                                          rx_buffer->dma,
+                                          bufsz,
+                                          DMA_FROM_DEVICE);
 
                /* increment Rx/Tx next to clean counters */
                rx_ntc++;
@@ -1865,8 +1906,15 @@ out:
        return *data;
 }
 
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int ixgbe_diag_test_count(struct net_device *netdev)
+{
+       return IXGBE_TEST_LEN; /* number of self-test result slots reported to ethtool */
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 static void ixgbe_diag_test(struct net_device *netdev,
-                            struct ethtool_test *eth_test, u64 *data)
+                           struct ethtool_test *eth_test, u64 *data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);
@@ -1886,10 +1934,10 @@ static void ixgbe_diag_test(struct net_device *netdev,
                        int i;
                        for (i = 0; i < adapter->num_vfs; i++) {
                                if (adapter->vfinfo[i].clear_to_send) {
-                                       netdev_warn(netdev, "%s",
-                                                   "offline diagnostic is not "
-                                                   "supported when VFs are "
-                                                   "present\n");
+                                       e_warn(drv, "Please take active VFS "
+                                              "offline and restart the "
+                                              "adapter before running NIC "
+                                              "diagnostics\n");
                                        data[0] = 1;
                                        data[1] = 1;
                                        data[2] = 1;
@@ -1926,8 +1974,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                 * loopback diagnostic. */
                if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
                                      IXGBE_FLAG_VMDQ_ENABLED)) {
-                       e_info(hw, "Skip MAC loopback diagnostic in VT "
-                              "mode\n");
+                       e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
                        data[3] = 0;
                        goto skip_loopback;
                }
@@ -1962,56 +2009,15 @@ skip_ol_tests:
 }
 
 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
-                               struct ethtool_wolinfo *wol)
+                              struct ethtool_wolinfo *wol)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int retval = 1;
-       u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-
-       /* WOL not supported except for the following */
-       switch(hw->device_id) {
-       case IXGBE_DEV_ID_82599_SFP:
-               /* Only these subdevices could supports WOL */
-               switch (hw->subsystem_device_id) {
-               case IXGBE_SUBDEV_ID_82599_560FLR:
-                       /* only support first port */
-                       if (hw->bus.func != 0) {
-                               wol->supported = 0;
-                               break;
-                       }
-               case IXGBE_SUBDEV_ID_82599_SFP:
-                       retval = 0;
-                       break;
-               default:
-                       wol->supported = 0;
-                       break;
-               }
-               break;
-       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-               /* All except this subdevice support WOL */
-               if (hw->subsystem_device_id ==
-                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
-                       wol->supported = 0;
-                       break;
-               }
-               retval = 0;
-               break;
-       case IXGBE_DEV_ID_82599_KX4:
-               retval = 0;
-               break;
-       case IXGBE_DEV_ID_X540T:
-               /* check eeprom to see if enabled wol */
-               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-                    (hw->bus.func == 0))) {
-                       retval = 0;
-                       break;
-               }
+       int retval = 0;
 
-               /* All others not supported */
-               wol->supported = 0;
-               break;
-       default:
+       /* WOL not supported for all devices */
+       if (!ixgbe_wol_supported(adapter, hw->device_id,
+                                hw->subsystem_device_id)) {
+               retval = 1;
                wol->supported = 0;
        }
 
@@ -2019,12 +2025,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_get_wol(struct net_device *netdev,
-                          struct ethtool_wolinfo *wol)
+                         struct ethtool_wolinfo *wol)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        wol->supported = WAKE_UCAST | WAKE_MCAST |
-                        WAKE_BCAST | WAKE_MAGIC;
+                        WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = 0;
 
        if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2077,6 +2083,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
        return 0;
 }
 
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
 static int ixgbe_set_phys_id(struct net_device *netdev,
                             enum ethtool_phys_id_state state)
 {
@@ -2104,12 +2111,37 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
 
        return 0;
 }
+#else
+static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); /* save current LED state */
+       u32 i;
+
+       if (!data || data > 300) /* clamp blink duration; 0 means "default" */
+               data = 300;
+
+       for (i = 0; i < (data * 1000); i += 400) { /* one on/off cycle = 400 ms */
+               ixgbe_led_on(hw, IXGBE_LED_ON);
+               msleep_interruptible(200);
+               ixgbe_led_off(hw, IXGBE_LED_ON);
+               msleep_interruptible(200);
+       }
+
+       /* Restore LED settings */
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+
+       return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
 
 static int ixgbe_get_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+       ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
        /* only valid if in constant ITR mode */
        if (adapter->rx_itr_setting <= 1)
                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
@@ -2154,19 +2186,23 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
        /* if interrupt rate is too high then disable RSC */
        } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+#ifdef IXGBE_NO_LRO
                e_info(probe, "rx-usecs set too low, disabling RSC\n");
+#else
+               e_info(probe, "rx-usecs set too low, "
+                             "falling back to software LRO\n");
+#endif
                return true;
        }
        return false;
 }
 
 static int ixgbe_set_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
        int i;
-       int num_vectors;
        u16 tx_itr_param, rx_itr_param;
        bool need_reset = false;
 
@@ -2175,6 +2211,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
            && ec->tx_coalesce_usecs)
                return -EINVAL;
 
+       if (ec->tx_max_coalesced_frames_irq)
+               adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;
@@ -2202,13 +2241,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        /* check the old value and enable RSC if necessary */
        need_reset = ixgbe_update_rsc(adapter);
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               num_vectors = 1;
-
-       for (i = 0; i < num_vectors; i++) {
+       for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
+               q_vector->tx.work_limit = adapter->tx_work_limit;
+               q_vector->rx.work_limit = adapter->rx_work_limit;
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* tx only */
                        q_vector->itr = tx_itr_param;
@@ -2229,6 +2265,227 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 ixgbe_get_rx_csum(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_ring *ring = adapter->rx_ring[0];
+       return test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state); /* ring 0 mirrors all rings */
+}
+
+static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int i;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) { /* apply setting to every Rx ring */
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               if (data)
+                       set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+               else
+                       clear_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+       }
+
+       /* LRO and RSC both depend on RX checksum to function */
+       if (!data && (netdev->features & NETIF_F_LRO)) {
+               netdev->features &= ~NETIF_F_LRO;
+
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                       adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+                       ixgbe_do_reset(netdev); /* disabling RSC requires an adapter reset */
+               }
+       }
+
+       return 0;
+}
+
+static u32 ixgbe_get_tx_csum(struct net_device *netdev)
+{
+       return (netdev->features & NETIF_F_IP_CSUM) != 0; /* IPv4 csum flag stands in for Tx csum */
+}
+
+static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u32 feature_list;
+
+#ifdef NETIF_F_IPV6_CSUM
+       feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+       feature_list = NETIF_F_IP_CSUM;
+#endif
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               feature_list |= NETIF_F_SCTP_CSUM; /* 82599/X540 also offload SCTP csum */
+               break;
+       default:
+               break;
+       }
+       if (data) /* toggle the whole csum feature set together */
+               netdev->features |= feature_list;
+       else
+               netdev->features &= ~feature_list;
+
+       return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int ixgbe_set_tso(struct net_device *netdev, u32 data)
+{
+       if (data) {
+               netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+               netdev->features |= NETIF_F_TSO6;
+#endif
+       } else {
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+#ifdef NETIF_F_HW_VLAN_TX
+               struct ixgbe_adapter *adapter = netdev_priv(netdev);
+               /* disable TSO on all VLANs if they're present */
+               if (adapter->vlgrp) { /* old kernels: VLAN devs don't inherit features */
+                       int i;
+                       struct net_device *v_netdev;
+                       for (i = 0; i < VLAN_N_VID; i++) {
+                               v_netdev =
+                                      vlan_group_get_device(adapter->vlgrp, i);
+                               if (v_netdev) {
+                                       v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+                                       v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+                                       vlan_group_set_device(adapter->vlgrp, i,
+                                                             v_netdev);
+                               }
+                       }
+               }
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+               netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+               netdev->features &= ~NETIF_F_TSO6;
+#endif
+       }
+       return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+       u32 changed = netdev->features ^ data;
+       bool need_reset = false; /* must be initialized: read unconditionally below */
+       int rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+       if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+           !(data & ETH_FLAG_RXVLAN))
+               return -EINVAL;
+
+#endif
+#ifdef IXGBE_NO_LRO
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+#endif
+               supported_flags |= ETH_FLAG_LRO;
+
+#ifdef ETHTOOL_GRXRINGS
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_X540:
+       case ixgbe_mac_82599EB:
+               supported_flags |= ETH_FLAG_NTUPLE; /* fallthrough */
+       default:
+               break;
+       }
+
+#endif
+#ifdef NETIF_F_RXHASH
+       supported_flags |= ETH_FLAG_RXHASH;
+
+#endif
+       rc = ethtool_op_set_flags(netdev, data, supported_flags);
+       if (rc)
+               return rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+       if (changed & ETH_FLAG_RXVLAN)
+               ixgbe_vlan_mode(netdev, netdev->features);
+
+#endif
+       /* if state changes we need to update adapter->flags and reset */
+       if (!(netdev->features & NETIF_F_LRO)) {
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       need_reset = true;
+               adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+       } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+                  !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+               if (adapter->rx_itr_setting == 1 ||
+                   adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+                       adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+                       need_reset = true;
+               } else if (changed & ETH_FLAG_LRO) {
+#ifdef IXGBE_NO_LRO
+                       e_info(probe, "rx-usecs set too low, "
+                              "disabling RSC\n");
+#else
+                       e_info(probe, "rx-usecs set too low, "
+                              "falling back to software LRO\n");
+#endif
+               }
+       }
+
+#ifdef ETHTOOL_GRXRINGS
+       /*
+        * Check if Flow Director n-tuple support was enabled or disabled.  If
+        * the state changed, we need to reset.
+        */
+       switch (netdev->features & NETIF_F_NTUPLE) {
+       case NETIF_F_NTUPLE:
+               /* turn off ATR, enable perfect filters and reset */
+               if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       need_reset = true;
+
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+               break;
+       default:
+               /* turn off perfect filters, enable ATR and reset */
+               if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+                       need_reset = true;
+
+               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+               /* We cannot enable ATR if VMDq is enabled */
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+
+               /* We cannot enable ATR if we have 2 or more traffic classes */
+               if (netdev_get_num_tc(netdev) > 1)
+                       break;
+
+               /* We cannot enable ATR if RSS is disabled */
+               if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+                       break;
+
+               /* A sample rate of 0 indicates ATR disabled */
+               if (!adapter->atr_sample_rate)
+                       break;
+
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               break;
+       }
+
+#endif /* ETHTOOL_GRXRINGS */
+       if (need_reset)
+               ixgbe_do_reset(netdev);
+
+       return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                        struct ethtool_rxnfc *cmd)
 {
@@ -2317,6 +2574,8 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
                cnt++;
        }
 
+       cmd->rule_cnt = cnt;
+
        return 0;
 }
 
@@ -2325,10 +2584,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 {
        cmd->data = 0;
 
-       /* if RSS is disabled then report no hashing */
-       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-               return 0;
-
        /* Report default options for RSS on ixgbe */
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
@@ -2363,7 +2618,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 }
 
 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
                           void *rule_locs)
+#else
+                          u32 *rule_locs)
+#endif
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int ret = -EOPNOTSUPP;
@@ -2621,6 +2880,19 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
        return err;
 }
 
+#ifdef ETHTOOL_SRXNTUPLE
+/*
+ * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
+ * a null pointer dereference: it was assumed that this function was present
+ * whenever the NETIF_F_NTUPLE flag was defined.
+ */
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+                              struct ethtool_rx_ntuple *cmd)
+{
+       return -EOPNOTSUPP; /* compat stub only; real filters go through set_rxnfc */
+}
+
+#endif
 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
                       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
@@ -2748,37 +3020,75 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
        return ret;
 }
 
-static const struct ethtool_ops ixgbe_ethtool_ops = {
-       .get_settings           = ixgbe_get_settings,
-       .set_settings           = ixgbe_set_settings,
-       .get_drvinfo            = ixgbe_get_drvinfo,
-       .get_regs_len           = ixgbe_get_regs_len,
-       .get_regs               = ixgbe_get_regs,
-       .get_wol                = ixgbe_get_wol,
-       .set_wol                = ixgbe_set_wol,
-       .nway_reset             = ixgbe_nway_reset,
-       .get_link               = ethtool_op_get_link,
-       .get_eeprom_len         = ixgbe_get_eeprom_len,
-       .get_eeprom             = ixgbe_get_eeprom,
-       .set_eeprom             = ixgbe_set_eeprom,
-       .get_ringparam          = ixgbe_get_ringparam,
-       .set_ringparam          = ixgbe_set_ringparam,
-       .get_pauseparam         = ixgbe_get_pauseparam,
-       .set_pauseparam         = ixgbe_set_pauseparam,
-       .get_msglevel           = ixgbe_get_msglevel,
-       .set_msglevel           = ixgbe_set_msglevel,
-       .self_test              = ixgbe_diag_test,
-       .get_strings            = ixgbe_get_strings,
-       .set_phys_id            = ixgbe_set_phys_id,
-       .get_sset_count         = ixgbe_get_sset_count,
+#endif /* ETHTOOL_GRXRINGS */
+static struct ethtool_ops ixgbe_ethtool_ops = {
+       .get_settings           = ixgbe_get_settings,
+       .set_settings           = ixgbe_set_settings,
+       .get_drvinfo            = ixgbe_get_drvinfo,
+       .get_regs_len           = ixgbe_get_regs_len,
+       .get_regs               = ixgbe_get_regs,
+       .get_wol                = ixgbe_get_wol,
+       .set_wol                = ixgbe_set_wol,
+       .nway_reset             = ixgbe_nway_reset,
+       .get_link               = ethtool_op_get_link,
+       .get_eeprom_len         = ixgbe_get_eeprom_len,
+       .get_eeprom             = ixgbe_get_eeprom,
+       .set_eeprom             = ixgbe_set_eeprom,
+       .get_ringparam          = ixgbe_get_ringparam,
+       .set_ringparam          = ixgbe_set_ringparam,
+       .get_pauseparam         = ixgbe_get_pauseparam,
+       .set_pauseparam         = ixgbe_set_pauseparam,
+       .get_msglevel           = ixgbe_get_msglevel,
+       .set_msglevel           = ixgbe_set_msglevel,
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+       .self_test_count        = ixgbe_diag_test_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+       .self_test              = ixgbe_diag_test,
+       .get_strings            = ixgbe_get_strings,
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+       .set_phys_id            = ixgbe_set_phys_id,
+#else
+       .phys_id                = ixgbe_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+       .get_stats_count        = ixgbe_get_stats_count,
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+       .get_sset_count         = ixgbe_get_sset_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
        .get_ethtool_stats      = ixgbe_get_ethtool_stats,
-       .get_coalesce           = ixgbe_get_coalesce,
-       .set_coalesce           = ixgbe_set_coalesce,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+       .get_perm_addr          = ethtool_op_get_perm_addr,
+#endif
+       .get_coalesce           = ixgbe_get_coalesce,
+       .set_coalesce           = ixgbe_set_coalesce,
+#ifndef HAVE_NDO_SET_FEATURES
+       .get_rx_csum            = ixgbe_get_rx_csum,
+       .set_rx_csum            = ixgbe_set_rx_csum,
+       .get_tx_csum            = ixgbe_get_tx_csum,
+       .set_tx_csum            = ixgbe_set_tx_csum,
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+       .get_tso                = ethtool_op_get_tso,
+       .set_tso                = ixgbe_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+       .get_flags              = ethtool_op_get_flags,
+       .set_flags              = ixgbe_set_flags,
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
        .get_rxnfc              = ixgbe_get_rxnfc,
        .set_rxnfc              = ixgbe_set_rxnfc,
+#ifdef ETHTOOL_SRXNTUPLE
+       .set_rx_ntuple          = ixgbe_set_rx_ntuple,
+#endif
+#endif
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
 {
        SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
 }
+#endif /* SIOCETHTOOL */
+
index c2da9cce45c2fb630c22f111b0591081dc23a7fd..21ec1ffdd43017f160e0be585b5efc03abe0f01d 100644 (file)
 *******************************************************************************/
 
 #include "ixgbe.h"
+
+
+#ifdef IXGBE_FCOE
+#ifdef CONFIG_DCB
+#include "ixgbe_dcb_82599.h"
+#endif /* CONFIG_DCB */
 #include <linux/if_ether.h>
-#include <linux/gfp.h>
-#include <linux/if_vlan.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/fc/fc_fs.h>
@@ -104,10 +108,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
                        udelay(100);
        }
        if (ddp->sgl)
-               pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+               dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
                             DMA_FROM_DEVICE);
        if (ddp->pool) {
-               pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+               dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
                ddp->pool = NULL;
        }
 
@@ -134,6 +138,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        struct ixgbe_hw *hw;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
+       struct ixgbe_fcoe_ddp_pool *ddp_pool;
        struct scatterlist *sg;
        unsigned int i, j, dmacount;
        unsigned int len;
@@ -144,10 +149,8 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        unsigned int thislen = 0;
        u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
        dma_addr_t addr = 0;
-       struct pci_pool *pool;
-       unsigned int cpu;
 
-       if (!netdev || !sgl)
+       if (!netdev || !sgl || !sgc)
                return 0;
 
        adapter = netdev_priv(netdev);
@@ -162,35 +165,40 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                return 0;
 
        fcoe = &adapter->fcoe;
-       if (!fcoe->pool) {
-               e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-               return 0;
-       }
-
        ddp = &fcoe->ddp[xid];
        if (ddp->sgl) {
                e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
-                     xid, ddp->sgl, ddp->sgc);
+                       xid, ddp->sgl, ddp->sgc);
                return 0;
        }
        ixgbe_fcoe_clear_ddp(ddp);
 
+
+       if (!fcoe->ddp_pool) {
+               e_warn(drv, "No ddp_pool resources allocated\n");
+               return 0;
+       }
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+       if (!ddp_pool->pool) {
+               e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+               goto out_noddp;
+       }
+
        /* setup dma from scsi command sgl */
-       dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+       dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
        if (dmacount == 0) {
                e_err(drv, "xid 0x%x DMA map error\n", xid);
-               return 0;
+               goto out_noddp;
        }
 
        /* alloc the udl from per cpu ddp pool */
-       cpu = get_cpu();
-       pool = *per_cpu_ptr(fcoe->pool, cpu);
-       ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed allocated ddp context\n");
                goto out_noddp_unmap;
        }
-       ddp->pool = pool;
+       ddp->pool = ddp_pool->pool;
        ddp->sgl = sgl;
        ddp->sgc = sgc;
 
@@ -201,7 +209,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                while (len) {
                        /* max number of buffers allowed in one DDP context */
                        if (j >= IXGBE_BUFFCNT_MAX) {
-                               *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+                               ddp_pool->noddp++;
                                goto out_noddp_free;
                        }
 
@@ -236,12 +244,13 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        lastsize = thisoff + thislen;
 
        /*
-        * lastsize can not be buffer len.
+        * lastsize can not be bufflen.
         * If it is then adding another buffer with lastsize = 1.
+        * Since lastsize is 1 there will be no HW access to this buffer.
         */
        if (lastsize == bufflen) {
                if (j >= IXGBE_BUFFCNT_MAX) {
-                       *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+                       ddp_pool->noddp_ext_buff++;
                        goto out_noddp_free;
                }
 
@@ -293,11 +302,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        return 1;
 
 out_noddp_free:
-       pci_pool_free(pool, ddp->udl, ddp->udp);
+       dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
        ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
-       pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+       dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
        put_cpu();
        return 0;
 }
@@ -322,6 +332,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
 }
 
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
 /**
  * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
  * @netdev: the corresponding net_device
@@ -338,11 +349,12 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
  * Returns : 1 for success and 0 for no ddp
  */
 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-                           struct scatterlist *sgl, unsigned int sgc)
+                         struct scatterlist *sgl, unsigned int sgc)
 {
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
 }
 
+#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
 /**
  * ixgbe_fcoe_ddp - check ddp status and mark it done
  * @adapter: ixgbe adapter
@@ -351,7 +363,7 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
  *
  * This checks ddp status.
  *
- * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates
+ * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
  * not passing the skb to ULD, > 0 indicates is the length of data
  * being ddped.
  */
@@ -359,11 +371,10 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                   union ixgbe_adv_rx_desc *rx_desc,
                   struct sk_buff *skb)
 {
-       int rc = -EINVAL;
-       struct ixgbe_fcoe *fcoe;
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct fc_frame_header *fh;
-       struct fcoe_crc_eof *crc;
+       int rc = -EINVAL;
        __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
        __le32 ddp_err;
        u32 fctl;
@@ -374,23 +385,23 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
-               fh = (struct fc_frame_header *)(skb->data +
-                       sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
-       else
-               fh = (struct fc_frame_header *)(skb->data +
-                       sizeof(struct fcoe_hdr));
+       /* verify header contains at least the FCOE header */
+       BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN);
+
+       fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr));
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN);
 
        fctl = ntoh24(fh->fh_f_ctl);
        if (fctl & FC_FC_EX_CTX)
-               xid =  be16_to_cpu(fh->fh_ox_id);
+               xid =  ntohs(fh->fh_ox_id);
        else
-               xid =  be16_to_cpu(fh->fh_rx_id);
+               xid =  ntohs(fh->fh_rx_id);
 
        if (xid >= IXGBE_FCOE_DDP_MAX)
                goto ddp_out;
 
-       fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
                goto ddp_out;
@@ -409,7 +420,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                break;
        /* unmap the sg list when FCPRSP is received */
        case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
-               pci_unmap_sg(adapter->pdev, ddp->sgl,
+               dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                             ddp->sgc, DMA_FROM_DEVICE);
                ddp->err = ddp_err;
                ddp->sgl = NULL;
@@ -437,6 +448,8 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
         */
        if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
            (fctl & FC_FC_END_SEQ)) {
+               struct fcoe_crc_eof *crc;
+               skb_linearize(skb);
                crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
                crc->fcoe_eof = FC_EOF_T;
        }
@@ -448,16 +461,15 @@ ddp_out:
  * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
  * @tx_ring: tx desc ring
  * @first: first tx_buffer structure containing skb, tx_flags, and protocol
- * @tx_flags: tx flags
  * @hdr_len: hdr_len to be returned
  *
  * This sets up large send offload for FCoE
  *
- * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ * Returns : 0 indicates success, < 0 for error
  */
 int ixgbe_fso(struct ixgbe_ring *tx_ring,
              struct ixgbe_tx_buffer *first,
-              u32 tx_flags, u8 *hdr_len)
+             u8 *hdr_len)
 {
        struct sk_buff *skb = first->skb;
        struct fc_frame_header *fh;
@@ -466,12 +478,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
        u32 mss_l4len_idx;
        u8 sof, eof;
 
-       if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-               dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-                       skb_shinfo(skb)->gso_type);
+#ifdef NETIF_F_FSO
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
+               dev_err(tx_ring->dev, "Wrong gso type %d:expecting "
+                       "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type);
                return -EINVAL;
        }
 
+#endif
        /* resets the header to point fcoe/fc */
        skb_set_network_header(skb, skb->mac_len);
        skb_set_transport_header(skb, skb->mac_len +
@@ -532,9 +546,18 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
        *hdr_len = sizeof(struct fcoe_crc_eof);
 
        /* hdr_len includes fc_hdr if FCoE LSO is enabled */
-       if (skb_is_gso(skb))
-               *hdr_len += (skb_transport_offset(skb) +
-                            sizeof(struct fc_frame_header));
+       if (skb_is_gso(skb)) {
+               *hdr_len += skb_transport_offset(skb) +
+                           sizeof(struct fc_frame_header);
+               /* update gso_segs and bytecount */
+               first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+                                              skb_shinfo(skb)->gso_size);
+               first->bytecount += (first->gso_segs - 1) * *hdr_len;
+               first->tx_flags |= IXGBE_TX_FLAGS_FSO;
+       }
+
+       /* set flag indicating FCOE to ixgbe_tx_map call */
+       first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
 
        /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
        mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -545,53 +568,46 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
                          sizeof(struct fc_frame_header);
        vlan_macip_lens |= (skb_transport_offset(skb) - 4)
                           << IXGBE_ADVTXD_MACLEN_SHIFT;
-       vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+       vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        /* write context desc */
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
                          IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
-       return skb_is_gso(skb);
+       return 0;
 }
 
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
 {
-       unsigned int cpu;
-       struct pci_pool **pool;
+       struct ixgbe_fcoe_ddp_pool *ddp_pool;
 
-       for_each_possible_cpu(cpu) {
-               pool = per_cpu_ptr(fcoe->pool, cpu);
-               if (*pool)
-                       pci_pool_destroy(*pool);
-       }
-       free_percpu(fcoe->pool);
-       fcoe->pool = NULL;
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       if (ddp_pool->pool)
+               dma_pool_destroy(ddp_pool->pool);
+       ddp_pool->pool = NULL;
 }
 
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+                                    struct device *dev,
+                                    unsigned int cpu)
 {
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       unsigned int cpu;
-       struct pci_pool **pool;
+       struct ixgbe_fcoe_ddp_pool *ddp_pool;
+       struct dma_pool *pool;
        char pool_name[32];
 
-       fcoe->pool = alloc_percpu(struct pci_pool *);
-       if (!fcoe->pool)
-               return;
+       snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
 
-       /* allocate pci pool for each cpu */
-       for_each_possible_cpu(cpu) {
-               snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
-               pool = per_cpu_ptr(fcoe->pool, cpu);
-               *pool = pci_pool_create(pool_name,
-                                       adapter->pdev, IXGBE_FCPTR_MAX,
-                                       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
-               if (!*pool) {
-                       e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
-                       ixgbe_fcoe_ddp_pools_free(fcoe);
-                       return;
-               }
-       }
+       pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+                              IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+       if (!pool)
+               return -ENOMEM;
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       ddp_pool->pool = pool;
+       ddp_pool->noddp = 0;
+       ddp_pool->noddp_ext_buff = 0;
+
+       return 0;
 }
 
 /**
@@ -604,134 +620,183 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
  */
 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_q, fcoe_i;
+       struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-       unsigned int cpu;
-
-       if (!fcoe->pool) {
-               spin_lock_init(&fcoe->lock);
-
-               ixgbe_fcoe_ddp_pools_alloc(adapter);
-               if (!fcoe->pool) {
-                       e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
-                       return;
-               }
-
-               /* Extra buffer to be shared by all DDPs for HW work around */
-               fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-               if (fcoe->extra_ddp_buffer == NULL) {
-                       e_err(drv, "failed to allocated extra DDP buffer\n");
-                       goto out_ddp_pools;
-               }
+       int i, fcoe_q, fcoe_i;
+       u32 etqf;
 
-               fcoe->extra_ddp_buffer_dma =
-                       dma_map_single(&adapter->pdev->dev,
-                                      fcoe->extra_ddp_buffer,
-                                      IXGBE_FCBUFF_MIN,
-                                      DMA_FROM_DEVICE);
-               if (dma_mapping_error(&adapter->pdev->dev,
-                                     fcoe->extra_ddp_buffer_dma)) {
-                       e_err(drv, "failed to map extra DDP buffer\n");
-                       goto out_extra_ddp_buffer;
-               }
+       /* Minimal functionality for FCoE requires at least CRC offloads */
+       if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+               return;
 
-               /* Alloc per cpu mem to count the ddp alloc failure number */
-               fcoe->pcpu_noddp = alloc_percpu(u64);
-               if (!fcoe->pcpu_noddp) {
-                       e_err(drv, "failed to alloc noddp counter\n");
-                       goto out_pcpu_noddp_alloc_fail;
-               }
+       /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+       etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               etqf |= IXGBE_ETQF_POOL_ENABLE;
+               etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
 
-               fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
-               if (!fcoe->pcpu_noddp_ext_buff) {
-                       e_err(drv, "failed to alloc noddp extra buff cnt\n");
-                       goto out_pcpu_noddp_extra_buff_alloc_fail;
-               }
+       /* leave remaining registers unconfigured if FCoE is disabled */
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return;
 
-               for_each_possible_cpu(cpu) {
-                       *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
-                       *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
-               }
+       /* Use one or more Rx queues for FCoE by redirection table */
+       for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+               fcoe_i = fcoe->offset + (i % fcoe->indices);
+               fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+               fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
        }
+       IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
 
-       /* Enable L2 eth type filter for FCoE */
-       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
-                       (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
-       /* Enable L2 eth type filter for FIP */
-       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
-                       (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
-       if (adapter->ring_feature[RING_F_FCOE].indices) {
-               /* Use multiple rx queues for FCoE by redirection table */
-               for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-                       fcoe_i = f->mask + i % f->indices;
-                       fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-                       fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
-               }
-               IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
-               IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
-       } else  {
-               /* Use single rx queue for FCoE */
-               fcoe_i = f->mask;
-               fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
-               IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
-                               IXGBE_ETQS_QUEUE_EN |
-                               (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+       /* Enable L2 EtherType filter for FIP */
+       etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               etqf |= IXGBE_ETQF_POOL_ENABLE;
+               etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
        }
-       /* send FIP frames to the first FCoE queue */
-       fcoe_i = f->mask;
-       fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+       /* Send FIP frames to the first FCoE queue */
+       fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                        IXGBE_ETQS_QUEUE_EN |
                        (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
-       IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+       /* Configure FCoE Rx control */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+                       IXGBE_FCRXCTRL_FCOELLI |
+                       IXGBE_FCRXCTRL_FCCRCBO |
                        (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-       return;
-out_pcpu_noddp_extra_buff_alloc_fail:
-       free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-       dma_unmap_single(&adapter->pdev->dev,
-                        fcoe->extra_ddp_buffer_dma,
-                        IXGBE_FCBUFF_MIN,
-                        DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
-       kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-       ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter : ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns : none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-       int i;
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       int cpu, i;
 
-       if (!fcoe->pool)
+       /* do nothing if no DDP pools were allocated */
+       if (!fcoe->ddp_pool)
                return;
 
        for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
                ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+       for_each_possible_cpu(cpu)
+               ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
        dma_unmap_single(&adapter->pdev->dev,
                         fcoe->extra_ddp_buffer_dma,
                         IXGBE_FCBUFF_MIN,
                         DMA_FROM_DEVICE);
-       free_percpu(fcoe->pcpu_noddp);
-       free_percpu(fcoe->pcpu_noddp_ext_buff);
        kfree(fcoe->extra_ddp_buffer);
-       ixgbe_fcoe_ddp_pools_free(fcoe);
+
+       fcoe->extra_ddp_buffer = NULL;
+       fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       struct device *dev = &adapter->pdev->dev;
+       void *buffer;
+       dma_addr_t dma;
+       unsigned int cpu;
+
+       /* do nothing if no DDP pools were allocated */
+       if (!fcoe->ddp_pool)
+               return 0;
+
+       /* Extra buffer to be shared by all DDPs for HW work around */
+       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+       if (!buffer) {
+               e_err(drv, "failed to allocate extra DDP buffer\n");
+               return -ENOMEM;
+       }
+
+       dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, dma)) {
+               e_err(drv, "failed to map extra DDP buffer\n");
+               kfree(buffer);
+               return -ENOMEM;
+       }
+
+       fcoe->extra_ddp_buffer = buffer;
+       fcoe->extra_ddp_buffer_dma = dma;
+
+       /* allocate pci pool for each cpu */
+       for_each_possible_cpu(cpu) {
+               int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+               if (!err)
+                       continue;
+
+               e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+               ixgbe_free_fcoe_ddp_resources(adapter);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+#else
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+#endif
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+               return -EINVAL;
+
+       fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+       if (!fcoe->ddp_pool) {
+               e_err(drv, "failed to allocate percpu DDP resources\n");
+               return -ENOMEM;
+       }
+
+       adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+       return 0;
+}
+
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+#else
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+#endif
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+       adapter->netdev->fcoe_ddp_xid = 0;
+
+       if (!fcoe->ddp_pool)
+               return;
+
+       free_percpu(fcoe->ddp_pool);
+       fcoe->ddp_pool = NULL;
 }
 
+#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
 /**
  * ixgbe_fcoe_enable - turn on FCoE offload feature
  * @netdev: the corresponding netdev
@@ -742,40 +807,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
  */
 int ixgbe_fcoe_enable(struct net_device *netdev)
 {
-       int rc = -EINVAL;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
+       atomic_inc(&fcoe->refcnt);
 
        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-               goto out_enable;
+               return -EINVAL;
 
-       atomic_inc(&fcoe->refcnt);
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               goto out_enable;
+               return -EINVAL;
 
        e_info(drv, "Enabling FCoE offload features.\n");
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
 
-       ixgbe_clear_interrupt_scheme(adapter);
+       /* Allocate per CPU memory to track DDP pools */
+       ixgbe_fcoe_ddp_enable(adapter);
 
+       /* enable FCoE and notify stack */
        adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-       adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
-       netdev->features |= NETIF_F_FCOE_CRC;
-       netdev->features |= NETIF_F_FSO;
        netdev->features |= NETIF_F_FCOE_MTU;
-       netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+       netdev_features_change(netdev);
 
+       /* release existing queues and reallocate them */
+       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
-       netdev_features_change(netdev);
 
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);
-       rc = 0;
 
-out_enable:
-       return rc;
+       return 0;
 }
 
 /**
@@ -788,43 +850,59 @@ out_enable:
  */
 int ixgbe_fcoe_disable(struct net_device *netdev)
 {
-       int rc = -EINVAL;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-       if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-               goto out_disable;
+       if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+               return -EINVAL;
 
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-               goto out_disable;
-
-       if (!atomic_dec_and_test(&fcoe->refcnt))
-               goto out_disable;
+               return -EINVAL;
 
        e_info(drv, "Disabling FCoE offload features.\n");
-       netdev->features &= ~NETIF_F_FCOE_CRC;
-       netdev->features &= ~NETIF_F_FSO;
-       netdev->features &= ~NETIF_F_FCOE_MTU;
-       netdev->fcoe_ddp_xid = 0;
-       netdev_features_change(netdev);
-
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
 
-       ixgbe_clear_interrupt_scheme(adapter);
+       /* Free per CPU memory to track DDP pools */
+       ixgbe_fcoe_ddp_disable(adapter);
+
+       /* disable FCoE and notify stack */
        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-       adapter->ring_feature[RING_F_FCOE].indices = 0;
-       ixgbe_cleanup_fcoe(adapter);
+       netdev->features &= ~NETIF_F_FCOE_MTU;
+
+       netdev_features_change(netdev);
+
+       /* release existing queues and reallocate them */
+       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
 
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);
-       rc = 0;
 
-out_disable:
-       return rc;
+       return 0;
 }
+#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
 
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_OPS_GETAPP
+/**
+ * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
+ * @netdev: the corresponding net_device
+ *
+ * Finds out the corresponding user priority bitmap from the current
+ * traffic class that FCoE belongs to. Returns 0 as the invalid user
+ * priority bitmap to indicate an error.
+ *
+ * Returns : 802.1p user priority bitmap for FCoE
+ */
+u8 ixgbe_fcoe_getapp(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       return 1 << adapter->fcoe.up;
+}
+
+#endif /* HAVE_DCBNL_OPS_GETAPP */
+#endif /* CONFIG_DCB */
+#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
 /**
  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
  * @netdev : ixgbe adapter
@@ -868,3 +946,16 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
        }
        return rc;
 }
+
+#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */
+/**
+ * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
+ * @adapter: pointer to the device adapter structure
+ *
+ * Return : TC that FCoE is mapped to
+ */
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
+{
+       return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
+}
+#endif /* IXGBE_FCOE */
index 1dbed17c8107bf1b6eb78f1331957b120dd28815..2c7a00e420276088cce389ddfeb31eb51a377539 100644 (file)
@@ -28,6 +28,8 @@
 #ifndef _IXGBE_FCOE_H
 #define _IXGBE_FCOE_H
 
+#ifdef IXGBE_FCOE
+
 #include <scsi/fc/fc_fs.h>
 #include <scsi/fc/fc_fcoe.h>
 
@@ -37,7 +39,7 @@
 /* ddp user buffer */
 #define IXGBE_BUFFCNT_MAX      256     /* 8 bits bufcnt */
 #define IXGBE_FCPTR_ALIGN      16
-#define IXGBE_FCPTR_MAX        (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCPTR_MAX                (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
 #define IXGBE_FCBUFF_4KB       0x0
 #define IXGBE_FCBUFF_8KB       0x1
 #define IXGBE_FCBUFF_16KB      0x2
 #define IXGBE_FCBUFF_MIN       4096    /* 4KB min */
 #define IXGBE_FCOE_DDP_MAX     512     /* 9 bits xid */
 
-/* Default traffic class to use for FCoE */
-#define IXGBE_FCOE_DEFTC       3
+/* Default user priority to use for FCoE */
+#define IXGBE_FCOE_DEFUP       3
 
 /* fcerr */
-#define IXGBE_FCERR_BADCRC       0x00100000
+#define IXGBE_FCERR_BADCRC     0x00100000
+#define IXGBE_FCERR_EOFSOF     0x00200000
+#define IXGBE_FCERR_NOFIRST    0x00300000
+#define IXGBE_FCERR_OOOSEQ     0x00400000
+#define IXGBE_FCERR_NODMA      0x00500000
+#define IXGBE_FCERR_PKTLOST    0x00600000
 
 /* FCoE DDP for target mode */
 #define __IXGBE_FCOE_TARGET    1
@@ -62,22 +69,27 @@ struct ixgbe_fcoe_ddp {
        struct scatterlist *sgl;
        dma_addr_t udp;
        u64 *udl;
-       struct pci_pool *pool;
+       struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+       struct dma_pool *pool;
+       u64 noddp;
+       u64 noddp_ext_buff;
 };
 
 struct ixgbe_fcoe {
-       struct pci_pool **pool;
+       struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
        atomic_t refcnt;
        spinlock_t lock;
        struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
-       unsigned char *extra_ddp_buffer;
+       void *extra_ddp_buffer;
        dma_addr_t extra_ddp_buffer_dma;
        unsigned long mode;
-       u64 __percpu *pcpu_noddp;
-       u64 __percpu *pcpu_noddp_ext_buff;
-#ifdef CONFIG_IXGBE_DCB
        u8 up;
-#endif
+       u8 up_set;
 };
+#endif /* IXGBE_FCOE */
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_lib.c b/drivers/net/ixgbe/ixgbe_lib.c
new file mode 100644 (file)
index 0000000..36a858a
--- /dev/null
@@ -0,0 +1,1269 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_sriov.h"
+
+#ifdef HAVE_TX_MQ
+/**
+ * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for VMDq to the assigned rings.  It
+ * will also try to cache the proper offsets if RSS/FCoE are enabled along
+ * with VMDq.
+ *
+ **/
+static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter)
+{
+#ifdef IXGBE_FCOE
+       struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       int i;
+       u16 reg_idx;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+       /* verify we have DCB enabled before proceeding */
+       if (tcs <= 1)
+               return false;
+
+       /* verify we have VMDq enabled before proceeding */
+       if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+               return false;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               /*
+                * The bits on the 82598 are reversed compared to the other
+                * adapters.  The DCB bits are the higher order bits and the
+                * lower bits belong to the VMDq pool.  In order to sort
+                * this out we have to swap the bits to get the correct layout
+                */
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       u8 reg_idx = ((i >> 3) | (i << 3)) & 0x3F;
+                       adapter->rx_ring[i]->reg_idx = reg_idx;
+               }
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       u8 reg_idx = ((i >> 4) | (i << 2)) & 0x1F;
+                       adapter->tx_ring[i]->reg_idx = reg_idx;
+               }
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               /* start at VMDq register offset for SR-IOV enabled setups */
+               reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+               for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+                       /* If we are greater than indices move to next pool */
+                       if ((reg_idx & ~vmdq->mask) >= tcs)
+                               reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+                       adapter->rx_ring[i]->reg_idx = reg_idx;
+               }
+
+               reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+               for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
+                       /* If we are greater than indices move to next pool */
+                       if ((reg_idx & ~vmdq->mask) >= tcs)
+                               reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+                       adapter->tx_ring[i]->reg_idx = reg_idx;
+               }
+
+               break;
+       default:
+               break;
+       }
+
+#ifdef IXGBE_FCOE
+       /* nothing to do if FCoE is disabled */
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return true;
+
+       /* The work is already done if the FCoE ring is shared */
+       if (fcoe->offset < tcs)
+               return true;
+
+       /* The FCoE rings exist separately, we need to move their reg_idx */
+       if (fcoe->indices) {
+               u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+               u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
+
+               reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+               for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
+                       reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+                       adapter->rx_ring[i]->reg_idx = reg_idx;
+                       reg_idx++;
+               }
+
+               reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+               for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
+                       reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+                       adapter->tx_ring[i]->reg_idx = reg_idx;
+                       reg_idx++;
+               }
+       }
+
+#endif /* IXGBE_FCOE */
+       return true;
+}
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+                                   unsigned int *tx, unsigned int *rx)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *dev = adapter->netdev;
+       u8 num_tcs = netdev_get_num_tc(dev);
+
+       *tx = 0;
+       *rx = 0;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               /* TxQs/TC: 4   RxQs/TC: 8 */
+               *tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
+               *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (num_tcs > 4) {
+                       /*
+                        * TCs    : TC0/1 TC2/3 TC4-7
+                        * TxQs/TC:    32    16     8
+                        * RxQs/TC:    16    16    16
+                        */
+                       *rx = tc << 4;
+                       if (tc < 3)
+                               *tx = tc << 5;          /*   0,  32,  64 */
+                       else if (tc < 5)
+                               *tx = (tc + 2) << 4;    /*  80,  96 */
+                       else
+                               *tx = (tc + 8) << 3;    /* 104, 112, 120 */
+               } else {
+                       /*
+                        * TCs    : TC0 TC1 TC2/3
+                        * TxQs/TC:  64  32    16
+                        * RxQs/TC:  32  32    32
+                        */
+                       *rx = tc << 5;
+                       if (tc < 2)
+                               *tx = tc << 6;          /*  0,  64 */
+                       else
+                               *tx = (tc + 4) << 4;    /* 96, 112 */
+               }
+       default:
+               break;
+       }
+}
+
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+       int tc, offset, rss_i, i;
+       unsigned int tx_idx, rx_idx;
+       struct net_device *dev = adapter->netdev;
+       u8 num_tcs = netdev_get_num_tc(dev);
+
+       if (num_tcs <= 1)
+               return false;
+
+       rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+       for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+               ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+               for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+                       adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+                       adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+                       adapter->tx_ring[offset + i]->dcb_tc = tc;
+                       adapter->rx_ring[offset + i]->dcb_tc = tc;
+               }
+       }
+
+       return true;
+}
+
+#endif /* HAVE_TX_MQ */
+/**
+ * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for VMDq to the assigned rings.  It
+ * will also try to cache the proper offsets if RSS/FCoE are enabled along
+ * with VMDq.
+ *
+ **/
+static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter)
+{
+#ifdef IXGBE_FCOE
+       struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+       int i;
+       u16 reg_idx;
+
+       /* only proceed if VMDq is enabled */
+       if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+               return false;
+
+       /* start at VMDq register offset for SR-IOV enabled setups */
+       reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+       for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+#ifdef IXGBE_FCOE
+               /* Allow first FCoE queue to be mapped as RSS */
+               if (fcoe->offset && (i > fcoe->offset))
+                       break;
+#endif
+               /* If we are greater than indices move to next pool */
+               if ((reg_idx & ~vmdq->mask) >= rss->indices)
+                       reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+               adapter->rx_ring[i]->reg_idx = reg_idx;
+       }
+
+#ifdef IXGBE_FCOE
+       /* FCoE uses a linear block of queues so just assigning 1:1 */
+       for (; i < adapter->num_rx_queues; i++, reg_idx++)
+               adapter->rx_ring[i]->reg_idx = reg_idx;
+
+#endif
+       reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+       for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
+#ifdef IXGBE_FCOE
+               /* Allow first FCoE queue to be mapped as RSS */
+               if (fcoe->offset && (i > fcoe->offset))
+                       break;
+#endif
+               /* If we are greater than indices move to next pool */
+               if ((reg_idx & rss->mask) >= rss->indices)
+                       reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+               adapter->tx_ring[i]->reg_idx = reg_idx;
+       }
+
+#ifdef IXGBE_FCOE
+       /* FCoE uses a linear block of queues so just assigning 1:1 */
+       for (; i < adapter->num_tx_queues; i++, reg_idx++)
+               adapter->tx_ring[i]->reg_idx = reg_idx;
+
+#endif
+
+
+       return true;
+}
+
+/**
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV.
+ *
+ **/
+static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->reg_idx = i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i]->reg_idx = i;
+
+       return true;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important.  It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+#ifdef HAVE_TX_MQ
+       if (ixgbe_cache_ring_dcb_vmdq(adapter))
+               return;
+
+       if (ixgbe_cache_ring_dcb(adapter))
+               return;
+
+#endif
+       if (ixgbe_cache_ring_vmdq(adapter))
+               return;
+
+       ixgbe_cache_ring_rss(adapter);
+}
+
+#define IXGBE_RSS_16Q_MASK     0xF
+#define IXGBE_RSS_8Q_MASK      0x7
+#define IXGBE_RSS_4Q_MASK      0x3
+#define IXGBE_RSS_2Q_MASK      0x1
+#define IXGBE_RSS_DISABLED_MASK        0x0
+
+#ifdef HAVE_TX_MQ
+/**
+ * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB
+ * @adapter: board private structure to initialize
+ *
+ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues
+ * and VM pools where appropriate.  Also assign queues based on DCB
+ * priorities and map accordingly.
+ *
+ **/
+static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter)
+{
+       int i;
+       u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+       u16 vmdq_m = 0;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
+#ifdef IXGBE_FCOE
+       u16 fcoe_i = 0;
+#endif
+
+       /* verify we have DCB enabled before proceeding */
+       if (tcs <= 1)
+               return false;
+
+       /* verify we have VMDq enabled before proceeding */
+       if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+               return false;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               /* 4 pools w/ 8TC per pool */
+               vmdq_i = min_t(u16, vmdq_i, 4);
+               vmdq_m = 0x7;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               /* Add starting offset to total pool count */
+               vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+               /* 16 pools w/ 8 TC per pool */
+               if (tcs > 4) {
+                       vmdq_i = min_t(u16, vmdq_i, 16);
+                       vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
+               /* 32 pools w/ 4 TC per pool */
+               } else {
+                       vmdq_i = min_t(u16, vmdq_i, 32);
+                       vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+               }
+
+#ifdef IXGBE_FCOE
+               /* queues in the remaining pools are available for FCoE */
+               fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
+
+#endif
+               /* remove the starting offset from the pool count */
+               vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+               break;
+       default:
+               /* unknown hardware, only support one pool w/ one queue */
+               vmdq_i = 1;
+               tcs = 1;
+               break;
+       }
+
+       /* save features for later use */
+       adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+       adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+       /*
+        * We do not support DCB, VMDq, and RSS all simultaneously
+        * so we will disable RSS since it is the lowest priority
+        */
+       adapter->ring_feature[RING_F_RSS].indices = 1;
+       adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+
+       adapter->num_rx_pools = vmdq_i;
+       adapter->num_rx_queues_per_pool = tcs;
+
+       adapter->num_tx_queues = vmdq_i * tcs;
+       adapter->num_rx_queues = vmdq_i * tcs;
+
+       /* disable ATR as it is not supported when VMDq is enabled */
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               struct ixgbe_ring_feature *fcoe;
+
+               fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+               /* limit ourselves based on feature limits */
+               fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+               fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+               if (fcoe_i) {
+                       /* alloc queues for FCoE separately */
+                       fcoe->indices = fcoe_i;
+                       fcoe->offset = vmdq_i * tcs;
+
+                       /* add queues to adapter */
+                       adapter->num_tx_queues += fcoe_i;
+                       adapter->num_rx_queues += fcoe_i;
+               } else if (tcs > 1) {
+                       /* use queue belonging to FCoE TC */
+                       fcoe->indices = 1;
+                       fcoe->offset = ixgbe_fcoe_get_tc(adapter);
+               } else {
+                       adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+
+                       fcoe->indices = 0;
+                       fcoe->offset = 0;
+               }
+       }
+
+#endif /* IXGBE_FCOE */
+       /* configure TC to queue mapping */
+       for (i = 0; i < tcs; i++)
+               netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+       return true;
+}
+
+/**
+ * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
+ * @adapter: board private structure to initialize
+ *
+ * When DCB (Data Center Bridging) is enabled, allocate queues for
+ * each traffic class.  If multiqueue isn't available, then abort DCB
+ * initialization.
+ *
+ * This function handles all combinations of DCB, RSS, and FCoE.
+ *
+ **/
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+       struct net_device *dev = adapter->netdev;
+       struct ixgbe_ring_feature *f;
+       int rss_i, rss_m, i;
+       int tcs;
+
+       /* Map queue offset and counts onto allocated tx queues */
+       tcs = netdev_get_num_tc(dev);
+
+       if (tcs <= 1)
+               return false;
+
+       /* determine the upper limit for our current DCB mode */
+       rss_i = dev->num_tx_queues / tcs;
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               /* 8 TC w/ 4 queues per TC */
+               rss_i = min_t(u16, rss_i, 4);
+               rss_m = IXGBE_RSS_4Q_MASK;
+       } else if (tcs > 4) {
+               /* 8 TC w/ 8 queues per TC */
+               rss_i = min_t(u16, rss_i, 8);
+               rss_m = IXGBE_RSS_8Q_MASK;
+       } else {
+               /* 4 TC w/ 16 queues per TC */
+               rss_i = min_t(u16, rss_i, 16);
+               rss_m = IXGBE_RSS_16Q_MASK;
+       }
+
+       /* set RSS mask and indices */
+       f = &adapter->ring_feature[RING_F_RSS];
+       rss_i = min_t(u16, rss_i, f->limit);
+       f->indices = rss_i;
+       f->mask = rss_m;
+
+       /* disable ATR as it is not supported when DCB is enabled */
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+       /*
+        * FCoE enabled queues require special configuration indexed
+        * by feature specific indices and mask. Here we map FCoE
+        * indices onto the DCB queue pairs allowing FCoE to own
+        * configuration later.
+        */
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               u8 tc = ixgbe_fcoe_get_tc(adapter);
+
+               f = &adapter->ring_feature[RING_F_FCOE];
+               f->indices = min_t(u16, rss_i, f->limit);
+               f->offset = rss_i * tc;
+       }
+
+#endif /* IXGBE_FCOE */
+       for (i = 0; i < tcs; i++)
+               netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+       adapter->num_tx_queues = rss_i * tcs;
+       adapter->num_rx_queues = rss_i * tcs;
+
+       return true;
+}
+
+#endif
+/**
+ * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices
+ * @adapter: board private structure to initialize
+ *
+ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues
+ * and VM pools where appropriate.  If RSS is available, then also try and
+ * enable RSS and map accordingly.
+ *
+ **/
+static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter)
+{
+       u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+       u16 vmdq_m = 0;
+       u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+       u16 rss_m = IXGBE_RSS_DISABLED_MASK;
+#ifdef IXGBE_FCOE
+       u16 fcoe_i = 0;
+#endif
+
+       /* only proceed if VMDq is enabled */
+       if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+               return false;
+
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               vmdq_i = min_t(u16, vmdq_i, 16);
+               /* 16 pool mode with 1 queue per pool */
+               if ((vmdq_i > 4) || (rss_i == 1)) {
+                       vmdq_m = 0x0F;
+                       rss_i = 1;
+               /* 4 pool mode with 8 queues per pool */
+               } else {
+                       vmdq_m = 0x18;
+                       rss_m = IXGBE_RSS_8Q_MASK;
+                       rss_i = min_t(u16, rss_i, 8);
+               }
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               /* Add starting offset to total pool count */
+               vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+               /* double check we are limited to maximum pools */
+               vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
+
+               /* 64 pool mode with 2 queues per pool */
+               if ((vmdq_i > 32) || (rss_i < 4)) {
+                       vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
+                       rss_m = IXGBE_RSS_2Q_MASK;
+                       rss_i = min_t(u16, rss_i, 2);
+               /* 32 pool mode with 4 queues per pool */
+               } else {
+                       vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+                       rss_m = IXGBE_RSS_4Q_MASK;
+                       rss_i = 4;
+               }
+
+#ifdef IXGBE_FCOE
+               /* queues in the remaining pools are available for FCoE */
+               fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
+
+#endif
+               /* remove the starting offset from the pool count */
+               vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+               break;
+       default:
+               /* unknown hardware, support one pool w/ one queue */
+               vmdq_i = 1;
+               rss_i = 1;
+               break;
+       }
+
+       /* save features for later use */
+       adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+       adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+       /* limit RSS based on user input and save for later use */
+       adapter->ring_feature[RING_F_RSS].indices = rss_i;
+       adapter->ring_feature[RING_F_RSS].mask = rss_m;
+
+       adapter->num_rx_pools = vmdq_i;
+       adapter->num_rx_queues_per_pool = rss_i;
+
+       adapter->num_rx_queues = vmdq_i * rss_i;
+#ifdef HAVE_TX_MQ
+       adapter->num_tx_queues = vmdq_i * rss_i;
+#else
+       adapter->num_tx_queues = vmdq_i;
+#endif /* HAVE_TX_MQ */
+
+       /* disable ATR as it is not supported when VMDq is enabled */
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+       /*
+        * FCoE can use rings from adjacent buffers to allow RSS
+        * like behavior.  To account for this we need to add the
+        * FCoE indices to the total ring count.
+        */
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               struct ixgbe_ring_feature *fcoe;
+
+               fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+               /* limit ourselves based on feature limits */
+               fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+               if (vmdq_i > 1 && fcoe_i) {
+                       /* reserve no more than number of CPUs */
+                       fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+
+                       /* alloc queues for FCoE separately */
+                       fcoe->indices = fcoe_i;
+                       fcoe->offset = vmdq_i * rss_i;
+               } else {
+                       /* merge FCoE queues with RSS queues */
+                       fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
+
+                       /* limit indices to rss_i if MSI-X is disabled */
+                       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+                               fcoe_i = rss_i;
+
+                       /* attempt to reserve some queues for just FCoE */
+                       fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
+                       fcoe->offset = fcoe_i - fcoe->indices;
+                       fcoe_i -= rss_i;
+               }
+
+               /* add queues to adapter */
+               adapter->num_tx_queues += fcoe_i;
+               adapter->num_rx_queues += fcoe_i;
+       }
+
+#endif
+       return true;
+}
+
+/**
+ * ixgbe_set_rss_queues: Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_ring_feature *f;
+       u16 rss_i;
+
+       /* set mask for 16 queue limit of RSS */
+       f = &adapter->ring_feature[RING_F_RSS];
+       rss_i = f->limit;
+
+       f->indices = rss_i;
+       f->mask = IXGBE_RSS_16Q_MASK;
+
+       /* disable ATR by default, it will be configured below */
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+       /*
+        * Use Flow Director in addition to RSS to ensure the best
+        * distribution of flows across cores, even when an FDIR flow
+        * isn't matched.
+        */
+       if (rss_i > 1 && adapter->atr_sample_rate) {
+               f = &adapter->ring_feature[RING_F_FDIR];
+
+               f->indices = min_t(u16, num_online_cpus(), f->limit);
+               rss_i = max_t(u16, rss_i, f->indices);
+
+               if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+       }
+
+#ifdef IXGBE_FCOE
+       /*
+        * FCoE can exist on the same rings as standard network traffic
+        * however it is preferred to avoid that if possible.  In order
+        * to get the best performance we allocate as many FCoE queues
+        * as we can and we place them at the end of the ring array to
+        * avoid sharing queues with standard RSS on systems with 24 or
+        * more CPUs.
+        */
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               u16 fcoe_i;
+
+               f = &adapter->ring_feature[RING_F_FCOE];
+
+               /* merge FCoE queues with RSS queues */
+               fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+
+               /* limit indices to rss_i if MSI-X is disabled */
+               if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+                       fcoe_i = rss_i;
+
+               /* attempt to reserve some queues for just FCoE */
+               f->indices = min_t(u16, fcoe_i, f->limit);
+               f->offset = fcoe_i - f->indices;
+               rss_i = max_t(u16, fcoe_i, rss_i);
+       }
+
+#endif /* IXGBE_FCOE */
+       adapter->num_rx_queues = rss_i;
+#ifdef HAVE_TX_MQ
+       adapter->num_tx_queues = rss_i;
+#endif
+
+       return true;
+}
+
+/*
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+       /* Start with base case */
+       adapter->num_rx_queues = 1;
+       adapter->num_tx_queues = 1;
+       adapter->num_rx_pools = adapter->num_rx_queues;
+       adapter->num_rx_queues_per_pool = 1;
+
+#ifdef HAVE_TX_MQ
+       if (ixgbe_set_dcb_vmdq_queues(adapter))
+               return;
+
+       if (ixgbe_set_dcb_queues(adapter))
+               return;
+
+#endif
+       if (ixgbe_set_vmdq_queues(adapter))
+               return;
+
+       ixgbe_set_rss_queues(adapter);
+}
+
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+                                      int vectors)
+{
+       int err, vector_threshold;
+
+       /*
+        * We'll want at least 2 (vector_threshold):
+        * 1) TxQ[0] + RxQ[0] handler
+        * 2) Other (Link Status Change, etc.)
+        */
+               vector_threshold = MIN_MSIX_COUNT;
+
+       /*
+        * The more we get, the more we will assign to Tx/Rx Cleanup
+        * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+        * Right now, we simply care about how many we'll get; we'll
+        * set them up later while requesting irq's.
+        */
+       while (vectors >= vector_threshold) {
+               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+                                     vectors);
+               if (!err) /* Success in acquiring all requested vectors. */
+                       break;
+               else if (err < 0)
+                       vectors = 0; /* Nasty failure, quit now */
+               else /* err == number of vectors we should try again with */
+                       vectors = err;
+       }
+
+       if (vectors < vector_threshold) {
+               /* Can't allocate enough MSI-X interrupts?  Oh well.
+                * This just means we'll go with either a single MSI
+                * vector or fall back to legacy interrupts.
+                */
+               e_warn(hw, "Unable to allocate MSI-X interrupts\n");
+               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+               kfree(adapter->msix_entries);
+               adapter->msix_entries = NULL;
+       } else {
+               adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+               /*
+                * Adjust for only the vectors we'll use, which is minimum
+                * of max_q_vectors, or the number of vectors we were allocated.
+                */
+               vectors -= NON_Q_VECTORS;
+               adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+       }
+}
+
+static void ixgbe_add_ring(struct ixgbe_ring *ring,
+                          struct ixgbe_ring_container *head)
+{
+       ring->next = head->ring;
+       head->ring = ring;
+       head->count++;
+}
+
+/**
+ * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ *
+ * The q_vector and all of its rings come from a single allocation: the
+ * ring array is laid out directly behind the q_vector structure.
+ **/
+static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
+                               unsigned int v_count, unsigned int v_idx,
+                               unsigned int txr_count, unsigned int txr_idx,
+                               unsigned int rxr_count, unsigned int rxr_idx)
+{
+       struct ixgbe_q_vector *q_vector;
+       struct ixgbe_ring *ring;
+       int node = -1;          /* -1 means no NUMA node preference */
+#ifdef HAVE_IRQ_AFFINITY_HINT
+       int cpu = -1;           /* -1 means no explicit CPU affinity */
+#endif
+       int ring_count, size;
+
+       /* one allocation covers the vector plus all of its rings */
+       ring_count = txr_count + rxr_count;
+       size = sizeof(struct ixgbe_q_vector) +
+              (sizeof(struct ixgbe_ring) * ring_count);
+
+#ifdef HAVE_IRQ_AFFINITY_HINT
+       /* customize cpu for Flow Director mapping */
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               if (cpu_online(v_idx)) {
+                       cpu = v_idx;
+                       node = cpu_to_node(cpu);
+               }
+       }
+
+#endif
+       /* allocate q_vector and rings; fall back to any node on failure */
+       q_vector = kzalloc_node(size, GFP_KERNEL, node);
+       if (!q_vector)
+               q_vector = kzalloc(size, GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       /* setup affinity mask and node */
+#ifdef HAVE_IRQ_AFFINITY_HINT
+       if (cpu != -1)
+               cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+       else
+               cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
+#endif
+       q_vector->numa_node = node;
+
+#ifndef IXGBE_NO_LRO
+       /* initialize LRO */
+       __skb_queue_head_init(&q_vector->lrolist.active);
+
+#endif
+       /* initialize NAPI */
+       netif_napi_add(adapter->netdev, &q_vector->napi,
+                      ixgbe_poll, 64);
+
+       /* tie q_vector and adapter together */
+       adapter->q_vector[v_idx] = q_vector;
+       q_vector->adapter = adapter;
+       q_vector->v_idx = v_idx;
+
+       /* initialize work limits */
+       q_vector->tx.work_limit = adapter->tx_work_limit;
+       q_vector->rx.work_limit = adapter->rx_work_limit;
+
+       /* initialize pointer to rings */
+       ring = q_vector->ring;
+
+       /*
+        * Queue indices advance by v_count each iteration, so the rings
+        * owned by successive vectors interleave across the adapter's
+        * tx_ring[]/rx_ring[] tables.
+        */
+       while (txr_count) {
+               /* assign generic ring traits */
+               ring->dev = pci_dev_to_dev(adapter->pdev);
+               ring->netdev = adapter->netdev;
+
+               /* configure backlink on ring */
+               ring->q_vector = q_vector;
+
+               /* update q_vector Tx values */
+               ixgbe_add_ring(ring, &q_vector->tx);
+
+               /* apply Tx specific ring traits */
+               ring->count = adapter->tx_ring_count;
+               ring->queue_index = txr_idx;
+
+               /* assign ring to adapter */
+               adapter->tx_ring[txr_idx] = ring;
+
+               /* update count and index */
+               txr_count--;
+               txr_idx += v_count;
+
+               /* push pointer to next ring */
+               ring++;
+       }
+
+       while (rxr_count) {
+               /* assign generic ring traits */
+               ring->dev = pci_dev_to_dev(adapter->pdev);
+               ring->netdev = adapter->netdev;
+
+               /* configure backlink on ring */
+               ring->q_vector = q_vector;
+
+               /* update q_vector Rx values */
+               ixgbe_add_ring(ring, &q_vector->rx);
+
+               /*
+                * 82599 errata, UDP frames with a 0 checksum
+                * can be marked as checksum errors.
+                */
+               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+                       set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
+
+#ifndef HAVE_NDO_SET_FEATURES
+               /* enable rx csum by default */
+               set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+
+#endif
+#ifdef IXGBE_FCOE
+               /* mark rings that fall inside the FCoE queue region */
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       struct ixgbe_ring_feature *f;
+                       f = &adapter->ring_feature[RING_F_FCOE];
+
+                       if ((rxr_idx >= f->offset) &&
+                           (rxr_idx < f->offset + f->indices)) {
+                               set_bit(__IXGBE_RX_FCOE, &ring->state);
+                       }
+               }
+
+#endif
+               /* apply Rx specific ring traits */
+               ring->count = adapter->rx_ring_count;
+               ring->queue_index = rxr_idx;
+
+               /* assign ring to adapter */
+               adapter->rx_ring[rxr_idx] = ring;
+
+               /* update count and index */
+               rxr_count--;
+               rxr_idx += v_count;
+
+               /* push pointer to next ring */
+               ring++;
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+{
+       struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+       struct ixgbe_ring *ring;
+
+       /*
+        * The rings are embedded in the q_vector allocation, so clear the
+        * adapter's pointers to them before that memory is released.
+        */
+       ixgbe_for_each_ring(ring, q_vector->tx)
+               adapter->tx_ring[ring->queue_index] = NULL;
+
+       ixgbe_for_each_ring(ring, q_vector->rx)
+               adapter->rx_ring[ring->queue_index] = NULL;
+
+       adapter->q_vector[v_idx] = NULL;
+       /* unhook NAPI before the q_vector (and its rings) are freed */
+       netif_napi_del(&q_vector->napi);
+#ifndef IXGBE_NO_LRO
+       __skb_queue_purge(&q_vector->lrolist.active);
+#endif
+       kfree(q_vector);
+}
+
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+       unsigned int q_vectors = adapter->num_q_vectors;
+       unsigned int rxr_remaining = adapter->num_rx_queues;
+       unsigned int txr_remaining = adapter->num_tx_queues;
+       unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+       int err;
+
+       /*
+        * If there are enough vectors for every ring to have its own,
+        * give each Rx ring a dedicated Rx-only vector first; the Tx
+        * rings are then spread over the vectors that remain below.
+        */
+       if (q_vectors >= (rxr_remaining + txr_remaining)) {
+               for (; rxr_remaining; v_idx++) {
+                       err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+                                                  0, 0, 1, rxr_idx);
+                       if (err)
+                               goto err_out;
+
+                       /* update counts and index */
+                       rxr_remaining--;
+                       rxr_idx++;
+               }
+       }
+
+       /* distribute the remaining rings evenly over the remaining vectors */
+       for (; v_idx < q_vectors; v_idx++) {
+               int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+               int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+               err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+                                          tqpv, txr_idx,
+                                          rqpv, rxr_idx);
+
+               if (err)
+                       goto err_out;
+
+               /* update counts and index */
+               rxr_remaining -= rqpv;
+               txr_remaining -= tqpv;
+               rxr_idx++;
+               txr_idx++;
+       }
+
+       return 0;
+
+err_out:
+       /* unwind: drop the queue accounting, then free vectors in reverse */
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+       adapter->num_q_vectors = 0;
+
+       while (v_idx--)
+               ixgbe_free_q_vector(adapter, v_idx);
+
+       return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * Releases every q_vector on the adapter.  The queue and vector counts
+ * are cleared first so stale ring references cannot be used once the
+ * vectors (and the rings embedded in them) are gone.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int v_idx = adapter->num_q_vectors;
+
+       adapter->num_q_vectors = 0;
+       adapter->num_rx_queues = 0;
+       adapter->num_tx_queues = 0;
+
+       /* release in reverse allocation order */
+       while (v_idx--)
+               ixgbe_free_q_vector(adapter, v_idx);
+}
+
+/* Release whichever interrupt mode is active; MSI-X takes precedence. */
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+               pci_disable_msix(adapter->pdev);
+               kfree(adapter->msix_entries);
+               adapter->msix_entries = NULL;
+               return;
+       }
+
+       if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+               pci_disable_msi(adapter->pdev);
+       }
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err = 0;
+       int vector, v_budget;
+
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
+               goto try_msi;
+
+       /*
+        * It's easy to be greedy for MSI-X vectors, but it really
+        * doesn't do us much good if we have a lot more vectors
+        * than CPU's.  So let's be conservative and only ask for
+        * (roughly) the same number of vectors as there are CPU's.
+        * the default is to use pairs of vectors
+        */
+       v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+       v_budget = min_t(int, v_budget, num_online_cpus());
+       v_budget += NON_Q_VECTORS;
+
+       /*
+        * At the same time, hardware can only support a maximum of
+        * hw.mac->max_msix_vectors vectors.  With features
+        * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+        * descriptor queues supported by our device.  Thus, we cap it off in
+        * those rare cases where the cpu count also exceeds our vector limit.
+        */
+       v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
+
+       /* A failure in MSI-X entry allocation isn't fatal, but it does
+        * mean we disable MSI-X capabilities of the adapter. */
+       adapter->msix_entries = kcalloc(v_budget,
+                                       sizeof(struct msix_entry), GFP_KERNEL);
+       if (adapter->msix_entries) {
+               for (vector = 0; vector < v_budget; vector++)
+                       adapter->msix_entries[vector].entry = vector;
+
+               ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+               /* done if MSI-X was successfully enabled */
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+                       return;
+       }
+
+       /*
+        * MSI-X is unavailable: fall back to a single interrupt vector
+        * and strip the features that need one vector per queue.
+        */
+try_msi:
+       /* disable DCB if number of TCs exceeds 1 */
+       if (netdev_get_num_tc(adapter->netdev) > 1) {
+               e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+               netdev_reset_tc(adapter->netdev);
+
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+               adapter->temp_dcb_cfg.pfc_mode_enable = false;
+               adapter->dcb_cfg.pfc_mode_enable = false;
+       }
+       adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+       adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+       /* disable VMDq */
+       adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+
+#ifdef CONFIG_PCI_IOV
+       /* disable SR-IOV */
+       ixgbe_disable_sriov(adapter);
+
+#endif /* CONFIG_PCI_IOV */
+       /* disable RSS */
+       adapter->ring_feature[RING_F_RSS].limit = 1;
+
+       /* recompute queue counts for the single-vector configuration */
+       ixgbe_set_num_queues(adapter);
+       adapter->num_q_vectors = 1;
+
+       if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
+               return;
+
+       /* if MSI cannot be enabled either, legacy interrupts remain in use */
+       err = pci_enable_msi(adapter->pdev);
+       if (err) {
+               e_warn(hw, "Unable to allocate MSI interrupt, "
+                      "falling back to legacy.  Error: %d\n", err);
+               return;
+       }
+       adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ *
+ * Returns 0 on success, or a negative error code if the q_vectors
+ * could not be allocated (in which case the interrupt capability
+ * configured above is rolled back).
+ **/
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+       int err;
+
+       /* Number of supported queues */
+       ixgbe_set_num_queues(adapter);
+
+       /* Set interrupt mode */
+       ixgbe_set_interrupt_capability(adapter);
+
+       /* Allocate memory for queues */
+       err = ixgbe_alloc_q_vectors(adapter);
+       if (err) {
+               e_err(probe, "Unable to allocate memory for queue vectors\n");
+               ixgbe_reset_interrupt_capability(adapter);
+               return err;
+       }
+
+       ixgbe_cache_ring_register(adapter);
+
+       /* start the device in the down state */
+       set_bit(__IXGBE_DOWN, &adapter->state);
+
+       return 0;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+       /* free the q_vectors before releasing the MSI-X/MSI resources */
+       ixgbe_free_q_vectors(adapter);
+       ixgbe_reset_interrupt_capability(adapter);
+}
+
+/*
+ * ixgbe_tx_ctxtdesc - write an advanced context descriptor to @tx_ring
+ *
+ * Claims the descriptor at next_to_use, advances next_to_use (wrapping at
+ * the end of the ring) and fills in the four little-endian fields.
+ */
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+                      u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+       struct ixgbe_adv_tx_context_desc *context_desc;
+       u16 ntu = tx_ring->next_to_use;
+
+       context_desc = IXGBE_TX_CTXTDESC(tx_ring, ntu);
+
+       /* bump next_to_use, wrapping back to 0 at the end of the ring */
+       ntu++;
+       if (ntu >= tx_ring->count)
+               ntu = 0;
+       tx_ring->next_to_use = ntu;
+
+       /* set bits to identify this as an advanced context descriptor */
+       type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+       context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
+       context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
+       context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
+       context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
+}
+
index b03a8ece97f738356b472e77a0804ab715480361..b04c4cba00fc5ace0ffbc426abafb390ccc7cd15 100644 (file)
 
 *******************************************************************************/
 
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 #include <linux/string.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#ifdef HAVE_SCTP
 #include <linux/sctp.h>
+#endif
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
-#include <linux/slab.h>
+#ifdef NETIF_F_TSO
 #include <net/checksum.h>
+#ifdef NETIF_F_TSO6
 #include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCETHTOOL
 #include <linux/ethtool.h>
-#include <linux/if.h>
-#include <linux/if_vlan.h>
-#include <linux/prefetch.h>
-#include <scsi/fc/fc_fcoe.h>
+#endif
 
 #include "ixgbe.h"
-#include "ixgbe_common.h"
+
+
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
+#define DRV_HW_PERF
+
+#define FPGA
+
+#define DRIVERIOV
+
+#define VMDQ_TAG
+
 #define MAJ 3
-#define MIN 6
-#define BUILD 7
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-       __stringify(BUILD) "-k"
+#define MIN 10
+#define BUILD 16
+#define DRV_VERSION    __stringify(MAJ) "." __stringify(MIN) "." \
+                       __stringify(BUILD) DRIVERIOV DRV_HW_PERF FPGA VMDQ_TAG
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2012 Intel Corporation.";
 
-static const struct ixgbe_info *ixgbe_info_tbl[] = {
-       [board_82598] = &ixgbe_82598_info,
-       [board_82599] = &ixgbe_82599_info,
-       [board_X540] = &ixgbe_X540_info,
-};
-
 /* ixgbe_pci_tbl - PCI Device ID Table
  *
  * Wildcard entries (PCI_ANY_ID) should come last
@@ -77,62 +87,50 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
+DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP)},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP)},
        /* required last entry */
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
 static struct notifier_block dca_notifier = {
-       .notifier_call = ixgbe_notify_dca,
-       .next          = NULL,
-       .priority      = 0
+       .notifier_call  = ixgbe_notify_dca,
+       .next           = NULL,
+       .priority       = 0
 };
-#endif
-
-#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs,
-                "Maximum number of virtual functions to allocate per physical function");
-#endif /* CONFIG_PCI_IOV */
-
-static unsigned int allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, uint, 0);
-MODULE_PARM_DESC(allow_unsupported_sfp,
-                "Allow unsupported and untested SFP+ modules on 82599-based adapters");
 
+#endif
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -151,349 +149,11 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 {
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
-       /* flush memory to make sure state is correct before next watchdog */
+       /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_clear_bit();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
 
-struct ixgbe_reg_info {
-       u32 ofs;
-       char *name;
-};
-
-static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
-
-       /* General Registers */
-       {IXGBE_CTRL, "CTRL"},
-       {IXGBE_STATUS, "STATUS"},
-       {IXGBE_CTRL_EXT, "CTRL_EXT"},
-
-       /* Interrupt Registers */
-       {IXGBE_EICR, "EICR"},
-
-       /* RX Registers */
-       {IXGBE_SRRCTL(0), "SRRCTL"},
-       {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
-       {IXGBE_RDLEN(0), "RDLEN"},
-       {IXGBE_RDH(0), "RDH"},
-       {IXGBE_RDT(0), "RDT"},
-       {IXGBE_RXDCTL(0), "RXDCTL"},
-       {IXGBE_RDBAL(0), "RDBAL"},
-       {IXGBE_RDBAH(0), "RDBAH"},
-
-       /* TX Registers */
-       {IXGBE_TDBAL(0), "TDBAL"},
-       {IXGBE_TDBAH(0), "TDBAH"},
-       {IXGBE_TDLEN(0), "TDLEN"},
-       {IXGBE_TDH(0), "TDH"},
-       {IXGBE_TDT(0), "TDT"},
-       {IXGBE_TXDCTL(0), "TXDCTL"},
-
-       /* List Terminator */
-       {}
-};
-
-
-/*
- * ixgbe_regdump - register printout routine
- */
-static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
-{
-       int i = 0, j = 0;
-       char rname[16];
-       u32 regs[64];
-
-       switch (reginfo->ofs) {
-       case IXGBE_SRRCTL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
-               break;
-       case IXGBE_DCA_RXCTRL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-               break;
-       case IXGBE_RDLEN(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
-               break;
-       case IXGBE_RDH(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
-               break;
-       case IXGBE_RDT(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
-               break;
-       case IXGBE_RXDCTL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
-               break;
-       case IXGBE_RDBAL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
-               break;
-       case IXGBE_RDBAH(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
-               break;
-       case IXGBE_TDBAL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
-               break;
-       case IXGBE_TDBAH(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
-               break;
-       case IXGBE_TDLEN(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
-               break;
-       case IXGBE_TDH(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
-               break;
-       case IXGBE_TDT(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
-               break;
-       case IXGBE_TXDCTL(0):
-               for (i = 0; i < 64; i++)
-                       regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
-               break;
-       default:
-               pr_info("%-15s %08x\n", reginfo->name,
-                       IXGBE_READ_REG(hw, reginfo->ofs));
-               return;
-       }
-
-       for (i = 0; i < 8; i++) {
-               snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
-               pr_err("%-15s", rname);
-               for (j = 0; j < 8; j++)
-                       pr_cont(" %08x", regs[i*8+j]);
-               pr_cont("\n");
-       }
-
-}
-
-/*
- * ixgbe_dump - Print registers, tx-rings and rx-rings
- */
-static void ixgbe_dump(struct ixgbe_adapter *adapter)
-{
-       struct net_device *netdev = adapter->netdev;
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_reg_info *reginfo;
-       int n = 0;
-       struct ixgbe_ring *tx_ring;
-       struct ixgbe_tx_buffer *tx_buffer_info;
-       union ixgbe_adv_tx_desc *tx_desc;
-       struct my_u0 { u64 a; u64 b; } *u0;
-       struct ixgbe_ring *rx_ring;
-       union ixgbe_adv_rx_desc *rx_desc;
-       struct ixgbe_rx_buffer *rx_buffer_info;
-       u32 staterr;
-       int i = 0;
-
-       if (!netif_msg_hw(adapter))
-               return;
-
-       /* Print netdevice Info */
-       if (netdev) {
-               dev_info(&adapter->pdev->dev, "Net device Info\n");
-               pr_info("Device Name     state            "
-                       "trans_start      last_rx\n");
-               pr_info("%-15s %016lX %016lX %016lX\n",
-                       netdev->name,
-                       netdev->state,
-                       netdev->trans_start,
-                       netdev->last_rx);
-       }
-
-       /* Print Registers */
-       dev_info(&adapter->pdev->dev, "Register Dump\n");
-       pr_info(" Register Name   Value\n");
-       for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
-            reginfo->name; reginfo++) {
-               ixgbe_regdump(hw, reginfo);
-       }
-
-       /* Print TX Ring Summary */
-       if (!netdev || !netif_running(netdev))
-               goto exit;
-
-       dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-       pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
-       for (n = 0; n < adapter->num_tx_queues; n++) {
-               tx_ring = adapter->tx_ring[n];
-               tx_buffer_info =
-                       &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-               pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
-                          n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                          (u64)tx_buffer_info->dma,
-                          tx_buffer_info->length,
-                          tx_buffer_info->next_to_watch,
-                          (u64)tx_buffer_info->time_stamp);
-       }
-
-       /* Print TX Rings */
-       if (!netif_msg_tx_done(adapter))
-               goto rx_ring_summary;
-
-       dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
-
-       /* Transmit Descriptor Formats
-        *
-        * Advanced Transmit Descriptor
-        *   +--------------------------------------------------------------+
-        * 0 |         Buffer Address [63:0]                                |
-        *   +--------------------------------------------------------------+
-        * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
-        *   +--------------------------------------------------------------+
-        *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
-        */
-
-       for (n = 0; n < adapter->num_tx_queues; n++) {
-               tx_ring = adapter->tx_ring[n];
-               pr_info("------------------------------------\n");
-               pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-               pr_info("------------------------------------\n");
-               pr_info("T [desc]     [address 63:0  ] "
-                       "[PlPOIdStDDt Ln] [bi->dma       ] "
-                       "leng  ntw timestamp        bi->skb\n");
-
-               for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
-                       tx_desc = IXGBE_TX_DESC(tx_ring, i);
-                       tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       u0 = (struct my_u0 *)tx_desc;
-                       pr_info("T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %p %016llX %p", i,
-                               le64_to_cpu(u0->a),
-                               le64_to_cpu(u0->b),
-                               (u64)tx_buffer_info->dma,
-                               tx_buffer_info->length,
-                               tx_buffer_info->next_to_watch,
-                               (u64)tx_buffer_info->time_stamp,
-                               tx_buffer_info->skb);
-                       if (i == tx_ring->next_to_use &&
-                               i == tx_ring->next_to_clean)
-                               pr_cont(" NTC/U\n");
-                       else if (i == tx_ring->next_to_use)
-                               pr_cont(" NTU\n");
-                       else if (i == tx_ring->next_to_clean)
-                               pr_cont(" NTC\n");
-                       else
-                               pr_cont("\n");
-
-                       if (netif_msg_pktdata(adapter) &&
-                               tx_buffer_info->dma != 0)
-                               print_hex_dump(KERN_INFO, "",
-                                       DUMP_PREFIX_ADDRESS, 16, 1,
-                                       phys_to_virt(tx_buffer_info->dma),
-                                       tx_buffer_info->length, true);
-               }
-       }
-
-       /* Print RX Rings Summary */
-rx_ring_summary:
-       dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-       pr_info("Queue [NTU] [NTC]\n");
-       for (n = 0; n < adapter->num_rx_queues; n++) {
-               rx_ring = adapter->rx_ring[n];
-               pr_info("%5d %5X %5X\n",
-                       n, rx_ring->next_to_use, rx_ring->next_to_clean);
-       }
-
-       /* Print RX Rings */
-       if (!netif_msg_rx_status(adapter))
-               goto exit;
-
-       dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
-
-       /* Advanced Receive Descriptor (Read) Format
-        *    63                                           1        0
-        *    +-----------------------------------------------------+
-        *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
-        *    +----------------------------------------------+------+
-        *  8 |       Header Buffer Address [63:1]           |  DD  |
-        *    +-----------------------------------------------------+
-        *
-        *
-        * Advanced Receive Descriptor (Write-Back) Format
-        *
-        *   63       48 47    32 31  30      21 20 16 15   4 3     0
-        *   +------------------------------------------------------+
-        * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
-        *   | Checksum   Ident  |   |           |    | Type | Type |
-        *   +------------------------------------------------------+
-        * 8 | VLAN Tag | Length | Extended Error | Extended Status |
-        *   +------------------------------------------------------+
-        *   63       48 47    32 31            20 19               0
-        */
-       for (n = 0; n < adapter->num_rx_queues; n++) {
-               rx_ring = adapter->rx_ring[n];
-               pr_info("------------------------------------\n");
-               pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-               pr_info("------------------------------------\n");
-               pr_info("R  [desc]      [ PktBuf     A0] "
-                       "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
-                       "<-- Adv Rx Read format\n");
-               pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
-                       "[vl er S cks ln] ---------------- [bi->skb] "
-                       "<-- Adv Rx Write-Back format\n");
-
-               for (i = 0; i < rx_ring->count; i++) {
-                       rx_buffer_info = &rx_ring->rx_buffer_info[i];
-                       rx_desc = IXGBE_RX_DESC(rx_ring, i);
-                       u0 = (struct my_u0 *)rx_desc;
-                       staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-                       if (staterr & IXGBE_RXD_STAT_DD) {
-                               /* Descriptor Done */
-                               pr_info("RWB[0x%03X]     %016llX "
-                                       "%016llX ---------------- %p", i,
-                                       le64_to_cpu(u0->a),
-                                       le64_to_cpu(u0->b),
-                                       rx_buffer_info->skb);
-                       } else {
-                               pr_info("R  [0x%03X]     %016llX "
-                                       "%016llX %016llX %p", i,
-                                       le64_to_cpu(u0->a),
-                                       le64_to_cpu(u0->b),
-                                       (u64)rx_buffer_info->dma,
-                                       rx_buffer_info->skb);
-
-                               if (netif_msg_pktdata(adapter)) {
-                                       print_hex_dump(KERN_INFO, "",
-                                          DUMP_PREFIX_ADDRESS, 16, 1,
-                                          phys_to_virt(rx_buffer_info->dma),
-                                          rx_ring->rx_buf_len, true);
-
-                                       if (rx_ring->rx_buf_len
-                                               < IXGBE_RXBUFFER_2K)
-                                               print_hex_dump(KERN_INFO, "",
-                                                 DUMP_PREFIX_ADDRESS, 16, 1,
-                                                 phys_to_virt(
-                                                   rx_buffer_info->page_dma +
-                                                   rx_buffer_info->page_offset
-                                                 ),
-                                                 PAGE_SIZE/2, true);
-                               }
-                       }
-
-                       if (i == rx_ring->next_to_use)
-                               pr_cont(" NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               pr_cont(" NTC\n");
-                       else
-                               pr_cont("\n");
-
-               }
-       }
-
-exit:
-       return;
-}
-
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
        u32 ctrl_ext;
@@ -586,63 +246,74 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        }
 }
 
-static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
-                                          struct ixgbe_tx_buffer *tx_buffer)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
+                                     struct ixgbe_tx_buffer *tx_buffer)
 {
-       if (tx_buffer->dma) {
-               if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
-                       dma_unmap_page(ring->dev,
-                                      tx_buffer->dma,
-                                      tx_buffer->length,
-                                      DMA_TO_DEVICE);
-               else
+       if (tx_buffer->skb) {
+               dev_kfree_skb_any(tx_buffer->skb);
+               if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
-                                        tx_buffer->dma,
-                                        tx_buffer->length,
-                                        DMA_TO_DEVICE);
-       }
-       tx_buffer->dma = 0;
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
+                                        DMA_TO_DEVICE);
+       } else if (dma_unmap_len(tx_buffer, len)) {
+               dma_unmap_page(ring->dev,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
+                              DMA_TO_DEVICE);
+       }
+       tx_buffer->next_to_watch = NULL;
+       tx_buffer->skb = NULL;
+       dma_unmap_len_set(tx_buffer, len, 0);
+       /* tx_buffer_info must be completely set up in the transmit path */
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
-                                     struct ixgbe_tx_buffer *tx_buffer_info)
+static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
 {
-       ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
-       if (tx_buffer_info->skb)
-               dev_kfree_skb_any(tx_buffer_info->skb);
-       tx_buffer_info->skb = NULL;
-       /* tx_buffer_info must be completely set up in the transmit path */
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       int i;
+       u32 data;
+
+       if ((hw->fc.current_mode != ixgbe_fc_full) &&
+           (hw->fc.current_mode != ixgbe_fc_rx_pause))
+               return;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               break;
+       default:
+               data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       }
+       hwstats->lxoffrxc += data;
+
+       /* refill credits (no tx hang) if we received xoff */
+       if (!data)
+               return;
+
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               clear_bit(__IXGBE_HANG_CHECK_ARMED,
+                         &adapter->tx_ring[i]->state);
 }
 
 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
-       u32 data = 0;
        u32 xoff[8] = {0};
        int i;
+       bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
-       if ((hw->fc.current_mode == ixgbe_fc_full) ||
-           (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
-               switch (hw->mac.type) {
-               case ixgbe_mac_82598EB:
-                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-                       break;
-               default:
-                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-               }
-               hwstats->lxoffrxc += data;
-
-               /* refill credits (no tx hang) if we received xoff */
-               if (!data)
-                       return;
+#ifdef HAVE_DCBNL_IEEE
+       if (adapter->ixgbe_ieee_pfc)
+               pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       clear_bit(__IXGBE_HANG_CHECK_ARMED,
-                                 &adapter->tx_ring[i]->state);
-               return;
-       } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+#endif
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
+               ixgbe_update_xoff_rx_lfc(adapter);
                return;
+       }
 
        /* update stats for each tc, only valid with PFC enabled */
        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
@@ -661,7 +332,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
                u8 tc = tx_ring->dcb_tc;
 
-               if (xoff[tc])
+               if ((tc <= 7) && (xoff[tc]))
                        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
 }
@@ -673,20 +344,16 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
 
 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
+       struct ixgbe_adapter *adapter = ring->q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
 
        u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
        u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 
-       if (head != tail)
-               return (head < tail) ?
-                       tail - head : (tail + ring->count - head);
-
-       return 0;
+       return ((head <= tail) ? tail : tail + ring->count) - head;
 }
 
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 {
        u32 tx_done = ixgbe_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -703,7 +370,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
         * bit is cleared if a pause frame is received to remove
         * false hang detection due to PFC or 802.3x frames. By
         * requiring this to fail twice we avoid races with
-        * pfc clearing the ARMED bit and conditions where we
+        * PFC clearing the ARMED bit and conditions where we
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
@@ -735,6 +402,35 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
        }
 }
 
+/**
+ * ixgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbe_tx_timeout(struct net_device *netdev)
+{
+struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       bool real_tx_hang = false;
+       int i;
+
+#define TX_TIMEO_LIMIT 16000
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring))
+                       real_tx_hang = true;
+       }
+
+       if (real_tx_hang) {
+               ixgbe_tx_timeout_reset(adapter);
+       } else {
+               e_info(drv, "Fake Tx hang detected with timeout of %d "
+                       "seconds\n", netdev->watchdog_timeo/HZ);
+
+               /* fake Tx hang - increase the kernel timeout */
+               if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
+                       netdev->watchdog_timeo *= 2;
+       }
+}
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
@@ -748,12 +444,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
-       u16 i = tx_ring->next_to_clean;
+       unsigned int i = tx_ring->next_to_clean;
+
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               return true;
 
        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBE_TX_DESC(tx_ring, i);
+       i -= tx_ring->count;
 
-       for (; budget; budget--) {
+       do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
                /* if next_to_watch is not set then there is no work pending */
@@ -770,67 +470,92 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;
 
+               /* update the statistics for this packet */
+               total_bytes += tx_buffer->bytecount;
+               total_packets += tx_buffer->gso_segs;
+
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
 
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
+
                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
+               dma_unmap_len_set(tx_buffer, len, 0);
 
-               do {
-                       ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
-                       if (likely(tx_desc == eop_desc)) {
-                               eop_desc = NULL;
-
-                               total_bytes += tx_buffer->bytecount;
-                               total_packets += tx_buffer->gso_segs;
-                       }
-
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
-                       if (unlikely(i == tx_ring->count)) {
-                               i = 0;
-
+                       if (unlikely(!i)) {
+                               i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                        }
 
-               } while (eop_desc);
-       }
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buffer, len)) {
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buffer, dma),
+                                              dma_unmap_len(tx_buffer, len),
+                                              DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buffer, len, 0);
+                       }
+               }
+
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buffer++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buffer = tx_ring->tx_buffer_info;
+                       tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+               }
 
+               /* issue prefetch for next Tx descriptor */
+               prefetch(tx_desc);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
        tx_ring->next_to_clean = i;
-       u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
-       u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;
 
        if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                struct ixgbe_hw *hw = &adapter->hw;
-               tx_desc = IXGBE_TX_DESC(tx_ring, i);
                e_err(drv, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
-                       "  next_to_clean        <%x>\n"
-                       "tx_buffer_info[next_to_clean]\n"
-                       "  time_stamp           <%lx>\n"
-                       "  jiffies              <%lx>\n",
+                       "  next_to_clean        <%x>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
                        IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-                       tx_ring->next_to_use, i,
+                       tx_ring->next_to_use, i);
+               e_err(drv, "tx_buffer_info[next_to_clean]\n"
+                       "  time_stamp           <%lx>\n"
+                       "  jiffies              <%lx>\n",
                        tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
-               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+               netif_stop_subqueue(netdev_ring(tx_ring),
+                                   ring_queue_index(tx_ring));
 
                e_info(probe,
                       "tx hang %d detected on queue %d, resetting adapter\n",
-                       adapter->tx_timeout_count + 1, tx_ring->queue_index);
+                      adapter->tx_timeout_count + 1, tx_ring->queue_index);
 
-               /* schedule immediate reset if we believe we hung */
                ixgbe_tx_timeout_reset(adapter);
 
                /* the adapter is about to reset, no point in enabling stuff */
@@ -838,23 +563,33 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        }
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+       if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) &&
                     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
-                   !test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+#ifdef HAVE_TX_MQ
+               if (__netif_subqueue_stopped(netdev_ring(tx_ring),
+                                            ring_queue_index(tx_ring))
+                   && !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) {
+                       netif_wake_subqueue(netdev_ring(tx_ring),
+                                           ring_queue_index(tx_ring));
                        ++tx_ring->tx_stats.restart_queue;
                }
+#else
+               if (netif_queue_stopped(netdev_ring(tx_ring)) &&
+                   !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) {
+                       netif_wake_queue(netdev_ring(tx_ring));
+                       ++tx_ring->tx_stats.restart_queue;
+               }
+#endif
        }
 
        return !!budget;
 }
 
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring,
                                int cpu)
@@ -941,8 +676,7 @@ out_no_update:
 
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
-       int num_q_vectors;
-       int i;
+       int v_idx;
 
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;
@@ -950,14 +684,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               num_q_vectors = 1;
-
-       for (i = 0; i < num_q_vectors; i++) {
-               adapter->q_vector[i]->cpu = -1;
-               ixgbe_update_dca(adapter->q_vector[i]);
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               adapter->q_vector[v_idx]->cpu = -1;
+               ixgbe_update_dca(adapter->q_vector[v_idx]);
        }
 }
 
@@ -992,33 +721,37 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
        return 0;
 }
 
-#endif /* CONFIG_IXGBE_DCA */
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+#ifdef NETIF_F_RXHASH
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
                                 union ixgbe_adv_rx_desc *rx_desc,
                                 struct sk_buff *skb)
 {
-       if (ring->netdev->features & NETIF_F_RXHASH)
+       if (netdev_ring(ring)->features & NETIF_F_RXHASH)
                skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
+#endif /* NETIF_F_RXHASH */
+#ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
                                    union ixgbe_adv_rx_desc *rx_desc)
 {
        __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-       return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
               ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
                (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                             IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
 }
 
+#endif /* IXGBE_FCOE */
 /**
  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -1032,7 +765,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
        skb_checksum_none_assert(skb);
 
        /* Rx csum disabled */
-       if (!(ring->netdev->features & NETIF_F_RXCSUM))
+#ifdef HAVE_NDO_SET_FEATURES
+       if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
+#else
+       if (!test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state))
+#endif
                return;
 
        /* if IP and error */
@@ -1046,7 +783,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
                return;
 
        if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
-               u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+               __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
@@ -1067,6 +804,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
        rx_ring->next_to_use = val;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+
+       /* update next to alloc since we have filled the ring */
+       rx_ring->next_to_alloc = val;
+#endif
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
@@ -1077,29 +819,38 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
        writel(val, rx_ring->tail);
 }
 
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
 static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
                                   struct ixgbe_rx_buffer *bi)
 {
        struct sk_buff *skb = bi->skb;
        dma_addr_t dma = bi->dma;
 
-       if (dma)
+       if (unlikely(dma))
                return true;
 
        if (likely(!skb)) {
-               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+               skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
                                                rx_ring->rx_buf_len);
-               bi->skb = skb;
-               if (!skb) {
+               if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return false;
                }
+
+               bi->skb = skb;
        }
 
        dma = dma_map_single(rx_ring->dev, skb->data,
                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);
 
+       /*
+        * if mapping failed free memory back to system since
+        * there isn't much point in holding memory we can't use
+        */
        if (dma_mapping_error(rx_ring->dev, dma)) {
+               dev_kfree_skb_any(skb);
+               bi->skb = NULL;
+
                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }
@@ -1108,39 +859,51 @@ static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
        return true;
 }
 
+#else
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
        struct page *page = bi->page;
-       dma_addr_t page_dma = bi->page_dma;
-       unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+       dma_addr_t dma = bi->dma;
 
-       if (page_dma)
+       /* since we are recycling buffers we should seldom need to alloc */
+       if (likely(dma))
                return true;
 
-       if (!page) {
-               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-               bi->page = page;
+       /* alloc new page for storage */
+       if (likely(!page)) {
+               page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
+                                  ixgbe_rx_pg_order(rx_ring));
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_rx_page_failed++;
                        return false;
                }
+               bi->page = page;
        }
 
-       page_dma = dma_map_page(rx_ring->dev, page,
-                               page_offset, PAGE_SIZE / 2,
-                               DMA_FROM_DEVICE);
+       /* map page for use */
+       dma = dma_map_page(rx_ring->dev, page, 0,
+                          ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+
+       /*
+        * if mapping failed free memory back to system since
+        * there isn't much point in holding memory we can't use
+        */
+       if (dma_mapping_error(rx_ring->dev, dma)) {
+               __free_pages(page, ixgbe_rx_pg_order(rx_ring));
+               bi->page = NULL;
 
-       if (dma_mapping_error(rx_ring->dev, page_dma)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }
 
-       bi->page_dma = page_dma;
-       bi->page_offset = page_offset;
+       bi->dma = dma;
+       bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+
        return true;
 }
 
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
@@ -1152,31 +915,31 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
 
-       /* nothing to do or no valid netdev defined */
-       if (!cleaned_count || !rx_ring->netdev)
+       /* nothing to do */
+       if (!cleaned_count)
                return;
 
        rx_desc = IXGBE_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
-       while (cleaned_count--) {
+       do {
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
                if (!ixgbe_alloc_mapped_skb(rx_ring, bi))
+#else
+               if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+#endif
                        break;
 
-               /* Refresh the desc even if buffer_addrs didn't change
-                * because each write-back erases this info. */
-
-               if (ring_is_ps_enabled(rx_ring)) {
-                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
-                       if (!ixgbe_alloc_mapped_page(rx_ring, bi))
-                               break;
-
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-               } else {
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-               }
+               /*
+                * Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+#else
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+#endif
 
                rx_desc++;
                bi++;
@@ -1189,7 +952,9 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;
-       }
+
+               cleaned_count--;
+       } while (cleaned_count);
 
        i += rx_ring->count;
 
@@ -1197,20 +962,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                ixgbe_release_rx_desc(rx_ring, i);
 }
 
-static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
-{
-       /* HW will not DMA in data larger than the given buffer, even if it
-        * parses the (NFS, of course) header to be larger.  In that case, it
-        * fills the header buffer and spills the rest into the page.
-        */
-       u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
-       u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-       if (hlen > IXGBE_RX_HDR_SIZE)
-               hlen = IXGBE_RX_HDR_SIZE;
-       return hlen;
-}
-
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
 /**
  * ixgbe_merge_active_tail - merge active tail into lro skb
  * @tail: pointer to active tail in frag_list
@@ -1228,7 +980,7 @@ static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail)
 
        head->len += tail->len;
        head->data_len += tail->len;
-       head->truesize += tail->len;
+       head->truesize += tail->truesize;
 
        IXGBE_CB(tail)->head = NULL;
 
@@ -1281,37 +1033,448 @@ static inline bool ixgbe_close_active_frag_list(struct sk_buff *head)
        return true;
 }
 
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
 /**
- * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
+ * ixgbe_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
  **/
-static unsigned int ixgbe_get_headlen(unsigned char *data,
-                                     unsigned int max_len)
+static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
+                             struct sk_buff *skb)
 {
-       union {
-               unsigned char *network;
-               /* l2 headers */
-               struct ethhdr *eth;
-               struct vlan_hdr *vlan;
-               /* l3 headers */
-               struct iphdr *ipv4;
-       } hdr;
-       __be16 protocol;
-       u8 nexthdr = 0; /* default to not TCP */
-       u8 hlen;
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       u16 vlan_tag = IXGBE_CB(skb)->vid;
 
-       /* this should never happen, but better safe than sorry */
-       if (max_len < ETH_HLEN)
-               return max_len;
+#ifdef NETIF_F_HW_VLAN_TX
+       if (vlan_tag & VLAN_VID_MASK) {
+               /* by placing vlgrp at start of structure we can alias it */
+               struct vlan_group **vlgrp = netdev_priv(skb->dev);
+               if (!*vlgrp)
+                       dev_kfree_skb_any(skb);
+               else if (adapter->flags & IXGBE_FLAG_IN_NETPOLL)
+                       vlan_hwaccel_rx(skb, *vlgrp, vlan_tag);
+               else
+                       vlan_gro_receive(&q_vector->napi,
+                                        *vlgrp, vlan_tag, skb);
+       } else {
+#endif /* NETIF_F_HW_VLAN_TX */
+               if (adapter->flags & IXGBE_FLAG_IN_NETPOLL)
+                       netif_rx(skb);
+               else
+                       napi_gro_receive(&q_vector->napi, skb);
+#ifdef NETIF_F_HW_VLAN_TX
+       }
+#endif /* NETIF_F_HW_VLAN_TX */
+}
 
-       /* initialize network frame pointer */
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifndef IXGBE_NO_LRO
+/**
+ * ixgbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
+ * @rx_ring: structure containing ring specific data
+ * @rx_desc: pointer to the rx descriptor
+ * @skb: pointer to the skb to be merged
+ *
+ **/
+static inline bool ixgbe_can_lro(struct ixgbe_ring *rx_ring,
+                                union ixgbe_adv_rx_desc *rx_desc,
+                                struct sk_buff *skb)
+{
+       struct iphdr *iph = (struct iphdr *)skb->data;
+       __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+       /* verify hardware indicates this is IPv4/TCP */
+       if (!(pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) ||
+           !(pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TCP)))
+               return false;
+
+       /* .. and RSC is not already enabled */
+       if (ring_is_rsc_enabled(rx_ring))
+               return false;
+
+       /* .. and LRO is enabled */
+       if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
+               return false;
+
+       /* .. and we are not in promiscuous mode */
+       if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
+               return false;
+
+       /* .. and the header is large enough for us to read IP/TCP fields */
+       if (!pskb_may_pull(skb, sizeof(struct ixgbe_lrohdr)))
+               return false;
+
+       /* .. and there are no VLANs on packet */
+       if (skb->protocol != __constant_htons(ETH_P_IP))
+               return false;
+
+       /* .. and we are version 4 with no options */
+       if (*(u8 *)iph != 0x45)
+               return false;
+
+       /* .. and the packet is not fragmented */
+       if (iph->frag_off & htons(IP_MF | IP_OFFSET))
+               return false;
+
+       /* .. and that next header is TCP */
+       if (iph->protocol != IPPROTO_TCP)
+               return false;
+
+       return true;
+}
+
+static inline struct ixgbe_lrohdr *ixgbe_lro_hdr(struct sk_buff *skb)
+{
+       return (struct ixgbe_lrohdr *)skb->data;
+}
+
+/**
+ * ixgbe_lro_flush - Indicate packets to upper layer.
+ *
+ * Update IP and TCP header part of head skb if more than one
+ * skb's chained and indicate packets to upper layer.
+ **/
+static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector,
+                           struct sk_buff *skb)
+{
+       struct ixgbe_lro_list *lrolist = &q_vector->lrolist;
+
+       __skb_unlink(skb, &lrolist->active);
+
+       if (IXGBE_CB(skb)->append_cnt) {
+               struct ixgbe_lrohdr *lroh = ixgbe_lro_hdr(skb);
+
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               /* close any active lro contexts */
+               ixgbe_close_active_frag_list(skb);
+
+#endif
+               /* incorporate ip header and re-calculate checksum */
+               lroh->iph.tot_len = ntohs(skb->len);
+               lroh->iph.check = 0;
+
+               /* header length is 5 since we know no options exist */
+               lroh->iph.check = ip_fast_csum((u8 *)lroh, 5);
+
+               /* clear TCP checksum to indicate we are an LRO frame */
+               lroh->th.check = 0;
+
+               /* incorporate latest timestamp into the tcp header */
+               if (IXGBE_CB(skb)->tsecr) {
+                       lroh->ts[2] = IXGBE_CB(skb)->tsecr;
+                       lroh->ts[1] = htonl(IXGBE_CB(skb)->tsval);
+               }
+#ifdef NETIF_F_TSO
+
+               skb_shinfo(skb)->gso_size = IXGBE_CB(skb)->mss;
+#endif
+       }
+
+#ifdef HAVE_VLAN_RX_REGISTER
+       ixgbe_receive_skb(q_vector, skb);
+#else
+       napi_gro_receive(&q_vector->napi, skb);
+#endif
+       lrolist->stats.flushed++;
+}
+
+static void ixgbe_lro_flush_all(struct ixgbe_q_vector *q_vector)
+{
+       struct ixgbe_lro_list *lrolist = &q_vector->lrolist;
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
+               ixgbe_lro_flush(q_vector, skb);
+}
+
+/*
+ * ixgbe_lro_header_ok - validate LRO eligibility and initialize per-skb LRO state.
+ **/
+static void ixgbe_lro_header_ok(struct sk_buff *skb)
+{
+       struct ixgbe_lrohdr *lroh = ixgbe_lro_hdr(skb);
+       u16 opt_bytes, data_len;
+
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       IXGBE_CB(skb)->tail = NULL;
+#endif
+       IXGBE_CB(skb)->tsecr = 0;
+       IXGBE_CB(skb)->append_cnt = 0;
+       IXGBE_CB(skb)->mss = 0;
+
+       /* ensure that the checksum is valid */
+       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+               return;
+
+       /* If we see CE codepoint in IP header, packet is not mergeable */
+       if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph)))
+               return;
+
+       /* ensure no bits set besides ack or psh */
+       if (lroh->th.fin || lroh->th.syn || lroh->th.rst ||
+           lroh->th.urg || lroh->th.ece || lroh->th.cwr ||
+           !lroh->th.ack)
+               return;
+
+       /* store the total packet length */
+       data_len = ntohs(lroh->iph.tot_len);
+
+       /* remove any padding from the end of the skb */
+       __pskb_trim(skb, data_len);
+
+       /* remove header length from data length */
+       data_len -= sizeof(struct ixgbe_lrohdr);
+
+       /*
+        * check for timestamps. Since the only option we handle are timestamps,
+        * we only have to handle the simple case of aligned timestamps
+        */
+       opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr);
+       if (opt_bytes != 0) {
+               if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
+                   !pskb_may_pull(skb, sizeof(struct ixgbe_lrohdr) +
+                                       TCPOLEN_TSTAMP_ALIGNED) ||
+                   (lroh->ts[0] != htonl((TCPOPT_NOP << 24) |
+                                            (TCPOPT_NOP << 16) |
+                                            (TCPOPT_TIMESTAMP << 8) |
+                                             TCPOLEN_TIMESTAMP)) ||
+                   (lroh->ts[2] == 0)) {
+                       return;
+               }
+
+               IXGBE_CB(skb)->tsval = ntohl(lroh->ts[1]);
+               IXGBE_CB(skb)->tsecr = lroh->ts[2];
+
+               data_len -= TCPOLEN_TSTAMP_ALIGNED;
+       }
+
+       /* record data_len as mss for the packet */
+       IXGBE_CB(skb)->mss = data_len;
+       IXGBE_CB(skb)->next_seq = ntohl(lroh->th.seq);
+}
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+static void ixgbe_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
+{
+       struct skb_shared_info *sh_info;
+       struct skb_shared_info *new_skb_info;
+       u16 data_len;
+
+       sh_info = skb_shinfo(lro_skb);
+       new_skb_info = skb_shinfo(new_skb);
+
+       /* copy frags into the last skb */
+       memcpy(sh_info->frags + sh_info->nr_frags,
+              new_skb_info->frags,
+              new_skb_info->nr_frags * sizeof(skb_frag_t));
+
+       /* copy size data over */
+       sh_info->nr_frags += new_skb_info->nr_frags;
+       data_len = IXGBE_CB(new_skb)->mss;
+       lro_skb->len += data_len;
+       lro_skb->data_len += data_len;
+       lro_skb->truesize += data_len;
+
+       /* wipe record of data from new_skb and free it */
+       new_skb_info->nr_frags = 0;
+       new_skb->len = new_skb->data_len = 0;
+       dev_kfree_skb_any(new_skb);
+}
+
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
+/**
+ * ixgbe_lro_receive - if able, queue skb into lro chain
+ * @q_vector: structure containing interrupt and ring information
+ * @new_skb: pointer to current skb being checked
+ *
+ * Checks whether the skb given is eligible for LRO and if that's
+ * fine chains it to the existing lro_skb based on flowid. If an LRO for
+ * the flow doesn't exist create one.
+ **/
+static void ixgbe_lro_receive(struct ixgbe_q_vector *q_vector,
+                             struct sk_buff *new_skb)
+{
+       struct sk_buff *lro_skb;
+       struct ixgbe_lro_list *lrolist = &q_vector->lrolist;
+       struct ixgbe_lrohdr *lroh = ixgbe_lro_hdr(new_skb);
+       __be32 saddr = lroh->iph.saddr;
+       __be32 daddr = lroh->iph.daddr;
+       __be32 tcp_ports = *(__be32 *)&lroh->th;
+#ifdef HAVE_VLAN_RX_REGISTER
+       u16 vid = IXGBE_CB(new_skb)->vid;
+#else
+       u16 vid = new_skb->vlan_tci;
+#endif
+
+       ixgbe_lro_header_ok(new_skb);
+
+       /*
+        * we have a packet that might be eligible for LRO,
+        * so see if it matches anything we might expect
+        */
+       skb_queue_walk(&lrolist->active, lro_skb) {
+               u16 data_len;
+
+               if (*(__be32 *)&ixgbe_lro_hdr(lro_skb)->th != tcp_ports ||
+                   ixgbe_lro_hdr(lro_skb)->iph.saddr != saddr ||
+                   ixgbe_lro_hdr(lro_skb)->iph.daddr != daddr)
+                       continue;
+
+#ifdef HAVE_VLAN_RX_REGISTER
+               if (IXGBE_CB(lro_skb)->vid != vid)
+#else
+               if (lro_skb->vlan_tci != vid)
+#endif
+                       continue;
+
+               /* out of order packet */
+               if (IXGBE_CB(lro_skb)->next_seq !=
+                   IXGBE_CB(new_skb)->next_seq) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       IXGBE_CB(new_skb)->mss = 0;
+                       break;
+               }
+
+               /* TCP timestamp options have changed */
+               if (!IXGBE_CB(lro_skb)->tsecr != !IXGBE_CB(new_skb)->tsecr) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       break;
+               }
+
+               /* make sure timestamp values are increasing */
+               if (IXGBE_CB(lro_skb)->tsecr &&
+                   IXGBE_CB(lro_skb)->tsval > IXGBE_CB(new_skb)->tsval) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       IXGBE_CB(new_skb)->mss = 0;
+                       break;
+               }
+
+               data_len = IXGBE_CB(new_skb)->mss;
+
+               /*
+                * malformed header, no tcp data, resultant packet would
+                * be too large, or new skb is larger than our current mss.
+                */
+               if (data_len == 0 ||
+                   data_len > IXGBE_CB(lro_skb)->mss ||
+                   data_len > IXGBE_CB(lro_skb)->free) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       break;
+               }
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               /*
+                * Data would remain in header or consume more frags
+                * than the sk_buff can contain.
+                */
+               if (data_len != new_skb->data_len ||
+                   skb_shinfo(new_skb)->nr_frags >=
+                   (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags)) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       break;
+               }
+
+#endif
+               /* ack sequence numbers or window size has changed */
+               if (ixgbe_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
+                   ixgbe_lro_hdr(lro_skb)->th.window != lroh->th.window) {
+                       ixgbe_lro_flush(q_vector, lro_skb);
+                       break;
+               }
+
+               /* Remove IP and TCP header */
+               skb_pull(new_skb, new_skb->len - data_len);
+
+               /* update timestamp and timestamp echo response */
+               IXGBE_CB(lro_skb)->tsval = IXGBE_CB(new_skb)->tsval;
+               IXGBE_CB(lro_skb)->tsecr = IXGBE_CB(new_skb)->tsecr;
+
+               /* update sequence and free space */
+               IXGBE_CB(lro_skb)->next_seq += data_len;
+               IXGBE_CB(lro_skb)->free -= data_len;
+
+               /* update append_cnt */
+               IXGBE_CB(lro_skb)->append_cnt++;
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               /* if header is empty pull pages into current skb */
+               ixgbe_merge_frags(lro_skb, new_skb);
+#else
+               /* chain this new skb in frag_list */
+               ixgbe_add_active_tail(lro_skb, new_skb);
+#endif
+
+               if ((data_len < IXGBE_CB(lro_skb)->mss) || lroh->th.psh) {
+                       ixgbe_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
+                       ixgbe_lro_flush(q_vector, lro_skb);
+               }
+
+               lrolist->stats.coal++;
+               return;
+       }
+
+       if (IXGBE_CB(new_skb)->mss && !lroh->th.psh) {
+               /* if we are at capacity flush the tail */
+               if (skb_queue_len(&lrolist->active) >= IXGBE_LRO_MAX) {
+                       lro_skb = skb_peek_tail(&lrolist->active);
+                       if (lro_skb)
+                               ixgbe_lro_flush(q_vector, lro_skb);
+               }
+
+               /* update sequence and free space */
+               IXGBE_CB(new_skb)->next_seq += IXGBE_CB(new_skb)->mss;
+               IXGBE_CB(new_skb)->free = 65521 - new_skb->len;
+
+               /* .. and insert at the front of the active list */
+               __skb_queue_head(&lrolist->active, new_skb);
+
+               lrolist->stats.coal++;
+               return;
+       }
+
+       /* packet not handled by any of the above, pass it to the stack */
+#ifdef HAVE_VLAN_RX_REGISTER
+       ixgbe_receive_skb(q_vector, new_skb);
+#else
+       napi_gro_receive(&q_vector->napi, new_skb);
+#endif /* HAVE_VLAN_RX_REGISTER */
+}
+
+#endif /* IXGBE_NO_LRO */
+#if !defined(CONFIG_IXGBE_DISABLE_PACKET_SPLIT) || defined(NETIF_F_GSO)
+/**
+ * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int ixgbe_get_headlen(unsigned char *data,
+                                     unsigned int max_len)
+{
+       union {
+               unsigned char *network;
+               /* l2 headers */
+               struct ethhdr *eth;
+               struct vlan_hdr *vlan;
+               /* l3 headers */
+               struct iphdr *ipv4;
+               struct ipv6hdr *ipv6;
+       } hdr;
+       __be16 protocol;
+       u8 nexthdr = 0; /* default to not TCP */
+       u8 hlen;
+
+       /* this should never happen, but better safe than sorry */
+       if (max_len < ETH_HLEN)
+               return max_len;
+
+       /* initialize network frame pointer */
        hdr.network = data;
 
        /* set first protocol and move network header forward */
@@ -1342,7 +1505,16 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                /* record next protocol */
                nexthdr = hdr.ipv4->protocol;
                hdr.network += hlen;
-#ifdef CONFIG_FCOE
+#ifdef NETIF_F_TSO6
+       } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+               if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+                       return max_len;
+
+               /* record next protocol */
+               nexthdr = hdr.ipv6->nexthdr;
+               hdr.network += sizeof(struct ipv6hdr);
+#endif /* NETIF_F_TSO6 */
+#ifdef IXGBE_FCOE
        } else if (protocol == __constant_htons(ETH_P_FCOE)) {
                if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
                        return max_len;
@@ -1352,7 +1524,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                return hdr.network - data;
        }
 
-       /* finally sort out TCP */
+       /* finally sort out TCP/UDP */
        if (nexthdr == IPPROTO_TCP) {
                if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
                        return max_len;
@@ -1365,6 +1537,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                        return hdr.network - data;
 
                hdr.network += hlen;
+       } else if (nexthdr == IPPROTO_UDP) {
+               if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+                       return max_len;
+
+               hdr.network += sizeof(struct udphdr);
        }
 
        /*
@@ -1379,6 +1556,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                return max_len;
 }
 
+#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT || NETIF_F_GSO */
 static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
                              union ixgbe_adv_rx_desc *rx_desc,
                              struct sk_buff *skb)
@@ -1402,16 +1580,22 @@ static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
        IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
 }
 
+#ifdef NETIF_F_GSO
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
                                   struct sk_buff *skb)
 {
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       u16 hdr_len = skb_headlen(skb);
+#else
        u16 hdr_len = ixgbe_get_headlen(skb->data, skb_headlen(skb));
+#endif
 
        /* set gso_size to avoid messing up TCP MSS */
        skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
                                                 IXGBE_CB(skb)->append_cnt);
 }
 
+#endif /* NETIF_F_GSO */
 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
                                   struct sk_buff *skb)
 {
@@ -1422,12 +1606,30 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
        rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
        rx_ring->rx_stats.rsc_flush++;
 
+#ifdef NETIF_F_GSO
        ixgbe_set_rsc_gso_size(rx_ring, skb);
 
+#endif
        /* gso_size is computed using append_cnt so always clear it last */
        IXGBE_CB(skb)->append_cnt = 0;
 }
 
+static void ixgbe_rx_vlan(struct ixgbe_ring *ring,
+                         union ixgbe_adv_rx_desc *rx_desc,
+                         struct sk_buff *skb)
+{
+       if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) &&
+           ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP))
+#ifndef HAVE_VLAN_RX_REGISTER
+               __vlan_hwaccel_put_tag(skb,
+                                      le16_to_cpu(rx_desc->wb.upper.vlan));
+#else
+               IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+       else
+               IXGBE_CB(skb)->vid = 0;
+#endif
+}
+
 /**
  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1444,173 +1646,554 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 {
        ixgbe_update_rsc_stats(rx_ring, skb);
 
+#ifdef NETIF_F_RXHASH
        ixgbe_rx_hash(rx_ring, rx_desc, skb);
 
+#endif /* NETIF_F_RXHASH */
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
+       ixgbe_rx_vlan(rx_ring, rx_desc, skb);
 
-       if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
-               u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-               __vlan_hwaccel_put_tag(skb, vid);
-       }
-
-       skb_record_rx_queue(skb, rx_ring->queue_index);
+       skb_record_rx_queue(skb, ring_queue_index(rx_ring));
 
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring));
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+                        struct ixgbe_ring *rx_ring,
+                        union ixgbe_adv_rx_desc *rx_desc,
                         struct sk_buff *skb)
 {
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-
-       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
-               napi_gro_receive(&q_vector->napi, skb);
+#ifndef IXGBE_NO_LRO
+       if (ixgbe_can_lro(rx_ring, rx_desc, skb))
+               ixgbe_lro_receive(q_vector, skb);
        else
-               netif_rx(skb);
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+               ixgbe_receive_skb(q_vector, skb);
+#else
+               napi_gro_receive(&q_vector->napi, skb);
+#endif
+#ifndef NETIF_F_GRO
+
+       netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
 }
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
-                              struct ixgbe_ring *rx_ring,
-                              int budget)
+/**
+ * ixgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
+                            union ixgbe_adv_rx_desc *rx_desc,
+                            struct sk_buff *skb)
 {
-       union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
-       struct ixgbe_rx_buffer *rx_buffer_info;
-       struct sk_buff *skb;
-       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       const int current_node = numa_node_id();
-#ifdef IXGBE_FCOE
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       int ddp_bytes = 0;
-#endif /* IXGBE_FCOE */
-       u16 i;
-       u16 cleaned_count = 0;
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       struct sk_buff *next_skb;
+#endif
+       u32 ntc = rx_ring->next_to_clean + 1;
 
-       i = rx_ring->next_to_clean;
-       rx_desc = IXGBE_RX_DESC(rx_ring, i);
+       /* fetch, update, and store next to clean */
+       ntc = (ntc < rx_ring->count) ? ntc : 0;
+       rx_ring->next_to_clean = ntc;
 
-       while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
-               u32 upper_len = 0;
+       prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
-               rmb(); /* read descriptor and rx_buffer_info after status DD */
+       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+               return false;
 
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+       /* append_cnt indicates packet is RSC, if so fetch nextp */
+       if (IXGBE_CB(skb)->append_cnt) {
+               ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+               ntc &= IXGBE_RXDADV_NEXTP_MASK;
+               ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+       }
 
-               skb = rx_buffer_info->skb;
-               rx_buffer_info->skb = NULL;
-               prefetch(skb->data);
+       /* place skb in next buffer to be received */
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       next_skb = rx_ring->rx_buffer_info[ntc].skb;
+       rx_ring->rx_stats.non_eop_descs++;
 
-               /* linear means we are building an skb from multiple pages */
-               if (!skb_is_nonlinear(skb)) {
-                       u16 hlen;
-                       if (ring_is_ps_enabled(rx_ring)) {
-                               hlen = ixgbe_get_hlen(rx_desc);
-                               upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-                       } else {
-                               hlen = le16_to_cpu(rx_desc->wb.upper.length);
-                       }
+       ixgbe_add_active_tail(skb, next_skb);
+       IXGBE_CB(next_skb)->head = skb;
+#else
+       rx_ring->rx_buffer_info[ntc].skb = skb;
+       rx_ring->rx_stats.non_eop_descs++;
+#endif
 
-                       skb_put(skb, hlen);
+       return true;
+}
 
-                       /*
-                        * Delay unmapping of the first packet. It carries the
-                        * header information, HW may still access the header
-                        * after writeback.  Only unmap it when EOP is reached
-                        */
-                       if (!IXGBE_CB(skb)->head) {
-                               IXGBE_CB(skb)->delay_unmap = true;
-                               IXGBE_CB(skb)->dma = rx_buffer_info->dma;
-                       } else {
-                               skb = ixgbe_merge_active_tail(skb);
-                               dma_unmap_single(rx_ring->dev,
-                                                rx_buffer_info->dma,
-                                                rx_ring->rx_buf_len,
-                                                DMA_FROM_DEVICE);
-                       }
-                       rx_buffer_info->dma = 0;
-               } else {
-                       /* assume packet split since header is unmapped */
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+/**
+ * ixgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+                                 union ixgbe_adv_rx_desc *rx_desc,
+                                 struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
+
+       /* if the page was released unmap it, else just sync our portion */
+       if (unlikely(IXGBE_CB(skb)->page_released)) {
+               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               IXGBE_CB(skb)->page_released = false;
+       } else {
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             IXGBE_CB(skb)->dma,
+                                             frag->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+       IXGBE_CB(skb)->dma = 0;
 
-               if (upper_len) {
-                       dma_unmap_page(rx_ring->dev,
-                                      rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2,
-                                      DMA_FROM_DEVICE);
-                       rx_buffer_info->page_dma = 0;
-                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          rx_buffer_info->page,
-                                          rx_buffer_info->page_offset,
-                                          upper_len);
-
-                       if ((page_count(rx_buffer_info->page) == 1) &&
-                           (page_to_nid(rx_buffer_info->page) == current_node))
-                               get_page(rx_buffer_info->page);
-                       else
-                               rx_buffer_info->page = NULL;
+       /* verify that the packet does not have any known errors */
+       if (unlikely(ixgbe_test_staterr(rx_desc,
+                                       IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+               dev_kfree_skb_any(skb);
+               return true;
+       }
+
+       /*
+        * it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lowmem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
+
+       /*
+        * we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = skb_frag_size(frag);
+       if (pull_len > IXGBE_RX_HDR_SIZE)
+               pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+
+       /*
+        * if we sucked the frag empty then we should free it,
+        * if there are other frags here something is screwed up in hardware
+        */
+       if (skb_frag_size(frag) == 0) {
+               BUG_ON(skb_shinfo(skb)->nr_frags != 1);
+               skb_shinfo(skb)->nr_frags = 0;
+               __skb_frag_unref(frag);
+               skb->truesize -= ixgbe_rx_bufsz(rx_ring);
+       }
+
+#ifdef IXGBE_FCOE
+       /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+       if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+               return false;
+
+#endif
+       /* if skb_pad returns an error the skb was freed */
+       if (unlikely(skb->len < 60)) {
+               int pad_len = 60 - skb->len;
+
+               if (skb_pad(skb, pad_len))
+                       return true;
+               __skb_put(skb, pad_len);
+       }
+
+       return false;
+}
+
+/**
+ * ixgbe_can_reuse_page - determine if we can reuse a page
+ * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
+ *
+ * Returns true if page can be reused in another Rx buffer
+ **/
+static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+       struct page *page = rx_buffer->page;
+
+       /*
+        * The page may be recycled only if the driver holds the sole
+        * reference (page_count == 1, i.e. the stack no longer owns any
+        * frag backed by it) and the page is local to the current NUMA
+        * node -- recycling a remote page would make every subsequent
+        * Rx access to it cross-node.
+        */
+       return likely(page_count(page) == 1) &&
+              likely(page_to_nid(page) == numa_node_id());
+}
+
+/**
+ * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter.  The page is handed to the
+ * next_to_alloc slot with its offset flipped to the other half of the
+ * page, so hardware and the stack never use the same half at once.
+ **/
+static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+                               struct ixgbe_rx_buffer *old_buff)
+{
+       struct ixgbe_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
+       u16 bufsz = ixgbe_rx_bufsz(rx_ring);
+
+       new_buff = &rx_ring->rx_buffer_info[nta];
+
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       /* transfer page from old buffer to new buffer */
+       new_buff->page = old_buff->page;
+       new_buff->dma = old_buff->dma;
+
+       /* flip page offset to other buffer and store to new_buff */
+       new_buff->page_offset = old_buff->page_offset ^ bufsz;
+
+       /* sync the buffer for use by the device */
+       dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
+                                        new_buff->page_offset, bufsz,
+                                        DMA_FROM_DEVICE);
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(new_buff->page);
+}
+
+/**
+ * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: length of buffer written by hardware
+ *
+ * This function is based on skb_add_rx_frag.  I would have used that
+ * function however it doesn't handle the truesize case correctly since we
+ * are allocating more memory than might be used for a single receive.
+ * truesize is therefore charged with the full Rx buffer size rather
+ * than just @size.
+ **/
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+                             struct ixgbe_rx_buffer *rx_buffer,
+                             struct sk_buff *skb, int size)
+{
+       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                          rx_buffer->page, rx_buffer->page_offset,
+                          size);
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += ixgbe_rx_bufsz(rx_ring);
+}
+
+/**
+ * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns true if all work is completed without reaching budget
+ **/
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+                              struct ixgbe_ring *rx_ring,
+                              int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+       int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
+       u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+
+       do {
+               struct ixgbe_rx_buffer *rx_buffer;
+               union ixgbe_adv_rx_desc *rx_desc;
+               struct sk_buff *skb;
+               struct page *page;
+               u16 ntc;
 
-                       skb->len += upper_len;
-                       skb->data_len += upper_len;
-                       skb->truesize += PAGE_SIZE / 2;
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+                       cleaned_count = 0;
                }
 
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+               ntc = rx_ring->next_to_clean;
+               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
+               rx_buffer = &rx_ring->rx_buffer_info[ntc];
 
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
+               if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+                       break;
 
-               next_rxd = IXGBE_RX_DESC(rx_ring, i);
-               prefetch(next_rxd);
-               cleaned_count++;
+               /*
+                * This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * RXD_STAT_DD bit is set
+                */
+               rmb();
 
-               if ((!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
-                       struct ixgbe_rx_buffer *next_buffer;
-                       u32 nextp;
+               page = rx_buffer->page;
+               prefetchw(page);
 
-                       if (IXGBE_CB(skb)->append_cnt) {
-                               nextp = le32_to_cpu(
-                                               rx_desc->wb.upper.status_error);
-                               nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
-                       } else {
-                               nextp = i;
+               skb = rx_buffer->skb;
+
+               if (likely(!skb)) {
+                       void *page_addr = page_address(page) +
+                                         rx_buffer->page_offset;
+
+                       /* prefetch first cache line of first page */
+                       prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+                       prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+                       /* allocate a skb to store the frags */
+                       skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
+                                                       IXGBE_RX_HDR_SIZE);
+                       if (unlikely(!skb)) {
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
+                               break;
                        }
 
-                       next_buffer = &rx_ring->rx_buffer_info[nextp];
+                       /*
+                        * we will be copying header into skb->data in
+                        * pskb_may_pull so it is in our interest to prefetch
+                        * it now to avoid a possible cache miss
+                        */
+                       prefetchw(skb->data);
+
+                       /*
+                        * Delay unmapping of the first packet. It carries the
+                        * header information, HW may still access the header
+                        * after the writeback.  Only unmap it when EOP is
+                        * reached
+                        */
+                       IXGBE_CB(skb)->dma = rx_buffer->dma;
+               } else {
+                       /* we are reusing so sync this buffer for CPU use */
+                       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                                     rx_buffer->dma,
+                                                     rx_buffer->page_offset,
+                                                     ixgbe_rx_bufsz(rx_ring),
+                                                     DMA_FROM_DEVICE);
+               }
 
-                       if (ring_is_ps_enabled(rx_ring)) {
-                               rx_buffer_info->skb = next_buffer->skb;
-                               rx_buffer_info->dma = next_buffer->dma;
-                               next_buffer->skb = skb;
-                               next_buffer->dma = 0;
-                       } else {
-                               struct sk_buff *next_skb = next_buffer->skb;
-                               ixgbe_add_active_tail(skb, next_skb);
-                               IXGBE_CB(next_skb)->head = skb;
+               /* pull page into skb */
+               ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
+                                 le16_to_cpu(rx_desc->wb.upper.length));
+
+               if (ixgbe_can_reuse_page(rx_buffer)) {
+                       /* hand second half of page back to the ring */
+                       ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+               } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+                       /* the page has been released from the ring */
+                       IXGBE_CB(skb)->page_released = true;
+               } else {
+                       /* we are not reusing the buffer so unmap it */
+                       dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                                      ixgbe_rx_pg_size(rx_ring),
+                                      DMA_FROM_DEVICE);
+               }
+
+               /* clear contents of buffer_info */
+               rx_buffer->skb = NULL;
+               rx_buffer->dma = 0;
+               rx_buffer->page = NULL;
+
+               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+
+               cleaned_count++;
+
+               /* place incomplete frames back on ring for completion */
+               if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
+                       continue;
+
+               /* verify the packet layout is correct */
+               if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+                       continue;
+
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               /* populate checksum, timestamp, VLAN, and protocol */
+               ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+#ifdef IXGBE_FCOE
+               /* if ddp, not passing to ULD unless for FCP_RSP or error */
+               if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
+                       ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter,
+                                                  rx_desc, skb);
+                       if (!ddp_bytes) {
+                               dev_kfree_skb_any(skb);
+#ifndef NETIF_F_GRO
+                               netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
+                               continue;
                        }
-                       rx_ring->rx_stats.non_eop_descs++;
-                       goto next_desc;
                }
 
+#endif /* IXGBE_FCOE */
+               ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+#ifdef IXGBE_FCOE
+       /* include DDPed FCoE data */
+       if (ddp_bytes > 0) {
+               unsigned int mss;
+
+               mss = netdev_ring(rx_ring)->mtu - sizeof(struct fcoe_hdr) -
+                       sizeof(struct fc_frame_header) -
+                       sizeof(struct fcoe_crc_eof);
+               if (mss > 512)
+                       mss &= ~511;
+               total_rx_bytes += ddp_bytes;
+               total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+       }
+
+#endif /* IXGBE_FCOE */
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       q_vector->rx.total_packets += total_rx_packets;
+       q_vector->rx.total_bytes += total_rx_bytes;
+
+       if (cleaned_count)
+               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IXGBE_NO_LRO
+       ixgbe_lro_flush_all(q_vector);
+
+#endif /* IXGBE_NO_LRO */
+       return !!budget;
+}
+
+#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
+/**
+ * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a legacy approach to Rx interrupt
+ * handling.  This version will perform better on systems with a low cost
+ * dma mapping API.
+ *
+ * Returns true if all work is completed without reaching budget
+ **/
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+                              struct ixgbe_ring *rx_ring,
+                              int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+       int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
+       u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+
+       do {
+               struct ixgbe_rx_buffer *rx_buffer;
+               union ixgbe_adv_rx_desc *rx_desc;
+               struct sk_buff *skb;
+               u16 ntc;
+
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               ntc = rx_ring->next_to_clean;
+               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
+               rx_buffer = &rx_ring->rx_buffer_info[ntc];
+
+               if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+                       break;
+
+               /*
+                * This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * RXD_STAT_DD bit is set
+                */
+               rmb();
+
+               skb = rx_buffer->skb;
+
+               prefetch(skb->data);
+
+               /* pull the header of the skb in */
+               __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+
+               /*
+                * Delay unmapping of the first packet. It carries the
+                * header information, HW may still access the header after
+                * the writeback.  Only unmap it when EOP is reached
+                */
+               if (!IXGBE_CB(skb)->head) {
+                       IXGBE_CB(skb)->dma = rx_buffer->dma;
+               } else {
+                       skb = ixgbe_merge_active_tail(skb);
+                       dma_unmap_single(rx_ring->dev,
+                                        rx_buffer->dma,
+                                        rx_ring->rx_buf_len,
+                                        DMA_FROM_DEVICE);
+               }
+
+               /* clear skb reference in buffer info structure */
+               rx_buffer->skb = NULL;
+               rx_buffer->dma = 0;
+
+               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+
+               cleaned_count++;
+
+               if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
+                       continue;
+
                dma_unmap_single(rx_ring->dev,
                                 IXGBE_CB(skb)->dma,
                                 rx_ring->rx_buf_len,
                                 DMA_FROM_DEVICE);
+
                IXGBE_CB(skb)->dma = 0;
-               IXGBE_CB(skb)->delay_unmap = false;
 
                if (ixgbe_close_active_frag_list(skb) &&
                    !IXGBE_CB(skb)->append_cnt) {
                        /* if we got here without RSC the packet is invalid */
                        dev_kfree_skb_any(skb);
-                       goto next_desc;
+                       continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(ixgbe_test_staterr(rx_desc,
-                                           IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+                                          IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                        dev_kfree_skb_any(skb);
-                       goto next_desc;
+                       continue;
                }
 
                /* probably a little skewed due to removing CRC */
@@ -1622,43 +2205,31 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
-               if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
-                       ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+               if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
+                       ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter,
+                                                  rx_desc, skb);
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
-                               goto next_desc;
+#ifndef NETIF_F_GRO
+                               netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
+                               continue;
                        }
                }
+
 #endif /* IXGBE_FCOE */
-               ixgbe_rx_skb(q_vector, skb);
+               ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb);
 
+               /* update budget accounting */
                budget--;
-next_desc:
-               if (!budget)
-                       break;
-
-               /* return some buffers to hardware, one at a time is too slow */
-               if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
-                       cleaned_count = 0;
-               }
-
-               /* use prefetched values */
-               rx_desc = next_rxd;
-       }
-
-       rx_ring->next_to_clean = i;
-       cleaned_count = ixgbe_desc_unused(rx_ring);
-
-       if (cleaned_count)
-               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+       } while (likely(budget));
 
 #ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;
 
-               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
+               mss = netdev_ring(rx_ring)->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
@@ -1666,18 +2237,24 @@ next_desc:
                total_rx_bytes += ddp_bytes;
                total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
        }
-#endif /* IXGBE_FCOE */
 
-       u64_stats_update_begin(&rx_ring->syncp);
+#endif /* IXGBE_FCOE */
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
-       u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;
 
+       if (cleaned_count)
+               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IXGBE_NO_LRO
+       ixgbe_lro_flush_all(q_vector);
+
+#endif /* IXGBE_NO_LRO */
        return !!budget;
 }
 
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1687,14 +2264,11 @@ next_desc:
  **/
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_q_vector *q_vector;
-       int q_vectors, v_idx;
+       int v_idx;
        u32 mask;
 
-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
        /* Populate MSIX to EITR Select */
-       if (adapter->num_vfs > 32) {
+       if (adapter->num_vfs >= 32) {
                u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
        }
@@ -1703,9 +2277,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
-       for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
                struct ixgbe_ring *ring;
-               q_vector = adapter->q_vector[v_idx];
 
                ixgbe_for_each_ring(ring, q_vector->rx)
                        ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
@@ -1798,18 +2372,21 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 
        switch (itr_setting) {
        case lowest_latency:
-               if (bytes_perint > 10)
+               if (bytes_perint > 10) {
                        itr_setting = low_latency;
+               }
                break;
        case low_latency:
-               if (bytes_perint > 20)
+               if (bytes_perint > 20) {
                        itr_setting = bulk_latency;
-               else if (bytes_perint <= 10)
+               } else if (bytes_perint <= 10) {
                        itr_setting = lowest_latency;
+               }
                break;
        case bulk_latency:
-               if (bytes_perint <= 20)
+               if (bytes_perint <= 20) {
                        itr_setting = low_latency;
+               }
                break;
        }
 
@@ -2033,8 +2610,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
        }
 }
 
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
+void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask)
 {
        u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2059,8 +2635,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
        /* skip the flush */
 }
 
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-                                           u64 qmask)
+void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, u64 qmask)
 {
        u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2140,8 +2715,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        u32 eicr;
 
        /*
-        * Workaround for Silicon errata.  Use clear-by-write instead
-        * of clear-by-read.  Reading with EICS will return the
+        * Workaround for Silicon errata #26 on 82598.  Use clear-by-write
+        * instead of clear-by-read.  Reading with EICS will return the
         * interrupt causes without clearing, which later be done
         * with the write to EICR.
         */
@@ -2157,26 +2732,35 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               if (eicr & IXGBE_EICR_ECC)
+               if (eicr & IXGBE_EICR_ECC) {
                        e_info(link, "Received unrecoverable ECC Err, please "
                               "reboot\n");
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               }
+#ifdef HAVE_TX_MQ
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int reinit_count = 0;
                        int i;
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *ring = adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                                                      &ring->state))
+                               if (test_and_clear_bit(
+                                                     __IXGBE_TX_FDIR_INIT_DONE,
+                                                     &ring->state))
                                        reinit_count++;
                        }
                        if (reinit_count) {
-                               /* no more flow director interrupts until after init */
-                               IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
-                               adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+                               /* no more flow director interrupts until
+                                * after init
+                                */
+                               IXGBE_WRITE_REG(hw, IXGBE_EIMC,
+                                               IXGBE_EIMC_FLOW_DIR);
+                               adapter->flags2 |=
+                                       IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
                                ixgbe_service_event_schedule(adapter);
                        }
                }
+#endif
                ixgbe_check_sfp_event(adapter, eicr);
                ixgbe_check_overtemp_event(adapter, eicr);
                break;
@@ -2206,28 +2790,28 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
 }
 
 /**
- * ixgbe_poll - NAPI Rx polling callback
- * @napi: structure for representing this polling device
- * @budget: how many packets driver is allowed to clean
+ * ixgbe_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
  *
- * This function is used for legacy and MSI, NAPI mode
+ * This function will clean all queues associated with a q_vector.
  **/
-static int ixgbe_poll(struct napi_struct *napi, int budget)
+int ixgbe_poll(struct napi_struct *napi, int budget)
 {
        struct ixgbe_q_vector *q_vector =
-                               container_of(napi, struct ixgbe_q_vector, napi);
+                              container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;
 
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_dca(q_vector);
-#endif
 
+#endif
        ixgbe_for_each_ring(ring, q_vector->tx)
-               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+               clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
@@ -2240,13 +2824,18 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
                clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
                                                     per_ring_budget);
 
+#ifndef HAVE_NETDEV_NAPI_LIST
+       if (!netif_running(adapter->netdev))
+               clean_complete = true;
+
+#endif
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
 
        /* all work done, exit the polling mode */
        napi_complete(napi);
-       if (adapter->rx_itr_setting & 1)
+       if (adapter->rx_itr_setting == 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
@@ -2264,11 +2853,10 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;
 
-       for (vector = 0; vector < q_vectors; vector++) {
+       for (vector = 0; vector < adapter->num_q_vectors; vector++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];
 
@@ -2293,12 +2881,14 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                              "Error: %d\n", err);
                        goto free_queue_irqs;
                }
+#ifdef HAVE_IRQ_AFFINITY_HINT
                /* If Flow Director is enabled, set interrupt affinity */
                if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                        /* assign the mask for this irq */
                        irq_set_affinity_hint(entry->vector,
                                              &q_vector->affinity_mask);
                }
+#endif /* HAVE_IRQ_AFFINITY_HINT */
        }
 
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -2313,8 +2903,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
        while (vector) {
                vector--;
+#ifdef HAVE_IRQ_AFFINITY_HINT
                irq_set_affinity_hint(adapter->msix_entries[vector].vector,
                                      NULL);
+#endif
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
@@ -2356,7 +2948,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
                 */
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        ixgbe_irq_enable(adapter, true, true);
-               return IRQ_NONE;        /* Not our interrupt */
+               return IRQ_NONE;        /* Not our interrupt */
        }
 
        if (eicr & IXGBE_EICR_LSC)
@@ -2364,12 +2956,11 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
-               ixgbe_check_sfp_event(adapter, eicr);
-               /* Fall through */
        case ixgbe_mac_X540:
                if (eicr & IXGBE_EICR_ECC)
-                       e_info(link, "Received unrecoverable ECC err, please "
-                                    "reboot\n");
+                       e_info(link, "Received unrecoverable ECC Err, please "
+                              "reboot\n");
+               ixgbe_check_sfp_event(adapter, eicr);
                ixgbe_check_overtemp_event(adapter, eicr);
                break;
        default:
@@ -2406,10 +2997,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                err = ixgbe_request_msix_irqs(adapter);
        else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
-               err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
+               err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                  netdev->name, adapter);
        else
-               err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
+               err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                  netdev->name, adapter);
 
        if (err)
@@ -2420,33 +3011,33 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int i, q_vectors;
-
-               q_vectors = adapter->num_msix_vectors;
-               i = q_vectors - 1;
-               free_irq(adapter->msix_entries[i].vector, adapter);
-               i--;
-
-               for (; i >= 0; i--) {
-                       /* free only the irqs that were actually requested */
-                       if (!adapter->q_vector[i]->rx.ring &&
-                           !adapter->q_vector[i]->tx.ring)
-                               continue;
-
-                       /* clear the affinity_mask in the IRQ descriptor */
-                       irq_set_affinity_hint(adapter->msix_entries[i].vector,
-                                             NULL);
+       int vector;
 
-                       free_irq(adapter->msix_entries[i].vector,
-                                adapter->q_vector[i]);
-               }
-       } else {
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
                free_irq(adapter->pdev->irq, adapter);
+               return;
        }
-}
 
-/**
+       for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+               struct msix_entry *entry = &adapter->msix_entries[vector];
+
+               /* free only the irqs that were actually requested */
+               if (!q_vector->rx.ring && !q_vector->tx.ring)
+                       continue;
+
+#ifdef HAVE_IRQ_AFFINITY_HINT
+               /* clear the affinity_mask in the IRQ descriptor */
+               irq_set_affinity_hint(entry->vector, NULL);
+
+#endif
+               free_irq(entry->vector, q_vector);
+       }
+
+       free_irq(adapter->msix_entries[vector++].vector, adapter);
+}
+
+/**
  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
  * @adapter: board private structure
  **/
@@ -2467,9 +3058,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int i;
-               for (i = 0; i < adapter->num_msix_vectors; i++)
-                       synchronize_irq(adapter->msix_entries[i].vector);
+               int vector;
+
+               for (vector = 0; vector < adapter->num_q_vectors; vector++)
+                       synchronize_irq(adapter->msix_entries[vector].vector);
+
+               synchronize_irq(adapter->msix_entries[vector++].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
@@ -2514,18 +3108,27 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
-       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
        IXGBE_WRITE_FLUSH(hw);
 
-       IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
-                       (tdba & DMA_BIT_MASK(32)));
-       IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
+       IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+       IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+       /* disable head writeback */
+       IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(reg_idx), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(reg_idx), 0);
+
+       /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
        ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
+       /* reset ntu and ntc to place SW in sync with hardware */
+       ring->next_to_clean = 0;
+       ring->next_to_use = 0;
+
        /*
         * set WTHRESH to encourage burst writeback, it should not be set
         * higher than 1 when ITR is 0 as it could cause false TX hangs
@@ -2547,8 +3150,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                   32;          /* PTHRESH = 32 */
 
        /* reinitialize flowdirector state */
-       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
-           adapter->atr_sample_rate) {
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                ring->atr_sample_rate = adapter->atr_sample_rate;
                ring->atr_count = 0;
                set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
@@ -2568,7 +3170,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 
        /* poll to verify queue is enabled */
        do {
-               usleep_range(1000, 2000);
+               msleep(1);
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
        } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
        if (!wait_loop)
@@ -2578,8 +3180,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 rttdcs;
-       u32 reg;
+       u32 rttdcs, mtqc;
        u8 tcs = netdev_get_num_tc(adapter->netdev);
 
        if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2591,28 +3192,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
        /* set transmit pool layout */
-       switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-       case (IXGBE_FLAG_SRIOV_ENABLED):
-               IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-                               (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
-               break;
-       default:
-               if (!tcs)
-                       reg = IXGBE_MTQC_64Q_1PB;
-               else if (tcs <= 4)
-                       reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+               mtqc = IXGBE_MTQC_VT_ENA;
+               if (tcs > 4)
+                       mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+               else if (tcs > 1)
+                       mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+               else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+                       mtqc |= IXGBE_MTQC_32VF;
+               else
+                       mtqc |= IXGBE_MTQC_64VF;
+       } else {
+               if (tcs > 4)
+                       mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+               else if (tcs > 1)
+                       mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                else
-                       reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+                       mtqc = IXGBE_MTQC_64Q_1PB;
+       }
 
-               IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+       IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
 
-               /* Enable Security TX Buffer IFG for multiple pb */
-               if (tcs) {
-                       reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-                       reg |= IXGBE_SECTX_DCB;
-                       IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-               }
-               break;
+       /* Enable Security TX Buffer IFG for multiple pb */
+       if (tcs) {
+               u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+               sectx |= IXGBE_SECTX_DCB;
+               IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
        }
 
        /* re-enable the arbiter */
@@ -2632,6 +3237,13 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
        u32 dmatxctl;
        u32 i;
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+       if (adapter->num_tx_queues > 1)
+               adapter->netdev->features |= NETIF_F_MULTI_QUEUE;
+       else
+               adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE;
+
+#endif
        ixgbe_setup_mtqc(adapter);
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -2646,51 +3258,105 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
+static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = ring->reg_idx;
+       u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
+
+       srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
+                                 struct ixgbe_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = ring->reg_idx;
+       u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
+
+       srrctl &= ~IXGBE_SRRCTL_DROP_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
+{
+       int i;
+       bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+
+#ifdef HAVE_DCBNL_IEEE
+       if (adapter->ixgbe_ieee_pfc)
+               pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+#endif
+       /*
+        * We should set the drop enable bit if:
+        *  SR-IOV is enabled
+        *   or
+        *  Number of Rx queues > 1 and flow control is disabled
+        *
+        *  This allows us to avoid head of line blocking for security
+        *  and performance reasons.
+        */
+       if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
+           !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
+       } else {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
+       }
+}
+
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;
        u8 reg_idx = rx_ring->reg_idx;
 
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82598EB: {
-               struct ixgbe_ring_feature *feature = adapter->ring_feature;
-               const int mask = feature[RING_F_RSS].mask;
-               reg_idx = reg_idx & mask;
-       }
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       default:
-               break;
-       }
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               u16 mask = adapter->ring_feature[RING_F_RSS].mask;
 
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+               /* program one srrctl register per VMDq index */
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       mask = adapter->ring_feature[RING_F_VMDQ].mask;
+
+               /*
+                * if VMDq is not active we must program one srrctl register
+                * per RSS queue since we have enabled RDRXCTL.MVMEN
+                */
+               reg_idx &= mask;
 
-       srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-       srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-       if (adapter->num_vfs)
-               srrctl |= IXGBE_SRRCTL_DROP_EN;
+               /* divide by the first bit of the mask to get the indices */
+               if (reg_idx)
+                       reg_idx /= ((~mask) + 1) & mask;
+       }
 
-       srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                 IXGBE_SRRCTL_BSIZEHDR_MASK;
+       /* configure header buffer length, needed for RSC */
+       srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
-       if (ring_is_ps_enabled(rx_ring)) {
-#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
-               srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       /* configure the packet buffer length */
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
-               srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
+       srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+       srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
 #endif
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-       } else {
-               srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
-                         IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-       }
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+       /* configure descriptor type */
+       srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2702,11 +3368,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        u32 mrqc = 0, reta = 0;
        u32 rxcsum;
        int i, j;
-       u8 tcs = netdev_get_num_tc(adapter->netdev);
-       int maxq = adapter->ring_feature[RING_F_RSS].indices;
+       u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 
-       if (tcs)
-               maxq = min(maxq, adapter->num_tx_queues / tcs);
+       /*
+        * Program table for at least 2 queues w/ SR-IOV so that VFs can
+        * make full use of any rings they may have.  We will use the
+        * PSRTYPE register to control how many rings we use within the PF.
+        */
+       if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+               rss_i = 2;
 
        /* Fill out hash function seeds */
        for (i = 0; i < 10; i++)
@@ -2714,7 +3384,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
        /* Fill out redirection table */
        for (i = 0, j = 0; i < 128; i++, j++) {
-               if (j == maxq)
+               if (j == rss_i)
                        j = 0;
                /* reta = 4-byte sliding window of
                 * 0x00..(indices-1)(indices-1)00..etc. */
@@ -2728,35 +3398,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        rxcsum |= IXGBE_RXCSUM_PCSD;
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
-           (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
-               mrqc = IXGBE_MRQC_RSSEN;
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               if (adapter->ring_feature[RING_F_RSS].mask)
+                       mrqc = IXGBE_MRQC_RSSEN;
        } else {
-               int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-                                            | IXGBE_FLAG_SRIOV_ENABLED);
-
-               switch (mask) {
-               case (IXGBE_FLAG_RSS_ENABLED):
-                       if (!tcs)
-                               mrqc = IXGBE_MRQC_RSSEN;
-                       else if (tcs <= 4)
-                               mrqc = IXGBE_MRQC_RTRSS4TCEN;
+               u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       if (tcs > 4)
+                               mrqc = IXGBE_MRQC_VMDQRT8TCEN;  /* 8 TCs */
+                       else if (tcs > 1)
+                               mrqc = IXGBE_MRQC_VMDQRT4TCEN;  /* 4 TCs */
+                       else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+                               mrqc = IXGBE_MRQC_VMDQRSS32EN;
                        else
+                               mrqc = IXGBE_MRQC_VMDQRSS64EN;
+               } else {
+                       if (tcs > 4)
                                mrqc = IXGBE_MRQC_RTRSS8TCEN;
-                       break;
-               case (IXGBE_FLAG_SRIOV_ENABLED):
-                       mrqc = IXGBE_MRQC_VMDQEN;
-                       break;
-               default:
-                       break;
+                       else if (tcs > 1)
+                               mrqc = IXGBE_MRQC_RTRSS4TCEN;
+                       else
+                               mrqc = IXGBE_MRQC_RSSEN;
                }
        }
 
        /* Perform hash on these packet types */
-       mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
-             | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-             | IXGBE_MRQC_RSS_FIELD_IPV6
-             | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+       mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+               IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+               IXGBE_MRQC_RSS_FIELD_IPV6 |
+               IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 
        if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -2766,23 +3437,40 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
+/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                       struct ixgbe_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rscctrl;
+       u8 reg_idx = ring->reg_idx;
+
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+       rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+
+       clear_ring_rsc_enabled(ring);
+}
+
 /**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
- * @index:      index of ring to set
+ * @ring: structure containing ring specific data
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *ring)
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+                           struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
-       int rx_buf_len;
        u8 reg_idx = ring->reg_idx;
 
        if (!ring_is_rsc_enabled(ring))
                return;
 
-       rx_buf_len = ring->rx_buf_len;
        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
        rscctrl |= IXGBE_RSCCTL_RSCEN;
        /*
@@ -2790,55 +3478,27 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65536
         */
-       if (ring_is_ps_enabled(ring)) {
-#if (PAGE_SIZE < 8192)
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#if (MAX_SKB_FRAGS >= 16) && (PAGE_SIZE <= 8192)
+       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS >= 8) && (PAGE_SIZE <= 16384)
+       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS >= 4)
+       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+       rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
+       if (ring->rx_buf_len <= IXGBE_RXBUFFER_4K)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE < 16384)
+       else if (ring->rx_buf_len <= IXGBE_RXBUFFER_8K)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (PAGE_SIZE < 32768)
+       else
                rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
-               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
 #endif
-       } else {
-               if (rx_buf_len <= IXGBE_RXBUFFER_4K)
-                       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-               else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
-                       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-               else
-                       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-       }
        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
-/**
- *  ixgbe_set_uta - Set unicast filter table address
- *  @adapter: board private structure
- *
- *  The unicast table address is a register array of 32-bit registers.
- *  The table is meant to be used in a way similar to how the MTA is used
- *  however due to certain limitations in the hardware it is necessary to
- *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
- *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
- **/
-static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       int i;
-
-       /* The UTA table only exists on 82599 hardware and newer */
-       if (hw->mac.type < ixgbe_mac_82599EB)
-               return;
-
-       /* we only need to do this if VMDq is enabled */
-       if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
-               return;
-
-       for (i = 0; i < 128; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
-}
-
-#define IXGBE_MAX_RX_DESC_POLL 10
 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *ring)
 {
@@ -2853,13 +3513,13 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                return;
 
        do {
-               usleep_range(1000, 2000);
+               msleep(1);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 
        if (!wait_loop) {
-               e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
-                     "the polling period\n", reg_idx);
+               e_err(drv, "RXDCTL.ENABLE on Rx queue %d "
+                     "not set within the polling period\n", reg_idx);
        }
 }
 
@@ -2905,26 +3565,28 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        ixgbe_disable_rx_queue(adapter, ring);
 
-       IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
-       IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
+       IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+       IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), rdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+       /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
        ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
+       /* reset ntu and ntc to place SW in sync with hardware */
+       ring->next_to_clean = 0;
+       ring->next_to_use = 0;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       ring->next_to_alloc = 0;
+#endif
+
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
 
-       /* If operating in IOV mode set RLPML for X540 */
-       if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
-           hw->mac.type == ixgbe_mac_X540) {
-               rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-               rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
-                           ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
-       }
-
-       if (hw->mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                /*
                 * enable cache line friendly hardware writes:
                 * PTHRESH=32 descriptors (half the internal cache),
@@ -2934,6 +3596,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                 */
                rxdctl &= ~0x3FFFFF;
                rxdctl |=  0x080420;
+               break;
+       case ixgbe_mac_X540:
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN);
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               /* If operating in IOV mode set RLPML for X540 */
+               if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                       break;
+               rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+#endif
+       default:
+               break;
        }
 
        /* enable receive descriptor ring */
@@ -2947,6 +3620,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       int rss_i = adapter->ring_feature[RING_F_RSS].indices;
        int p;
 
        /* PSRTYPE must be initialized in non 82598 adapters */
@@ -2959,59 +3633,103 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                return;
 
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
-               psrtype |= (adapter->num_rx_queues_per_pool << 29);
+       if (rss_i > 3)
+               psrtype |= 2 << 29;
+       else if (rss_i > 1)
+               psrtype |= 1 << 29;
 
        for (p = 0; p < adapter->num_rx_pools; p++)
-               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
-                               psrtype);
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype);
 }
 
 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 gcr_ext;
-       u32 vt_reg_bits;
        u32 reg_offset, vf_shift;
-       u32 vmdctl;
+       u32 gcr_ext, vmdctl;
+       int i;
 
-       if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+       if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
                return;
 
-       vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-       vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
-       vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
-       IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+               vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+               vmdctl |= IXGBE_VT_CTL_VT_ENABLE;
+               vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+               vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
+               if (adapter->num_vfs)
+                       vmdctl |= IXGBE_VT_CTL_REPLEN;
+               IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+               for (i = 1; i < adapter->num_rx_pools; i++) {
+                       u32 vmolr;
+                       int pool = VMDQ_P(i);
+
+                       /*
+                       * accept untagged packets until a vlan tag
+                       * is specifically set for the VMDQ queue/pool
+                       */
+                       vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+                       vmolr |= IXGBE_VMOLR_AUPE;
+                       vmolr |= IXGBE_VMOLR_BAM;
+                       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+               }
 
-       vf_shift = adapter->num_vfs % 32;
-       reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+               vf_shift = VMDQ_P(0) % 32;
+               reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
-       /* Enable only the PF's pool for Tx/Rx */
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
-       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+               /* Enable only the PF pools for Tx/Rx */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
+               break;
+       default:
+               break;
+       }
 
-       /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
-       hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+       if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+               return;
 
        /*
         * Set up VF register offsets for selected VT Mode,
         * i.e. 32 or 64 VFs for SR-IOV
         */
-       gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
-       gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
-       gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+       switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+       case IXGBE_82599_VMDQ_8Q_MASK:
+               gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
+               break;
+       case IXGBE_82599_VMDQ_4Q_MASK:
+               gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
+               break;
+       default:
+               gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
+               break;
+       }
+
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
        /* enable Tx loopback for VF/PF communication */
-       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-       /* Enable MAC Anti-Spoofing */
-       hw->mac.ops.set_mac_anti_spoofing(hw,
-                                         (adapter->antispoofing_enabled =
-                                          (adapter->num_vfs != 0)),
-                                         adapter->num_vfs);
+       if (adapter->flags & IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE)
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0);
+
+       hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
+                                               adapter->num_vfs);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       for (i = 0; i < adapter->num_vfs; i++) {
+               if (!adapter->vfinfo[i].spoofchk_enabled)
+                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+       }
+#endif
 }
 
 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
@@ -3019,22 +3737,12 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       int rx_buf_len;
        struct ixgbe_ring *rx_ring;
        int i;
        u32 mhadd, hlreg0;
-
-       /* Decide whether to use packet split mode or not */
-       /* On by default */
-       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-
-       /* Do not use packet split if we're in SR-IOV Mode */
-       if (adapter->num_vfs)
-               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-
-       /* Disable packet split due to 82599 erratum #45 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
-               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       int rx_buf_len;
+#endif
 
 #ifdef IXGBE_FCOE
        /* adjust max frame to be able to do baby jumbo for FCoE */
@@ -3050,33 +3758,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }
+               /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
+               max_frame += VLAN_HLEN;
 
-       /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
-       max_frame += VLAN_HLEN;
-
-       /* Set the RX buffer length according to the mode */
-       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               rx_buf_len = IXGBE_RX_HDR_SIZE;
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+           (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) {
+               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       /*
+        * Make best use of allocation by using all but 1K of a
+        * power of 2 allocation that will be used for skb->head.
+        */
+       } else if (max_frame <= IXGBE_RXBUFFER_3K) {
+               rx_buf_len = IXGBE_RXBUFFER_3K;
+       } else if (max_frame <= IXGBE_RXBUFFER_7K) {
+               rx_buf_len = IXGBE_RXBUFFER_7K;
+       } else if (max_frame <= IXGBE_RXBUFFER_15K) {
+               rx_buf_len = IXGBE_RXBUFFER_15K;
        } else {
-               if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                   (netdev->mtu <= ETH_DATA_LEN))
-                       rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-               /*
-                * Make best use of allocation by using all but 1K of a
-                * power of 2 allocation that will be used for skb->head.
-                */
-               else if (max_frame <= IXGBE_RXBUFFER_3K)
-                       rx_buf_len = IXGBE_RXBUFFER_3K;
-               else if (max_frame <= IXGBE_RXBUFFER_7K)
-                       rx_buf_len = IXGBE_RXBUFFER_7K;
-               else if (max_frame <= IXGBE_RXBUFFER_15K)
-                       rx_buf_len = IXGBE_RXBUFFER_15K;
-               else
-                       rx_buf_len = IXGBE_MAX_RXBUFFER;
+               rx_buf_len = IXGBE_MAX_RXBUFFER;
        }
 
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
+       /* set jumbo enable since MHADD.MFS is keeping size locked at
+        * max_frame
+        */
        hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
@@ -3086,34 +3793,20 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_ring = adapter->rx_ring[i];
-               rx_ring->rx_buf_len = rx_buf_len;
-
-               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-                       set_ring_ps_enabled(rx_ring);
-               else
-                       clear_ring_ps_enabled(rx_ring);
-
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
                else
                        clear_ring_rsc_enabled(rx_ring);
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+
+               rx_ring->rx_buf_len = rx_buf_len;
 
 #ifdef IXGBE_FCOE
-               if (netdev->features & NETIF_F_FCOE_MTU) {
-                       struct ixgbe_ring_feature *f;
-                       f = &adapter->ring_feature[RING_F_FCOE];
-                       if ((i >= f->mask) && (i < f->mask + f->indices)) {
-                               clear_ring_ps_enabled(rx_ring);
-                               if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
-                                       rx_ring->rx_buf_len =
-                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
-                       } else if (!ring_is_rsc_enabled(rx_ring) &&
-                                  !ring_is_ps_enabled(rx_ring)) {
-                               rx_ring->rx_buf_len =
-                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
-                       }
-               }
+               if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) &&
+                   (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+                       rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 #endif /* IXGBE_FCOE */
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
        }
 }
 
@@ -3176,8 +3869,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        /* Program registers for the distribution of queues */
        ixgbe_setup_mrqc(adapter);
 
-       ixgbe_set_uta(adapter);
-
        /* set_rx_buffer_len must be called before ring initialization */
        ixgbe_set_rx_buffer_len(adapter);
 
@@ -3194,69 +3885,134 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 
        /* enable all receives */
        rxctrl |= IXGBE_RXCTRL_RXEN;
-       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+       ixgbe_enable_rx_dma(hw, rxctrl);
 }
 
+#ifdef NETIF_F_HW_VLAN_TX
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#else
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       int pool_ndx = adapter->num_vfs;
+       int pool_ndx = VMDQ_P(0);
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
-       set_bit(vid, adapter->active_vlans);
+       if (hw->mac.ops.set_vfta) {
+#ifndef HAVE_VLAN_RX_REGISTER
+               if (vid < VLAN_N_VID)
+                       set_bit(vid, adapter->active_vlans);
+#endif
+               hw->mac.ops.set_vfta(hw, vid, pool_ndx, true);
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       int i;
+                       switch (adapter->hw.mac.type) {
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540:
+                               /* enable vlan id for all pools */
+                               for (i = 1; i < adapter->num_rx_pools; i++)
+                                       hw->mac.ops.set_vfta(hw, vid,
+                                                            VMDQ_P(i), true);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+       /*
+        * Copy feature flags from netdev to the vlan netdev for this vid.
+        * This allows things like TSO to bubble down to our vlan device.
+        * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
+        * we will not have a netdev that needs updating.
+        */
+       if (adapter->vlgrp) {
+               struct vlan_group *vlgrp = adapter->vlgrp;
+               struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+               if (v_netdev) {
+                       v_netdev->features |= netdev->features;
+                       vlan_group_set_device(vlgrp, vid, v_netdev);
+               }
+       }
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+       return 0;
+#endif
 }
 
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#else
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       int pool_ndx = adapter->num_vfs;
+       int pool_ndx = VMDQ_P(0);
 
-       /* remove VID from filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
-       clear_bit(vid, adapter->active_vlans);
-}
+       /* User is not allowed to remove vlan ID 0 */
+       if (!vid)
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+               return 0;
+#else
+               return;
+#endif
 
-/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
+#ifdef HAVE_VLAN_RX_REGISTER
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
 
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+       vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter, true, true);
+
+#endif /* HAVE_VLAN_RX_REGISTER */
+       /* remove VID from filter table */
+       if (hw->mac.ops.set_vfta) {
+               hw->mac.ops.set_vfta(hw, vid, pool_ndx, false);
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       int i;
+                       switch (adapter->hw.mac.type) {
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540:
+                               /* remove vlan id from all pools */
+                               for (i = 1; i < adapter->num_rx_pools; i++)
+                                       hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i),
+                                                            false);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+#ifndef HAVE_VLAN_RX_REGISTER
+
+       clear_bit(vid, adapter->active_vlans);
+#endif
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+       return 0;
+#endif
 }
 
+#ifdef HAVE_8021P_SUPPORT
 /**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * ixgbe_vlan_stripping_disable - helper to disable vlan tag stripping
  * @adapter: driver data
  */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vlnctrl;
+       int i;
 
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl |= IXGBE_VLNCTRL_VFE;
-       vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
- * @adapter: driver data
- */
-static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
-       int i, j;
+       /* leave vlan tag stripping enabled for DCB */
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+               return;
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -3267,10 +4023,10 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i]->reg_idx;
-                       vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+                       u8 reg_idx = adapter->rx_ring[i]->reg_idx;
+                       vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
                        vlnctrl &= ~IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
                }
                break;
        default:
@@ -3278,15 +4034,16 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
        }
 }
 
+#endif
 /**
- * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
+ * ixgbe_vlan_stripping_enable - helper to enable vlan tag stripping
  * @adapter: driver data
  */
-static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
+void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vlnctrl;
-       int i, j;
+       int i;
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -3297,10 +4054,10 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i]->reg_idx;
-                       vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+                       u8 reg_idx = adapter->rx_ring[i]->reg_idx;
+                       vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
                        vlnctrl |= IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
                }
                break;
        default:
@@ -3308,16 +4065,271 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        }
 }
 
+#ifdef HAVE_VLAN_RX_REGISTER
+static void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp)
+#else
+void ixgbe_vlan_mode(struct net_device *netdev, u32 features)
+#endif
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef HAVE_VLAN_RX_REGISTER
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
+
+       adapter->vlgrp = grp;
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter, true, true);
+#endif
+#ifdef HAVE_8021P_SUPPORT
+#ifdef HAVE_VLAN_RX_REGISTER
+       bool enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED));
+#else
+       bool enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif
+       if (enable)
+               /* enable VLAN tag insert/strip */
+               ixgbe_vlan_stripping_enable(adapter);
+       else
+               /* disable VLAN tag insert/strip */
+               ixgbe_vlan_stripping_disable(adapter);
+
+#endif
+}
+
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
-       u16 vid;
+#ifdef HAVE_VLAN_RX_REGISTER
+       ixgbe_vlan_mode(adapter->netdev, adapter->vlgrp);
 
+       /*
+        * add vlan ID 0 and enable vlan tag stripping so we
+        * always accept priority-tagged traffic
+        */
        ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#ifndef HAVE_8021P_SUPPORT
+       ixgbe_vlan_stripping_enable(adapter);
+#endif
+       if (adapter->vlgrp) {
+               u16 vid;
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
+                       if (!vlan_group_get_device(adapter->vlgrp, vid))
+                               continue;
+                       ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+               }
+       }
+#else
+       struct net_device *netdev = adapter->netdev;
+       u16 vid;
+
+       ixgbe_vlan_mode(netdev, netdev->features);
 
        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-               ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+               ixgbe_vlan_rx_add_vid(netdev, vid);
+#endif
+}
+
+#endif
+static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       struct netdev_hw_addr *mc_ptr;
+#else
+       struct dev_mc_list *mc_ptr;
+#endif
+#ifdef CONFIG_PCI_IOV
+       struct ixgbe_adapter *adapter = hw->back;
+#endif /* CONFIG_PCI_IOV */
+       u8 *addr = *mc_addr_ptr;
+
+       *vmdq = VMDQ_P(0);
+
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+       if (mc_ptr->list.next) {
+               struct netdev_hw_addr *ha;
+
+               ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+               *mc_addr_ptr = ha->addr;
+       }
+#else
+       mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+       if (mc_ptr->next)
+               *mc_addr_ptr = mc_ptr->next->dmi_addr;
+#endif
+       else
+               *mc_addr_ptr = NULL;
+
+       return addr;
+}
+
+/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ **/
+int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       struct netdev_hw_addr *ha;
+#endif
+       u8  *addr_list = NULL;
+       int addr_count = 0;
+
+       if (!hw->mac.ops.update_mc_addr_list)
+               return -ENOMEM;
+
+       if (!netif_running(netdev))
+               return 0;
+
+
+       hw->mac.ops.update_mc_addr_list(hw, NULL, 0,
+                                       ixgbe_addr_list_itr, true);
+
+       if (!netdev_mc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+               ha = list_first_entry(&netdev->mc.list,
+                                     struct netdev_hw_addr, list);
+               addr_list = ha->addr;
+#else
+               addr_list = netdev->mc_list->dmi_addr;
+#endif
+               addr_count = netdev_mc_count(netdev);
+
+               hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+                                               ixgbe_addr_list_itr, false);
+       }
+
+#ifdef CONFIG_PCI_IOV
+       ixgbe_restore_vf_multicasts(adapter);
+#endif
+       return addr_count;
+}
+
+
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) {
+                       hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+                                               adapter->mac_table[i].queue,
+                                               IXGBE_RAH_AV);
+               } else {
+                       hw->mac.ops.clear_rar(hw, i);
+               }
+       }
+}
+
+void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+                       if (adapter->mac_table[i].state &
+                                       IXGBE_MAC_STATE_IN_USE) {
+                               hw->mac.ops.set_rar(hw, i,
+                                               adapter->mac_table[i].addr,
+                                               adapter->mac_table[i].queue,
+                                               IXGBE_RAH_AV);
+                       } else {
+                               hw->mac.ops.clear_rar(hw, i);
+                       }
+                       adapter->mac_table[i].state &=
+                               ~(IXGBE_MAC_STATE_MODIFIED);
+               }
+       }
+}
+
+int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i, count = 0;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state == 0)
+                       count++;
+       }
+       return count;
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) {
+                       continue;
+               }
+               adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+                                               IXGBE_MAC_STATE_IN_USE);
+               memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+               adapter->mac_table[i].queue = queue;
+               ixgbe_sync_mac_table(adapter);
+               return i;
+       }
+       return -ENOMEM;
+}
+
+void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+               adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+               memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+               adapter->mac_table[i].queue = 0;
+       }
+       ixgbe_sync_mac_table(adapter);
+}
+
+void ixgbe_del_mac_filter_by_index(struct ixgbe_adapter *adapter, int index)
+{
+       adapter->mac_table[index].state |= IXGBE_MAC_STATE_MODIFIED;
+       adapter->mac_table[index].state &= ~IXGBE_MAC_STATE_IN_USE;
+       memset(adapter->mac_table[index].addr, 0, ETH_ALEN);
+       adapter->mac_table[index].queue = 0;
+       ixgbe_sync_mac_table(adapter);
 }
 
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8* addr, u16 queue)
+{
+       /* search table for addr, if found, set to 0 and sync */
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (!compare_ether_addr(addr, adapter->mac_table[i].addr) &&
+                   adapter->mac_table[i].queue == queue) {
+                       adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+                       adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+                       memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+                       adapter->mac_table[i].queue = 0;
+                       ixgbe_sync_mac_table(adapter);
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
+#ifdef HAVE_SET_RX_MODE
 /**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
@@ -3327,39 +4339,36 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  *                0 on no addresses written
  *                X on writing X addresses to the RAR table
  **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
+                            struct net_device *netdev, int vfn)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       unsigned int vfn = adapter->num_vfs;
-       unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
        int count = 0;
 
        /* return ENOMEM indicating insufficient memory for addresses */
-       if (netdev_uc_count(netdev) > rar_entries)
+       if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
                return -ENOMEM;
 
-       if (!netdev_uc_empty(netdev) && rar_entries) {
+       if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
                struct netdev_hw_addr *ha;
-               /* return error if we do not support writing to RAR table */
-               if (!hw->mac.ops.set_rar)
-                       return -ENOMEM;
-
+#else
+               struct dev_mc_list *ha;
+#endif
                netdev_for_each_uc_addr(ha, netdev) {
-                       if (!rar_entries)
-                               break;
-                       hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-                                           vfn, IXGBE_RAH_AV);
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+                       ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+                       ixgbe_add_mac_filter(adapter, ha->addr, vfn);
+#else
+                       ixgbe_del_mac_filter(adapter, ha->da_addr, vfn);
+                       ixgbe_add_mac_filter(adapter, ha->da_addr, vfn);
+#endif
                        count++;
                }
        }
-       /* write the addresses in reverse order to avoid write combining */
-       for (; rar_entries > 0 ; rar_entries--)
-               hw->mac.ops.clear_rar(hw, rar_entries);
-
        return count;
 }
 
+#endif
 /**
  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
@@ -3374,11 +4383,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+       u32 vlnctrl;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
-
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        /* set all bits that we expect to always be set */
        fctrl |= IXGBE_FCTRL_BAM;
@@ -3387,13 +4397,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       vlnctrl  &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
 
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-               vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
-               /* don't hardware filter vlans in promisc mode */
-               ixgbe_vlan_filter_disable(adapter);
+               vmolr |= IXGBE_VMOLR_MPE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
@@ -3404,50 +4413,50 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                         * then we should just turn on promiscuous mode so
                         * that we can at least receive multicast traffic
                         */
-                       hw->mac.ops.update_mc_addr_list(hw, netdev);
-                       vmolr |= IXGBE_VMOLR_ROMPE;
+                       count = ixgbe_write_mc_addr_list(netdev);
+                       if (count < 0) {
+                               fctrl |= IXGBE_FCTRL_MPE;
+                               vmolr |= IXGBE_VMOLR_MPE;
+                       } else if (count) {
+                               vmolr |= IXGBE_VMOLR_ROMPE;
+                       }
                }
-               ixgbe_vlan_filter_enable(adapter);
+#ifdef NETIF_F_HW_VLAN_TX
+               /* enable hardware vlan filtering */
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
+#endif
                hw->addr_ctrl.user_set_promisc = false;
+#ifdef HAVE_SET_RX_MODE
                /*
                 * Write addresses to available RAR registers, if there is not
                 * sufficient space to store all the addresses then enable
                 * unicast promiscuous mode
                 */
-               count = ixgbe_write_uc_addr_list(netdev);
+               count = ixgbe_write_uc_addr_list(adapter, netdev, VMDQ_P(0));
                if (count < 0) {
                        fctrl |= IXGBE_FCTRL_UPE;
                        vmolr |= IXGBE_VMOLR_ROPE;
                }
+#endif
        }
 
-       if (adapter->num_vfs) {
-               ixgbe_restore_vf_multicasts(adapter);
-               vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
                         ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
                           IXGBE_VMOLR_ROPE);
-               IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+               IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
        }
 
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
-       if (netdev->features & NETIF_F_HW_VLAN_RX)
-               ixgbe_vlan_strip_enable(adapter);
-       else
-               ixgbe_vlan_strip_disable(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
-       int q_idx;
        struct ixgbe_q_vector *q_vector;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
+       int q_idx;
 
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_enable(&q_vector->napi);
        }
@@ -3455,21 +4464,50 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
-       int q_idx;
        struct ixgbe_q_vector *q_vector;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
+       int q_idx;
 
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
        }
 }
 
-#ifdef CONFIG_IXGBE_DCB
+#ifdef HAVE_DCBNL_IEEE
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+{
+       __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+       __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+       int i;
+
+       /* naively give each TC a bwg to map onto CEE hardware */
+       __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+       /* Map TSA onto CEE prio type */
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       prio_type[i] = 2;
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       prio_type[i] = 0;
+                       break;
+               default:
+                       /* Hardware only supports priority strict or
+                        * ETS transmission selection algorithms if
+                        * we receive some other value from dcbnl
+                        * throw an error
+                        */
+                       return -EINVAL;
+               }
+       }
+
+       ixgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame);
+       return ixgbe_dcb_hw_config(hw, refill, max,
+                                  bwg_id, prio_type, ets->prio_tc);
+}
+
+#endif
 /*
  * ixgbe_configure_dcb - Configure DCB hardware
  * @adapter: ixgbe adapter struct
@@ -3481,63 +4519,195 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       struct net_device *dev = adapter->netdev;
+
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
                if (hw->mac.type == ixgbe_mac_82598EB)
-                       netif_set_gso_max_size(adapter->netdev, 65536);
+                       netif_set_gso_max_size(dev, 65536);
                return;
        }
 
        if (hw->mac.type == ixgbe_mac_82598EB)
-               netif_set_gso_max_size(adapter->netdev, 32768);
+               netif_set_gso_max_size(dev, 32768);
 
+#ifdef IXGBE_FCOE
+       if (dev->features & NETIF_F_FCOE_MTU)
+               max_frame = max_t(int, max_frame,
+                                 IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* IXGBE_FCOE */
 
-       /* Enable VLAN tag insert/strip */
-       adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
+#ifdef HAVE_DCBNL_IEEE
+       if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
+               if (adapter->ixgbe_ieee_ets)
+                       ixgbe_dcb_hw_ets(&adapter->hw,
+                                        adapter->ixgbe_ieee_ets,
+                                        max_frame);
 
-       hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+               if (adapter->ixgbe_ieee_pfc && adapter->ixgbe_ieee_ets) {
+                       struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
+                       u8 *tc = adapter->ixgbe_ieee_ets->prio_tc;
 
-#ifdef IXGBE_FCOE
-       if (adapter->netdev->features & NETIF_F_FCOE_MTU)
-               max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
-#endif
-
-       /* reconfigure the hardware */
-       if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-               ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
-                                               DCB_TX_CONFIG);
-               ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
-                                               DCB_RX_CONFIG);
-               ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
-       } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
-               ixgbe_dcb_hw_ets(&adapter->hw,
-                                adapter->ixgbe_ieee_ets,
-                                max_frame);
-               ixgbe_dcb_hw_pfc_config(&adapter->hw,
-                                       adapter->ixgbe_ieee_pfc->pfc_en,
-                                       adapter->ixgbe_ieee_ets->prio_tc);
+                       ixgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc);
+               }
+       } else
+#endif /* HAVE_DCBNL_IEEE */
+       {
+               ixgbe_dcb_calculate_tc_credits_cee(hw,
+                                                  &adapter->dcb_cfg,
+                                                  max_frame,
+                                                  IXGBE_DCB_TX_CONFIG);
+               ixgbe_dcb_calculate_tc_credits_cee(hw,
+                                                  &adapter->dcb_cfg,
+                                                  max_frame,
+                                                  IXGBE_DCB_RX_CONFIG);
+               ixgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg);
        }
 
        /* Enable RSS Hash per TC */
        if (hw->mac.type != ixgbe_mac_82598EB) {
-               int i;
-               u32 reg = 0;
+               u32 msb = 0;
+               u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
+
+               while (rss_i) {
+                       msb++;
+                       rss_i >>= 1;
+               }
+
+               /* write msb to all 8 TCs in one write */
+               IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
+       }
+}
+
+#ifndef IXGBE_NO_LLI
+static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter)
+{
+       u16 port;
 
-               for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-                       u8 msb = 0;
-                       u8 cnt = adapter->netdev->tc_to_txq[i].count;
+       if (adapter->lli_etype) {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+                               (IXGBE_IMIR_LLI_EN_82599 |
+                                IXGBE_IMIR_SIZE_BP_82599 |
+                                IXGBE_IMIR_CTRL_BP_82599));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0),
+                               (adapter->lli_etype | IXGBE_ETQF_FILTER_EN));
+       }
 
-                       while (cnt >>= 1)
-                               msb++;
+       if (adapter->lli_port) {
+               port = swab16(adapter->lli_port);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+                               (IXGBE_IMIR_LLI_EN_82599 |
+                                IXGBE_IMIR_SIZE_BP_82599 |
+                                IXGBE_IMIR_CTRL_BP_82599));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
+                               (IXGBE_FTQF_POOL_MASK_EN |
+                                (IXGBE_FTQF_PRIORITY_MASK <<
+                                 IXGBE_FTQF_PRIORITY_SHIFT) |
+                                (IXGBE_FTQF_DEST_PORT_MASK <<
+                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16));
+       }
 
-                       reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+       if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82599EB:
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+                                       (IXGBE_IMIR_LLI_EN_82599 |
+                                        IXGBE_IMIR_SIZE_BP_82599 |
+                                        IXGBE_IMIR_CTRL_PSH_82599 |
+                                        IXGBE_IMIR_CTRL_SYN_82599 |
+                                        IXGBE_IMIR_CTRL_URG_82599 |
+                                        IXGBE_IMIR_CTRL_ACK_82599 |
+                                        IXGBE_IMIR_CTRL_RST_82599 |
+                                        IXGBE_IMIR_CTRL_FIN_82599));
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH,
+                                       0xfc000000);
+                       break;
+               case ixgbe_mac_X540:
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+                                       (IXGBE_IMIR_LLI_EN_82599 |
+                                        IXGBE_IMIR_SIZE_BP_82599 |
+                                        IXGBE_IMIR_CTRL_PSH_82599));
+                       break;
+               default:
+                       break;
                }
-               IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
+                               (IXGBE_FTQF_POOL_MASK_EN |
+                                (IXGBE_FTQF_PRIORITY_MASK <<
+                                 IXGBE_FTQF_PRIORITY_SHIFT) |
+                                (IXGBE_FTQF_5TUPLE_MASK_MASK <<
+                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
+
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100);
+       }
+
+       if (adapter->lli_size) {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
+                               (IXGBE_IMIR_LLI_EN_82599 |
+                                IXGBE_IMIR_CTRL_BP_82599));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH,
+                               adapter->lli_size);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
+                               (IXGBE_FTQF_POOL_MASK_EN |
+                                (IXGBE_FTQF_PRIORITY_MASK <<
+                                 IXGBE_FTQF_PRIORITY_SHIFT) |
+                                (IXGBE_FTQF_5TUPLE_MASK_MASK <<
+                                 IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
+       }
+
+       if (adapter->lli_vlan_pri) {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP,
+                               (IXGBE_IMIRVP_PRIORITY_EN |
+                                adapter->lli_vlan_pri));
+       }
+}
+
+static void ixgbe_configure_lli(struct ixgbe_adapter *adapter)
+{
+       u16 port;
+
+       /* lli should only be enabled with MSI-X and MSI */
+       if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) &&
+           !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               return;
+
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+               ixgbe_configure_lli_82599(adapter);
+               return;
+       }
+
+       if (adapter->lli_port) {
+               /* use filter 0 for port */
+               port = swab16(adapter->lli_port);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0),
+                               (port | IXGBE_IMIR_PORT_IM_EN));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0),
+                               (IXGBE_IMIREXT_SIZE_BP |
+                                IXGBE_IMIREXT_CTRL_BP));
+       }
+
+       if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
+               /* use filter 1 for push flag */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1),
+                               (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1),
+                               (IXGBE_IMIREXT_SIZE_BP |
+                                IXGBE_IMIREXT_CTRL_PSH));
+       }
+
+       if (adapter->lli_size) {
+               /* use filter 2 for size */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2),
+                               (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2),
+                               (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP));
        }
 }
-#endif
 
+#endif /* IXGBE_NO_LLI */
 /* Additional bittime to account for IXGBE framing */
 #define IXGBE_ETH_FRAMING 20
 
@@ -3559,18 +4729,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 
 #ifdef IXGBE_FCOE
        /* FCoE traffic class uses FCOE jumbo frames */
-       if (dev->features & NETIF_F_FCOE_MTU) {
-               int fcoe_pb = 0;
-
-#ifdef CONFIG_IXGBE_DCB
-               fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+       if ((dev->features & NETIF_F_FCOE_MTU) &&
+           (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+           (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+               tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
 #endif
-               if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
-                       tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-       }
-#endif
-
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -3597,7 +4761,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
         */
        if (marker < 0) {
                e_warn(drv, "Packet Buffer(%i) can not provide enough"
-                           "headroom to support flow control."
+                           "headroom to support flow control."
                            "Decrease MTU or number of traffic classes\n", pb);
                marker = tc + 1;
        }
@@ -3611,7 +4775,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  * @adapter: board private structure to calculate for
  * @pb - packet buffer to calculate
  */
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *dev = adapter->netdev;
@@ -3621,6 +4785,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
        /* Calculate max LAN frame size */
        tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
+#ifdef IXGBE_FCOE
+       /* FCoE traffic class uses FCOE jumbo frames */
+       if ((dev->features & NETIF_F_FCOE_MTU) &&
+           (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+           (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+               tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -3647,15 +4819,18 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
        if (!num_tc)
                num_tc = 1;
 
-       hw->fc.low_water = ixgbe_lpbthresh(adapter);
 
        for (i = 0; i < num_tc; i++) {
                hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+               hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
 
                /* Low water marks must not be larger than high water marks */
-               if (hw->fc.low_water > hw->fc.high_water[i])
-                       hw->fc.low_water = 0;
+               if (hw->fc.low_water[i] > hw->fc.high_water[i])
+                       hw->fc.low_water[i] = 0;
        }
+
+       for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++)
+               hw->fc.high_water[i] = 0;
 }
 
 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -3670,7 +4845,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
        else
                hdrm = 0;
 
-       hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+       hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
        ixgbe_pbthresh_setup(adapter);
 }
 
@@ -3703,27 +4878,21 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
 
        ixgbe_configure_pb(adapter);
-#ifdef CONFIG_IXGBE_DCB
        ixgbe_configure_dcb(adapter);
-#endif
+
+       /*
+        * We must restore virtualization before VLANs or else
+        * the VLVF registers will not be populated
+        */
+       ixgbe_configure_virtualization(adapter);
 
        ixgbe_set_rx_mode(adapter->netdev);
+#ifdef NETIF_F_HW_VLAN_TX
        ixgbe_restore_vlan(adapter);
+#endif
 
-#ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               hw->mac.ops.disable_rx_buff(hw);
-               break;
-       default:
-               break;
-       }
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+               hw->mac.ops.disable_sec_rx_path(hw);
 
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                ixgbe_init_fdir_signature_82599(&adapter->hw,
@@ -3734,22 +4903,19 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                ixgbe_fdir_filter_restore(adapter);
        }
 
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               hw->mac.ops.enable_rx_buff(hw);
-               break;
-       default:
-               break;
-       }
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+               hw->mac.ops.enable_sec_rx_path(hw);
 
-       ixgbe_configure_virtualization(adapter);
+#ifdef IXGBE_FCOE
+       /* configure FCoE L2 filters, redirection table, and Rx control */
+       ixgbe_configure_fcoe(adapter);
 
+#endif /* IXGBE_FCOE */
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
 }
 
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+static bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 {
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
@@ -3776,7 +4942,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 {
        /*
-        * We are assuming the worst case scenario here, and that
+        * We are assuming the worst case scenario here, and that
         * is that an SFP was inserted/removed after the reset
         * but before SFP detection was enabled.  As such the best
         * solution is to just start searching as soon as we start
@@ -3818,6 +4984,47 @@ link_cfg_out:
        return ret;
 }
 
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       for (i = 0; i < adapter->num_vfs; i++) {
+               adapter->vfinfo[i].last_vfstats.gprc =
+                       IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+               adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+                       adapter->vfinfo[i].vfstats.gprc;
+               adapter->vfinfo[i].vfstats.gprc = 0;
+               adapter->vfinfo[i].last_vfstats.gptc =
+                       IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+               adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+                       adapter->vfinfo[i].vfstats.gptc;
+               adapter->vfinfo[i].vfstats.gptc = 0;
+               adapter->vfinfo[i].last_vfstats.gorc =
+                       IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+               adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+                       adapter->vfinfo[i].vfstats.gorc;
+               adapter->vfinfo[i].vfstats.gorc = 0;
+               adapter->vfinfo[i].last_vfstats.gotc =
+                       IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+               adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+                       adapter->vfinfo[i].vfstats.gotc;
+               adapter->vfinfo[i].vfstats.gotc = 0;
+               adapter->vfinfo[i].last_vfstats.mprc =
+                       IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+               adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+                       adapter->vfinfo[i].vfstats.mprc;
+               adapter->vfinfo[i].vfstats.mprc = 0;
+       }
+}
+
 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3853,22 +5060,29 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-               gpie |= IXGBE_GPIE_VTMODE_64;
+
+               switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+               case IXGBE_82599_VMDQ_8Q_MASK:
+                       gpie |= IXGBE_GPIE_VTMODE_16;
+                       break;
+               case IXGBE_82599_VMDQ_4Q_MASK:
+                       gpie |= IXGBE_GPIE_VTMODE_32;
+                       break;
+               default:
+                       gpie |= IXGBE_GPIE_VTMODE_64;
+                       break;
+               }
        }
 
        /* Enable Thermal over heat sensor interrupt */
-       if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+       if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
                        gpie |= IXGBE_SDP0_GPIEN;
                        break;
-               case ixgbe_mac_X540:
-                       gpie |= IXGBE_EIMS_TS;
-                       break;
                default:
                        break;
                }
-       }
 
        /* Enable fan failure interrupt */
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
@@ -3896,15 +5110,17 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
        else
                ixgbe_configure_msi_and_legacy(adapter);
 
-       /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
-       if (hw->mac.ops.enable_tx_laser &&
-           ((hw->phy.multispeed_fiber) ||
-            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-             (hw->mac.type == ixgbe_mac_82599EB))))
-               hw->mac.ops.enable_tx_laser(hw);
+       /* enable the optics */
+       if ((hw->phy.multispeed_fiber) ||
+           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+            (hw->mac.type == ixgbe_mac_82599EB)))
+               ixgbe_enable_tx_laser(hw);
 
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
+#ifndef IXGBE_NO_LLI
+       ixgbe_configure_lli(adapter);
+#endif
 
        if (ixgbe_is_sfp(hw)) {
                ixgbe_sfp_link_config(adapter);
@@ -3937,6 +5153,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->service_timer, jiffies);
 
+       ixgbe_clear_vf_stats_counters(adapter);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -3975,6 +5192,7 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
        int err;
 
        /* lock SFP init bit to prevent race conditions with the watchdog */
@@ -3998,9 +5216,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        case IXGBE_ERR_EEPROM_VERSION:
                /* We are running on a pre-production device, log a warning */
                e_dev_warn("This device is a pre-production adapter/LOM. "
-                          "Please be aware there may be issues associated with "
-                          "your hardware.  If you are experiencing problems "
-                          "please contact your Intel or hardware "
+                          "Please be aware there may be issues associated "
+                          "with your hardware.  If you are experiencing "
+                          "problems please contact your Intel or hardware "
                           "representative who provided you with this "
                           "hardware.\n");
                break;
@@ -4010,16 +5228,49 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 
-       /* reprogram the RAR[0] in case user changed it. */
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-                           IXGBE_RAH_AV);
+       ixgbe_flush_sw_mac_table(adapter);
+       memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
+              netdev->addr_len);
+       adapter->mac_table[0].queue = VMDQ_P(0);
+       adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+                                       IXGBE_MAC_STATE_IN_USE);
+       hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+                               adapter->mac_table[0].queue,
+                               IXGBE_RAH_AV);
+
+       /* update SAN MAC vmdq pool selection */
+       if (hw->mac.san_mac_rar_index)
+               hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+}
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+/**
+ * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
+ * @rx_ring: ring to setup
+ *
+ * On many IA platforms the L1 cache has a critical stride of 4K, this
+ * results in each receive buffer starting in the same cache set.  To help
+ * reduce the pressure on this cache set we can interleave the offsets so
+ * that only every other buffer will be in the same cache set.
+ **/
+static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
+{
+       struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
+       u16 i;
+
+       for (i = 0; i < rx_ring->count; i += 2) {
+               rx_buffer[0].page_offset = 0;
+               rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
+               rx_buffer = &rx_buffer[2];
+       }
 }
 
+#endif
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
        unsigned long size;
@@ -4031,51 +5282,61 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               struct ixgbe_rx_buffer *rx_buffer_info;
-
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
-               if (rx_buffer_info->dma) {
-                       dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_buffer_info->dma = 0;
-               }
-               if (rx_buffer_info->skb) {
-                       struct sk_buff *skb = rx_buffer_info->skb;
-                       rx_buffer_info->skb = NULL;
+               struct ixgbe_rx_buffer *rx_buffer;
+
+               rx_buffer = &rx_ring->rx_buffer_info[i];
+               if (rx_buffer->skb) {
+                       struct sk_buff *skb = rx_buffer->skb;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+                       if (IXGBE_CB(skb)->page_released) {
+                               dma_unmap_page(dev,
+                                              IXGBE_CB(skb)->dma,
+                                              ixgbe_rx_bufsz(rx_ring),
+                                              DMA_FROM_DEVICE);
+                               IXGBE_CB(skb)->page_released = false;
+                       }
+#else
                        /* We need to clean up RSC frag lists */
                        skb = ixgbe_merge_active_tail(skb);
-                       ixgbe_close_active_frag_list(skb);
-                       if (IXGBE_CB(skb)->delay_unmap) {
+                       if (ixgbe_close_active_frag_list(skb))
                                dma_unmap_single(dev,
                                                 IXGBE_CB(skb)->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
-                               IXGBE_CB(skb)->dma = 0;
-                               IXGBE_CB(skb)->delay_unmap = false;
-                       }
+                       IXGBE_CB(skb)->dma = 0;
+#endif
                        dev_kfree_skb(skb);
                }
-               if (!rx_buffer_info->page)
-                       continue;
-               if (rx_buffer_info->page_dma) {
-                       dma_unmap_page(dev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2, DMA_FROM_DEVICE);
-                       rx_buffer_info->page_dma = 0;
-               }
-               put_page(rx_buffer_info->page);
-               rx_buffer_info->page = NULL;
-               rx_buffer_info->page_offset = 0;
+               rx_buffer->skb = NULL;
+               if (rx_buffer->dma)
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+                       dma_unmap_page(dev, rx_buffer->dma,
+                                      ixgbe_rx_pg_size(rx_ring),
+                                      DMA_FROM_DEVICE);
+#else
+                       dma_unmap_single(dev,
+                                        rx_buffer->dma,
+                                        rx_ring->rx_buf_len,
+                                        DMA_FROM_DEVICE);
+#endif
+               rx_buffer->dma = 0;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+               if (rx_buffer->page)
+                       __free_pages(rx_buffer->page,
+                                    ixgbe_rx_pg_order(rx_ring));
+               rx_buffer->page = NULL;
+#endif
        }
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       ixgbe_init_rx_page_offset(rx_ring);
+
+#endif
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
-
-       rx_ring->next_to_clean = 0;
-       rx_ring->next_to_use = 0;
 }
 
 /**
@@ -4101,1033 +5362,138 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);
 
-       /* Zero out the descriptor ring */
-       memset(tx_ring->desc, 0, tx_ring->size);
-
-       tx_ring->next_to_use = 0;
-       tx_ring->next_to_clean = 0;
-}
-
-/**
- * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter->rx_ring[i]);
-}
-
-/**
- * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter->tx_ring[i]);
-}
-
-static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
-{
-       struct hlist_node *node, *node2;
-       struct ixgbe_fdir_filter *filter;
-
-       spin_lock(&adapter->fdir_perfect_lock);
-
-       hlist_for_each_entry_safe(filter, node, node2,
-                                 &adapter->fdir_filter_list, fdir_node) {
-               hlist_del(&filter->fdir_node);
-               kfree(filter);
-       }
-       adapter->fdir_filter_count = 0;
-
-       spin_unlock(&adapter->fdir_perfect_lock);
-}
-
-void ixgbe_down(struct ixgbe_adapter *adapter)
-{
-       struct net_device *netdev = adapter->netdev;
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 rxctrl;
-       int i;
-
-       /* signal that we are down to the interrupt handler */
-       set_bit(__IXGBE_DOWN, &adapter->state);
-
-       /* disable receives */
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-
-       /* disable all enabled rx queues */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               /* this call also flushes the previous write */
-               ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
-
-       usleep_range(10000, 20000);
-
-       netif_tx_stop_all_queues(netdev);
-
-       /* call carrier off first to avoid false dev_watchdog timeouts */
-       netif_carrier_off(netdev);
-       netif_tx_disable(netdev);
-
-       ixgbe_irq_disable(adapter);
-
-       ixgbe_napi_disable_all(adapter);
-
-       adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
-                            IXGBE_FLAG2_RESET_REQUESTED);
-       adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-
-       del_timer_sync(&adapter->service_timer);
-
-       if (adapter->num_vfs) {
-               /* Clear EITR Select mapping */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
-
-               /* Mark all the VFs as inactive */
-               for (i = 0 ; i < adapter->num_vfs; i++)
-                       adapter->vfinfo[i].clear_to_send = 0;
-
-               /* ping all the active vfs to let them know we are going down */
-               ixgbe_ping_all_vfs(adapter);
-
-               /* Disable all VFTE/VFRE TX/RX */
-               ixgbe_disable_tx_rx(adapter);
-       }
-
-       /* disable transmits in the hardware now that interrupts are off */
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
-       }
-
-       /* Disable the Tx DMA engine on 82599 and X540 */
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
-                               (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
-                                ~IXGBE_DMATXCTL_TE));
-               break;
-       default:
-               break;
-       }
-
-       if (!pci_channel_offline(adapter->pdev))
-               ixgbe_reset(adapter);
-
-       /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-       if (hw->mac.ops.disable_tx_laser &&
-           ((hw->phy.multispeed_fiber) ||
-            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-             (hw->mac.type == ixgbe_mac_82599EB))))
-               hw->mac.ops.disable_tx_laser(hw);
-
-       ixgbe_clean_all_tx_rings(adapter);
-       ixgbe_clean_all_rx_rings(adapter);
-
-#ifdef CONFIG_IXGBE_DCA
-       /* since we reset the hardware DCA settings were cleared */
-       ixgbe_setup_dca(adapter);
-#endif
-}
-
-/**
- * ixgbe_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- **/
-static void ixgbe_tx_timeout(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       /* Do the reset outside of interrupt context */
-       ixgbe_tx_timeout_reset(adapter);
-}
-
-/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
- * @adapter: board private structure to initialize
- *
- * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
- *
- **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               f->mask = 0xF;
-               adapter->num_rx_queues = f->indices;
-               adapter->num_tx_queues = f->indices;
-               ret = true;
-       }
-
-       return ret;
-}
-
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session.  This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
-       f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
-       f_fdir->mask = 0;
-
-       /*
-        * Use RSS in addition to Flow Director to ensure the best
-        * distribution of flows across cores, even when an FDIR flow
-        * isn't matched.
-        */
-       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-               adapter->num_tx_queues = f_fdir->indices;
-               adapter->num_rx_queues = f_fdir->indices;
-               ret = true;
-       } else {
-               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-       }
-       return ret;
-}
-
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
- * @adapter: board private structure to initialize
- *
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
- **/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-
-       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-               return false;
-
-       f->indices = min_t(int, num_online_cpus(), f->indices);
-
-       adapter->num_rx_queues = 1;
-       adapter->num_tx_queues = 1;
-
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               e_info(probe, "FCoE enabled with RSS\n");
-               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                       ixgbe_set_fdir_queues(adapter);
-               else
-                       ixgbe_set_rss_queues(adapter);
-       }
-
-       /* adding FCoE rx rings to the end */
-       f->mask = adapter->num_rx_queues;
-       adapter->num_rx_queues += f->indices;
-       adapter->num_tx_queues += f->indices;
-
-       return true;
-}
-#endif /* IXGBE_FCOE */
-
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
-       int per_tc_q, q, i, offset = 0;
-       struct net_device *dev = adapter->netdev;
-       int tcs = netdev_get_num_tc(dev);
-
-       if (!tcs)
-               return false;
-
-       /* Map queue offset and counts onto allocated tx queues */
-       per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
-       q = min_t(int, num_online_cpus(), per_tc_q);
-
-       for (i = 0; i < tcs; i++) {
-               netdev_set_tc_queue(dev, i, q, offset);
-               offset += q;
-       }
-
-       adapter->num_tx_queues = q * tcs;
-       adapter->num_rx_queues = q * tcs;
-
-#ifdef IXGBE_FCOE
-       /* FCoE enabled queues require special configuration indexed
-        * by feature specific indices and mask. Here we map FCoE
-        * indices onto the DCB queue pairs allowing FCoE to own
-        * configuration later.
-        */
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-               u8 prio_tc[MAX_USER_PRIORITY] = {0};
-               int tc;
-               struct ixgbe_ring_feature *f =
-                                       &adapter->ring_feature[RING_F_FCOE];
-
-               ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
-               tc = prio_tc[adapter->fcoe.up];
-               f->indices = dev->tc_to_txq[tc].count;
-               f->mask = dev->tc_to_txq[tc].offset;
-       }
-#endif
-
-       return true;
-}
-#endif
-
-/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
- * @adapter: board private structure to initialize
- *
- * IOV doesn't actually use anything, so just NAK the
- * request for now and let the other queue routines
- * figure out what to do.
- */
-static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
-{
-       return false;
-}
-
-/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
- * @adapter: board private structure to initialize
- *
- * This is the top level queue allocation routine.  The order here is very
- * important, starting with the "most" number of features turned on at once,
- * and ending with the smallest set of features.  This way large combinations
- * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
- *
- **/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
-       /* Start with base case */
-       adapter->num_rx_queues = 1;
-       adapter->num_tx_queues = 1;
-       adapter->num_rx_pools = adapter->num_rx_queues;
-       adapter->num_rx_queues_per_pool = 1;
-
-       if (ixgbe_set_sriov_queues(adapter))
-               goto done;
-
-#ifdef CONFIG_IXGBE_DCB
-       if (ixgbe_set_dcb_queues(adapter))
-               goto done;
-
-#endif
-#ifdef IXGBE_FCOE
-       if (ixgbe_set_fcoe_queues(adapter))
-               goto done;
-
-#endif /* IXGBE_FCOE */
-       if (ixgbe_set_fdir_queues(adapter))
-               goto done;
-
-       if (ixgbe_set_rss_queues(adapter))
-               goto done;
-
-       /* fallback to base case */
-       adapter->num_rx_queues = 1;
-       adapter->num_tx_queues = 1;
-
-done:
-       if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
-           (adapter->netdev->reg_state == NETREG_UNREGISTERING))
-               return 0;
-
-       /* Notify the stack of the (possibly) reduced queue counts. */
-       netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-       return netif_set_real_num_rx_queues(adapter->netdev,
-                                           adapter->num_rx_queues);
-}
-
-static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
-                                      int vectors)
-{
-       int err, vector_threshold;
-
-       /* We'll want at least 2 (vector_threshold):
-        * 1) TxQ[0] + RxQ[0] handler
-        * 2) Other (Link Status Change, etc.)
-        */
-       vector_threshold = MIN_MSIX_COUNT;
-
-       /*
-        * The more we get, the more we will assign to Tx/Rx Cleanup
-        * for the separate queues...where Rx Cleanup >= Tx Cleanup.
-        * Right now, we simply care about how many we'll get; we'll
-        * set them up later while requesting irq's.
-        */
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                     vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
-                       break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
-               else /* err == number of vectors we should try again with */
-                       vectors = err;
-       }
-
-       if (vectors < vector_threshold) {
-               /* Can't allocate enough MSI-X interrupts?  Oh well.
-                * This just means we'll go with either a single MSI
-                * vector or fall back to legacy interrupts.
-                */
-               netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
-                            "Unable to allocate MSI-X interrupts\n");
-               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-               kfree(adapter->msix_entries);
-               adapter->msix_entries = NULL;
-       } else {
-               adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
-               /*
-                * Adjust for only the vectors we'll use, which is minimum
-                * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
-                * vectors we were allocated.
-                */
-               adapter->num_msix_vectors = min(vectors,
-                                  adapter->max_msix_q_vectors + NON_Q_VECTORS);
-       }
-}
-
-/**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for RSS to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
-{
-       int i;
-
-       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-               return false;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               adapter->rx_ring[i]->reg_idx = i;
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               adapter->tx_ring[i]->reg_idx = i;
-
-       return true;
-}
-
-#ifdef CONFIG_IXGBE_DCB
-
-/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
-static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
-                                   unsigned int *tx, unsigned int *rx)
-{
-       struct net_device *dev = adapter->netdev;
-       struct ixgbe_hw *hw = &adapter->hw;
-       u8 num_tcs = netdev_get_num_tc(dev);
-
-       *tx = 0;
-       *rx = 0;
-
-       switch (hw->mac.type) {
-       case ixgbe_mac_82598EB:
-               *tx = tc << 2;
-               *rx = tc << 3;
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               if (num_tcs > 4) {
-                       if (tc < 3) {
-                               *tx = tc << 5;
-                               *rx = tc << 4;
-                       } else if (tc <  5) {
-                               *tx = ((tc + 2) << 4);
-                               *rx = tc << 4;
-                       } else if (tc < num_tcs) {
-                               *tx = ((tc + 8) << 3);
-                               *rx = tc << 4;
-                       }
-               } else {
-                       *rx =  tc << 5;
-                       switch (tc) {
-                       case 0:
-                               *tx =  0;
-                               break;
-                       case 1:
-                               *tx = 64;
-                               break;
-                       case 2:
-                               *tx = 96;
-                               break;
-                       case 3:
-                               *tx = 112;
-                               break;
-                       default:
-                               break;
-                       }
-               }
-               break;
-       default:
-               break;
-       }
-}
-
-/**
- * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for DCB to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
-{
-       struct net_device *dev = adapter->netdev;
-       int i, j, k;
-       u8 num_tcs = netdev_get_num_tc(dev);
-
-       if (!num_tcs)
-               return false;
-
-       for (i = 0, k = 0; i < num_tcs; i++) {
-               unsigned int tx_s, rx_s;
-               u16 count = dev->tc_to_txq[i].count;
-
-               ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
-               for (j = 0; j < count; j++, k++) {
-                       adapter->tx_ring[k]->reg_idx = tx_s + j;
-                       adapter->rx_ring[k]->reg_idx = rx_s + j;
-                       adapter->tx_ring[k]->dcb_tc = i;
-                       adapter->rx_ring[k]->dcb_tc = i;
-               }
-       }
-
-       return true;
-}
-#endif
-
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
-       int i;
-       bool ret = false;
-
-       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       }
-
-       return ret;
-}
-
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
- *
- */
-static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-       int i;
-       u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
-
-       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-               return false;
-
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                       ixgbe_cache_ring_fdir(adapter);
-               else
-                       ixgbe_cache_ring_rss(adapter);
-
-               fcoe_rx_i = f->mask;
-               fcoe_tx_i = f->mask;
-       }
-       for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
-       }
-       return true;
-}
-
-#endif /* IXGBE_FCOE */
-/**
- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
- * @adapter: board private structure to initialize
- *
- * SR-IOV doesn't use any descriptor rings but changes the default if
- * no other mapping is used.
- *
- */
-static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
-{
-       adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
-       adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
-       if (adapter->num_vfs)
-               return true;
-       else
-               return false;
-}
-
-/**
- * ixgbe_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- *
- * Note, the order the various feature calls is important.  It must start with
- * the "most" features enabled at the same time, then trickle down to the
- * least amount of features turned on at once.
- **/
-static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
-{
-       /* start with default case */
-       adapter->rx_ring[0]->reg_idx = 0;
-       adapter->tx_ring[0]->reg_idx = 0;
-
-       if (ixgbe_cache_ring_sriov(adapter))
-               return;
-
-#ifdef CONFIG_IXGBE_DCB
-       if (ixgbe_cache_ring_dcb(adapter))
-               return;
-#endif
-
-#ifdef IXGBE_FCOE
-       if (ixgbe_cache_ring_fcoe(adapter))
-               return;
-#endif /* IXGBE_FCOE */
-
-       if (ixgbe_cache_ring_fdir(adapter))
-               return;
-
-       if (ixgbe_cache_ring_rss(adapter))
-               return;
-}
-
-/**
- * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
- * @adapter: board private structure to initialize
- *
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
- **/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       int err = 0;
-       int vector, v_budget;
-
-       /*
-        * It's easy to be greedy for MSI-X vectors, but it really
-        * doesn't do us much good if we have a lot more vectors
-        * than CPU's.  So let's be conservative and only ask for
-        * (roughly) the same number of vectors as there are CPU's.
-        * The default is to use pairs of vectors.
-        */
-       v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
-       v_budget = min_t(int, v_budget, num_online_cpus());
-       v_budget += NON_Q_VECTORS;
-
-       /*
-        * At the same time, hardware can only support a maximum of
-        * hw.mac->max_msix_vectors vectors.  With features
-        * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
-        * descriptor queues supported by our device.  Thus, we cap it off in
-        * those rare cases where the cpu count also exceeds our vector limit.
-        */
-       v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
-
-       /* A failure in MSI-X entry allocation isn't fatal, but it does
-        * mean we disable MSI-X capabilities of the adapter. */
-       adapter->msix_entries = kcalloc(v_budget,
-                                       sizeof(struct msix_entry), GFP_KERNEL);
-       if (adapter->msix_entries) {
-               for (vector = 0; vector < v_budget; vector++)
-                       adapter->msix_entries[vector].entry = vector;
-
-               ixgbe_acquire_msix_vectors(adapter, v_budget);
-
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-                       goto out;
-       }
-
-       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-       adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               e_err(probe,
-                     "ATR is not supported while multiple "
-                     "queues are disabled.  Disabling Flow Director\n");
-       }
-       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-       adapter->atr_sample_rate = 0;
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               ixgbe_disable_sriov(adapter);
-
-       err = ixgbe_set_num_queues(adapter);
-       if (err)
-               return err;
-
-       err = pci_enable_msi(adapter->pdev);
-       if (!err) {
-               adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-       } else {
-               netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
-                            "Unable to allocate MSI interrupt, "
-                            "falling back to legacy.  Error: %d\n", err);
-               /* reset err */
-               err = 0;
-       }
-
-out:
-       return err;
-}
-
-static void ixgbe_add_ring(struct ixgbe_ring *ring,
-                          struct ixgbe_ring_container *head)
-{
-       ring->next = head->ring;
-       head->ring = ring;
-       head->count++;
-}
-
-/**
- * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: index of vector in adapter struct
- *
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
- **/
-static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
-                               int txr_count, int txr_idx,
-                               int rxr_count, int rxr_idx)
-{
-       struct ixgbe_q_vector *q_vector;
-       struct ixgbe_ring *ring;
-       int node = -1;
-       int cpu = -1;
-       int ring_count, size;
-
-       ring_count = txr_count + rxr_count;
-       size = sizeof(struct ixgbe_q_vector) +
-              (sizeof(struct ixgbe_ring) * ring_count);
-
-       /* customize cpu for Flow Director mapping */
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               if (cpu_online(v_idx)) {
-                       cpu = v_idx;
-                       node = cpu_to_node(cpu);
-               }
-       }
-
-       /* allocate q_vector and rings */
-       q_vector = kzalloc_node(size, GFP_KERNEL, node);
-       if (!q_vector)
-               q_vector = kzalloc(size, GFP_KERNEL);
-       if (!q_vector)
-               return -ENOMEM;
-
-       /* setup affinity mask and node */
-       if (cpu != -1)
-               cpumask_set_cpu(cpu, &q_vector->affinity_mask);
-       else
-               cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
-       q_vector->numa_node = node;
-
-       /* initialize NAPI */
-       netif_napi_add(adapter->netdev, &q_vector->napi,
-                      ixgbe_poll, 64);
-
-       /* tie q_vector and adapter together */
-       adapter->q_vector[v_idx] = q_vector;
-       q_vector->adapter = adapter;
-       q_vector->v_idx = v_idx;
-
-       /* initialize work limits */
-       q_vector->tx.work_limit = adapter->tx_work_limit;
-
-       /* initialize pointer to rings */
-       ring = q_vector->ring;
-
-       while (txr_count) {
-               /* assign generic ring traits */
-               ring->dev = &adapter->pdev->dev;
-               ring->netdev = adapter->netdev;
-
-               /* configure backlink on ring */
-               ring->q_vector = q_vector;
-
-               /* update q_vector Tx values */
-               ixgbe_add_ring(ring, &q_vector->tx);
-
-               /* apply Tx specific ring traits */
-               ring->count = adapter->tx_ring_count;
-               ring->queue_index = txr_idx;
-
-               /* assign ring to adapter */
-               adapter->tx_ring[txr_idx] = ring;
-
-               /* update count and index */
-               txr_count--;
-               txr_idx++;
-
-               /* push pointer to next ring */
-               ring++;
-       }
-
-       while (rxr_count) {
-               /* assign generic ring traits */
-               ring->dev = &adapter->pdev->dev;
-               ring->netdev = adapter->netdev;
-
-               /* configure backlink on ring */
-               ring->q_vector = q_vector;
-
-               /* update q_vector Rx values */
-               ixgbe_add_ring(ring, &q_vector->rx);
-
-               /*
-                * 82599 errata, UDP frames with a 0 checksum
-                * can be marked as checksum errors.
-                */
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
-
-               /* apply Rx specific ring traits */
-               ring->count = adapter->rx_ring_count;
-               ring->queue_index = rxr_idx;
-
-               /* assign ring to adapter */
-               adapter->rx_ring[rxr_idx] = ring;
-
-               /* update count and index */
-               rxr_count--;
-               rxr_idx++;
-
-               /* push pointer to next ring */
-               ring++;
-       }
-
-       return 0;
+       /* Zero out the descriptor ring */
+       memset(tx_ring->desc, 0, tx_ring->size);
 }
 
 /**
- * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
- *
- * This function frees the memory allocated to the q_vector.  In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
  **/
-static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
-       struct ixgbe_ring *ring;
-
-       ixgbe_for_each_ring(ring, q_vector->tx)
-               adapter->tx_ring[ring->queue_index] = NULL;
-
-       ixgbe_for_each_ring(ring, q_vector->rx)
-               adapter->rx_ring[ring->queue_index] = NULL;
-
-       adapter->q_vector[v_idx] = NULL;
-       netif_napi_del(&q_vector->napi);
+       int i;
 
-       /*
-        * ixgbe_get_stats64() might access the rings on this vector,
-        * we must wait a grace period before freeing it.
-        */
-       kfree_rcu(q_vector, rcu);
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
- * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
- * return -ENOMEM.
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
  **/
-static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 {
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       int rxr_remaining = adapter->num_rx_queues;
-       int txr_remaining = adapter->num_tx_queues;
-       int rxr_idx = 0, txr_idx = 0, v_idx = 0;
-       int err;
+       int i;
 
-       /* only one q_vector if MSI-X is disabled. */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               ixgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
 
-       if (q_vectors >= (rxr_remaining + txr_remaining)) {
-               for (; rxr_remaining; v_idx++, q_vectors--) {
-                       int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-                       err = ixgbe_alloc_q_vector(adapter, v_idx,
-                                                  0, 0, rqpv, rxr_idx);
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+       struct hlist_node *node, *node2;
+       struct ixgbe_fdir_filter *filter;
 
-                       if (err)
-                               goto err_out;
+       spin_lock(&adapter->fdir_perfect_lock);
 
-                       /* update counts and index */
-                       rxr_remaining -= rqpv;
-                       rxr_idx += rqpv;
-               }
+       hlist_for_each_entry_safe(filter, node, node2,
+                                 &adapter->fdir_filter_list, fdir_node) {
+               hlist_del(&filter->fdir_node);
+               kfree(filter);
        }
+       adapter->fdir_filter_count = 0;
 
-       for (; q_vectors; v_idx++, q_vectors--) {
-               int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-               int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
-               err = ixgbe_alloc_q_vector(adapter, v_idx,
-                                          tqpv, txr_idx,
-                                          rqpv, rxr_idx);
+       spin_unlock(&adapter->fdir_perfect_lock);
+}
 
-               if (err)
-                       goto err_out;
+void ixgbe_down(struct ixgbe_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rxctrl;
+       int i;
 
-               /* update counts and index */
-               rxr_remaining -= rqpv;
-               rxr_idx += rqpv;
-               txr_remaining -= tqpv;
-               txr_idx += tqpv;
-       }
+       /* signal that we are down to the interrupt handler */
+       set_bit(__IXGBE_DOWN, &adapter->state);
 
-       return 0;
+       /* disable receives */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-err_out:
-       while (v_idx) {
-               v_idx--;
-               ixgbe_free_q_vector(adapter, v_idx);
-       }
+       /* disable all enabled rx queues */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               /* this call also flushes the previous write */
+               ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
-       return -ENOMEM;
-}
+       usleep_range(10000, 20000);
 
-/**
- * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors.  In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
-{
-       int v_idx, q_vectors;
+       netif_tx_stop_all_queues(netdev);
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               q_vectors = 1;
+       /* call carrier off first to avoid false dev_watchdog timeouts */
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
 
-       for (v_idx = 0; v_idx < q_vectors; v_idx++)
-               ixgbe_free_q_vector(adapter, v_idx);
-}
+       ixgbe_irq_disable(adapter);
 
-static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
-{
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-               pci_disable_msix(adapter->pdev);
-               kfree(adapter->msix_entries);
-               adapter->msix_entries = NULL;
-       } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-               adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
-               pci_disable_msi(adapter->pdev);
-       }
-}
+       ixgbe_napi_disable_all(adapter);
 
-/**
- * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
- * @adapter: board private structure to initialize
- *
- * We determine which interrupt scheme to use based on...
- * - Kernel support (MSI, MSI-X)
- *   - which can be user-defined (via MODULE_PARAM)
- * - Hardware queue count (num_*_queues)
- *   - defined by miscellaneous hardware support/features (RSS, etc.)
- **/
-int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
-{
-       int err;
+       adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
+                            IXGBE_FLAG2_RESET_REQUESTED);
+       adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
 
-       /* Number of supported queues */
-       err = ixgbe_set_num_queues(adapter);
-       if (err)
-               return err;
+       del_timer_sync(&adapter->service_timer);
 
-       err = ixgbe_set_interrupt_capability(adapter);
-       if (err) {
-               e_dev_err("Unable to setup interrupt capabilities\n");
-               goto err_set_interrupt;
-       }
+       if (adapter->num_vfs) {
+               /* Clear EITR Select mapping */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 
-       err = ixgbe_alloc_q_vectors(adapter);
-       if (err) {
-               e_dev_err("Unable to allocate memory for queue vectors\n");
-               goto err_alloc_q_vectors;
-       }
+               /* Mark all the VFs as inactive */
+               for (i = 0 ; i < adapter->num_vfs; i++)
+                       adapter->vfinfo[i].clear_to_send = 0;
 
-       ixgbe_cache_ring_register(adapter);
+               /* ping all the active vfs to let them know we are going down */
+               ixgbe_ping_all_vfs(adapter);
 
-       e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
-                  (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
-                  adapter->num_rx_queues, adapter->num_tx_queues);
+               /* Disable all VFTE/VFRE TX/RX */
+               ixgbe_disable_tx_rx(adapter);
+       }
 
-       set_bit(__IXGBE_DOWN, &adapter->state);
+       /* disable transmits in the hardware now that interrupts are off */
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+       }
 
-       return 0;
+       /* Disable the Tx DMA engine on 82599 and X540 */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+                               (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+                                ~IXGBE_DMATXCTL_TE));
+               break;
+       default:
+               break;
+       }
 
-err_alloc_q_vectors:
-       ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
-       return err;
-}
+#ifdef HAVE_PCI_ERS
+       if (!pci_channel_offline(adapter->pdev))
+#endif
+               ixgbe_reset(adapter);
+       /* power down the optics */
+       if ((hw->phy.multispeed_fiber) ||
+           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+            (hw->mac.type == ixgbe_mac_82599EB)))
+               ixgbe_disable_tx_laser(hw);
 
-/**
- * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
- * @adapter: board private structure to clear interrupt scheme on
- *
- * We go through and clear interrupt specific resources and reset the structure
- * to pre-load conditions
- **/
-void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
-{
-       adapter->num_tx_queues = 0;
-       adapter->num_rx_queues = 0;
+       ixgbe_clean_all_tx_rings(adapter);
+       ixgbe_clean_all_rx_rings(adapter);
 
-       ixgbe_free_q_vectors(adapter);
-       ixgbe_reset_interrupt_capability(adapter);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       /* since we reset the hardware DCA settings were cleared */
+       ixgbe_setup_dca(adapter);
+#endif
 }
 
 /**
@@ -5142,129 +5508,165 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       unsigned int rss;
-#ifdef CONFIG_IXGBE_DCB
-       int j;
-       struct tc_configuration *tc;
-#endif
+       struct ixgbe_dcb_tc_config *tc;
+       int j, bwg_pct;
+       int err;
 
        /* PCI config space info */
 
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
-       hw->revision_id = pdev->revision;
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
 
+       err = ixgbe_init_shared_code(hw);
+       if (err) {
+               e_err(probe, "init_shared_code failed: %d\n", err);
+               goto out;
+       }
+       adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+                                    hw->mac.num_rar_entries,
+                                    GFP_ATOMIC);
        /* Set capability flags */
-       rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
-       adapter->ring_feature[RING_F_RSS].indices = rss;
-       adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
+               adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
+                                 IXGBE_FLAG_MSIX_CAPABLE |
+                                 IXGBE_FLAG_MQ_CAPABLE;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+               adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+               adapter->flags &= ~IXGBE_FLAG_SRIOV_CAPABLE;
+               adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-               adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+
+               adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598;
                break;
        case ixgbe_mac_X540:
                adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
        case ixgbe_mac_82599EB:
-               adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+               adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
+                                 IXGBE_FLAG_MSIX_CAPABLE |
+                                 IXGBE_FLAG_MQ_CAPABLE;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+               adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+               adapter->flags |= IXGBE_FLAG_SRIOV_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-               adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-               if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
-                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-               /* Flow Director hash filters enabled */
-               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               adapter->atr_sample_rate = 20;
-               adapter->ring_feature[RING_F_FDIR].indices =
-                                                        IXGBE_MAX_FDIR_INDICES;
-               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
                adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
                adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-               adapter->ring_feature[RING_F_FCOE].indices = 0;
-#ifdef CONFIG_IXGBE_DCB
+#ifdef CONFIG_DCB
                /* Default traffic class to use for FCoE */
-               adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+               adapter->fcoe.up = IXGBE_FCOE_DEFUP;
+               adapter->fcoe.up_set = IXGBE_FCOE_DEFUP;
 #endif
-#endif /* IXGBE_FCOE */
-               break;
+#endif
+               adapter->ring_feature[RING_F_FDIR].limit =
+                                                       IXGBE_MAX_FDIR_INDICES;
+               if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+#ifndef IXGBE_NO_SMART_SPEED
+               hw->phy.smart_speed = ixgbe_smart_speed_on;
+#else
+               hw->phy.smart_speed = ixgbe_smart_speed_off;
+#endif
+               adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599;
        default:
                break;
        }
 
+#ifdef IXGBE_FCOE
+       /* FCoE support exists, always init the FCoE lock */
+       spin_lock_init(&adapter->fcoe.lock);
+
+#endif
        /* n-tuple support exists, always init our spinlock */
        spin_lock_init(&adapter->fdir_perfect_lock);
 
-#ifdef CONFIG_IXGBE_DCB
+
        switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+       case ixgbe_mac_82599EB:
+               adapter->dcb_cfg.num_tcs.pg_tcs = 8;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = 8;
+               break;
        case ixgbe_mac_X540:
-               adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
                break;
        default:
-               adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
                break;
        }
 
        /* Configure DCB traffic classes */
-       for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+       bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs;
+       for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) {
                tc = &adapter->dcb_cfg.tc_config[j];
-               tc->path[DCB_TX_CONFIG].bwg_id = 0;
-               tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->path[DCB_RX_CONFIG].bwg_id = 0;
-               tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->dcb_pfc = pfc_disabled;
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0;
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct;
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0;
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct;
+               tc->pfc = ixgbe_dcb_pfc_disabled;
        }
 
-       /* Initialize default user to priority mapping, UPx->TC0 */
+       /* reset back to TC 0 */
        tc = &adapter->dcb_cfg.tc_config[0];
-       tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
-       tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
 
-       adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
-       adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+       /* total of all TCs bandwidth needs to be 100 */
+       bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs;
+       tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct;
+       tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct;
+
+       /* Initialize default user to priority mapping, UPx->TC0 */
+       tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+       adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100;
+       adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100;
+       adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal;
        adapter->dcb_cfg.pfc_mode_enable = false;
+       adapter->dcb_cfg.round_robin_enable = false;
        adapter->dcb_set_bitmap = 0x00;
+#ifdef CONFIG_DCB
        adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-       ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-                          MAX_TRAFFIC_CLASS);
+#endif /* CONFIG_DCB */
+#ifdef CONFIG_DCB
+       /* XXX does this need to be initialized even w/o DCB? */
+       memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+              sizeof(adapter->temp_dcb_cfg));
 
 #endif
+       if (hw->mac.type == ixgbe_mac_82599EB ||
+           hw->mac.type == ixgbe_mac_X540)
+               hw->mbx.ops.init_params(hw);
 
        /* default flow control settings */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
-#ifdef CONFIG_DCB
+
        adapter->last_lfc_mode = hw->fc.current_mode;
-#endif
        ixgbe_pbthresh_setup(adapter);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
 
-       /* enable itr by default in dynamic mode */
-       adapter->rx_itr_setting = 1;
-       adapter->tx_itr_setting = 1;
-
        /* set default ring sizes */
        adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
        /* set default work limits */
        adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
-
-       /* initialize eeprom parameters */
-       if (ixgbe_init_eeprom_params_generic(hw)) {
-               e_dev_err("EEPROM initialization failed\n");
-               return -EIO;
-       }
+       adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK;
 
        set_bit(__IXGBE_DOWN, &adapter->state);
-
-       return 0;
+out:
+       return err;
 }
 
 /**
@@ -5307,8 +5709,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
        if (!tx_ring->desc)
                goto err;
 
-       tx_ring->next_to_use = 0;
-       tx_ring->next_to_clean = 0;
        return 0;
 
 err:
@@ -5336,10 +5736,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
                err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
+
                e_err(probe, "Allocation for Tx Queue %u failed\n", i);
-               break;
+               goto err_setup_tx;
        }
 
+       return 0;
+err_setup_tx:
+       /* rewind the index freeing the rings as we go */
+       while (i--)
+               ixgbe_free_tx_resources(adapter->tx_ring[i]);
        return err;
 }
 
@@ -5383,9 +5789,10 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        if (!rx_ring->desc)
                goto err;
 
-       rx_ring->next_to_clean = 0;
-       rx_ring->next_to_use = 0;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+       ixgbe_init_rx_page_offset(rx_ring);
 
+#endif
        return 0;
 err:
        vfree(rx_ring->rx_buffer_info);
@@ -5412,10 +5819,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
                err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
+
                e_err(probe, "Allocation for Rx Queue %u failed\n", i);
-               break;
+               goto err_setup_rx;
        }
 
+#ifdef IXGBE_FCOE
+       err = ixgbe_setup_fcoe_ddp_resources(adapter);
+       if (!err)
+#endif
+               return 0;
+err_setup_rx:
+       /* rewind the index freeing the rings as we go */
+       while (i--)
+               ixgbe_free_rx_resources(adapter->rx_ring[i]);
        return err;
 }
 
@@ -5438,7 +5855,6 @@ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 
        dma_free_coherent(tx_ring->dev, tx_ring->size,
                          tx_ring->desc, tx_ring->dma);
-
        tx_ring->desc = NULL;
 }
 
@@ -5453,8 +5869,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               if (adapter->tx_ring[i]->desc)
-                       ixgbe_free_tx_resources(adapter->tx_ring[i]);
+               ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
@@ -5490,9 +5905,12 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i;
 
+#ifdef IXGBE_FCOE
+       ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
        for (i = 0; i < adapter->num_rx_queues; i++)
-               if (adapter->rx_ring[i]->desc)
-                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
+               ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5571,15 +5989,28 @@ static int ixgbe_open(struct net_device *netdev)
        if (err)
                goto err_req_irq;
 
+       /* Notify the stack of the actual queue counts. */
+       netif_set_real_num_tx_queues(netdev,
+                                    adapter->num_rx_pools > 1 ? 1 :
+                                    adapter->num_tx_queues);
+
+       err = netif_set_real_num_rx_queues(netdev,
+                                          adapter->num_rx_pools > 1 ? 1 :
+                                          adapter->num_rx_queues);
+       if (err)
+               goto err_set_queues;
+
        ixgbe_up_complete(adapter);
 
        return 0;
 
+err_set_queues:
+       ixgbe_free_irq(adapter);
 err_req_irq:
-err_setup_rx:
        ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
        ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
        ixgbe_reset(adapter);
 
        return err;
@@ -5637,21 +6068,20 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_wake_from_d3(pdev, false);
 
-       err = ixgbe_init_interrupt_scheme(adapter);
-       if (err) {
-               e_dev_err("Cannot initialize interrupts for device\n");
-               return err;
-       }
-
        ixgbe_reset(adapter);
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
-       if (netif_running(netdev)) {
+       rtnl_lock();
+
+       err = ixgbe_init_interrupt_scheme(adapter);
+       if (!err && netif_running(netdev))
                err = ixgbe_open(netdev);
-               if (err)
-                       return err;
-       }
+
+       rtnl_unlock();
+
+       if (err)
+               return err;
 
        netif_device_attach(netdev);
 
@@ -5659,6 +6089,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
 }
 #endif /* CONFIG_PM */
 
+/*
+ * __ixgbe_shutdown is not used when power management
+ * is disabled on older kernels (<2.6.12). causes a compile
+ * warning/error, because it is defined and not used.
+ */
+#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER)
 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -5672,18 +6108,19 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
        netif_device_detach(netdev);
 
+       rtnl_lock();
+
        if (netif_running(netdev)) {
                ixgbe_down(adapter);
                ixgbe_free_irq(adapter);
+
                ixgbe_free_all_tx_resources(adapter);
                ixgbe_free_all_rx_resources(adapter);
        }
 
        ixgbe_clear_interrupt_scheme(adapter);
-#ifdef CONFIG_DCB
-       kfree(adapter->ixgbe_ieee_pfc);
-       kfree(adapter->ixgbe_ieee_ets);
-#endif
+
+       rtnl_unlock();
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -5694,6 +6131,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        if (wufc) {
                ixgbe_set_rx_mode(netdev);
 
+               /*
+                * Laser could be off for multispeed fiber so make sure it
+                * is on for WoL.
+                */
+               if (hw->phy.multispeed_fiber ||
+                   (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
+                    hw->mac.type == ixgbe_mac_82599EB))
+                       ixgbe_enable_tx_laser(hw);
+
                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & IXGBE_WUFC_MC) {
                        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -5731,6 +6177,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
        return 0;
 }
+#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */
 
 #ifdef CONFIG_PM
 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -5753,6 +6200,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 #endif /* CONFIG_PM */
 
+#ifndef USE_REBOOT_NOTIFIER
 static void ixgbe_shutdown(struct pci_dev *pdev)
 {
        bool wake;
@@ -5765,13 +6213,41 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
        }
 }
 
+#endif
+/**
+ * ixgbe_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       /* update the stats data */
+       ixgbe_update_stats(adapter);
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+       /* only return the current stats */
+       return &netdev->stats;
+#else
+       /* only return the current stats */
+       return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
 /**
  * ixgbe_update_stats - Update the board statistics counters.
  * @adapter: board private structure
  **/
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+       struct net_device_stats *net_stats = &adapter->netdev->stats;
+#else
+       struct net_device_stats *net_stats = &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u64 total_mpc = 0;
@@ -5779,11 +6255,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
        u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
        u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       unsigned int cpu;
-       u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
+#ifndef IXGBE_NO_LRO
+       u32 flushed = 0, coal = 0;
+#endif
 
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5791,10 +6265,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                u64 rsc_count = 0;
-               u64 rsc_flush = 0;
-               for (i = 0; i < 16; i++)
-                       adapter->hw_rx_no_dma_resources +=
-                               IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               u64 rsc_flush = 0;
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
                        rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
@@ -5803,6 +6274,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                adapter->rsc_total_flush = rsc_flush;
        }
 
+#ifndef IXGBE_NO_LRO
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+               if (!q_vector)
+                       continue;
+               flushed += q_vector->lrolist.stats.flushed;
+               coal += q_vector->lrolist.stats.coal;
+       }
+       adapter->lro_stats.flushed = flushed;
+       adapter->lro_stats.coal = coal;
+
+#endif
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
                non_eop_descs += rx_ring->rx_stats.non_eop_descs;
@@ -5811,13 +6294,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hw_csum_rx_error += rx_ring->rx_stats.csum_err;
                bytes += rx_ring->stats.bytes;
                packets += rx_ring->stats.packets;
+
        }
        adapter->non_eop_descs = non_eop_descs;
        adapter->alloc_rx_page_failed = alloc_rx_page_failed;
        adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
        adapter->hw_csum_rx_error = hw_csum_rx_error;
-       netdev->stats.rx_bytes = bytes;
-       netdev->stats.rx_packets = packets;
+       net_stats->rx_bytes = bytes;
+       net_stats->rx_packets = packets;
 
        bytes = 0;
        packets = 0;
@@ -5831,8 +6315,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        }
        adapter->restart_queue = restart_queue;
        adapter->tx_busy = tx_busy;
-       netdev->stats.tx_bytes = bytes;
-       netdev->stats.tx_packets = packets;
+       net_stats->tx_bytes = bytes;
+       net_stats->tx_packets = packets;
 
        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 
@@ -5897,6 +6381,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
                hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
        case ixgbe_mac_82599EB:
+               for (i = 0; i < 16; i++)
+                       adapter->hw_rx_no_dma_resources +=
+                                            IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
                IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5904,27 +6391,33 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+#ifdef HAVE_TX_MQ
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
                hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#endif /* HAVE_TX_MQ */
 #ifdef IXGBE_FCOE
                hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+               hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
                hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
                hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
                hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
-               /* Add up per cpu counters for total ddp aloc fail */
-               if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+               /* Add up per cpu counters for total ddp alloc fail */
+               if (adapter->fcoe.ddp_pool) {
+                       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+                       struct ixgbe_fcoe_ddp_pool *ddp_pool;
+                       unsigned int cpu;
+                       u64 noddp = 0, noddp_ext_buff = 0;
                        for_each_possible_cpu(cpu) {
-                               fcoe_noddp_counts_sum +=
-                                       *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-                               fcoe_noddp_ext_buff_counts_sum +=
-                                       *per_cpu_ptr(fcoe->
-                                               pcpu_noddp_ext_buff, cpu);
+                               ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+                               noddp += ddp_pool->noddp;
+                               noddp_ext_buff += ddp_pool->noddp_ext_buff;
                        }
+                       hwstats->fcoe_noddp = noddp;
+                       hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
                }
-               hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-               hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
+
 #endif /* IXGBE_FCOE */
                break;
        default:
@@ -5968,18 +6461,46 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
        hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
-
        /* Fill out the OS statistics structure */
-       netdev->stats.multicast = hwstats->mprc;
+       net_stats->multicast = hwstats->mprc;
 
        /* Rx Errors */
-       netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
-       netdev->stats.rx_dropped = 0;
-       netdev->stats.rx_length_errors = hwstats->rlec;
-       netdev->stats.rx_crc_errors = hwstats->crcerrs;
-       netdev->stats.rx_missed_errors = total_mpc;
+       net_stats->rx_errors = hwstats->crcerrs +
+                                      hwstats->rlec;
+       net_stats->rx_dropped = 0;
+       net_stats->rx_length_errors = hwstats->rlec;
+       net_stats->rx_crc_errors = hwstats->crcerrs;
+       net_stats->rx_missed_errors = total_mpc;
+
+       /*
+        * VF Stats Collection - skip while resetting because these
+        * are not clear on read and otherwise you'll sometimes get
+        * crazy values.
+        */
+       if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+               for (i = 0; i < adapter->num_vfs; i++) {
+                       UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),             \
+                                       adapter->vfinfo[i].last_vfstats.gprc, \
+                                       adapter->vfinfo[i].vfstats.gprc);
+                       UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),             \
+                                       adapter->vfinfo[i].last_vfstats.gptc, \
+                                       adapter->vfinfo[i].vfstats.gptc);
+                       UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),         \
+                                       IXGBE_PVFGORC_MSB(i),                 \
+                                       adapter->vfinfo[i].last_vfstats.gorc, \
+                                       adapter->vfinfo[i].vfstats.gorc);
+                       UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),         \
+                                       IXGBE_PVFGOTC_MSB(i),                 \
+                                       adapter->vfinfo[i].last_vfstats.gotc, \
+                                       adapter->vfinfo[i].vfstats.gotc);
+                       UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),             \
+                                       adapter->vfinfo[i].last_vfstats.mprc, \
+                                       adapter->vfinfo[i].vfstats.mprc);
+               }
+       }
 }
 
+#ifdef HAVE_TX_MQ
 /**
  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
  * @adapter - pointer to the device adapter structure
@@ -6007,7 +6528,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->state));
+                               &(adapter->tx_ring[i]->state));
                /* re-enable flow director interrupts */
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
        } else {
@@ -6016,6 +6537,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
        }
 }
 
+#endif /* HAVE_TX_MQ */
 /**
  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
  * @adapter - pointer to the device adapter structure
@@ -6052,7 +6574,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                        (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
        } else {
                /* get one bit for every active tx/rx interrupt vector */
-               for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+               for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
                        if (qv->rx.ring || qv->tx.ring)
                                eics |= ((u64)1 << i);
@@ -6061,7 +6583,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 
        /* Cause software interrupt to ensure rings are cleaned */
        ixgbe_irq_rearm_queues(adapter, eics);
-
 }
 
 /**
@@ -6074,7 +6595,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = adapter->link_speed;
        bool link_up = adapter->link_up;
-       int i;
+       bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
        if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
                return;
@@ -6086,13 +6607,15 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
                link_speed = IXGBE_LINK_SPEED_10GB_FULL;
                link_up = true;
        }
-       if (link_up) {
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-                               hw->mac.ops.fc_enable(hw, i);
-               } else {
-                       hw->mac.ops.fc_enable(hw, 0);
-               }
+
+#ifdef HAVE_DCBNL_IEEE
+       if (adapter->ixgbe_ieee_pfc)
+               pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+#endif
+       if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
+               hw->mac.ops.fc_enable(hw);
+               ixgbe_set_rx_drop_en(adapter);
        }
 
        if (link_up ||
@@ -6146,6 +6669,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
                flow_rx = false;
                break;
        }
+
        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
               "10 Gbps" :
@@ -6159,7 +6683,13 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
               (flow_tx ? "TX" : "None"))));
 
        netif_carrier_on(netdev);
+#ifdef IFLA_VF_MAX
        ixgbe_check_vf_rate_limit(adapter);
+#endif /* IFLA_VF_MAX */
+       netif_tx_wake_all_queues(netdev);
+
+       /* ping all the active vfs to let them know link has changed */
+       ixgbe_ping_all_vfs(adapter);
 }
 
 /**
@@ -6185,6 +6715,10 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 
        e_info(drv, "NIC Link is Down\n");
        netif_carrier_off(netdev);
+       netif_tx_stop_all_queues(netdev);
+
+       /* ping all the active vfs to let them know link has changed */
+       ixgbe_ping_all_vfs(adapter);
 }
 
 /**
@@ -6318,12 +6852,13 @@ sfp_out:
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 
        if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
-           (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+           adapter->netdev_registered) {
                e_dev_err("failed to initialize because an unsupported "
                          "SFP+ module type was detected.\n");
                e_dev_err("Reload the driver after installing a "
                          "supported module.\n");
                unregister_netdev(adapter->netdev);
+               adapter->netdev_registered = false;
        }
 }
 
@@ -6458,7 +6993,6 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return;
 
-       ixgbe_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        adapter->tx_timeout_count++;
 
@@ -6480,46 +7014,30 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_sfp_link_config_subtask(adapter);
        ixgbe_check_overtemp_subtask(adapter);
        ixgbe_watchdog_subtask(adapter);
+#ifdef HAVE_TX_MQ
        ixgbe_fdir_reinit_subtask(adapter);
+#endif
        ixgbe_check_hang_subtask(adapter);
 
        ixgbe_service_event_complete(adapter);
 }
 
-void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
-                      u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
-{
-       struct ixgbe_adv_tx_context_desc *context_desc;
-       u16 i = tx_ring->next_to_use;
-
-       context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
-
-       i++;
-       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
-
-       /* set bits to identify this as an advanced context descriptor */
-       type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
-       context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
-       context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
-       context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
-       context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
-}
-
 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len)
+                    u8 *hdr_len)
 {
+#ifdef NETIF_F_TSO
        struct sk_buff *skb = first->skb;
-       int err;
        u32 vlan_macip_lens, type_tucmd;
        u32 mss_l4len_idx, l4len;
 
        if (!skb_is_gso(skb))
+#endif /* NETIF_F_TSO */
                return 0;
+#ifdef NETIF_F_TSO
 
        if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (err)
                        return err;
        }
@@ -6527,7 +7045,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == __constant_htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -6536,17 +7054,29 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
                                                         IPPROTO_TCP,
                                                         0);
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+               first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+                                  IXGBE_TX_FLAGS_CSUM |
+                                  IXGBE_TX_FLAGS_IPV4;
+#ifdef NETIF_F_TSO6
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                     &ipv6_hdr(skb)->daddr,
                                     0, IPPROTO_TCP, 0);
+               first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+                                  IXGBE_TX_FLAGS_CSUM;
+#endif
        }
 
+       /* compute header lengths */
        l4len = tcp_hdrlen(skb);
        *hdr_len = skb_transport_offset(skb) + l4len;
 
+       /* update gso size and bytecount with header size */
+       first->gso_segs = skb_shinfo(skb)->gso_segs;
+       first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
        /* mss_l4len_id: use 1 as index for TSO */
        mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -6555,17 +7085,17 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
        vlan_macip_lens = skb_network_header_len(skb);
        vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-       vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+       vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
-                         mss_l4len_idx);
+                         mss_l4len_idx);
 
        return 1;
+#endif
 }
 
-static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
-                         struct ixgbe_tx_buffer *first,
-                         u32 tx_flags, __be16 protocol)
+static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+                         struct ixgbe_tx_buffer *first)
 {
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens = 0;
@@ -6573,26 +7103,28 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-           if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-               !(tx_flags & IXGBE_TX_FLAGS_TXSW))
-                       return false;
+               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+                   !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+                       return;
        } else {
                u8 l4_hdr = 0;
-               switch (protocol) {
+               switch (first->protocol) {
                case __constant_htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
+#ifdef NETIF_F_IPV6_CSUM
                case __constant_htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
+#endif
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
                                 "partial checksum but proto=%x!\n",
-                                skb->protocol);
+                                first->protocol);
                        }
                        break;
                }
@@ -6603,11 +7135,13 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                        mss_l4len_idx = tcp_hdrlen(skb) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
                        break;
+#ifdef HAVE_SCTP
                case IPPROTO_SCTP:
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        mss_l4len_idx = sizeof(struct sctphdr) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
                        break;
+#endif
                case IPPROTO_UDP:
                        mss_l4len_idx = sizeof(struct udphdr) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
@@ -6616,19 +7150,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
                                 "partial checksum but l4 proto=%x!\n",
-                                skb->protocol);
+                                l4_hdr);
                        }
                        break;
                }
+
+               /* update TX checksum flag */
+               first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
+       /* vlan_macip_lens: MACLEN, VLAN tag */
        vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-       vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+       vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
                          type_tucmd, mss_l4len_idx);
-
-       return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
@@ -6641,7 +7177,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
        /* set HW vlan bit if vlan is present */
        if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
        /* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
        if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
@@ -6653,7 +7188,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
        return cmd_type;
 }
 
-static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+                                  u32 tx_flags, unsigned int paylen)
 {
        __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
@@ -6662,7 +7198,7 @@ static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
                olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
        /* enble IPv4 checksum for TSO */
-       if ((tx_flags & IXGBE_TX_FLAGS_TSO) && (tx_flags & IXGBE_TX_FLAGS_IPV4))
+       if (tx_flags & IXGBE_TX_FLAGS_IPV4)
                olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 
        /* use index 1 context for TSO/FSO/FCOE */
@@ -6684,7 +7220,7 @@ static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
 #endif
                olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
-       return olinfo_status;
+       tx_desc->read.olinfo_status = olinfo_status;
 }
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6692,147 +7228,143 @@ static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
 
 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                         struct ixgbe_tx_buffer *first,
-                        u32 tx_flags,
                         const u8 hdr_len)
 {
+       dma_addr_t dma;
        struct sk_buff *skb = first->skb;
-       struct device *dev = tx_ring->dev;
-       struct ixgbe_tx_buffer *tx_buffer_info;
+       struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       dma_addr_t dma;
-       __le32 cmd_type, olinfo_status;
-       struct skb_frag_struct *frag;
-       unsigned int f = 0;
+#ifdef MAX_SKB_FRAGS
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int data_len = skb->data_len;
+#endif
        unsigned int size = skb_headlen(skb);
-       u32 offset = 0;
-       u32 paylen = skb->len - hdr_len;
+       unsigned int paylen = skb->len - hdr_len;
+       u32 tx_flags = first->tx_flags;
+       __le32 cmd_type;
        u16 i = tx_ring->next_to_use;
-       u16 gso_segs;
+
+       tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+       ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
+       cmd_type = ixgbe_tx_cmd_type(tx_flags);
 
 #ifdef IXGBE_FCOE
        if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
-               if (data_len >= sizeof(struct fcoe_crc_eof)) {
-                       data_len -= sizeof(struct fcoe_crc_eof);
-               } else {
+               if (data_len < sizeof(struct fcoe_crc_eof)) {
                        size -= sizeof(struct fcoe_crc_eof) - data_len;
                        data_len = 0;
+               } else {
+                       data_len -= sizeof(struct fcoe_crc_eof);
                }
        }
 
 #endif
-       dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma))
+       dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(tx_ring->dev, dma))
                goto dma_error;
 
-       cmd_type = ixgbe_tx_cmd_type(tx_flags);
-       olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
+       /* record length, and DMA address */
+       dma_unmap_len_set(first, len, size);
+       dma_unmap_addr_set(first, dma, dma);
 
-       tx_desc = IXGBE_TX_DESC(tx_ring, i);
+       tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
+#ifdef MAX_SKB_FRAGS
        for (;;) {
-               while (size > IXGBE_MAX_DATA_PER_TXD) {
-                       tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+#endif
+               while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
                        tx_desc->read.cmd_type_len =
                                cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
-                       tx_desc->read.olinfo_status = olinfo_status;
-
-                       offset += IXGBE_MAX_DATA_PER_TXD;
-                       size -= IXGBE_MAX_DATA_PER_TXD;
 
-                       tx_desc++;
                        i++;
+                       tx_desc++;
                        if (i == tx_ring->count) {
                                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
-               }
 
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_buffer_info->length = offset + size;
-               tx_buffer_info->tx_flags = tx_flags;
-               tx_buffer_info->dma = dma;
+                       dma += IXGBE_MAX_DATA_PER_TXD;
+                       size -= IXGBE_MAX_DATA_PER_TXD;
 
-               tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
-               tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
-               tx_desc->read.olinfo_status = olinfo_status;
+                       tx_desc->read.buffer_addr = cpu_to_le64(dma);
+                       tx_desc->read.olinfo_status = 0;
+               }
 
-               if (!data_len)
+#ifdef MAX_SKB_FRAGS
+               if (likely(!data_len))
                        break;
 
-               frag = &skb_shinfo(skb)->frags[f];
-#ifdef IXGBE_FCOE
-               size = min_t(unsigned int, data_len, frag->size);
-#else
-               size = frag->size;
-#endif
-               data_len -= size;
-               f++;
-
-               offset = 0;
-               tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
-
-               dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, dma))
-                       goto dma_error;
+               tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
-               tx_desc++;
                i++;
+               tx_desc++;
                if (i == tx_ring->count) {
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                        i = 0;
                }
-       }
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+#ifdef IXGBE_FCOE
+               size = min_t(unsigned int, data_len, skb_frag_size(frag));
+#else
+               size = skb_frag_size(frag);
+#endif
+               data_len -= size;
 
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
+               dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(tx_ring->dev, dma))
+                       goto dma_error;
 
-       tx_ring->next_to_use = i;
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               dma_unmap_len_set(tx_buffer, len, size);
+               dma_unmap_addr_set(tx_buffer, dma, dma);
 
-       if (tx_flags & IXGBE_TX_FLAGS_TSO)
-               gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
-       /* adjust for FCoE Sequence Offload */
-       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-                                       skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-       else
-               gso_segs = 1;
+               tx_desc->read.buffer_addr = cpu_to_le64(dma);
+               tx_desc->read.olinfo_status = 0;
+
+               frag++;
+       }
 
-       /* multiply data chunks by size of headers */
-       tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
-       tx_buffer_info->gso_segs = gso_segs;
+#endif /* MAX_SKB_FRAGS */
+       /* write last descriptor with RS and EOP bits */
+       cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+       tx_desc->read.cmd_type_len = cmd_type;
 
        /* set the timestamp */
        first->time_stamp = jiffies;
 
        /*
-        * Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
+        * Force memory writes to complete before letting h/w know there
+        * are new descriptors to fetch.  (Only applicable for weak-ordered
+        * memory model archs, such as IA-64).
+        *
+        * We also need this memory barrier to make certain all of the
+        * status bits have been updated before next_to_watch is written.
         */
        wmb();
 
        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;
 
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       tx_ring->next_to_use = i;
+
        /* notify HW of packet */
        writel(i, tx_ring->tail);
 
        return;
 dma_error:
-       dev_err(dev, "TX DMA map failed\n");
+       dev_err(tx_ring->dev, "TX DMA map failed\n");
 
        /* clear dma mappings for failed tx_buffer_info map */
        for (;;) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-               if (tx_buffer_info == first)
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+               if (tx_buffer == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
@@ -6843,8 +7375,7 @@ dma_error:
 }
 
 static void ixgbe_atr(struct ixgbe_ring *ring,
-                     struct ixgbe_tx_buffer *first,
-                     u32 tx_flags, __be16 protocol)
+                     struct ixgbe_tx_buffer *first)
 {
        struct ixgbe_q_vector *q_vector = ring->q_vector;
        union ixgbe_atr_hash_dword input = { .dword = 0 };
@@ -6871,9 +7402,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
        hdr.network = skb_network_header(first->skb);
 
        /* Currently only IPv4/IPv6 with TCP is supported */
-       if ((protocol != __constant_htons(ETH_P_IPV6) ||
+       if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
             hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-           (protocol != __constant_htons(ETH_P_IP) ||
+           (first->protocol != __constant_htons(ETH_P_IP) ||
             hdr.ipv4->protocol != IPPROTO_TCP))
                return;
 
@@ -6890,7 +7421,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
        /* reset sample count */
        ring->atr_count = 0;
 
-       vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+       vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
 
        /*
         * src and dst are inverted, think how the receiver sees them
@@ -6905,13 +7436,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
         * since src port and flex bytes occupy the same word XOR them together
         * and write the value to source port portion of compressed dword
         */
-       if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
+       if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
                common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
        else
-               common.port.src ^= th->dest ^ protocol;
+               common.port.src ^= th->dest ^ first->protocol;
        common.port.dst ^= th->source;
 
-       if (protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == __constant_htons(ETH_P_IP)) {
                input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
        } else {
@@ -6933,7 +7464,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
-       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       netif_stop_subqueue(netdev_ring(tx_ring), ring_queue_index(tx_ring));
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
@@ -6945,7 +7476,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
                return -EBUSY;
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       netif_start_subqueue(netdev_ring(tx_ring), ring_queue_index(tx_ring));
        ++tx_ring->tx_stats.restart_queue;
        return 0;
 }
@@ -6957,6 +7488,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
+#ifdef HAVE_NETDEV_SELECT_QUEUE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6965,24 +7497,53 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 #ifdef IXGBE_FCOE
        __be16 protocol = vlan_get_protocol(skb);
 
-       if (((protocol == htons(ETH_P_FCOE)) ||
-           (protocol == htons(ETH_P_FIP))) &&
-           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-               txq += adapter->ring_feature[RING_F_FCOE].mask;
+       if ((protocol == __constant_htons(ETH_P_FCOE)) ||
+           (protocol == __constant_htons(ETH_P_FIP))) {
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       struct ixgbe_ring_feature *f;
+
+                       f = &adapter->ring_feature[RING_F_FCOE];
+
+                       while (txq >= f->indices)
+                               txq -= f->indices;
+                       txq += f->offset;
+
+                       return txq;
+               } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       txq = adapter->fcoe.up;
+                       return txq;
+               }
+       }
+
+#endif /* IXGBE_FCOE */
+#ifndef HAVE_MQPRIO
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               struct ixgbe_ring_feature *f;
+               if (skb->priority == TC_PRIO_CONTROL)
+                       txq = adapter->tc - 1;
+               else
+                       txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
+                              >> 13;
+
+               f = &adapter->ring_feature[RING_F_RSS];
+               txq *= f->indices;
+               txq += __skb_tx_hash(dev, skb, f->indices);
+
                return txq;
        }
-#endif
 
+#endif
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;
                return txq;
        }
 
-       return skb_tx_hash(dev, skb);
+       return __skb_tx_hash(dev, skb,
+                            adapter->ring_feature[RING_F_RSS].indices);
 }
 
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
@@ -6990,8 +7551,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        struct ixgbe_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
+#ifdef MAX_SKB_FRAGS
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned short f;
+#endif
 #endif
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
        __be16 protocol = skb->protocol;
@@ -7004,11 +7567,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
+#ifdef MAX_SKB_FRAGS
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 #else
        count += skb_shinfo(skb)->nr_frags;
+#endif
 #endif
        if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
@@ -7018,6 +7583,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
        first->skb = skb;
+       first->bytecount = skb->len;
+       first->gso_segs = 1;
 
        /* if we have a HW VLAN tag being added default to the HW one */
        if (vlan_tx_tag_present(skb)) {
@@ -7041,17 +7608,27 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
         * Use the l2switch_enable flag - would be false if the DMA
         * Tx switch had been disabled.
         */
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+       if (adapter->flags & IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE)
                tx_flags |= IXGBE_TX_FLAGS_TXSW;
 
 #endif
-       /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
+#ifdef HAVE_TX_MQ
        if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
            ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
             (skb->priority != TC_PRIO_CONTROL))) {
                tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-               tx_flags |= (skb->priority & 0x7) <<
-                                       IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+#ifdef IXGBE_FCOE
+               /* for FCoE with DCB, we force the priority to what
+                * was specified by the switch */
+               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+                   ((protocol == __constant_htons(ETH_P_FCOE)) ||
+                    (protocol == __constant_htons(ETH_P_FIP))))
+                       tx_flags |= adapter->fcoe.up <<
+                                   IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+               else
+#endif /* IXGBE_FCOE */
+                       tx_flags |= skb->priority <<
+                                   IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
                        if (skb_header_cloned(skb) &&
@@ -7065,44 +7642,42 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                }
        }
 
+#endif /* HAVE_TX_MQ */
+       /* record initial flags and protocol */
+       first->tx_flags = tx_flags;
+       first->protocol = protocol;
+
 #ifdef IXGBE_FCOE
        /* setup tx offload for FCoE */
        if ((protocol == __constant_htons(ETH_P_FCOE)) &&
-           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-               tso = ixgbe_fso(tx_ring, first, tx_flags, &hdr_len);
+           (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
+               tso = ixgbe_fso(tx_ring, first, &hdr_len);
                if (tso < 0)
                        goto out_drop;
-               else if (tso)
-                       tx_flags |= IXGBE_TX_FLAGS_FSO |
-                                   IXGBE_TX_FLAGS_FCOE;
-               else
-                       tx_flags |= IXGBE_TX_FLAGS_FCOE;
 
                goto xmit_fcoe;
        }
 
 #endif /* IXGBE_FCOE */
-       /* setup IPv4/IPv6 offloads */
-       if (protocol == __constant_htons(ETH_P_IP))
-               tx_flags |= IXGBE_TX_FLAGS_IPV4;
-
-       tso = ixgbe_tso(tx_ring, first, tx_flags, protocol, &hdr_len);
+       tso = ixgbe_tso(tx_ring, first, &hdr_len);
        if (tso < 0)
                goto out_drop;
-       else if (tso)
-               tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
-       else if (ixgbe_tx_csum(tx_ring, first, tx_flags, protocol))
-               tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       else if (!tso)
+               ixgbe_tx_csum(tx_ring, first);
 
        /* add the ATR filter if ATR is on */
        if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
-               ixgbe_atr(tx_ring, first, tx_flags, protocol);
+               ixgbe_atr(tx_ring, first);
 
 #ifdef IXGBE_FCOE
 xmit_fcoe:
 #endif /* IXGBE_FCOE */
-       ixgbe_tx_map(tx_ring, first, tx_flags, hdr_len);
+       ixgbe_tx_map(tx_ring, first, hdr_len);
+
+#ifndef HAVE_TRANS_START_IN_QUEUE
+       netdev_ring(tx_ring)->trans_start = jiffies;
 
+#endif
        ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
@@ -7119,6 +7694,9 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring;
+#ifdef HAVE_TX_MQ
+       unsigned int r_idx = skb->queue_mapping;
+#endif
 
        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
@@ -7135,7 +7713,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                skb->len = 17;
        }
 
-       tx_ring = adapter->tx_ring[skb->queue_mapping];
+#ifdef HAVE_TX_MQ
+       if (r_idx >= adapter->num_tx_queues)
+               r_idx = r_idx % adapter->num_tx_queues;
+       tx_ring = adapter->tx_ring[r_idx];
+#else
+       tx_ring = adapter->tx_ring[0];
+#endif
        return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
@@ -7151,56 +7735,25 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
+       int ret;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-                           IXGBE_RAH_AV);
-
-       return 0;
-}
-
-static int
-ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       u16 value;
-       int rc;
-
-       if (prtad != hw->phy.mdio.prtad)
-               return -EINVAL;
-       rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
-       if (!rc)
-               rc = value;
-       return rc;
-}
-
-static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
-                           u16 addr, u16 value)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-
-       if (prtad != hw->phy.mdio.prtad)
-               return -EINVAL;
-       return hw->phy.ops.write_reg(hw, addr, devad, value);
-}
-
-static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+       /* set the correct pool for the new PF MAC address in entry 0 */
+       ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+       return (ret > 0 ? 0 : ret);
 }
 
+#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
 /**
  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
- * netdev->dev_addrs
+ * netdev->dev_addr_list
  * @netdev: network interface device structure
  *
  * Returns non-zero on failure
@@ -7209,19 +7762,23 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 {
        int err = 0;
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       struct ixgbe_mac_info *mac = &adapter->hw.mac;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (is_valid_ether_addr(mac->san_addr)) {
+       if (is_valid_ether_addr(hw->mac.san_addr)) {
                rtnl_lock();
-               err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+               err = dev_addr_add(dev, hw->mac.san_addr,
+                                  NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
+
+               /* update SAN MAC vmdq pool selection */
+               hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
        }
        return err;
 }
 
 /**
  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
- * netdev->dev_addrs
+ * netdev->dev_addr_list
  * @netdev: network interface device structure
  *
  * Returns non-zero on failure
@@ -7240,6 +7797,27 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
        return err;
 }
 
+#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */
+
+/**
+ * ixgbe_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command code
+ **/
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       switch (cmd) {
+#ifdef ETHTOOL_OPS_COMPAT
+       case SIOCETHTOOL:
+               return ethtool_ioctl(ifr);
+#endif
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -7249,7 +7827,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
 static void ixgbe_netpoll(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
 
        /* if interface is down do nothing */
        if (test_bit(__IXGBE_DOWN, &adapter->state))
@@ -7257,68 +7834,18 @@ static void ixgbe_netpoll(struct net_device *netdev)
 
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               for (i = 0; i < num_q_vectors; i++) {
-                       struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-                       ixgbe_msix_clean_rings(0, q_vector);
-               }
+               int i;
+               for (i = 0; i < adapter->num_q_vectors; i++)
+                       ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
        } else {
-               ixgbe_intr(adapter->pdev->irq, netdev);
+               ixgbe_intr(0, adapter);
        }
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 }
-
-#endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
-                                                  struct rtnl_link_stats64 *stats)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       rcu_read_lock();
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
-               u64 bytes, packets;
-               unsigned int start;
-
-               if (ring) {
-                       do {
-                               start = u64_stats_fetch_begin_bh(&ring->syncp);
-                               packets = ring->stats.packets;
-                               bytes   = ring->stats.bytes;
-                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-                       stats->rx_packets += packets;
-                       stats->rx_bytes   += bytes;
-               }
-       }
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
-               u64 bytes, packets;
-               unsigned int start;
-
-               if (ring) {
-                       do {
-                               start = u64_stats_fetch_begin_bh(&ring->syncp);
-                               packets = ring->stats.packets;
-                               bytes   = ring->stats.bytes;
-                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-                       stats->tx_packets += packets;
-                       stats->tx_bytes   += bytes;
-               }
-       }
-       rcu_read_unlock();
-       /* following stats updated by ixgbe_watchdog_task() */
-       stats->multicast        = netdev->stats.multicast;
-       stats->rx_errors        = netdev->stats.rx_errors;
-       stats->rx_length_errors = netdev->stats.rx_length_errors;
-       stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
-       stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-       return stats;
-}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
 
 /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+ * @adapter: pointer to ixgbe_adapter
  * @tc: number of traffic classes currently enabled
  *
  * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -7339,7 +7866,7 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
        reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
        rsave = reg;
 
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
 
                /* If up2tc is out of bounds default to zero */
@@ -7353,7 +7880,35 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
        return;
 }
 
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+/**
+ * ixgbe_set_prio_tc_map - Configure netdev prio tc map
+ * @adapter: Pointer to adapter struct
+ *
+ * Populate the netdev user priority to tc map
+ */
+static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
+{
+#ifdef HAVE_DCBNL_IEEE
+       struct net_device *dev = adapter->netdev;
+       struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+       struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+       u8 prio;
+
+       for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) {
+               u8 tc = 0;
+
+               if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
+                       tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
+               else if (ets)
+                       tc = ets->prio_tc[prio];
+
+               netdev_set_prio_tc_map(dev, prio, tc);
+       }
+#endif
+}
+
+/**
+ * ixgbe_setup_tc - routine to configure net_device for multiple traffic
  * classes.
  *
  * @netdev: net device to configure
@@ -7364,16 +7919,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       /* Multiple traffic classes requires multiple queues */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-               e_err(drv, "Enable failed, needs MSI-X\n");
-               return -EINVAL;
-       }
-
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
            (hw->mac.type == ixgbe_mac_82598EB &&
-            tc < MAX_TRAFFIC_CLASS))
+            tc < IXGBE_DCB_MAX_TRAFFIC_CLASS))
                return -EINVAL;
 
        /* Hardware has to reinitialize queues and interrupts to
@@ -7386,18 +7935,21 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
        if (tc) {
                netdev_set_num_tc(dev, tc);
-               adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+               ixgbe_set_prio_tc_map(adapter);
+
                adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
                        adapter->hw.fc.requested_mode = ixgbe_fc_none;
+               }
        } else {
                netdev_reset_tc(dev);
-               adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 
                adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 
                adapter->temp_dcb_cfg.pfc_mode_enable = false;
                adapter->dcb_cfg.pfc_mode_enable = false;
@@ -7421,29 +7973,29 @@ void ixgbe_do_reset(struct net_device *netdev)
                ixgbe_reset(adapter);
 }
 
+#ifdef HAVE_NDO_SET_FEATURES
 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
 {
+#if defined(CONFIG_DCB) || defined(IXGBE_NO_LRO)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#endif
 
 #ifdef CONFIG_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               features &= ~NETIF_F_HW_VLAN_RX;
+               features |= NETIF_F_HW_VLAN_RX;
 #endif
 
-       /* return error if RXHASH is being enabled when RSS is not supported */
-       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-               features &= ~NETIF_F_RXHASH;
-
        /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;
 
+#ifdef IXGBE_NO_LRO
        /* Turn off LRO if not RSC capable */
        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
                features &= ~NETIF_F_LRO;
-       
 
+#endif
        return features;
 }
 
@@ -7451,7 +8003,6 @@ static int ixgbe_set_features(struct net_device *netdev,
                              netdev_features_t features)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       netdev_features_t changed = netdev->features ^ features;
        bool need_reset = false;
 
        /* Make sure RSC matches LRO, reset if change */
@@ -7465,9 +8016,14 @@ static int ixgbe_set_features(struct net_device *netdev,
                    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
                        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
                        need_reset = true;
-               } else if ((changed ^ features) & NETIF_F_LRO) {
+               } else if ((netdev->features ^ features) & NETIF_F_LRO) {
+#ifdef IXGBE_NO_LRO
                        e_info(probe, "rx-usecs set too low, "
                               "disabling RSC\n");
+#else
+                       e_info(probe, "rx-usecs set too low, "
+                              "falling back to software LRO\n");
+#endif
                }
        }
 
@@ -7475,23 +8031,47 @@ static int ixgbe_set_features(struct net_device *netdev,
         * Check if Flow Director n-tuple support was enabled or disabled.  If
         * the state changed, we need to reset.
         */
-       if (!(features & NETIF_F_NTUPLE)) {
-               if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
-                       /* turn off Flow Director, set ATR and reset */
-                       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-                           !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-                               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-                       need_reset = true;
-               }
-               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-       } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+       switch (features & NETIF_F_NTUPLE) {
+       case NETIF_F_NTUPLE:
                /* turn off ATR, enable perfect filters and reset */
+               if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       need_reset = true;
+
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
                adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               need_reset = true;
+               break;
+       default:
+               /* turn off perfect filters, enable ATR and reset */
+               if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+                       need_reset = true;
+
+               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+               /* We cannot enable ATR if VMDq is enabled */
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+
+               /* We cannot enable ATR if we have 2 or more traffic classes */
+               if (netdev_get_num_tc(netdev) > 1)
+                       break;
+
+               /* We cannot enable ATR if RSS is disabled */
+               if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+                       break;
+
+               /* A sample rate of 0 indicates ATR disabled */
+               if (!adapter->atr_sample_rate)
+                       break;
+
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               break;
        }
 
-       netdev->features = features;
+       if (features & NETIF_F_HW_VLAN_RX)
+               ixgbe_vlan_stripping_enable(adapter);
+       else
+               ixgbe_vlan_stripping_disable(adapter);
+
        if (need_reset)
                ixgbe_do_reset(netdev);
 
@@ -7499,58 +8079,151 @@ static int ixgbe_set_features(struct net_device *netdev,
 
 }
 
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_NET_DEVICE_OPS
 static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
        .ndo_select_queue       = ixgbe_select_queue,
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
-       .ndo_set_multicast_list = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
        .ndo_change_mtu         = ixgbe_change_mtu,
        .ndo_tx_timeout         = ixgbe_tx_timeout,
+#ifdef NETIF_F_HW_VLAN_TX
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
+#endif
        .ndo_do_ioctl           = ixgbe_ioctl,
+#ifdef IFLA_VF_MAX
        .ndo_set_vf_mac         = ixgbe_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
        .ndo_set_vf_tx_rate     = ixgbe_ndo_set_vf_bw,
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
+#endif
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
-       .ndo_get_stats64        = ixgbe_get_stats64,
+#endif
+       .ndo_get_stats          = ixgbe_get_stats,
+#ifdef HAVE_SETUP_TC
        .ndo_setup_tc           = ixgbe_setup_tc,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
        .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
+#endif
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
        .ndo_fcoe_enable = ixgbe_fcoe_enable,
        .ndo_fcoe_disable = ixgbe_fcoe_disable,
+#endif
+#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
        .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+#endif
 #endif /* IXGBE_FCOE */
+#ifdef HAVE_NDO_SET_FEATURES
        .ndo_set_features = ixgbe_set_features,
        .ndo_fix_features = ixgbe_fix_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_VLAN_RX_REGISTER
+       .ndo_vlan_rx_register   = &ixgbe_vlan_mode,
+#endif
 };
 
-static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
-                                    const struct ixgbe_info *ii)
+#endif /* HAVE_NET_DEVICE_OPS */
+
+
+
+void ixgbe_assign_netdev_ops(struct net_device *dev)
 {
-#ifdef CONFIG_PCI_IOV
+#ifdef HAVE_NET_DEVICE_OPS
+       dev->netdev_ops = &ixgbe_netdev_ops;
+#else /* HAVE_NET_DEVICE_OPS */
+       dev->open = &ixgbe_open;
+       dev->stop = &ixgbe_close;
+       dev->hard_start_xmit = &ixgbe_xmit_frame;
+       dev->get_stats = &ixgbe_get_stats;
+#ifdef HAVE_SET_RX_MODE
+       dev->set_rx_mode = &ixgbe_set_rx_mode;
+#endif
+       dev->set_multicast_list = &ixgbe_set_rx_mode;
+       dev->set_mac_address = &ixgbe_set_mac;
+       dev->change_mtu = &ixgbe_change_mtu;
+       dev->do_ioctl = &ixgbe_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+       dev->tx_timeout = &ixgbe_tx_timeout;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+       dev->vlan_rx_register = &ixgbe_vlan_mode;
+       dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid;
+       dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = &ixgbe_netpoll;
+#endif
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+       dev->select_queue = &ixgbe_select_queue;
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* HAVE_NET_DEVICE_OPS */
+       ixgbe_set_ethtool_ops(dev);
+       dev->watchdog_timeo = 5 * HZ;
+}
+
+
+/**
+ * ixgbe_wol_supported - Check whether device supports WoL
+ * @hw: hw specific details
+ * @device_id: the device ID
+ * @subdev_id: the subsystem device ID
+ *
+ * This function is used by probe and ethtool to determine
+ * which devices have WoL support
+ *
+ **/
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+                       u16 subdevice_id)
+{
+       int is_wol_supported = 0;
        struct ixgbe_hw *hw = &adapter->hw;
+       u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
 
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               return;
+       switch (device_id) {
+       case IXGBE_DEV_ID_82599_SFP:
+               /* Only these subdevices can support WOL */
+               switch (subdevice_id) {
+               case IXGBE_SUBDEV_ID_82599_560FLR:
+                       /* only the first port is supported; fall through */
+                       if (hw->bus.func != 0)
+                               break;
+               case IXGBE_SUBDEV_ID_82599_SFP:
+                       is_wol_supported = 1;
+                       break;
+               }
+               break;
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+                       is_wol_supported = 1;
+               break;
+       case IXGBE_DEV_ID_82599_KX4:
+               is_wol_supported = 1;
+               break;
+       case IXGBE_DEV_ID_X540T:
+               /* check EEPROM to see if WOL is enabled */
+               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+                    (hw->bus.func == 0))) {
+                       is_wol_supported = 1;
+               }
+               break;
+       }
 
-       /* The 82599 supports up to 64 VFs per physical function
-        * but this implementation limits allocation to 63 so that
-        * basic networking resources are still available to the
-        * physical function
-        */
-       adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
-       ixgbe_enable_sriov(adapter, ii);
-#endif /* CONFIG_PCI_IOV */
+       return is_wol_supported;
 }
 
 /**
@@ -7569,42 +8242,38 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 {
        struct net_device *netdev;
        struct ixgbe_adapter *adapter = NULL;
-       struct ixgbe_hw *hw;
-       const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+       struct ixgbe_hw *hw = NULL;
        static int cards_found;
        int i, err, pci_using_dac;
+       u16 offset;
+       u16 eeprom_verh, eeprom_verl, eeprom_cfg_blkh, eeprom_cfg_blkl;
+       u32 etrack_id;
+       u16 build, major, patch;
+       char *info_string, *i_s_var;
        u8 part_str[IXGBE_PBANUM_LENGTH];
+       enum ixgbe_mac_type mac_type = ixgbe_mac_unknown;
+#ifdef HAVE_TX_MQ
        unsigned int indices = num_possible_cpus();
+#endif /* HAVE_TX_MQ */
 #ifdef IXGBE_FCOE
        u16 device_caps;
 #endif
-       u32 eec;
-       u16 wol_cap;
-
-       /* Catch broken hardware that put the wrong VF device ID in
-        * the PCIe SR-IOV capability.
-        */
-       if (pdev->is_virtfn) {
-               WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
-                    pci_name(pdev), pdev->vendor, pdev->device);
-               return -EINVAL;
-       }
 
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) &&
+           !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
+                       err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
                                                    DMA_BIT_MASK(32));
                        if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
+                               dev_err(pci_dev_to_dev(pdev), "No usable DMA "
+                                       "configuration, aborting\n");
                                goto err_dma;
                        }
                }
@@ -7614,30 +8283,65 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM), ixgbe_driver_name);
        if (err) {
-               dev_err(&pdev->dev,
+               dev_err(pci_dev_to_dev(pdev),
                        "pci_request_selected_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }
 
+       /*
+        * The mac_type is needed before the adapter is set up,
+        * so rather than maintain two devID -> MAC tables we dummy up
+        * an ixgbe_hw struct and use ixgbe_set_mac_type.
+        */
+       hw = vmalloc(sizeof(struct ixgbe_hw));
+       if (!hw) {
+               pr_info("Unable to allocate memory for early mac "
+                       "check\n");
+       } else {
+               hw->vendor_id = pdev->vendor;
+               hw->device_id = pdev->device;
+               ixgbe_set_mac_type(hw);
+               mac_type = hw->mac.type;
+               vfree(hw);
+       }
+
+       /*
+        * Workaround for silicon errata on 82598. Disable L0s in the PCI switch
+        * port to which the 82598 is connected to prevent duplicate
+        * completions caused by L0s.  We need the mac type so that we only
+        * do this on 82598 devices; ixgbe_set_mac_type does this for us if
+        * we set its device ID.
+        */
+       if (mac_type == ixgbe_mac_82598EB)
+               pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
        pci_enable_pcie_error_reporting(pdev);
 
        pci_set_master(pdev);
-       pci_save_state(pdev);
 
-#ifdef CONFIG_IXGBE_DCB
-       indices *= MAX_TRAFFIC_CLASS;
+#ifdef HAVE_TX_MQ
+#ifdef CONFIG_DCB
+       indices *= IXGBE_DCB_MAX_TRAFFIC_CLASS;
+#endif /* CONFIG_DCB */
+
+#ifdef IXGBE_FCOE
+       indices += min_t(unsigned int, num_possible_cpus(),
+                        IXGBE_MAX_FCOE_INDICES);
 #endif
 
-       if (ii->mac == ixgbe_mac_82598EB)
+       if (mac_type == ixgbe_mac_82598EB)
+#ifdef CONFIG_DCB
+               indices = min_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES * 4);
+#else
                indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+#endif
        else
                indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
 
-#ifdef IXGBE_FCOE
-       indices += min_t(unsigned int, num_possible_cpus(),
-                        IXGBE_MAX_FCOE_INDICES);
-#endif
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+#else /* HAVE_TX_MQ */
+       netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+#endif /* HAVE_TX_MQ */
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
@@ -7654,6 +8358,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        hw->back = adapter;
        adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
+#ifdef HAVE_PCI_ERS
+       /*
+        * call save state here in standalone driver because it relies on
+        * adapter struct to exist, and needs to call netdev_priv
+        */
+       pci_save_state(pdev);
+
+#endif
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
@@ -7661,42 +8373,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_ioremap;
        }
 
-       for (i = 1; i <= 5; i++) {
-               if (pci_resource_len(pdev, i) == 0)
-                       continue;
-       }
+       ixgbe_assign_netdev_ops(netdev);
 
-       netdev->netdev_ops = &ixgbe_netdev_ops;
-       ixgbe_set_ethtool_ops(netdev);
-       netdev->watchdog_timeo = 5 * HZ;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
        adapter->bd_number = cards_found;
 
-       /* Setup hw api */
-       memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
-       hw->mac.type  = ii->mac;
-
-       /* EEPROM */
-       memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
-       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-       /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-       if (!(eec & (1 << 8)))
-               hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
-
-       /* PHY */
-       memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
-       hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-       /* ixgbe_identify_phy_generic will set prtad and mmds properly */
-       hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
-       hw->phy.mdio.mmds = 0;
-       hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-       hw->phy.mdio.dev = netdev;
-       hw->phy.mdio.mdio_read = ixgbe_mdio_read;
-       hw->phy.mdio.mdio_write = ixgbe_mdio_write;
-
-       ii->get_invariants(hw);
-
        /* setup the private structure */
        err = ixgbe_sw_init(adapter);
        if (err)
@@ -7713,8 +8395,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 
        /*
-        * If there is a fan on this device and it has failed log the
-        * failure.
+        * If we have a fan, this is as early we know, warn if we
+        * have had a failure.
         */
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
                u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -7722,8 +8404,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        e_crit(probe, "Fan has stopped, replace the adapter\n");
        }
 
-       if (allow_unsupported_sfp)
-               hw->allow_unsupported_sfp = allow_unsupported_sfp;
+       /*
+        * check_options must be called before setup_link to set up
+        * hw->fc completely
+        */
+       ixgbe_check_options(adapter);
 
        /* reset_hw fills in the perm_addr as well */
        hw->phy.reset_if_overtemp = true;
@@ -7743,80 +8428,143 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
        }
 
-       ixgbe_probe_vf(adapter, ii);
+#ifdef CONFIG_PCI_IOV
+       ixgbe_enable_sriov(adapter);
+
+#endif /* CONFIG_PCI_IOV */
+#ifdef MAX_SKB_FRAGS
+       netdev->features |= NETIF_F_SG |
+                           NETIF_F_IP_CSUM;
+
+#ifdef NETIF_F_IPV6_CSUM
+       netdev->features |= NETIF_F_IPV6_CSUM;
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+       netdev->features |= NETIF_F_HW_VLAN_TX |
+                           NETIF_F_HW_VLAN_RX;
+#endif
+#ifdef NETIF_F_TSO
+       netdev->features |= NETIF_F_TSO;
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_TSO6
+       netdev->features |= NETIF_F_TSO6;
+#endif /* NETIF_F_TSO6 */
+#ifdef NETIF_F_RXHASH
+       netdev->features |= NETIF_F_RXHASH;
+#endif /* NETIF_F_RXHASH */
+
+#ifdef HAVE_NDO_SET_FEATURES
+       netdev->features |= NETIF_F_RXCSUM;
+
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features;
+
+       /* give us the option of enabling RSC/LRO later */
+#ifdef IXGBE_NO_LRO
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+#endif
+               netdev->hw_features |= NETIF_F_LRO;
 
-       netdev->features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
-                          NETIF_F_IPV6_CSUM |
-                          NETIF_F_HW_VLAN_TX |
-                          NETIF_F_HW_VLAN_RX |
-                          NETIF_F_HW_VLAN_FILTER |
-                          NETIF_F_TSO |
-                          NETIF_F_TSO6 |
-                          NETIF_F_RXHASH |
-                          NETIF_F_RXCSUM;
+#else
+#ifdef NETIF_F_GRO
 
-       netdev->hw_features = netdev->features;
+       /* this is only needed on kernels prior to 2.6.39 */
+       netdev->features |= NETIF_F_GRO;
+#endif /* NETIF_F_GRO */
+#endif
 
+#ifdef NETIF_F_HW_VLAN_TX
+       /* set this bit last since it cannot be part of hw_features */
+       netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                netdev->features |= NETIF_F_SCTP_CSUM;
+#ifdef HAVE_NDO_SET_FEATURES
                netdev->hw_features |= NETIF_F_SCTP_CSUM |
                                       NETIF_F_NTUPLE;
+#endif
                break;
        default:
                break;
        }
 
-       netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_IP_CSUM;
-       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-       netdev->vlan_features |= NETIF_F_SG;
-
-       netdev->priv_flags |= IFF_UNICAST_FLT;
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+       netdev->vlan_features |= NETIF_F_SG |
+                                NETIF_F_IP_CSUM |
+                                NETIF_F_IPV6_CSUM |
+                                NETIF_F_TSO |
+                                NETIF_F_TSO6;
 
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
-                                   IXGBE_FLAG_DCB_ENABLED);
-
-#ifdef CONFIG_IXGBE_DCB
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+       if (netdev->features & NETIF_F_LRO) {
+               if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+                   ((adapter->rx_itr_setting == 1) ||
+                    (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) {
+                       adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+               } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+#ifdef IXGBE_NO_LRO
+                       e_dev_info("InterruptThrottleRate set too high, "
+                                  "disabling RSC\n");
+#else
+                       e_dev_info("InterruptThrottleRate set too high, "
+                                  "falling back to software LRO\n");
+#endif
+               }
+       }
+#ifdef CONFIG_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
+#ifdef NETIF_F_FSO
        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-               if (hw->mac.ops.get_device_caps) {
-                       hw->mac.ops.get_device_caps(hw, &device_caps);
-                       if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
-                               adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+               ixgbe_get_device_caps(hw, &device_caps);
+               if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) {
+                       adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+                       adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+                       e_dev_info("FCoE offload feature is not available. "
+                                  "Disabling FCoE offload feature\n");
+               } else {
+                       netdev->features |= NETIF_F_FSO |
+                                           NETIF_F_FCOE_CRC;
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+                       ixgbe_fcoe_ddp_enable(adapter);
+                       adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+                       netdev->features |= NETIF_F_FCOE_MTU;
+#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
                }
+
+               adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+               netdev->vlan_features |= NETIF_F_FSO |
+                                        NETIF_F_FCOE_CRC |
+                                        NETIF_F_FCOE_MTU;
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
        }
-       if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-               netdev->vlan_features |= NETIF_F_FCOE_CRC;
-               netdev->vlan_features |= NETIF_F_FSO;
-               netdev->vlan_features |= NETIF_F_FCOE_MTU;
-       }
+#endif /* NETIF_F_FSO */
 #endif /* IXGBE_FCOE */
        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
+#ifdef HAVE_NETDEV_VLAN_FEATURES
                netdev->vlan_features |= NETIF_F_HIGHDMA;
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
        }
-
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-               netdev->hw_features |= NETIF_F_LRO;
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-               netdev->features |= NETIF_F_LRO;
+#endif /* MAX_SKB_FRAGS */
 
        /* make sure the EEPROM is good */
-       if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
+       if (hw->eeprom.ops.validate_checksum &&
+           (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) {
                e_dev_err("The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_sw_init;
        }
 
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
        memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
        if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
@@ -7824,6 +8572,21 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                err = -EIO;
                goto err_sw_init;
        }
+#else
+       if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+               e_dev_err("invalid MAC address\n");
+               err = -EIO;
+               goto err_sw_init;
+       }
+#endif
+       memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
+              netdev->addr_len);
+       adapter->mac_table[0].queue = VMDQ_P(0);
+       adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+                                      IXGBE_MAC_STATE_IN_USE);
+       hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+                           adapter->mac_table[0].queue,
+                           IXGBE_RAH_AV);
 
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,
                    (unsigned long) adapter);
@@ -7835,81 +8598,44 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
-       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
-               netdev->hw_features &= ~NETIF_F_RXHASH;
-               netdev->features &= ~NETIF_F_RXHASH;
-       }
-
-       /* WOL not supported for all but the following */
        adapter->wol = 0;
-       switch (pdev->device) {
-       case IXGBE_DEV_ID_82599_SFP:
-               /* Only these subdevice supports WOL */
-               switch (pdev->subsystem_device) {
-               case IXGBE_SUBDEV_ID_82599_560FLR:
-                       /* only support first port */
-                       if (hw->bus.func != 0)
-                               break;
-               case IXGBE_SUBDEV_ID_82599_SFP:
-                       adapter->wol = IXGBE_WUFC_MAG;
-                       break;
-               }
-               break;
-       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-               /* All except this subdevice support WOL */
-               if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-                       adapter->wol = IXGBE_WUFC_MAG;
-               break;
-       case IXGBE_DEV_ID_82599_KX4:
+       if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
                adapter->wol = IXGBE_WUFC_MAG;
-               break;
-       case IXGBE_DEV_ID_X540T:
-               /* Check eeprom to see if it is enabled */
-               hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
-               wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
 
-               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-                    (hw->bus.func == 0)))
-                       adapter->wol = IXGBE_WUFC_MAG;
-               break;
-       }
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
-       /* save off EEPROM version number */
-       hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
-       hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
-
-       /* pick up the PCI bus settings for reporting later */
-       hw->mac.ops.get_bus_info(hw);
-
-       /* print bus type/speed/width info */
-       e_dev_info("(PCI Express:%s:%s) %pM\n",
-                  (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
-                   hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
-                   "Unknown"),
-                  (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
-                   hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
-                   hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
-                   "Unknown"),
-                  netdev->dev_addr);
-
-       err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
-       if (err)
-               strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
-       if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
-                          hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-                          part_str);
-       else
-               e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
-                          hw->mac.type, hw->phy.type, part_str);
 
-       if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
-               e_dev_warn("PCI-Express bandwidth available for this card is "
-                          "not sufficient for optimal performance.\n");
-               e_dev_warn("For optimal performance a x8 PCI-Express slot "
-                          "is required.\n");
+       /*
+        * Save off EEPROM version number and Option Rom version which
+        * together make a unique identify for the eeprom
+        */
+       ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+       ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+       etrack_id = (eeprom_verh << 16) | eeprom_verl;
+
+       ixgbe_read_eeprom(hw, 0x17, &offset);
+
+       /* Make sure offset to SCSI block is valid */
+       if (!(offset == 0x0) && !(offset == 0xffff)) {
+               ixgbe_read_eeprom(hw, offset + 0x84, &eeprom_cfg_blkh);
+               ixgbe_read_eeprom(hw, offset + 0x83, &eeprom_cfg_blkl);
+
+               /* Only display Option Rom if exist */
+               if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+                       major = eeprom_cfg_blkl >> 8;
+                       build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+                       patch = eeprom_cfg_blkh & 0x00ff;
+
+                       snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+                                "0x%08x, %d.%d.%d", etrack_id, major, build,
+                                patch);
+               } else {
+                       snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+                                "0x%08x", etrack_id);
+               }
+       } else {
+               snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+                        "0x%08x", etrack_id);
        }
 
        /* reset the hardware with the new settings */
@@ -7923,54 +8649,155 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                           "representative who provided you with this "
                           "hardware.\n");
        }
+       /* pick up the PCI bus settings for reporting later */
+       if (hw->mac.ops.get_bus_info)
+               hw->mac.ops.get_bus_info(hw);
+
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;
 
-       /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-       if (hw->mac.ops.disable_tx_laser &&
-           ((hw->phy.multispeed_fiber) ||
-            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-             (hw->mac.type == ixgbe_mac_82599EB))))
-               hw->mac.ops.disable_tx_laser(hw);
+       adapter->netdev_registered = true;
+
+       /* power down the optics */
+       if ((hw->phy.multispeed_fiber) ||
+           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+            (hw->mac.type == ixgbe_mac_82599EB)))
+               ixgbe_disable_tx_laser(hw);
 
        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
+       /* keep stopping all the transmit queues for older kernels */
+       netif_tx_stop_all_queues(netdev);
 
-#ifdef CONFIG_IXGBE_DCA
-       if (dca_add_requester(&pdev->dev) == 0) {
-               adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
-               ixgbe_setup_dca(adapter);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
+               err = dca_add_requester(&pdev->dev);
+               switch (err) {
+               case 0:
+                       adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+                       ixgbe_setup_dca(adapter);
+                       break;
+               /* -19 is returned from the kernel when no provider is found */
+               case -19:
+                       e_info(rx_err, "No DCA provider found. Please "
+                              "start ioatdma for DCA functionality.\n");
+                       break;
+               default:
+                       e_info(probe, "DCA registration failed: %d\n", err);
+                       break;
+               }
        }
 #endif
+
+       /* print all messages at the end so that we use our eth%d name */
+       /* print bus type/speed/width info */
+       e_dev_info("(PCI Express:%s:%s) ",
+                  (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+                  hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
+                  "Unknown"),
+                  (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+                  hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+                  hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+                  "Unknown"));
+
+       /* print the MAC address */
+       for (i = 0; i < 6; i++)
+               pr_cont("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+       /* First try to read PBA as a string */
+       err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH);
+       if (err)
+               strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+       if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+               e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+                      hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str);
+       else
+               e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n",
+                     hw->mac.type, hw->phy.type, part_str);
+
+       if (((hw->bus.speed == ixgbe_bus_speed_2500) &&
+            (hw->bus.width <= ixgbe_bus_width_pcie_x4)) ||
+           (hw->bus.width <= ixgbe_bus_width_pcie_x2)) {
+               e_dev_warn("PCI-Express bandwidth available for this "
+                          "card is not sufficient for optimal "
+                          "performance.\n");
+               e_dev_warn("For optimal performance a x8 PCI-Express "
+                          "slot is required.\n");
+       }
+
+#define INFO_STRING_LEN 255
+       info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+       if (!info_string) {
+               e_err(probe, "allocation for info string failed\n");
+               goto no_info_string;
+       }
+       i_s_var = info_string;
+       i_s_var += sprintf(info_string, "Enabled Features: ");
+       i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ",
+                          adapter->num_rx_queues, adapter->num_tx_queues);
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               i_s_var += sprintf(i_s_var, "FCoE ");
+#endif
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+               i_s_var += sprintf(i_s_var, "FdirHash ");
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+               i_s_var += sprintf(i_s_var, "DCB ");
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               i_s_var += sprintf(i_s_var, "DCA ");
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+               i_s_var += sprintf(i_s_var, "RSC ");
+#ifndef IXGBE_NO_LRO
+       else if (netdev->features & NETIF_F_LRO)
+               i_s_var += sprintf(i_s_var, "LRO ");
+#endif
+
+       BUG_ON(i_s_var > (info_string + INFO_STRING_LEN));
+       /* end features printing */
+       e_info(probe, "%s\n", info_string);
+       kfree(info_string);
+no_info_string:
+#ifdef CONFIG_PCI_IOV
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-               e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
                for (i = 0; i < adapter->num_vfs; i++)
                        ixgbe_vf_configuration(pdev, (i | 0x10000000));
        }
+#endif
 
-       /* firmware requires driver version to be 0xFFFFFFFF
-        * since os does not support feature
-        */
-       if (hw->mac.ops.set_fw_drv_ver)
-               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
-                                          0xFF);
+       /* firmware requires blank driver version */
+       ixgbe_set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);
 
+#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
 
-       e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
+       e_info(probe, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
+
+#ifdef IXGBE_SYSFS
+       if (ixgbe_sysfs_init(adapter))
+               e_err(probe, "failed to allocate sysfs resources\n");
+#else
+#ifdef IXGBE_PROCFS
+       if (ixgbe_procfs_init(adapter))
+               e_err(probe, "failed to allocate procfs resources\n");
+#endif /* IXGBE_PROCFS */
+#endif /* IXGBE_SYSFS */
+
        return 0;
 
 err_register:
-       ixgbe_release_hw_control(adapter);
        ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_release_hw_control(adapter);
 err_sw_init:
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               ixgbe_disable_sriov(adapter);
+#ifdef CONFIG_PCI_IOV
+       ixgbe_disable_sriov(adapter);
+#endif /* CONFIG_PCI_IOV */
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+       kfree(adapter->mac_table);
        iounmap(hw->hw_addr);
 err_ioremap:
        free_netdev(netdev);
@@ -7997,10 +8824,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
 
+
        set_bit(__IXGBE_DOWN, &adapter->state);
        cancel_work_sync(&adapter->service_task);
 
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                dca_remove_requester(&pdev->dev);
@@ -8008,36 +8836,47 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        }
 
 #endif
-#ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
+#ifdef IXGBE_SYSFS
+       ixgbe_sysfs_exit(adapter);
+#else
+#ifdef IXGBE_PROCFS
+       ixgbe_procfs_exit(adapter);
+#endif /* IXGBE_PROCFS */
+#endif /* IXGBE-SYSFS */
 
+#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
        /* remove the added san mac */
        ixgbe_del_sanmac_netdev(netdev);
 
-       if (netdev->reg_state == NETREG_REGISTERED)
+#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
+       if (adapter->netdev_registered) {
                unregister_netdev(netdev);
-
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-               if (!(ixgbe_check_vf_assignment(adapter)))
-                       ixgbe_disable_sriov(adapter);
-               else
-                       e_dev_warn("Unloading driver while VFs are assigned "
-                                  "- VFs will not be deallocated\n");
+               adapter->netdev_registered = false;
        }
 
-       ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+       ixgbe_disable_sriov(adapter);
+#endif /* CONFIG_PCI_IOV */
+
+#ifdef IXGBE_FCOE
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+       ixgbe_fcoe_ddp_disable(adapter);
 
+#endif
+#endif /* IXGBE_FCOE */
+       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_release_hw_control(adapter);
 
-       iounmap(adapter->hw.hw_addr);
-       pci_release_selected_regions(pdev, pci_select_bars(pdev,
-                                    IORESOURCE_MEM));
+#ifdef HAVE_DCBNL_IEEE
+       kfree(adapter->ixgbe_ieee_pfc);
+       kfree(adapter->ixgbe_ieee_ets);
 
-       e_dev_info("complete\n");
+#endif
+       iounmap(adapter->hw.hw_addr);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
 
+       kfree(adapter->mac_table);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
@@ -8045,6 +8884,30 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+       u16 value;
+       struct ixgbe_adapter *adapter = hw->back;
+
+       pci_read_config_word(adapter->pdev, reg, &value);
+       return value;
+}
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+
+       pci_write_config_word(adapter->pdev, reg, value);
+}
+
+void ewarn(struct ixgbe_hw *hw, const char *st, u32 status)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+
+       netif_warn(adapter, drv, adapter->netdev,  "%s", st);
+}
+
+#ifdef HAVE_PCI_ERS
 /**
  * ixgbe_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -8167,7 +9030,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        pci_ers_result_t result;
-       int err;
 
        if (pci_enable_device_mem(pdev)) {
                e_err(probe, "Cannot re-enable PCI device after reset.\n");
@@ -8175,21 +9037,22 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
+               /*
+                * After second error pci->state_saved is false, this
+                * resets it so EEH doesn't break.
+                */
                pci_save_state(pdev);
 
                pci_wake_from_d3(pdev, false);
 
-               ixgbe_reset(adapter);
+               adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+               ixgbe_service_event_schedule(adapter);
+
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }
 
-       err = pci_cleanup_aer_uncorrect_error_status(pdev);
-       if (err) {
-               e_dev_err("pci_cleanup_aer_uncorrect_error_status "
-                         "failed 0x%0x\n", err);
-               /* non-fatal, continue */
-       }
+       pci_cleanup_aer_uncorrect_error_status(pdev);
 
        return result;
 }
@@ -8226,6 +9089,7 @@ static struct pci_error_handlers ixgbe_err_handler = {
        .resume = ixgbe_io_resume,
 };
 
+#endif
 static struct pci_driver ixgbe_driver = {
        .name     = ixgbe_driver_name,
        .id_table = ixgbe_pci_tbl,
@@ -8235,10 +9099,22 @@ static struct pci_driver ixgbe_driver = {
        .suspend  = ixgbe_suspend,
        .resume   = ixgbe_resume,
 #endif
+#ifndef USE_REBOOT_NOTIFIER
        .shutdown = ixgbe_shutdown,
+#endif
+#ifdef HAVE_PCI_ERS
        .err_handler = &ixgbe_err_handler
+#endif
 };
 
+bool ixgbe_is_ixgbe(struct pci_dev *pcidev)
+{
+       if (pci_dev_driver(pcidev) != &ixgbe_driver)
+               return false;
+       else
+               return true;
+}
+
 /**
  * ixgbe_init_module - Driver Registration Routine
  *
@@ -8251,7 +9127,13 @@ static int __init ixgbe_init_module(void)
        pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
        pr_info("%s\n", ixgbe_copyright);
 
-#ifdef CONFIG_IXGBE_DCA
+
+#ifdef IXGBE_PROCFS
+       if (ixgbe_procfs_topdir_init())
+               pr_info("Procfs failed to initialize topdir\n");
+#endif
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        dca_register_notify(&dca_notifier);
 #endif
 
@@ -8269,14 +9151,16 @@ module_init(ixgbe_init_module);
  **/
 static void __exit ixgbe_exit_module(void)
 {
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
+#ifdef IXGBE_PROCFS
+       ixgbe_procfs_topdir_exit();
+#endif
 }
 
-#ifdef CONFIG_IXGBE_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
 {
@@ -8287,9 +9171,8 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
        return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
-
-#endif /* CONFIG_IXGBE_DCA */
-
+#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
+
index 4cf5e8e664a465aa285a41a140c10916f811358c..b0ec6820cb36d80daecfe6b403dfb792aec27ebd 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include "ixgbe_type.h"
-#include "ixgbe_common.h"
 #include "ixgbe_mbx.h"
 
 /**
@@ -151,7 +148,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
                countdown--;
                if (!countdown)
                        break;
-               udelay(mbx->usec_delay);
+               udelay(mbx->udelay);
        }
 
 out:
@@ -177,7 +174,7 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
                countdown--;
                if (!countdown)
                        break;
-               udelay(mbx->usec_delay);
+               udelay(mbx->udelay);
        }
 
 out:
@@ -194,8 +191,7 @@ out:
  *  returns SUCCESS if it successfully received a message notification and
  *  copied it into the receive buffer.
  **/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                                u16 mbx_id)
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val = IXGBE_ERR_MBX;
@@ -222,8 +218,8 @@ out:
  *  returns SUCCESS if it successfully copied message into the buffer and
  *  received an ack to that message within delay * timeout period
  **/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                           u16 mbx_id)
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                          u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val = IXGBE_ERR_MBX;
@@ -242,6 +238,241 @@ out:
        return ret_val;
 }
 
+/**
+ *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Setups up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+       mbx->ops.read_posted = ixgbe_read_posted_mbx;
+       mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ *  ixgbe_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.
+ **/
+static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+       u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+       v2p_mailbox |= hw->mbx.v2p_mailbox;
+       hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+       return v2p_mailbox;
+}
+
+/**
+ *  ixgbe_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  This function is used to check for the read to clear bits within
+ *  the V2P mailbox.
+ **/
+static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+       u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       if (v2p_mailbox & mask)
+               ret_val = 0;
+
+       hw->mbx.v2p_mailbox &= ~mask;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+               ret_val = 0;
+               hw->mbx.stats.reqs++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+               ret_val = 0;
+               hw->mbx.stats.acks++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns true if the PF has set the reset done bit or else false
+ **/
+static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+           IXGBE_VFMAILBOX_RSTI))) {
+               ret_val = 0;
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       /* Take ownership of the buffer */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+       /* reserve mailbox for vf use */
+       if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+               ret_val = 0;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                             u16 mbx_id)
+{
+       s32 ret_val;
+       u16 i;
+
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_check_for_msg_vf(hw, 0);
+       ixgbe_check_for_ack_vf(hw, 0);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* Drop VFU and interrupt the PF to tell it a message has been sent */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                            u16 mbx_id)
+{
+       s32 ret_val = 0;
+       u16 i;
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+       /* Acknowledge receipt and release mailbox, then we're done */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+       /* start mailbox as timed out and let the reset_hw call set the timeout
+        * value to begin communications */
+       mbx->timeout = 0;
+       mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+       mbx->ops.read = ixgbe_read_mbx_vf;
+       mbx->ops.write = ixgbe_write_mbx_vf;
+       mbx->ops.read_posted = ixgbe_read_posted_mbx;
+       mbx->ops.write_posted = ixgbe_write_posted_mbx;
+       mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+       mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+       mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+}
+
 static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
 {
        u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -269,7 +500,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
-                                   index)) {
+                                   index)) {
                ret_val = 0;
                hw->mbx.stats.reqs++;
        }
@@ -291,7 +522,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
-                                   index)) {
+                                   index)) {
                ret_val = 0;
                hw->mbx.stats.acks++;
        }
@@ -366,7 +597,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
  *  returns SUCCESS if it successfully copied message into the buffer
  **/
 static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                              u16 vf_number)
+                             u16 vf_number)
 {
        s32 ret_val;
        u16 i;
@@ -407,7 +638,7 @@ out_no_write:
  *  a message due to a VF request so no polling for message is needed.
  **/
 static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                             u16 vf_number)
+                            u16 vf_number)
 {
        s32 ret_val;
        u16 i;
@@ -431,7 +662,6 @@ out_no_read:
        return ret_val;
 }
 
-#ifdef CONFIG_PCI_IOV
 /**
  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
  *  @hw: pointer to the HW structure
@@ -447,25 +677,21 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
                return;
 
        mbx->timeout = 0;
-       mbx->usec_delay = 0;
+       mbx->udelay = 0;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+       mbx->ops.read = ixgbe_read_mbx_pf;
+       mbx->ops.write = ixgbe_write_mbx_pf;
+       mbx->ops.read_posted = ixgbe_read_posted_mbx;
+       mbx->ops.write_posted = ixgbe_write_posted_mbx;
+       mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+       mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+       mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
 
        mbx->stats.msgs_tx = 0;
        mbx->stats.msgs_rx = 0;
        mbx->stats.reqs = 0;
        mbx->stats.acks = 0;
        mbx->stats.rsts = 0;
-
-       mbx->size = IXGBE_VFMAILBOX_SIZE;
 }
-#endif /* CONFIG_PCI_IOV */
-
-struct ixgbe_mbx_operations mbx_ops_generic = {
-       .read                   = ixgbe_read_mbx_pf,
-       .write                  = ixgbe_write_mbx_pf,
-       .read_posted            = ixgbe_read_posted_mbx,
-       .write_posted           = ixgbe_write_posted_mbx,
-       .check_for_msg          = ixgbe_check_for_msg_pf,
-       .check_for_ack          = ixgbe_check_for_ack_pf,
-       .check_for_rst          = ixgbe_check_for_rst_pf,
-};
-
index 310bdd9610757558827d31155ad1bc839d617bfb..124f00de7f472de7cbfe16240619b897acb34cdf 100644 (file)
 
 #include "ixgbe_type.h"
 
-#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX               -100
-
-#define IXGBE_VFMAILBOX             0x002FC
-#define IXGBE_VFMBMEM               0x00200
-
-#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
-
-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+#define IXGBE_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX          -100
+
+#define IXGBE_VFMAILBOX                0x002FC
+#define IXGBE_VFMBMEM          0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ    0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK    0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU    0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU    0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS  0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK  0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI   0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD   0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS       0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS    0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK    0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU    0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU    0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU   0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK       0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1                0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK       0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1                0x00010000 /* bit for VF 1 ack */
 
 
 /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
  * PF.  The reverse is true if it is IXGBE_PF_*.
  * Message ACK's are the value or'd with 0xF0000000
  */
-#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                               * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                               * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                                 clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT    16
-/* bits 23:16 are used for exra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
-
-#define IXGBE_VF_RESET            0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VT_MSGTYPE_ACK   0x80000000 /* Messages below or'd with
+                                           * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK  0x40000000 /* Messages below or'd with
+                                           * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS   0x20000000 /* Indicates that VF is still
+                                           * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK  (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET         0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN   0x06 /* VF requests PF for unicast filter */
 
 /* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN 4
+#define IXGBE_VF_PERMADDR_MSG_LEN      4
 /* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD     3
+#define IXGBE_VF_MC_TYPE_WORD          3
 
-#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */
+#define IXGBE_PF_CONTROL_MSG           0x0100 /* PF control message */
 
-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT      2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY                500  /* microseconds between retries */
 
 s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
 s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
 s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-#ifdef CONFIG_PCI_IOV
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
-#endif /* CONFIG_PCI_IOV */
-
-extern struct ixgbe_mbx_operations mbx_ops_generic;
 
 #endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_osdep.h b/drivers/net/ixgbe/ixgbe_osdep.h
new file mode 100644 (file)
index 0000000..c96885f
--- /dev/null
@@ -0,0 +1,132 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of ixgbe
+ * includes register access macros
+ */
+
+#ifndef _IXGBE_OSDEP_H_
+#define _IXGBE_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+
+#ifndef msleep
+#define msleep(x)      do { if (in_interrupt()) { \
+                               /* Don't mdelay in interrupt context! */ \
+                               BUG(); \
+                       } else { \
+                               msleep(x); \
+                       } } while (0)
+
+#endif
+
+#undef ASSERT
+
+#ifdef DBG
+#define hw_dbg(hw, S, A...)    printk(KERN_DEBUG S, ## A)
+#else
+#define hw_dbg(hw, S, A...)    do {} while (0)
+#endif
+
+#define e_dev_info(format, arg...) \
+       dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_warn(format, arg...) \
+       dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_err(format, arg...) \
+       dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_notice(format, arg...) \
+       dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_info(msglvl, format, arg...) \
+       netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+       netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+       netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+       netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+
+
+#ifdef DBG
+#define IXGBE_WRITE_REG(a, reg, value) do {\
+       switch (reg) { \
+       case IXGBE_EIMS: \
+       case IXGBE_EIMC: \
+       case IXGBE_EIAM: \
+       case IXGBE_EIAC: \
+       case IXGBE_EICR: \
+       case IXGBE_EICS: \
+               printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, \
+                      reg, (u32)(value)); \
+       default: \
+               break; \
+       } \
+       writel((value), ((a)->hw_addr + (reg))); \
+} while (0)
+#else
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#endif
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+       writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+       readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#ifndef writeq
+#define writeq(val, addr)      do { writel((u32) (val), addr); \
+                                    writel((u32) (val >> 32), (addr + 4)); \
+                               } while (0);
+#endif
+
+#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+struct ixgbe_hw;
+extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
+extern void ewarn(struct ixgbe_hw *hw, const char *str, u32 status);
+
+#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
+#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
+#define IXGBE_EEPROM_GRANT_ATTEMPS 100
+#define IXGBE_HTONL(_i) htonl(_i)
+#define IXGBE_NTOHL(_i) ntohl(_i)
+#define IXGBE_NTOHS(_i) ntohs(_i)
+#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
+#define EWARN(H, W, S) ewarn(H, W, S) 
+
+#endif /* _IXGBE_OSDEP_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_param.c b/drivers/net/ixgbe/ixgbe_param.c
new file mode 100644 (file)
index 0000000..ae8d7a4
--- /dev/null
@@ -0,0 +1,1094 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ixgbe.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IXGBE_MAX_NIC  32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED        0
+#define OPTION_ENABLED 1
+
+#define STRINGIFY(foo) #foo /* magic for getting defines into strings */
+#define XSTRINGIFY(bar)        STRINGIFY(bar)
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when ixgbe_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define IXGBE_PARAM(X, desc) \
+       static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
+       MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \
+       MODULE_PARM_DESC(X, desc);
+#else
+#define IXGBE_PARAM(X, desc) \
+       static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
+       static unsigned int num_##X; \
+       module_param_array_named(X, X, int, &num_##X, 0); \
+       MODULE_PARM_DESC(X, desc);
+#endif
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0-2
+ *  - 0 - Legacy Interrupt
+ *  - 1 - MSI Interrupt
+ *  - 2 - MSI-X Interrupt(s)
+ *
+ * Default Value: 2
+ */
+IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), "
+           "default IntMode (deprecated)");
+IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), "
+           "default 2");
+#define IXGBE_INT_LEGACY               0
+#define IXGBE_INT_MSI                  1
+#define IXGBE_INT_MSIX                 2
+#define IXGBE_DEFAULT_INT              IXGBE_INT_MSIX
+
+/* MQ - Multiple Queue enable/disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables MQ
+ *  - 1 - enables MQ
+ *
+ * Default Value: 1
+ */
+
+IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1");
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+/* DCA - Direct Cache Access (DCA) Control
+ *
+ * This option allows the device to hint to DCA enabled processors
+ * which CPU should have its cache warmed with the data being
+ * transferred over PCIe.  This can increase performance by reducing
+ * cache misses.  ixgbe hardware supports DCA for:
+ * tx descriptor writeback
+ * rx descriptor writeback
+ * rx data
+ * rx data header only (in packet split mode)
+ *
+ * enabling option 2 can cause cache thrash in some tests, particularly
+ * if the CPU is completely utilized
+ *
+ * Valid Range: 0 - 2
+ *  - 0 - disables DCA
+ *  - 1 - enables DCA
+ *  - 2 - enables DCA with rx data included
+ *
+ * Default Value: 2
+ */
+
+#define IXGBE_MAX_DCA 2
+
+IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, "
+           "1=descriptor only, 2=descriptor and data");
+#endif
+/* RSS - Receive-Side Scaling (RSS) Descriptor Queues
+ *
+ * Valid Range: 0-16
+ *  - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()).
+ *  - 1-16 - enables RSS and sets the Desc. Q's to the specified value.
+ *
+ * Default Value: 0
+ */
+
+IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, "
+           "default 0=number of cpus");
+
+/* VMDQ - Virtual Machine Device Queues (VMDQ)
+ *
+ * Valid Range: 1-16
+ *  - 1 Disables VMDQ by allocating only a single queue.
+ *  - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value.
+ *
+ * Default Value: 1
+ */
+
+#define IXGBE_DEFAULT_NUM_VMDQ 8
+
+IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, "
+           "2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")");
+
+#ifdef CONFIG_PCI_IOV
+/* max_vfs - SR I/O Virtualization
+ *
+ * Valid Range: 0-63
+ *  - 0 Disables SR-IOV
+ *  - 1-63 - enables SR-IOV and sets the number of VFs enabled
+ *
+ * Default Value: 0
+ */
+
+#define MAX_SRIOV_VFS 63
+
+IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), "
+           "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable "
+           "this many VFs");
+
+/* L2LBen - L2 Loopback enable
+ *
+ * Valid Range: 0-1
+ *  - 0 Disables L2 loopback
+ *  - 1 Enables L2 loopback
+ *
+ * Default Value: 1
+ */
+/*
+ *Note:
+ *=====
+ * This is a temporary solution to enable SR-IOV features testing with
+ * external switches. As soon as an integrated VEB management interface
+ * becomes available this feature will be removed.
+*/
+IXGBE_PARAM(L2LBen, "L2 Loopback Enable: 0 = disable, 1 = enable (default)");
+#endif
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 956-488281 (0=off, 1=dynamic)
+ *
+ * Default Value: 1
+ */
+#define DEFAULT_ITR            1
+IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, "
+           "(0,1,956-488281), default 1");
+#define MAX_ITR                IXGBE_MAX_INT_RATE
+#define MIN_ITR                IXGBE_MIN_INT_RATE
+
+#ifndef IXGBE_NO_LLI
+/* LLIPort (Low Latency Interrupt TCP Port)
+ *
+ * Valid Range: 0 - 65535
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)");
+
+#define DEFAULT_LLIPORT                0
+#define MAX_LLIPORT            0xFFFF
+#define MIN_LLIPORT            0
+
+/* LLIPush (Low Latency Interrupt on TCP Push flag)
+ *
+ * Valid Range: 0,1
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)");
+
+#define DEFAULT_LLIPUSH                0
+#define MAX_LLIPUSH            1
+#define MIN_LLIPUSH            0
+
+/* LLISize (Low Latency Interrupt on Packet Size)
+ *
+ * Valid Range: 0 - 1500
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)");
+
+#define DEFAULT_LLISIZE                0
+#define MAX_LLISIZE            1500
+#define MIN_LLISIZE            0
+
+/* LLIEType (Low Latency Interrupt Ethernet Type)
+ *
+ * Valid Range: 0 - 0x8fff
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type");
+
+#define DEFAULT_LLIETYPE       0
+#define MAX_LLIETYPE           0x8fff
+#define MIN_LLIETYPE           0
+
+/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold)
+ *
+ * Valid Range: 0 - 7
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold");
+
+#define DEFAULT_LLIVLANP       0
+#define MAX_LLIVLANP           7
+#define MIN_LLIVLANP           0
+
+#endif /* IXGBE_NO_LLI */
+#ifdef HAVE_TX_MQ
+/* Flow Director packet buffer allocation level
+ *
+ * Valid Range: 1-3
+ *   1 = 8k hash/2k perfect,
+ *   2 = 16k hash/4k perfect,
+ *   3 = 32k hash/8k perfect
+ *
+ * Default Value: 0
+ */
+IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n"
+           "\t\t\t1 = 8k hash filters or 2k perfect filters\n"
+           "\t\t\t2 = 16k hash filters or 4k perfect filters\n"
+           "\t\t\t3 = 32k hash filters or 8k perfect filters");
+
+#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K
+
+/* Software ATR packet sample rate
+ *
+ * Valid Range: 0-255  0 = off, 1-255 = rate of Tx packet inspection
+ *
+ * Default Value: 20
+ */
+IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate");
+
+#define IXGBE_MAX_ATR_SAMPLE_RATE      255
+#define IXGBE_MIN_ATR_SAMPLE_RATE      1
+#define IXGBE_ATR_SAMPLE_RATE_OFF      0
+#define IXGBE_DEFAULT_ATR_SAMPLE_RATE  20
+
+#endif /* HAVE_TX_MQ */
+#ifdef IXGBE_FCOE
+/* FCoE - Fibre Channel over Ethernet Offload  Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables FCoE Offload
+ *  - 1 - enables FCoE Offload
+ *
+ * Default Value: 1
+ */
+IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1");
+
+#endif /* IXGBE_FCOE */
+/* Enable/disable Large Receive Offload
+ *
+ * Valid Values: 0(off), 1(on)
+ *
+ * Default Value: 1
+ */
+IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 1 = on");
+
+/* Enable/disable support for untested SFP+ modules on 82599-based adapters
+ *
+ * Valid Values: 0(Disable), 1(Enable)
+ *
+ * Default Value: 0
+ */
+IXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested "
+           "SFP+ modules on 82599 based adapters, default 0 = Disable");
+
+struct ixgbe_option {
+       enum { enable_option, range_option, list_option } type;
+       const char *name;
+       const char *err;
+       int def;
+       union {
+               struct { /* range_option info */
+                       int min;
+                       int max;
+               } r;
+               struct { /* list_option info */
+                       int nr;
+                       const struct ixgbe_opt_list {
+                               int i;
+                               char *str;
+                       } *p;
+               } l;
+       } arg;
+};
+
+/*
+ * ixgbe_validate_option - validate and normalize one module parameter
+ * @value: user-supplied value; replaced with opt->def when unset or invalid
+ * @opt: option descriptor (type, bounds or list, default, error text)
+ *
+ * Returns 0 when the value is acceptable (or was unset and defaulted),
+ * -1 when it was rejected and the default was substituted.
+ *
+ * Fix vs. original: *value is unsigned int but was printed with %d in all
+ * three printk()s (format/argument type mismatch); now printed with %u.
+ * Also dropped the stray double space in the "Invalid" log message.
+ */
+static int __devinit ixgbe_validate_option(unsigned int *value,
+                                          struct ixgbe_option *opt)
+{
+       if (*value == OPTION_UNSET) {
+               *value = opt->def;
+               return 0;
+       }
+
+       switch (opt->type) {
+       case enable_option:
+               switch (*value) {
+               case OPTION_ENABLED:
+                       printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name);
+                       return 0;
+               case OPTION_DISABLED:
+                       printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name);
+                       return 0;
+               }
+               break;
+       case range_option:
+               if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+                       printk(KERN_INFO "ixgbe: %s set to %u\n", opt->name,
+                              *value);
+                       return 0;
+               }
+               break;
+       case list_option: {
+               int i;
+               const struct ixgbe_opt_list *ent;
+
+               for (i = 0; i < opt->arg.l.nr; i++) {
+                       ent = &opt->arg.l.p[i];
+                       if (*value == ent->i) {
+                               if (ent->str[0] != '\0')
+                                       printk(KERN_INFO "%s\n", ent->str);
+                               return 0;
+                       }
+               }
+       }
+               break;
+       default:
+               BUG();
+       }
+
+       /* fell through every accept path: reject, log, apply the default */
+       printk(KERN_INFO "ixgbe: Invalid %s specified (%u), %s\n",
+              opt->name, *value, opt->err);
+       *value = opt->def;
+       return -1;
+}
+
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) /* element count of a true array */
+
+/**
+ * ixgbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
+{
+       int bd = adapter->bd_number;
+       u32 *aflags = &adapter->flags;
+       struct ixgbe_ring_feature *feature = adapter->ring_feature;
+
+       if (bd >= IXGBE_MAX_NIC) {
+               printk(KERN_NOTICE
+                      "Warning: no configuration for board #%d\n", bd);
+               printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+               bd = IXGBE_MAX_NIC;
+#endif
+       }
+
+       { /* Interrupt Mode */
+               unsigned int int_mode;
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Interrupt Mode",
+                       .err =
+                         "using default of "__MODULE_STRING(IXGBE_DEFAULT_INT),
+                       .def = IXGBE_DEFAULT_INT,
+                       .arg = { .r = { .min = IXGBE_INT_LEGACY,
+                                       .max = IXGBE_INT_MSIX} }
+               };
+
+#ifdef module_param_array
+               if (num_IntMode > bd || num_InterruptType > bd) {
+#endif
+                       /* IntMode takes precedence; fall back to the
+                        * legacy InterruptType parameter when unset */
+                       int_mode = IntMode[bd];
+                       if (int_mode == OPTION_UNSET)
+                               int_mode = InterruptType[bd];
+                       ixgbe_validate_option(&int_mode, &opt);
+                       switch (int_mode) {
+                       case IXGBE_INT_MSIX:
+                               if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE))
+                                       printk(KERN_INFO
+                                              "Ignoring MSI-X setting; "
+                                              "support unavailable\n");
+                               break;
+                       case IXGBE_INT_MSI:
+                               if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) {
+                                       printk(KERN_INFO
+                                              "Ignoring MSI setting; "
+                                              "support unavailable\n");
+                               } else {
+                                       *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                               }
+                               break;
+                       case IXGBE_INT_LEGACY:
+                       default:
+                               *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                               *aflags &= ~IXGBE_FLAG_MSI_CAPABLE;
+                               break;
+                       }
+#ifdef module_param_array
+               } else {
+                       /* default settings */
+                       if (opt.def == IXGBE_INT_MSIX &&
+                           *aflags & IXGBE_FLAG_MSIX_CAPABLE) {
+                               *aflags |= IXGBE_FLAG_MSIX_CAPABLE;
+                               *aflags |= IXGBE_FLAG_MSI_CAPABLE;
+                       } else if (opt.def == IXGBE_INT_MSI &&
+                           *aflags & IXGBE_FLAG_MSI_CAPABLE) {
+                               *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                               *aflags |= IXGBE_FLAG_MSI_CAPABLE;
+                       } else {
+                               *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+                               *aflags &= ~IXGBE_FLAG_MSI_CAPABLE;
+                       }
+               }
+#endif
+       }
+       { /* Multiple Queue Support */
+               static struct ixgbe_option opt = {
+                       .type = enable_option,
+                       .name = "Multiple Queue Support",
+                       .err  = "defaulting to Enabled",
+                       .def  = OPTION_ENABLED
+               };
+
+#ifdef module_param_array
+               if (num_MQ > bd) {
+#endif
+                       unsigned int mq = MQ[bd];
+                       ixgbe_validate_option(&mq, &opt);
+                       if (mq)
+                               *aflags |= IXGBE_FLAG_MQ_CAPABLE;
+                       else
+                               *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+#ifdef module_param_array
+               } else {
+                       if (opt.def == OPTION_ENABLED)
+                               *aflags |= IXGBE_FLAG_MQ_CAPABLE;
+                       else
+                               *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+               }
+#endif
+               /* Check Interoperability */
+               if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) &&
+                   !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) {
+                       DPRINTK(PROBE, INFO,
+                               "Multiple queues are not supported while MSI-X "
+                               "is disabled.  Disabling Multiple Queues.\n");
+                       *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
+               }
+       }
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       { /* Direct Cache Access (DCA) */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Direct Cache Access (DCA)",
+                       .err  = "defaulting to Enabled",
+                       .def  = IXGBE_MAX_DCA,
+                       .arg  = { .r = { .min = OPTION_DISABLED,
+                                        .max = IXGBE_MAX_DCA} }
+               };
+               unsigned int dca = opt.def;
+
+#ifdef module_param_array
+               if (num_DCA > bd) {
+#endif
+                       dca = DCA[bd];
+                       ixgbe_validate_option(&dca, &opt);
+                       if (!dca)
+                               *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
+
+                       /* Check Interoperability */
+                       if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) {
+                               DPRINTK(PROBE, INFO, "DCA is disabled\n");
+                               *aflags &= ~IXGBE_FLAG_DCA_ENABLED;
+                       }
+
+                       if (dca == IXGBE_MAX_DCA) {
+                               DPRINTK(PROBE, INFO,
+                                       "DCA enabled for rx data\n");
+                               adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
+                       }
+#ifdef module_param_array
+               } else {
+                       /* make sure to clear the capability flag if the
+                        * option is disabled by default above */
+                       if (opt.def == OPTION_DISABLED)
+                               *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
+               }
+#endif
+               if (dca == IXGBE_MAX_DCA) /* NOTE(review): redundant re-set; already done in the branch above. Harmless but could be dropped */
+                       adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
+       }
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+       { /* Receive-Side Scaling (RSS) */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Receive-Side Scaling (RSS)",
+                       .err  = "using default.",
+                       .def  = 0,
+                       .arg  = { .r = { .min = 0,
+                                        .max = IXGBE_MAX_RSS_INDICES} }
+               };
+               unsigned int rss = RSS[bd];
+
+#ifdef module_param_array
+               if (num_RSS > bd) {
+#endif
+                       ixgbe_validate_option(&rss, &opt);
+                       /* base it off num_online_cpus() with hardware limit */
+                       if (!rss)
+                               rss = min_t(int, IXGBE_MAX_RSS_INDICES,
+                                           num_online_cpus());
+                       else
+                               feature[RING_F_FDIR].limit = rss;
+
+                       feature[RING_F_RSS].limit = rss;
+#ifdef module_param_array
+               } else if (opt.def == 0) {
+                       rss = min_t(int, IXGBE_MAX_RSS_INDICES,
+                                   num_online_cpus());
+                       feature[RING_F_RSS].limit = rss;
+               }
+#endif
+               /* Check Interoperability */
+               if (rss > 1) {
+                       if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
+                               DPRINTK(PROBE, INFO,
+                                       "Multiqueue is disabled.  "
+                                       "Limiting RSS.\n");
+                               feature[RING_F_RSS].limit = 1;
+                       }
+               }
+       }
+       { /* Virtual Machine Device Queues (VMDQ) */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Virtual Machine Device Queues (VMDQ)",
+                       .err  = "defaulting to Disabled",
+                       .def  = OPTION_DISABLED,
+                       .arg  = { .r = { .min = OPTION_DISABLED,
+                                        .max = IXGBE_MAX_VMDQ_INDICES
+                               } }
+               };
+
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82598EB:
+                       /* 82598 only supports up to 16 pools */
+                               opt.arg.r.max = 16;
+                       break;
+               default:
+                       break;
+               }
+
+#ifdef module_param_array
+               if (num_VMDQ > bd) {
+#endif
+                       unsigned int vmdq = VMDQ[bd];
+
+                       ixgbe_validate_option(&vmdq, &opt);
+
+                       /* zero or one both mean disabled from our driver's
+                        * perspective */
+                       if (vmdq > 1)
+                               *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
+                       else
+                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+
+                       feature[RING_F_VMDQ].limit = vmdq;
+#ifdef module_param_array
+               } else {
+                       if (opt.def == OPTION_DISABLED)
+                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                       else
+                               *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
+
+                       feature[RING_F_VMDQ].limit = opt.def;
+               }
+#endif
+               /* Check Interoperability */
+               if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) {
+                       if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
+                               DPRINTK(PROBE, INFO,
+                                       "VMDQ is not supported while multiple "
+                                       "queues are disabled.  "
+                                       "Disabling VMDQ.\n");
+                               *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+                               feature[RING_F_VMDQ].limit = 0;
+                       }
+               }
+       }
+#ifdef CONFIG_PCI_IOV
+       { /* Single Root I/O Virtualization (SR-IOV) */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "I/O Virtualization (IOV)",
+                       .err  = "defaulting to Disabled",
+                       .def  = OPTION_DISABLED,
+                       .arg  = { .r = { .min = OPTION_DISABLED,
+                                        .max = MAX_SRIOV_VFS} }
+               };
+
+#ifdef module_param_array
+               if (num_max_vfs > bd) {
+#endif
+                       unsigned int vfs = max_vfs[bd];
+                       if (ixgbe_validate_option(&vfs, &opt)) {
+                               vfs = 0;
+                               DPRINTK(PROBE, INFO,
+                                       "max_vfs out of range "
+                                       "Disabling SR-IOV.\n");
+                       }
+
+                       adapter->num_vfs = vfs;
+
+                       if (vfs)
+                               *aflags |= IXGBE_FLAG_SRIOV_ENABLED;
+                       else
+                               *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+#ifdef module_param_array
+               } else {
+                       if (opt.def == OPTION_DISABLED) {
+                               adapter->num_vfs = 0;
+                               *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+                       } else {
+                               adapter->num_vfs = opt.def;
+                               *aflags |= IXGBE_FLAG_SRIOV_ENABLED;
+                       }
+               }
+#endif
+
+               /* Check Interoperability */
+               if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) {
+                       if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) {
+                               DPRINTK(PROBE, INFO,
+                                       "IOV is not supported on this "
+                                       "hardware.  Disabling IOV.\n");
+                               *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+                               adapter->num_vfs = 0;
+                       } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
+                               DPRINTK(PROBE, INFO,
+                                       "IOV is not supported while multiple "
+                                       "queues are disabled.  "
+                                       "Disabling IOV.\n");
+                               *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+                               adapter->num_vfs = 0;
+                       }
+               }
+       }
+       { /* L2 Loopback Enable in SR-IOV mode */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "L2 Loopback Enable",
+                       .err  = "defaulting to Enable",
+                       .def  = OPTION_ENABLED,
+                       .arg  = { .r = { .min = OPTION_DISABLED,
+                                        .max = OPTION_ENABLED} }
+               };
+
+#ifdef module_param_array
+               if (num_L2LBen > bd) {
+#endif
+                       unsigned int l2LBen = L2LBen[bd];
+                       ixgbe_validate_option(&l2LBen, &opt);
+                       if (l2LBen)
+                               adapter->flags |=
+                                       IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE;
+#ifdef module_param_array
+               } else {
+                       if (opt.def == OPTION_ENABLED)
+                               adapter->flags |=
+                                       IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE;
+               }
+#endif
+       }
+#endif /* CONFIG_PCI_IOV */
+       { /* Interrupt Throttling Rate */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Interrupt Throttling Rate (ints/sec)",
+                       .err  = "using default of "__MODULE_STRING(DEFAULT_ITR),
+                       .def  = DEFAULT_ITR,
+                       .arg  = { .r = { .min = MIN_ITR,
+                                        .max = MAX_ITR } }
+               };
+
+#ifdef module_param_array
+               if (num_InterruptThrottleRate > bd) {
+#endif
+                       u32 itr = InterruptThrottleRate[bd];
+                       /* 0 and 1 are special (off / dynamic) and bypass
+                        * range validation */
+                       switch (itr) {
+                       case 0:
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                               adapter->rx_itr_setting = 0;
+                               break;
+                       case 1:
+                               DPRINTK(PROBE, INFO, "dynamic interrupt "
+                                       "throttling enabled\n");
+                               adapter->rx_itr_setting = 1;
+                               break;
+                       default:
+                               ixgbe_validate_option(&itr, &opt);
+                               /* the first bit is used as control */
+                               adapter->rx_itr_setting = (1000000/itr) << 2;
+                               break;
+                       }
+                       adapter->tx_itr_setting = adapter->rx_itr_setting;
+#ifdef module_param_array
+               } else {
+                       adapter->rx_itr_setting = opt.def;
+                       adapter->tx_itr_setting = opt.def;
+               }
+#endif
+       }
+#ifndef IXGBE_NO_LLI
+       { /* Low Latency Interrupt TCP Port */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Low Latency Interrupt TCP Port",
+                       .err  = "using default of "
+                                       __MODULE_STRING(DEFAULT_LLIPORT),
+                       .def  = DEFAULT_LLIPORT,
+                       .arg  = { .r = { .min = MIN_LLIPORT,
+                                        .max = MAX_LLIPORT } }
+               };
+
+#ifdef module_param_array
+               if (num_LLIPort > bd) {
+#endif
+                       adapter->lli_port = LLIPort[bd];
+                       if (adapter->lli_port) {
+                               ixgbe_validate_option(&adapter->lli_port, &opt);
+                       } else {
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                       }
+#ifdef module_param_array
+               } else {
+                       adapter->lli_port = opt.def;
+               }
+#endif
+       }
+       { /* Low Latency Interrupt on Packet Size */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Low Latency Interrupt on Packet Size",
+                       .err  = "using default of "
+                                       __MODULE_STRING(DEFAULT_LLISIZE),
+                       .def  = DEFAULT_LLISIZE,
+                       .arg  = { .r = { .min = MIN_LLISIZE,
+                                        .max = MAX_LLISIZE } }
+               };
+
+#ifdef module_param_array
+               if (num_LLISize > bd) {
+#endif
+                       adapter->lli_size = LLISize[bd];
+                       if (adapter->lli_size) {
+                               ixgbe_validate_option(&adapter->lli_size, &opt);
+                       } else {
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                       }
+#ifdef module_param_array
+               } else {
+                       adapter->lli_size = opt.def;
+               }
+#endif
+       }
+       { /* Low Latency Interrupt on TCP Push flag */
+               static struct ixgbe_option opt = {
+                       .type = enable_option,
+                       .name = "Low Latency Interrupt on TCP Push flag",
+                       .err  = "defaulting to Disabled",
+                       .def  = OPTION_DISABLED
+               };
+
+#ifdef module_param_array
+               if (num_LLIPush > bd) {
+#endif
+                       unsigned int lli_push = LLIPush[bd];
+                       ixgbe_validate_option(&lli_push, &opt);
+                       if (lli_push)
+                               *aflags |= IXGBE_FLAG_LLI_PUSH;
+                       else
+                               *aflags &= ~IXGBE_FLAG_LLI_PUSH;
+#ifdef module_param_array
+               } else {
+                       if (opt.def == OPTION_ENABLED)
+                               *aflags |= IXGBE_FLAG_LLI_PUSH;
+                       else
+                               *aflags &= ~IXGBE_FLAG_LLI_PUSH;
+               }
+#endif
+       }
+       { /* Low Latency Interrupt EtherType */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Low Latency Interrupt on Ethernet Protocol "
+                               "Type",
+                       .err  = "using default of "
+                                       __MODULE_STRING(DEFAULT_LLIETYPE),
+                       .def  = DEFAULT_LLIETYPE,
+                       .arg  = { .r = { .min = MIN_LLIETYPE,
+                                        .max = MAX_LLIETYPE } }
+               };
+
+#ifdef module_param_array
+               if (num_LLIEType > bd) {
+#endif
+                       adapter->lli_etype = LLIEType[bd];
+                       if (adapter->lli_etype) {
+                               ixgbe_validate_option(&adapter->lli_etype,
+                                                     &opt);
+                       } else {
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                       }
+#ifdef module_param_array
+               } else {
+                       adapter->lli_etype = opt.def;
+               }
+#endif
+       }
+       { /* LLI VLAN Priority */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Low Latency Interrupt on VLAN priority "
+                               "threashold", /* sic: user-visible typo, should read "threshold" */
+                       .err  = "using default of "
+                                       __MODULE_STRING(DEFAULT_LLIVLANP),
+                       .def  = DEFAULT_LLIVLANP,
+                       .arg  = { .r = { .min = MIN_LLIVLANP,
+                                        .max = MAX_LLIVLANP } }
+               };
+
+#ifdef module_param_array
+               if (num_LLIVLANP > bd) {
+#endif
+                       adapter->lli_vlan_pri = LLIVLANP[bd];
+                       if (adapter->lli_vlan_pri) {
+                               ixgbe_validate_option(&adapter->lli_vlan_pri,
+                                                     &opt);
+                       } else {
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                       }
+#ifdef module_param_array
+               } else {
+                       adapter->lli_vlan_pri = opt.def;
+               }
+#endif
+       }
+#endif /* IXGBE_NO_LLI */
+#ifdef HAVE_TX_MQ
+       { /* Flow Director packet buffer allocation */
+               unsigned int fdir_pballoc_mode;
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Flow Director packet buffer allocation",
+                       .err = "using default of "
+                               __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC),
+                       .def = IXGBE_DEFAULT_FDIR_PBALLOC,
+                       .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K,
+                                     .max = IXGBE_FDIR_PBALLOC_256K} }
+               };
+               char pstring[10];
+
+               /* 82598 has no Flow Director support */
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE;
+               } else if (num_FdirPballoc > bd) {
+                       fdir_pballoc_mode = FdirPballoc[bd];
+                       ixgbe_validate_option(&fdir_pballoc_mode, &opt);
+                       switch (fdir_pballoc_mode) {
+                       case IXGBE_FDIR_PBALLOC_256K:
+                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K;
+                               sprintf(pstring, "256kB");
+                               break;
+                       case IXGBE_FDIR_PBALLOC_128K:
+                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K;
+                               sprintf(pstring, "128kB");
+                               break;
+                       case IXGBE_FDIR_PBALLOC_64K:
+                       default:
+                               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+                               sprintf(pstring, "64kB");
+                               break;
+                       }
+                       DPRINTK(PROBE, INFO, "Flow Director will be allocated "
+                               "%s of packet buffer\n", pstring);
+               } else {
+                       adapter->fdir_pballoc = opt.def;
+               }
+       }
+       { /* Flow Director ATR Tx sample packet rate */
+               static struct ixgbe_option opt = {
+                       .type = range_option,
+                       .name = "Software ATR Tx packet sample rate",
+                       .err = "using default of "
+                               __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE),
+                       .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE,
+                       .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF,
+                                     .max = IXGBE_MAX_ATR_SAMPLE_RATE} }
+               };
+               static const char atr_string[] =
+                                           "ATR Tx Packet sample rate set to";
+
+               /* 82598 has no ATR support */
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF;
+               } else if (num_AtrSampleRate > bd) {
+                       adapter->atr_sample_rate = AtrSampleRate[bd];
+
+                       if (adapter->atr_sample_rate) {
+                               ixgbe_validate_option(&adapter->atr_sample_rate,
+                                                     &opt);
+                               DPRINTK(PROBE, INFO, "%s %d\n", atr_string,
+                                       adapter->atr_sample_rate);
+                       }
+               } else {
+                       adapter->atr_sample_rate = opt.def;
+               }
+       }
+#endif /* HAVE_TX_MQ */
+#ifdef IXGBE_FCOE
+       { /* FCoE offload: only X540/82599 are capable */
+               *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_X540:
+               case ixgbe_mac_82599EB: {
+                       struct ixgbe_option opt = {
+                               .type = enable_option,
+                               .name = "Enabled/Disable FCoE offload",
+                               .err = "defaulting to Enabled",
+                               .def = OPTION_ENABLED
+                       };
+#ifdef module_param_array
+                       if (num_FCoE > bd) {
+#endif
+                               unsigned int fcoe = FCoE[bd];
+
+                               ixgbe_validate_option(&fcoe, &opt);
+                               if (fcoe)
+                                       *aflags |= IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef module_param_array
+                       } else {
+                               if (opt.def == OPTION_ENABLED)
+                                       *aflags |= IXGBE_FLAG_FCOE_CAPABLE;
+                       }
+#endif
+                       DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n",
+                               (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ?
+                               "en" : "dis");
+               }
+                       break;
+               default:
+                       break;
+               }
+       }
+#endif /* IXGBE_FCOE */
+       { /* LRO - Enable Large Receive Offload */
+               struct ixgbe_option opt = {
+                       .type = enable_option,
+                       .name = "LRO - Large Receive Offload",
+                       .err  = "defaulting to Enabled",
+                       .def  = OPTION_ENABLED
+               };
+               struct net_device *netdev = adapter->netdev;
+
+#ifdef IXGBE_NO_LRO
+               /* without software LRO, default off unless HW RSC exists */
+               if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+                       opt.def = OPTION_DISABLED;
+
+#endif
+#ifdef module_param_array
+               if (num_LRO > bd) {
+#endif
+                       unsigned int lro = LRO[bd];
+                       ixgbe_validate_option(&lro, &opt);
+                       if (lro)
+                               netdev->features |= NETIF_F_LRO;
+                       else
+                               netdev->features &= ~NETIF_F_LRO;
+#ifdef module_param_array
+               } else if (opt.def == OPTION_ENABLED) {
+                       netdev->features |= NETIF_F_LRO;
+               } else {
+                       netdev->features &= ~NETIF_F_LRO;
+               }
+#endif
+#ifdef IXGBE_NO_LRO
+               if ((netdev->features & NETIF_F_LRO) &&
+                   !(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+                       DPRINTK(PROBE, INFO,
+                               "RSC is not supported on this "
+                               "hardware.  Disabling RSC.\n");
+                       netdev->features &= ~NETIF_F_LRO;
+               }
+#endif
+       }
+       { /*
+          * allow_unsupported_sfp - Enable/Disable support for unsupported
+          * and untested SFP+ modules.
+          */
+       struct ixgbe_option opt = {
+                       .type = enable_option,
+                       .name = "allow_unsupported_sfp",
+                       .err  = "defaulting to Disabled",
+                       .def  = OPTION_DISABLED
+               };
+#ifdef module_param_array
+               if (num_allow_unsupported_sfp > bd) {
+#endif
+                       unsigned int enable_unsupported_sfp =
+                                                     allow_unsupported_sfp[bd];
+                       ixgbe_validate_option(&enable_unsupported_sfp, &opt);
+                       if (enable_unsupported_sfp) {
+                               adapter->hw.allow_unsupported_sfp = true;
+                       } else {
+                               adapter->hw.allow_unsupported_sfp = false;
+                       }
+#ifdef module_param_array
+               } else if (opt.def == OPTION_ENABLED) {
+                               adapter->hw.allow_unsupported_sfp = true;
+               } else {
+                               adapter->hw.allow_unsupported_sfp = false;
+               }
+#endif
+       }
+}
index 5a465e83c471d775c249879533337f94f9d5b282..4de5040ea789b85cc39e811fc059928ee0ed4a85 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
+#include "ixgbe_api.h"
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
@@ -43,9 +40,36 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
 static bool ixgbe_get_i2c_data(u32 *i2cctl);
-static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_phy_info *phy = &hw->phy;
+
+       /* PHY */
+       phy->ops.identify = &ixgbe_identify_phy_generic;
+       phy->ops.reset = &ixgbe_reset_phy_generic;
+       phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+       phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+       phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+       phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+       phy->ops.check_link = NULL;
+       phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+       phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
+       phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
+       phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
+       phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
+       phy->ops.identify_sfp = &ixgbe_identify_module_generic;
+       phy->sfp_type = ixgbe_sfp_type_unknown;
+       phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+       return 0;
+}
 
 /**
  *  ixgbe_identify_phy_generic - Get physical layer module
@@ -61,20 +85,20 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 
        if (hw->phy.type == ixgbe_phy_unknown) {
                for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-                       hw->phy.mdio.prtad = phy_addr;
-                       if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
+                       if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+                               hw->phy.addr = phy_addr;
                                ixgbe_get_phy_id(hw);
                                hw->phy.type =
-                                       ixgbe_get_phy_type_from_id(hw->phy.id);
+                                       ixgbe_get_phy_type_from_id(hw->phy.id);
 
                                if (hw->phy.type == ixgbe_phy_unknown) {
                                        hw->phy.ops.read_reg(hw,
-                                                            MDIO_PMA_EXTABLE,
-                                                            MDIO_MMD_PMAPMD,
-                                                            &ext_ability);
+                                                 IXGBE_MDIO_PHY_EXT_ABILITY,
+                                                 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                                 &ext_ability);
                                        if (ext_ability &
-                                           (MDIO_PMA_EXTABLE_10GBT |
-                                            MDIO_PMA_EXTABLE_1000BT))
+                                           (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+                                            IXGBE_MDIO_PHY_1000BASET_ABILITY))
                                                hw->phy.type =
                                                         ixgbe_phy_cu_unknown;
                                        else
@@ -88,7 +112,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                }
                /* clear value if nothing found */
                if (status != 0)
-                       hw->phy.mdio.prtad = 0;
+                       hw->phy.addr = 0;
        } else {
                status = 0;
        }
@@ -96,24 +120,46 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
        return status;
 }
 
+/**
+ *  ixgbe_validate_phy_addr - Determines phy address is valid
+ *  @hw: pointer to hardware structure
+ *
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+       u16 phy_id = 0;
+       bool valid = false;
+
+       hw->phy.addr = phy_addr;
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+                            IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+       if (phy_id != 0xFFFF && phy_id != 0x0)
+               valid = true;
+
+       return valid;
+}
+
 /**
  *  ixgbe_get_phy_id - Get the phy type
  *  @hw: pointer to hardware structure
  *
  **/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 {
        u32 status;
        u16 phy_id_high = 0;
        u16 phy_id_low = 0;
 
-       status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
-                                     &phy_id_high);
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &phy_id_high);
 
        if (status == 0) {
                hw->phy.id = (u32)(phy_id_high << 16);
-               status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
-                                             &phy_id_low);
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+                                             IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                             &phy_id_low);
                hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
                hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
        }
@@ -125,7 +171,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *
  **/
-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 {
        enum ixgbe_phy_type phy_type;
 
@@ -147,6 +193,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
                break;
        }
 
+       hw_dbg(hw, "phy type found is %d\n", phy_type);
        return phy_type;
 }
 
@@ -175,9 +222,9 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
         * Perform soft PHY reset to the PHY_XS.
         * This will cause a soft reset to the PHY
         */
-       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
-                             MDIO_MMD_PHYXS,
-                             MDIO_CTRL1_RESET);
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+                             IXGBE_MDIO_PHY_XS_RESET);
 
        /*
         * Poll for reset bit to self-clear indicating reset is complete.
@@ -186,15 +233,15 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
         */
        for (i = 0; i < 30; i++) {
                msleep(100);
-               hw->phy.ops.read_reg(hw, MDIO_CTRL1,
-                                    MDIO_MMD_PHYXS, &ctrl);
-               if (!(ctrl & MDIO_CTRL1_RESET)) {
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                                    IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+               if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
                        udelay(2);
                        break;
                }
        }
 
-       if (ctrl & MDIO_CTRL1_RESET) {
+       if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
                status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "PHY reset polling failed to complete.\n");
        }
@@ -210,7 +257,7 @@ out:
  *  @phy_data: Pointer to read data from PHY register
  **/
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 *phy_data)
+                              u32 device_type, u16 *phy_data)
 {
        u32 command;
        u32 i;
@@ -229,9 +276,9 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        if (status == 0) {
                /* Setup and write the address cycle command */
                command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                          (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                          (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
                IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -260,10 +307,9 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                         * command
                         */
                        command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                                  (hw->phy.mdio.prtad <<
-                                   IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                                  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
 
                        IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -309,7 +355,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
  *  @phy_data: Data to write to the PHY register
  **/
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                                u32 device_type, u16 phy_data)
+                               u32 device_type, u16 phy_data)
 {
        u32 command;
        u32 i;
@@ -330,9 +376,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 
                /* Setup and write the address cycle command */
                command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                          (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                          (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
                IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -361,10 +407,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                         * command
                         */
                        command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                                  (hw->phy.mdio.prtad <<
-                                   IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                                  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                                  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                                  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
 
                        IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -413,16 +458,16 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
        if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
                /* Set or unset auto-negotiation 10G advertisement */
-               hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
-                                    MDIO_MMD_AN,
+               hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
-               autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+               autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+                       autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
 
-               hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
-                                     MDIO_MMD_AN,
+               hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
@@ -430,7 +475,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
                /* Set or unset auto-negotiation 1G advertisement */
                hw->phy.ops.read_reg(hw,
                                     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-                                    MDIO_MMD_AN,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
                autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
@@ -439,47 +484,46 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
                hw->phy.ops.write_reg(hw,
                                      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-                                     MDIO_MMD_AN,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
        if (speed & IXGBE_LINK_SPEED_100_FULL) {
                /* Set or unset auto-negotiation 100M advertisement */
-               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-                                    MDIO_MMD_AN,
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
-               autoneg_reg &= ~(ADVERTISE_100FULL |
-                                ADVERTISE_100HALF);
+               autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+                                IXGBE_MII_100BASE_T_ADVERTISE_HALF);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-                       autoneg_reg |= ADVERTISE_100FULL;
+                       autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
 
-               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
-                                     MDIO_MMD_AN,
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
        /* Restart PHY autonegotiation and wait for completion */
-       hw->phy.ops.read_reg(hw, MDIO_CTRL1,
-                            MDIO_MMD_AN, &autoneg_reg);
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
-       autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+       autoneg_reg |= IXGBE_MII_RESTART;
 
-       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
-                             MDIO_MMD_AN, autoneg_reg);
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
        /* Wait for autonegotiation to finish */
        for (time_out = 0; time_out < max_time_out; time_out++) {
                udelay(10);
                /* Restart PHY autonegotiation and wait for completion */
-               status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
-                                             MDIO_MMD_AN,
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                              &autoneg_reg);
 
-               autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
-               if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
+               autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+               if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
                        break;
-               }
        }
 
        if (time_out == max_time_out) {
@@ -497,9 +541,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
  *  @autoneg: true if autonegotiation enabled
  **/
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-                                       ixgbe_link_speed speed,
-                                       bool autoneg,
-                                       bool autoneg_wait_to_complete)
+                                      ixgbe_link_speed speed,
+                                      bool autoneg,
+                                      bool autoneg_wait_to_complete)
 {
 
        /*
@@ -524,16 +568,16 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 }
 
 /**
- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
+ *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
  *
- * Determines the link capabilities by reading the AUTOC register.
- */
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed *speed,
-                                               bool *autoneg)
+                                              ixgbe_link_speed *speed,
+                                              bool *autoneg)
 {
        s32 status = IXGBE_ERR_LINK_SETUP;
        u16 speed_ability;
@@ -541,15 +585,16 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
        *speed = 0;
        *autoneg = true;
 
-       status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-                                     &speed_ability);
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &speed_ability);
 
        if (status == 0) {
-               if (speed_ability & MDIO_SPEED_10G)
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
                        *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (speed_ability & MDIO_PMA_SPEED_1000)
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
                        *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               if (speed_ability & MDIO_PMA_SPEED_100)
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
                        *speed |= IXGBE_LINK_SPEED_100_FULL;
        }
 
@@ -585,13 +630,12 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
        for (time_out = 0; time_out < max_time_out; time_out++) {
                udelay(10);
                status = hw->phy.ops.read_reg(hw,
-                                             MDIO_STAT1,
-                                             MDIO_MMD_VEND1,
-                                             &phy_data);
-               phy_link = phy_data &
-                           IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                       &phy_data);
+               phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
                phy_speed = phy_data &
-                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+                                IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
                if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
                        *link_up = true;
                        if (phy_speed ==
@@ -623,23 +667,23 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
                /* Set or unset auto-negotiation 10G advertisement */
-               hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
-                                    MDIO_MMD_AN,
+               hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
-               autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+               autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+                       autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
 
-               hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
-                                     MDIO_MMD_AN,
+               hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
        if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
                /* Set or unset auto-negotiation 1G advertisement */
                hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
-                                    MDIO_MMD_AN,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
                autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
@@ -647,45 +691,44 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
                        autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
 
                hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
-                                     MDIO_MMD_AN,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
        if (speed & IXGBE_LINK_SPEED_100_FULL) {
                /* Set or unset auto-negotiation 100M advertisement */
-               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-                                    MDIO_MMD_AN,
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                     &autoneg_reg);
 
-               autoneg_reg &= ~(ADVERTISE_100FULL |
-                                ADVERTISE_100HALF);
+               autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-                       autoneg_reg |= ADVERTISE_100FULL;
+                       autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
 
-               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
-                                     MDIO_MMD_AN,
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                      autoneg_reg);
        }
 
        /* Restart PHY autonegotiation and wait for completion */
-       hw->phy.ops.read_reg(hw, MDIO_CTRL1,
-                            MDIO_MMD_AN, &autoneg_reg);
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
-       autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+       autoneg_reg |= IXGBE_MII_RESTART;
 
-       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
-                             MDIO_MMD_AN, autoneg_reg);
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
        /* Wait for autonegotiation to finish */
        for (time_out = 0; time_out < max_time_out; time_out++) {
                udelay(10);
                /* Restart PHY autonegotiation and wait for completion */
-               status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
-                                             MDIO_MMD_AN,
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                                              &autoneg_reg);
 
-               autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
-               if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
+               autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+               if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
                        break;
        }
 
@@ -708,7 +751,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
        s32 status = 0;
 
        status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
-                                     MDIO_MMD_VEND1,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
                                      firmware_version);
 
        return status;
@@ -725,7 +768,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
        s32 status = 0;
 
        status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
-                                     MDIO_MMD_VEND1,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
                                      firmware_version);
 
        return status;
@@ -744,21 +787,23 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
        s32 ret_val = 0;
        u32 i;
 
-       hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                            IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
 
        /* reset the PHY and poll for completion */
-       hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                             (phy_data | MDIO_CTRL1_RESET));
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+                             (phy_data | IXGBE_MDIO_PHY_XS_RESET));
 
        for (i = 0; i < 100; i++) {
-               hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                                    &phy_data);
-               if ((phy_data & MDIO_CTRL1_RESET) == 0)
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                                    IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+               if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
                        break;
-               usleep_range(10000, 20000);
+               msleep(10);
        }
 
-       if ((phy_data & MDIO_CTRL1_RESET) != 0) {
+       if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
                hw_dbg(hw, "PHY reset did not complete.\n");
                ret_val = IXGBE_ERR_PHY;
                goto out;
@@ -766,7 +811,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 
        /* Get init offsets */
        ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                     &data_offset);
+                                                     &data_offset);
        if (ret_val != 0)
                goto out;
 
@@ -778,25 +823,25 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
                 */
                ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
                control = (eword & IXGBE_CONTROL_MASK_NL) >>
-                          IXGBE_CONTROL_SHIFT_NL;
+                          IXGBE_CONTROL_SHIFT_NL;
                edata = eword & IXGBE_DATA_MASK_NL;
                switch (control) {
                case IXGBE_DELAY_NL:
                        data_offset++;
                        hw_dbg(hw, "DELAY: %d MS\n", edata);
-                       usleep_range(edata * 1000, edata * 2000);
+                       msleep(edata);
                        break;
                case IXGBE_DATA_NL:
                        hw_dbg(hw, "DATA:\n");
                        data_offset++;
                        hw->eeprom.ops.read(hw, data_offset++,
-                                           &phy_offset);
+                                           &phy_offset);
                        for (i = 0; i < edata; i++) {
                                hw->eeprom.ops.read(hw, data_offset, &eword);
                                hw->phy.ops.write_reg(hw, phy_offset,
-                                                     MDIO_MMD_PMAPMD, eword);
+                                                     IXGBE_TWINAX_DEV, eword);
                                hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
-                                      phy_offset);
+                                         phy_offset);
                                data_offset++;
                                phy_offset++;
                        }
@@ -826,6 +871,31 @@ out:
        return ret_val;
 }
 
+/**
+ *  ixgbe_identify_module_generic - Identifies module type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+       switch (hw->mac.ops.get_media_type(hw)) {
+       case ixgbe_media_type_fiber:
+               status = ixgbe_identify_sfp_module_generic(hw);
+               break;
+
+
+       default:
+               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+               status = IXGBE_ERR_SFP_NOT_PRESENT;
+               break;
+       }
+
+       return status;
+}
+
 /**
  *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
  *  @hw: pointer to hardware structure
@@ -834,7 +904,6 @@ out:
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 vendor_oui = 0;
        enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
@@ -854,7 +923,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_IDENTIFIER,
-                                            &identifier);
+                                            &identifier);
 
        if (status == IXGBE_ERR_SWFW_SYNC ||
            status == IXGBE_ERR_I2C ||
@@ -907,6 +976,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
                  * 9   SFP_1g_cu_CORE0 - 82599-specific
                  * 10  SFP_1g_cu_CORE1 - 82599-specific
+                 * 11  SFP_1g_sx_CORE0 - 82599-specific
+                 * 12  SFP_1g_sx_CORE1 - 82599-specific
                  */
                if (hw->mac.type == ixgbe_mac_82598EB) {
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -921,10 +992,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core0;
+                                                    ixgbe_sfp_type_da_cu_core0;
                                else
                                        hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core1;
+                                                    ixgbe_sfp_type_da_cu_core1;
                        } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
                                hw->phy.ops.read_i2c_eeprom(
                                                hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -946,10 +1017,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                    IXGBE_SFF_10GBASELR_CAPABLE)) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
+                                                     ixgbe_sfp_type_srlr_core0;
                                else
                                        hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
+                                                     ixgbe_sfp_type_srlr_core1;
                        } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
@@ -957,6 +1028,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                else
                                        hw->phy.sfp_type =
                                                ixgbe_sfp_type_1g_cu_core1;
+                       } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+                               if (hw->bus.lan_id == 0)
+                                       hw->phy.sfp_type =
+                                               ixgbe_sfp_type_1g_sx_core0;
+                               else
+                                       hw->phy.sfp_type =
+                                               ixgbe_sfp_type_1g_sx_core1;
                        } else {
                                hw->phy.sfp_type = ixgbe_sfp_type_unknown;
                        }
@@ -977,8 +1055,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                if (hw->phy.type != ixgbe_phy_nl) {
                        hw->phy.id = identifier;
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
-                                                   &oui_bytes[0]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
+                                                   &oui_bytes[0]);
 
                        if (status == IXGBE_ERR_SWFW_SYNC ||
                            status == IXGBE_ERR_I2C ||
@@ -986,8 +1064,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
-                                                   &oui_bytes[1]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
+                                                   &oui_bytes[1]);
 
                        if (status == IXGBE_ERR_SWFW_SYNC ||
                            status == IXGBE_ERR_I2C ||
@@ -995,8 +1073,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
-                                                   &oui_bytes[2]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
+                                                   &oui_bytes[2]);
 
                        if (status == IXGBE_ERR_SWFW_SYNC ||
                            status == IXGBE_ERR_I2C ||
@@ -1049,7 +1127,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                /* Verify supported 1G SFP modules */
                if (comp_codes_10g == 0 &&
                    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+                     hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0  ||
+                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
                        hw->phy.type = ixgbe_phy_sfp_unsupported;
                        status = IXGBE_ERR_SFP_NOT_SUPPORTED;
                        goto out;
@@ -1061,20 +1141,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        goto out;
                }
 
-               hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+               ixgbe_get_device_caps(hw, &enforce_sfp);
                if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
                    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
-                     (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+                     (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+                     (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0)  ||
+                     (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
                        /* Make sure we're a supported PHY type */
                        if (hw->phy.type == ixgbe_phy_sfp_intel) {
                                status = 0;
                        } else {
-                               if (hw->allow_unsupported_sfp) {
-                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.");
+                               if (hw->allow_unsupported_sfp == true) {
+                                       EWARN(hw, "WARNING: Intel (R) Network "
+                                             "Connections are quality tested "
+                                             "using Intel (R) Ethernet Optics."
+                                             " Using untested modules is not "
+                                             "supported and may cause unstable"
+                                             " operation or damage to the "
+                                             "module or the adapter. Intel "
+                                             "Corporation is not responsible "
+                                             "for any harm caused by using "
+                                             "untested modules.\n", status);
                                        status = 0;
                                } else {
-                                       hw_dbg(hw,
-                                              "SFP+ module not supported\n");
+                                       hw_dbg(hw, "SFP+ module not supported\n");
                                        hw->phy.type =
                                                ixgbe_phy_sfp_unsupported;
                                        status = IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1097,6 +1187,8 @@ err_read_i2c_eeprom:
        return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
+
+
 /**
  *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
  *  @hw: pointer to hardware structure
@@ -1107,8 +1199,8 @@ err_read_i2c_eeprom:
  *  so it returns the offsets to the phy init sequence block.
  **/
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-                                        u16 *list_offset,
-                                        u16 *data_offset)
+                                       u16 *list_offset,
+                                       u16 *data_offset)
 {
        u16 sfp_id;
        u16 sfp_type = hw->phy.sfp_type;
@@ -1128,10 +1220,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
         * SR modules
         */
        if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
-           sfp_type == ixgbe_sfp_type_1g_cu_core0)
+           sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+           sfp_type == ixgbe_sfp_type_1g_sx_core0)
                sfp_type = ixgbe_sfp_type_srlr_core0;
        else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
-                sfp_type == ixgbe_sfp_type_1g_cu_core1)
+                sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+                sfp_type == ixgbe_sfp_type_1g_sx_core1)
                sfp_type = ixgbe_sfp_type_srlr_core1;
 
        /* Read offset to PHY init contents */
@@ -1183,11 +1277,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
  *  Performs byte read operation to SFP module's EEPROM over I2C interface.
  **/
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data)
+                                 u8 *eeprom_data)
 {
        return hw->phy.ops.read_i2c_byte(hw, byte_offset,
-                                        IXGBE_I2C_EEPROM_DEV_ADDR,
-                                        eeprom_data);
+                                        IXGBE_I2C_EEPROM_DEV_ADDR,
+                                        eeprom_data);
 }
 
 /**
@@ -1199,11 +1293,11 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  Performs byte write operation to SFP module's EEPROM over I2C interface.
  **/
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data)
+                                  u8 eeprom_data)
 {
        return hw->phy.ops.write_i2c_byte(hw, byte_offset,
-                                         IXGBE_I2C_EEPROM_DEV_ADDR,
-                                         eeprom_data);
+                                         IXGBE_I2C_EEPROM_DEV_ADDR,
+                                         eeprom_data);
 }
 
 /**
@@ -1216,7 +1310,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  a specified device address.
  **/
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data)
+                               u8 dev_addr, u8 *data)
 {
        s32 status = 0;
        u32 max_retry = 10;
@@ -1231,7 +1325,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                swfw_mask = IXGBE_GSSR_PHY0_SM;
 
        do {
-               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
+               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+                   != 0) {
                        status = IXGBE_ERR_SWFW_SYNC;
                        goto read_byte_out;
                }
@@ -1305,7 +1400,7 @@ read_byte_out:
  *  a specified device address.
  **/
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data)
+                                u8 dev_addr, u8 data)
 {
        s32 status = 0;
        u32 max_retry = 1;
@@ -1582,13 +1677,22 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
  **/
 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
-       *i2cctl |= IXGBE_I2C_CLK_OUT;
+       u32 i = 0;
+       u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+       u32 i2cctl_r = 0;
 
-       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-       IXGBE_WRITE_FLUSH(hw);
+       for (i = 0; i < timeout; i++) {
+               *i2cctl |= IXGBE_I2C_CLK_OUT;
 
-       /* SCL rise time (1000ns) */
-       udelay(IXGBE_I2C_T_RISE);
+               IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+               IXGBE_WRITE_FLUSH(hw);
+               /* SCL rise time (1000ns) */
+               udelay(IXGBE_I2C_T_RISE);
+
+               i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+               if (i2cctl_r & IXGBE_I2C_CLK_IN)
+                       break;
+       }
 }
 
 /**
@@ -1669,7 +1773,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
  *  Clears the I2C bus by sending nine clock pulses.
  *  Used when data line is stuck low.
  **/
-static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
 {
        u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        u32 i;
@@ -1712,7 +1816,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
 
        /* Check that the LASI temp alarm status was triggered */
        hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
-                            MDIO_MMD_PMAPMD, &phy_data);
+                            IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
 
        if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
                goto out;
index cc18165b4c05b55ce1bd01c51ca8b81f36aa4ca9..4caf281df1242a5badbf5f8f3235bc478ffa319e 100644 (file)
 #define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0
 
 /* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER         0x0
-#define IXGBE_SFF_IDENTIFIER_SFP     0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0   0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1   0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
-#define IXGBE_SFF_1GBE_COMP_CODES    0x6
-#define IXGBE_SFF_10GBE_COMP_CODES   0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP    0x3C
+#define IXGBE_SFF_IDENTIFIER           0x0
+#define IXGBE_SFF_IDENTIFIER_SFP       0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0     0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1     0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2     0x27
+#define IXGBE_SFF_1GBE_COMP_CODES      0x6
+#define IXGBE_SFF_10GBE_COMP_CODES     0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY     0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP      0x3C
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_SFF_DA_PASSIVE_CABLE     0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE      0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING      0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE     0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE     0x2
+#define IXGBE_SFF_1GBASET_CAPABLE      0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE    0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE    0x20
+#define IXGBE_I2C_EEPROM_READ_MASK     0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK   0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION   0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS   0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL   0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
 
 /* Flow control defines */
-#define IXGBE_TAF_SYM_PAUSE                  0x400
-#define IXGBE_TAF_ASM_PAUSE                  0x800
+#define IXGBE_TAF_SYM_PAUSE            0x400
+#define IXGBE_TAF_ASM_PAUSE            0x800
 
 /* Bit-shift macros */
-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24
-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16
-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    8
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT       24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT       16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT       8
 
 /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
-#define IXGBE_SFF_VENDOR_OUI_TYCO     0x00407600
-#define IXGBE_SFF_VENDOR_OUI_FTL      0x00906500
-#define IXGBE_SFF_VENDOR_OUI_AVAGO    0x00176A00
-#define IXGBE_SFF_VENDOR_OUI_INTEL    0x001B2100
+#define IXGBE_SFF_VENDOR_OUI_TYCO      0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL       0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO     0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL     0x001B2100
 
 /* I2C SDA and SCL timing parameters for standard mode */
-#define IXGBE_I2C_T_HD_STA  4
-#define IXGBE_I2C_T_LOW     5
-#define IXGBE_I2C_T_HIGH    4
-#define IXGBE_I2C_T_SU_STA  5
-#define IXGBE_I2C_T_HD_DATA 5
-#define IXGBE_I2C_T_SU_DATA 1
-#define IXGBE_I2C_T_RISE    1
-#define IXGBE_I2C_T_FALL    1
-#define IXGBE_I2C_T_SU_STO  4
-#define IXGBE_I2C_T_BUF     5
-
-#define IXGBE_TN_LASI_STATUS_REG        0x9005
-#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+#define IXGBE_I2C_T_HD_STA     4
+#define IXGBE_I2C_T_LOW                5
+#define IXGBE_I2C_T_HIGH       4
+#define IXGBE_I2C_T_SU_STA     5
+#define IXGBE_I2C_T_HD_DATA    5
+#define IXGBE_I2C_T_SU_DATA    1
+#define IXGBE_I2C_T_RISE       1
+#define IXGBE_I2C_T_FALL       1
+#define IXGBE_I2C_T_SU_STO     4
+#define IXGBE_I2C_T_BUF                5
+
+#define IXGBE_TN_LASI_STATUS_REG       0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM        0x0008
 
 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 *phy_data);
+                              u32 device_type, u16 *phy_data);
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                                u32 device_type, u16 phy_data);
+                               u32 device_type, u16 phy_data);
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-                                       ixgbe_link_speed speed,
-                                       bool autoneg,
-                                       bool autoneg_wait_to_complete);
+                                      ixgbe_link_speed speed,
+                                      bool autoneg,
+                                      bool autoneg_wait_to_complete);
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed *speed,
-                                               bool *autoneg);
+                                              ixgbe_link_speed *speed,
+                                              bool *autoneg);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
-                             ixgbe_link_speed *speed,
-                             bool *link_up);
+                            ixgbe_link_speed *speed,
+                            bool *link_up);
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                       u16 *firmware_version);
+                                      u16 *firmware_version);
 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-                                           u16 *firmware_version);
+                                          u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-                                        u16 *list_offset,
-                                        u16 *data_offset);
+                                       u16 *list_offset,
+                                       u16 *data_offset);
 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data);
+                               u8 dev_addr, u8 *data);
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data);
+                                u8 dev_addr, u8 data);
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data);
+                                 u8 *eeprom_data);
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data);
+                                  u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
 #endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_procfs.c b/drivers/net/ixgbe/ixgbe_procfs.c
new file mode 100644 (file)
index 0000000..339a2d4
--- /dev/null
@@ -0,0 +1,912 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_type.h"
+
+#ifdef IXGBE_PROCFS
+#ifndef IXGBE_SYSFS
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+static struct proc_dir_entry *ixgbe_top_dir = NULL;
+
+static struct net_device_stats *procfs_get_stats(struct net_device *netdev)
+{
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+       struct ixgbe_adapter *adapter;
+#endif
+       if (netdev == NULL)
+               return NULL;
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+       /* only return the current stats */
+       return &netdev->stats;
+#else
+       adapter = netdev_priv(netdev);
+
+       /* only return the current stats */
+       return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+bool ixgbe_thermal_present(struct ixgbe_adapter *adapter)
+{
+       s32 status;
+       if (adapter == NULL)
+               return false;
+       status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw));
+       if (status != 0)
+               return false;
+
+       return true;
+}
+
+static int ixgbe_fwbanner(char *page, char **start, off_t off, int count,
+                        int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%s\n", adapter->eeprom_id);
+}
+
+static int ixgbe_porttype(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       return snprintf(page, count, "%d\n",
+                       test_bit(__IXGBE_DOWN, &adapter->state));
+}
+
+static int ixgbe_portspeed(char *page, char **start, off_t off,
+                          int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       int speed = 0;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       switch (adapter->link_speed) {
+       case IXGBE_LINK_SPEED_100_FULL:
+               speed = 1;
+               break;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               speed = 10;
+               break;
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               speed = 100;
+               break;
+       }
+       return snprintf(page, count, "%d\n", speed);
+}
+
+static int ixgbe_wqlflag(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%d\n", adapter->wol);
+}
+
+static int ixgbe_xflowctl(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct ixgbe_hw *hw;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", hw->fc.current_mode);
+}
+
+static int ixgbe_rxdrops(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->rx_dropped);
+}
+
+static int ixgbe_rxerrors(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n", net_stats->rx_errors);
+}
+
+static int ixgbe_rxupacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR));
+}
+
+static int ixgbe_rxmpacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPRC));
+}
+
+static int ixgbe_rxbpacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC));
+}
+
+static int ixgbe_txupacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT));
+}
+
+static int ixgbe_txmpacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC));
+}
+
+static int ixgbe_txbpacks(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC));
+}
+
+static int ixgbe_txerrors(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->tx_errors);
+}
+
+/* procfs read handler: netdev tx_dropped counter.
+ * NOTE(review): none of these read handlers set *eof; presumably the
+ * proc core relies on the short snprintf return to end the read — confirm.
+ */
+static int ixgbe_txdrops(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->tx_dropped);
+}
+
+/* procfs read handler: netdev rx_packets counter */
+static int ixgbe_rxframes(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->rx_packets);
+}
+
+/* procfs read handler: netdev rx_bytes counter */
+static int ixgbe_rxbytes(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->rx_bytes);
+}
+
+/* procfs read handler: netdev tx_packets counter */
+static int ixgbe_txframes(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->tx_packets);
+}
+
+/* procfs read handler: netdev tx_bytes counter */
+static int ixgbe_txbytes(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device_stats *net_stats;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       net_stats  = procfs_get_stats(adapter->netdev);
+       if (net_stats == NULL)
+               return snprintf(page, count, "error: no net stats\n");
+
+       return snprintf(page, count, "%lu\n",
+                       net_stats->tx_bytes);
+}
+
+/* procfs read handler: link status bitmask.
+ * bit 0 = interface is administratively down (__IXGBE_DOWN),
+ * bit 1 = physical link up (assumed up when no check_link op exists).
+ */
+static int ixgbe_linkstat(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       u32 link_speed;
+       bool link_up = false;
+       int bitmask = 0;
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               bitmask |= 1;
+
+       if (hw->mac.ops.check_link)
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+       else
+               /* always assume link is up, if no check link function */
+               link_up = true;
+       if (link_up)
+               bitmask |= 2;
+       return snprintf(page, count, "0x%X\n", bitmask);
+}
+
+/* procfs read handler: PCI function number of this port */
+static int ixgbe_funcid(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct ixgbe_hw *hw;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "0x%X\n", hw->bus.func);
+}
+
+/* procfs read handler: driver version string */
+static int ixgbe_funcvers(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       return snprintf(page, count, "%s\n", ixgbe_driver_version);
+}
+
+/* procfs read handler: factory (burned-in) MAC address.
+ * Each byte is printed with %02X so values below 0x10 keep their
+ * leading zero; with plain %X the 6-byte address is ambiguous.
+ */
+static int ixgbe_macburn(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+                      (unsigned int)hw->mac.perm_addr[0],
+                      (unsigned int)hw->mac.perm_addr[1],
+                      (unsigned int)hw->mac.perm_addr[2],
+                      (unsigned int)hw->mac.perm_addr[3],
+                      (unsigned int)hw->mac.perm_addr[4],
+                      (unsigned int)hw->mac.perm_addr[5]);
+}
+
+/* procfs read handler: currently administered MAC address.
+ * %02X keeps each byte two hex digits wide (see ixgbe_macburn).
+ */
+static int ixgbe_macadmn(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+                      (unsigned int)hw->mac.addr[0],
+                      (unsigned int)hw->mac.addr[1],
+                      (unsigned int)hw->mac.addr[2],
+                      (unsigned int)hw->mac.addr[3],
+                      (unsigned int)hw->mac.addr[4],
+                      (unsigned int)hw->mac.addr[5]);
+}
+
+/* procfs read handler: link-local address words from the EEPROM.
+ * Reads 6 words starting at word 0x37 (presumably the LLA block for
+ * two ports, three words each — TODO confirm against the EEPROM map)
+ * and prints the triple that belongs to this PCI function.
+ */
+static int ixgbe_maclla1(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_hw *hw;
+       u16 eeprom_buff[6];
+       int first_word = 0x37;
+       int word_count = 6;
+       int rc;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(page, count, "error: no hw data\n");
+
+       rc = ixgbe_read_eeprom_buffer(hw, first_word, word_count,
+                                          eeprom_buff);
+       if (rc != 0)
+               return snprintf(page, count, "error: reading buffer\n");
+
+       switch (hw->bus.func) {
+       case 0:
+               return snprintf(page, count, "0x%04X%04X%04X\n",
+                               eeprom_buff[0],
+                               eeprom_buff[1],
+                               eeprom_buff[2]);
+       case 1:
+               return snprintf(page, count, "0x%04X%04X%04X\n",
+                               eeprom_buff[3],
+                               eeprom_buff[4],
+                               eeprom_buff[5]);
+       }
+       return snprintf(page, count, "unexpected port %d\n", hw->bus.func);
+}
+
+/* procfs read handler: current netdev MTU */
+static int ixgbe_mtusize(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device *netdev;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       netdev = adapter->netdev;
+       if (netdev == NULL)
+               return snprintf(page, count, "error: no net device\n");
+
+       return snprintf(page, count, "%d\n", netdev->mtu);
+}
+
+/* procfs read handler: RX checksum offload enabled flag (0/1) */
+static int ixgbe_featflag(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       int bitmask = 0;
+#ifndef HAVE_NDO_SET_FEATURES
+       struct ixgbe_ring *ring;
+#endif
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device *netdev;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       netdev = adapter->netdev;
+       if (netdev == NULL)
+               return snprintf(page, count, "error: no net device\n");
+
+#ifndef HAVE_NDO_SET_FEATURES
+       /* ixgbe_get_rx_csum(netdev) doesn't compile so hard code */
+       ring = adapter->rx_ring[0];
+       bitmask = test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+       return snprintf(page, count, "%d\n", bitmask);
+#else
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
+               bitmask |= 1;
+       return snprintf(page, count, "%d\n", bitmask);
+#endif
+}
+
+/* procfs read handler: minimum LSO segment count (constant 1) */
+static int ixgbe_lsominct(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       return snprintf(page, count, "%d\n", 1);
+}
+
+/* procfs read handler: promiscuous mode.
+ * NOTE(review): prints the raw masked flag value (0 or IFF_PROMISC),
+ * not a normalized 0/1 — confirm consumers expect that.
+ */
+static int ixgbe_prommode(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       struct net_device *netdev;
+
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+       netdev = adapter->netdev;
+       if (netdev == NULL)
+               return snprintf(page, count, "error: no net device\n");
+
+       return snprintf(page, count, "%d\n",
+                       netdev->flags & IFF_PROMISC);
+}
+
+/* procfs read handler: TX descriptor ring size (queue 0) */
+static int ixgbe_txdscqsz(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count);
+}
+
+/* procfs read handler: RX descriptor ring size (queue 0) */
+static int ixgbe_rxdscqsz(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count);
+}
+
+/* procfs read handler: average RX queue occupancy.
+ * Sums, per queue, the wrap-aware distance from next_to_use to
+ * next_to_clean and divides by the queue count.  The divisor is
+ * validated before the loop (it was previously checked only after
+ * the queues had already been walked).
+ */
+static int ixgbe_rxqavg(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       int index;
+       int diff = 0;
+       u16 ntc;
+       u16 ntu;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       if (adapter->num_rx_queues <= 0)
+               return snprintf(page, count,
+                               "can't calculate, number of queues %d\n",
+                               adapter->num_rx_queues);
+
+       for (index = 0; index < adapter->num_rx_queues; index++) {
+               ntc = adapter->rx_ring[index]->next_to_clean;
+               ntu = adapter->rx_ring[index]->next_to_use;
+
+               if (ntc >= ntu)
+                       diff += (ntc - ntu);
+               else
+                       diff += (adapter->rx_ring[index]->count - ntu + ntc);
+       }
+       return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues);
+}
+
+/* procfs read handler: average TX queue occupancy.
+ * Mirrors ixgbe_rxqavg; the queue-count divisor is validated before
+ * the loop rather than after the queues have been walked.
+ */
+static int ixgbe_txqavg(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       int index;
+       int diff = 0;
+       u16 ntc;
+       u16 ntu;
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       if (adapter->num_tx_queues <= 0)
+               return snprintf(page, count,
+                               "can't calculate, number of queues %d\n",
+                               adapter->num_tx_queues);
+
+       for (index = 0; index < adapter->num_tx_queues; index++) {
+               ntc = adapter->tx_ring[index]->next_to_clean;
+               ntu = adapter->tx_ring[index]->next_to_use;
+
+               if (ntc >= ntu)
+                       diff += (ntc - ntu);
+               else
+                       diff += (adapter->tx_ring[index]->count - ntu + ntc);
+       }
+       return snprintf(page, count, "%d\n",
+                       diff/adapter->num_tx_queues);
+}
+
+/* procfs read handler: IOV type identifier (constant "2") */
+static int ixgbe_iovotype(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       return snprintf(page, count, "2\n");
+}
+
+/* procfs read handler: number of VFs currently configured */
+static int ixgbe_funcnbr(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%d\n", adapter->num_vfs);
+}
+
+/* procfs read handler: PCI bus number of this device */
+static int ixgbe_pciebnbr(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+       if (adapter == NULL)
+               return snprintf(page, count, "error: no adapter\n");
+
+       return snprintf(page, count, "%d\n", adapter->pdev->bus->number);
+}
+
+/* procfs read handler (per-sensor): physical location of the sensor */
+static int ixgbe_therm_location(char *page, char **start, off_t off,
+                                int count, int *eof, void *data)
+{
+       struct ixgbe_therm_proc_data *therm_data =
+               (struct ixgbe_therm_proc_data *)data;
+
+       if (therm_data == NULL)
+               return snprintf(page, count, "error: no therm_data\n");
+
+       return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
+}
+
+
+/* procfs read handler (per-sensor): maximum operating threshold */
+static int ixgbe_therm_maxopthresh(char *page, char **start, off_t off,
+                                   int count, int *eof, void *data)
+{
+       struct ixgbe_therm_proc_data *therm_data =
+               (struct ixgbe_therm_proc_data *)data;
+
+       if (therm_data == NULL)
+               return snprintf(page, count, "error: no therm_data\n");
+
+       return snprintf(page, count, "%d\n",
+                       therm_data->sensor_data->max_op_thresh);
+}
+
+
+/* procfs read handler (per-sensor): caution threshold */
+static int ixgbe_therm_cautionthresh(char *page, char **start, off_t off,
+                                     int count, int *eof, void *data)
+{
+       struct ixgbe_therm_proc_data *therm_data =
+               (struct ixgbe_therm_proc_data *)data;
+
+       if (therm_data == NULL)
+               return snprintf(page, count, "error: no therm_data\n");
+
+       return snprintf(page, count, "%d\n",
+                       therm_data->sensor_data->caution_thresh);
+}
+
+/* procfs read handler (per-sensor): current temperature.
+ * Refreshes the sensor data from hardware first; on failure return
+ * the error line instead of falling through (the old code wrote the
+ * error into the page and then immediately overwrote it with a
+ * possibly stale temperature reading).
+ */
+static int ixgbe_therm_temp(char *page, char **start, off_t off,
+                            int count, int *eof, void *data)
+{
+       s32 status;
+       struct ixgbe_therm_proc_data *therm_data =
+               (struct ixgbe_therm_proc_data *)data;
+
+       if (therm_data == NULL)
+               return snprintf(page, count, "error: no therm_data\n");
+
+       status = ixgbe_get_thermal_sensor_data_generic(therm_data->hw);
+       if (status != 0)
+               return snprintf(page, count,
+                               "error: status %d returned\n", status);
+
+       return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
+}
+
+
+/* binds a procfs entry name to its read handler; tables below are
+ * terminated by an entry whose read pointer is NULL
+ */
+struct ixgbe_proc_type {
+       char name[32];
+       int (*read)(char*, char**, off_t, int, int*, void*);
+};
+
+/* entries created under driver/ixgbe/<pci name>/info */
+struct ixgbe_proc_type ixgbe_proc_entries[] = {
+       {"fwbanner", &ixgbe_fwbanner},
+       {"porttype", &ixgbe_porttype},
+       {"portspeed", &ixgbe_portspeed},
+       {"wqlflag", &ixgbe_wqlflag},
+       {"xflowctl", &ixgbe_xflowctl},
+       {"rxdrops", &ixgbe_rxdrops},
+       {"rxerrors", &ixgbe_rxerrors},
+       {"rxupacks", &ixgbe_rxupacks},
+       {"rxmpacks", &ixgbe_rxmpacks},
+       {"rxbpacks", &ixgbe_rxbpacks},
+       {"txdrops", &ixgbe_txdrops},
+       {"txerrors", &ixgbe_txerrors},
+       {"txupacks", &ixgbe_txupacks},
+       {"txmpacks", &ixgbe_txmpacks},
+       {"txbpacks", &ixgbe_txbpacks},
+       {"rxframes", &ixgbe_rxframes},
+       {"rxbytes", &ixgbe_rxbytes},
+       {"txframes", &ixgbe_txframes},
+       {"txbytes", &ixgbe_txbytes},
+       {"linkstat", &ixgbe_linkstat},
+       {"funcid", &ixgbe_funcid},
+       {"funcvers", &ixgbe_funcvers},
+       {"macburn", &ixgbe_macburn},
+       {"macadmn", &ixgbe_macadmn},
+       {"maclla1", &ixgbe_maclla1},
+       {"mtusize", &ixgbe_mtusize},
+       {"featflag", &ixgbe_featflag},
+       {"lsominct", &ixgbe_lsominct},
+       {"prommode", &ixgbe_prommode},
+       {"txdscqsz", &ixgbe_txdscqsz},
+       {"rxdscqsz", &ixgbe_rxdscqsz},
+       {"txqavg", &ixgbe_txqavg},
+       {"rxqavg", &ixgbe_rxqavg},
+       {"iovotype", &ixgbe_iovotype},
+       {"funcnbr", &ixgbe_funcnbr},
+       {"pciebnbr", &ixgbe_pciebnbr},
+       {"", NULL}
+};
+
+/* entries created under each info/sensor_N directory */
+struct ixgbe_proc_type ixgbe_internal_entries[] = {
+       {"location", &ixgbe_therm_location},
+       {"temp", &ixgbe_therm_temp},
+       {"cautionthresh", &ixgbe_therm_cautionthresh},
+       {"maxopthresh", &ixgbe_therm_maxopthresh},
+       {"", NULL}
+};
+
+/* Tear down every proc entry this adapter created, child-first:
+ * per-sensor entries, sensor_N dirs, info entries, the info dir,
+ * then the per-device dir.  Safe on a partially built tree (as left
+ * by a failed ixgbe_procfs_init) because each level is NULL-checked.
+ */
+void ixgbe_del_proc_entries(struct ixgbe_adapter *adapter)
+{
+       int index, i;
+       char buf[16];   /* much larger than the sensor number will ever be */
+
+       if (ixgbe_top_dir == NULL)
+               return;
+
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+               if (adapter->therm_dir[i] == NULL)
+                       continue;
+
+               for (index = 0; ; index++) {
+                       if (ixgbe_internal_entries[index].read == NULL)
+                               break;
+
+                        remove_proc_entry(ixgbe_internal_entries[index].name,
+                                          adapter->therm_dir[i]);
+               }
+               snprintf(buf, sizeof(buf), "sensor_%d", i);
+               remove_proc_entry(buf, adapter->info_dir);
+       }
+
+       if (adapter->info_dir != NULL) {
+               for (index = 0; ; index++) {
+                       if (ixgbe_proc_entries[index].read == NULL)
+                               break;
+                       remove_proc_entry(ixgbe_proc_entries[index].name,
+                                         adapter->info_dir);
+               }
+               remove_proc_entry("info", adapter->eth_dir);
+       }
+
+       if (adapter->eth_dir != NULL)
+               remove_proc_entry(pci_name(adapter->pdev), ixgbe_top_dir);
+}
+
+/* called from ixgbe_main.c */
+void ixgbe_procfs_exit(struct ixgbe_adapter *adapter)
+{
+       ixgbe_del_proc_entries(adapter);
+}
+
+/* Create the driver-wide /proc/driver/ixgbe directory.
+ * Returns 0 on success, -ENOMEM if the directory cannot be created.
+ * (void) gives the function a proper prototype; an empty parameter
+ * list is an unprototyped declaration in C89/C99.
+ */
+int ixgbe_procfs_topdir_init(void)
+{
+       ixgbe_top_dir = proc_mkdir("driver/ixgbe", NULL);
+       if (ixgbe_top_dir == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Remove the driver-wide /proc/driver/ixgbe directory.
+ * (void) gives the function a proper prototype; an empty parameter
+ * list is an unprototyped declaration in C89/C99.
+ */
+void ixgbe_procfs_topdir_exit(void)
+{
+       remove_proc_entry("driver/ixgbe", NULL);
+}
+
+/* called from ixgbe_main.c
+ *
+ * Build the per-adapter procfs tree:
+ *   driver/ixgbe/<pci name>/info/<entry>            (ixgbe_proc_entries)
+ *   driver/ixgbe/<pci name>/info/sensor_N/<entry>   (ixgbe_internal_entries)
+ * Returns 0 on success or -ENOMEM; on failure everything created so
+ * far is removed again via ixgbe_del_proc_entries().
+ */
+int ixgbe_procfs_init(struct ixgbe_adapter *adapter)
+{
+       int rc = 0;
+       int i;
+       int index;
+       char buf[16];   /* much larger than the sensor number will ever be */
+
+       /* clear all dir pointers so a failure path can tell what exists */
+       adapter->eth_dir = NULL;
+       adapter->info_dir = NULL;
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++)
+               adapter->therm_dir[i] = NULL;
+
+       if (ixgbe_top_dir == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ixgbe_top_dir);
+       if (adapter->eth_dir == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
+       if (adapter->info_dir == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+       for (index = 0; ; index++) {
+               if (ixgbe_proc_entries[index].read == NULL)
+                       break;
+               if (!(create_proc_read_entry(ixgbe_proc_entries[index].name,
+                                          0444,
+                                          adapter->info_dir,
+                                          ixgbe_proc_entries[index].read,
+                                          adapter))) {
+
+                       rc = -ENOMEM;
+                       goto fail;
+               }
+       }
+       if (ixgbe_thermal_present(adapter) == false)
+               goto exit;
+
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+
+               /* sensors with location 0 are not populated */
+               if (adapter->hw.mac.thermal_sensor_data.sensor[i].location ==
+                   0)
+                       continue;
+
+               snprintf(buf, sizeof(buf), "sensor_%d", i);
+               adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
+               if (adapter->therm_dir[i] == NULL) {
+                       rc = -ENOMEM;
+                       goto fail;
+               }
+
+               /*
+                * therm_data struct contains the pointers the read func
+                * will be needing; it is loop-invariant for the inner
+                * loop, so set it up once per sensor.
+                */
+               adapter->therm_data[i].hw = &adapter->hw;
+               adapter->therm_data[i].sensor_data =
+                       &adapter->hw.mac.thermal_sensor_data.sensor[i];
+
+               for (index = 0; ; index++) {
+                       if (ixgbe_internal_entries[index].read == NULL)
+                               break;
+                       if (!(create_proc_read_entry(
+                                          ixgbe_internal_entries[index].name,
+                                          0444,
+                                          adapter->therm_dir[i],
+                                          ixgbe_internal_entries[index].read,
+                                          &adapter->therm_data[i]))) {
+                               rc = -ENOMEM;
+                               goto fail;
+                       }
+               }
+       }
+       goto exit;
+
+fail:
+       ixgbe_del_proc_entries(adapter);
+exit:
+       return rc;
+}
+
+#endif /* !IXGBE_SYSFS */
+#endif /* IXGBE_PROCFS */
diff --git a/drivers/net/ixgbe/ixgbe_ptp.c b/drivers/net/ixgbe/ixgbe_ptp.c
new file mode 100644 (file)
index 0000000..f84e19c
--- /dev/null
@@ -0,0 +1,28 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
index 14d33387e829e214fc47d68a16076b8e6dbe94d5..e04295e91f9c1a699778f10d10953445ff23a46d 100644 (file)
@@ -25,6 +25,7 @@
 
 *******************************************************************************/
 
+
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
-#ifdef NETIF_F_HW_VLAN_TX
-#include <linux/if_vlan.h>
-#endif
 
 #include "ixgbe.h"
 #include "ixgbe_type.h"
 #include "ixgbe_sriov.h"
 
 #ifdef CONFIG_PCI_IOV
-static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
-{
-       struct pci_dev *pdev = adapter->pdev;
-       struct pci_dev *pvfdev;
-       u16 vf_devfn = 0;
-       int device_id;
-       int vfs_found = 0;
+#if defined(IFLA_VF_MAX) && defined(__VMKLNX__)
+#define pci_enable_sriov(dev,vfs) \
+       (vmklnx_enable_vfs((dev), (vfs), NULL, NULL) != (vfs) ? -ENOTSUPP : 0)
 
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-               device_id = IXGBE_DEV_ID_82599_VF;
+#define pci_disable_sriov(dev) \
+       vmklnx_disable_vfs((dev), adapter->num_vfs, NULL, NULL)
+
+static VMK_ReturnStatus ixgbe_passthru_ops(struct net_device *netdev,
+                                               vmk_NetPTOP op,
+                                               void *pargs)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       VMK_ReturnStatus ret;
+       
+       switch (op) {
+       case VMK_NETPTOP_VF_SET_MAC:
+       {
+               vmk_NetPTOPVFSetMacArgs *args = pargs;
+
+               if (is_zero_ether_addr(args->mac)) {
+                       /* Remove the VF mac address */
+                       ixgbe_del_mac_filter(adapter,
+                               adapter->vfinfo[args->vf].vf_mac_addresses,
+                               args->vf);
+                       memset(adapter->vfinfo[args->vf].vf_mac_addresses,
+                               0, ETH_ALEN);
+                       adapter->vfinfo[args->vf].pf_set_mac = false;
+                       ret = VMK_OK;
+               } else {
+                       if (ixgbe_ndo_set_vf_mac(netdev, 
+                                                args->vf, args->mac) < 0)
+                               ret = VMK_FAILURE;
+                       else
+                               ret = VMK_OK;
+               }
                break;
-       case ixgbe_mac_X540:
-               device_id = IXGBE_DEV_ID_X540_VF;
+       }
+       case VMK_NETPTOP_VF_SET_DEFAULT_VLAN:
+       {
+               vmk_NetPTOPVFSetDefaultVlanArgs *args = pargs;
+               
+               if (args->enable) {
+                       adapter->vfinfo[args->vf].pf_set_vlan = true;
+                       ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, args->vid,
+                               args->prio) ? VMK_FAILURE : VMK_OK;
+               } else {
+                       adapter->vfinfo[args->vf].pf_set_vlan = false;
+                       ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, 0, 0) ?
+                               VMK_FAILURE : VMK_OK;
+               }
                break;
+       }
        default:
-               device_id = 0;
+               e_err(probe, "Unhandled OP %d\n", op);
+               ret = VMK_FAILURE;
                break;
        }
-
-       vf_devfn = pdev->devfn + 0x80;
-       pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
-       while (pvfdev) {
-               if (pvfdev->devfn == vf_devfn &&
-                   (pvfdev->bus->number >= pdev->bus->number))
-                       vfs_found++;
-               vf_devfn += 2;
-               pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
-                                       device_id, pvfdev);
-       }
-
-       return vfs_found;
+       return ret;
 }
+#endif
 
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
-                        const struct ixgbe_info *ii)
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int err = 0;
        int num_vf_macvlans, i;
        struct vf_macvlans *mv_list;
        int pre_existing_vfs = 0;
 
-       pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+       pre_existing_vfs = pci_num_vf(adapter->pdev);
        if (!pre_existing_vfs && !adapter->num_vfs)
                return;
 
@@ -106,18 +129,35 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                         "enabled for this device - Please reload all "
                         "VF drivers to avoid spoofed packet errors\n");
        } else {
+               int err;
+               /*
+                * The 82599 supports up to 64 VFs per physical function
+                * but this implementation limits allocation to 63 so that
+                * basic networking resources are still available to the
+                * physical function
+                */
+
+               adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+               if (err) {
+                       e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+                       adapter->num_vfs = 0;
+                       return;
+               }
        }
-       if (err) {
-               e_err(probe, "Failed to enable PCI sriov: %d\n", err);
-               goto err_novfs;
-       }
-       adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 
+       adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
        e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
 
+       /* Enable VMDq flag so device will be set in VM mode */
+       adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+       if (!adapter->ring_feature[RING_F_VMDQ].limit)
+               adapter->ring_feature[RING_F_VMDQ].limit = 1;
+       adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+
        num_vf_macvlans = hw->mac.num_rar_entries -
-       (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+               (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
 
        adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
                                             sizeof(struct vf_macvlans),
@@ -128,8 +168,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list->vf = -1;
                        mv_list->free = true;
-                       mv_list->rar_entry = hw->mac.num_rar_entries -
-                               (i + adapter->num_vfs + 1);
                        list_add(&mv_list->l, &adapter->vf_mvs.l);
                        mv_list++;
                }
@@ -142,47 +180,130 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                kcalloc(adapter->num_vfs,
                        sizeof(struct vf_data_storage), GFP_KERNEL);
        if (adapter->vfinfo) {
-               /* Now that we're sure SR-IOV is enabled
-                * and memory allocated set up the mailbox parameters
-                */
-               ixgbe_init_mbx_params_pf(hw);
-               memcpy(&hw->mbx.ops, ii->mbx_ops,
-                      sizeof(hw->mbx.ops));
+               /* enable L2 switch and replication */
+               adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE |
+                                 IXGBE_FLAG_SRIOV_REPLICATION_ENABLE;
+
+               /* limit traffic classes based on VFs enabled */
+               if (adapter->num_vfs < 32) {
+                       adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+                       adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+               } else {
+                       adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+                       adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+               }
+               adapter->dcb_cfg.vt_mode = true;
+
+               /* We do not support RSS w/ SR-IOV */
+               adapter->ring_feature[RING_F_RSS].limit = 1;
 
-               /* Disable RSC when in SR-IOV mode */
+               /* disable RSC when in SR-IOV mode */
                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                                     IXGBE_FLAG2_RSC_ENABLED);
+#ifdef IXGBE_FCOE
+               /*
+                * When SR-IOV is enabled 82599 cannot support jumbo frames
+                * so we must disable FCoE because we cannot support FCoE MTU.
+                */
+               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+                       adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
+                                           IXGBE_FLAG_FCOE_CAPABLE);
+#endif
+#if defined(IFLA_VF_MAX) && defined(__VMKLNX__)
+               /* Register control callback */
+               e_info(probe, "Registered Passthru Ops\n");
+               VMK_REGISTER_PT_OPS(adapter->netdev, ixgbe_passthru_ops);
+#endif
+               /* enable spoof checking for all VFs */
+               for (i = 0; i < adapter->num_vfs; i++)
+                       adapter->vfinfo[i].spoofchk_enabled = true;
+
                return;
        }
 
        /* Oh oh */
        e_err(probe, "Unable to allocate memory for VF Data Storage - "
-             "SRIOV disabled\n");
-       pci_disable_sriov(adapter->pdev);
+               "SRIOV disabled\n");
+       ixgbe_disable_sriov(adapter);
+}
 
-err_novfs:
-       adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-       adapter->num_vfs = 0;
+/* Return true when any VF owned by this PF is currently assigned to a
+ * guest (PCI_DEV_FLAGS_ASSIGNED set on the virtfn); always returns
+ * false when the kernel lacks pci_dev_flags support.
+ */
+static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
+{
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *vfdev;
+       int dev_id;
+
+       /* pick the VF device ID that matches this PF's MAC type */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+               dev_id = IXGBE_DEV_ID_82599_VF;
+               break;
+       case ixgbe_mac_X540:
+               dev_id = IXGBE_DEV_ID_X540_VF;
+               break;
+       default:
+               return false;
+       }
+
+       /* loop through all the VFs to see if we own any that are assigned */
+       vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, NULL);
+       while (vfdev) {
+               /* if we don't own it we don't care */
+               if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+                       /* if it is assigned we cannot release it */
+                       if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+                               return true;
+               }
+
+               vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, vfdev);
+       }
+
+#endif
+       return false;
 }
-#endif /* #ifdef CONFIG_PCI_IOV */
 
+#endif /* CONFIG_PCI_IOV */
 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 gcr;
        u32 gpie;
        u32 vmdctl;
-       int i;
+
+       /* if SR-IOV is already disabled then there is nothing to do */
+       if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+               return;
+
+       /* set num VFs to 0 to prevent access to vfinfo */
+       adapter->num_vfs = 0;
+
+       if (adapter->vfinfo) {  
+               kfree(adapter->vfinfo);
+               adapter->vfinfo = NULL;
+       }
+       if (adapter->mv_list) {
+               kfree(adapter->mv_list);
+               adapter->mv_list = NULL;
+       }
 
 #ifdef CONFIG_PCI_IOV
+       /*
+        * If our VFs are assigned we cannot shut down SR-IOV
+        * without causing issues, so just leave the hardware
+        * available but disabled
+        */
+       if (ixgbe_vfs_are_assigned(adapter)) {
+               e_dev_warn("Unloading driver while VFs are assigned "
+                          "- VFs will not be deallocated\n");
+               return;
+       }
+
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
-#endif
 
+#endif
        /* turn off device IOV mode */
-       gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
-       gcr &= ~(IXGBE_GCR_EXT_SRIOV);
-       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -193,24 +314,19 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
        IXGBE_WRITE_FLUSH(hw);
 
+       /* Disable VMDq flag so device will be set in VM mode */
+       if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+               adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+       adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
        /* take a breather then clean up driver data */
        msleep(100);
 
-       /* Release reference to VF devices */
-       for (i = 0; i < adapter->num_vfs; i++) {
-               if (adapter->vfinfo[i].vfdev)
-                       pci_dev_put(adapter->vfinfo[i].vfdev);
-       }
-       kfree(adapter->vfinfo);
-       kfree(adapter->mv_list);
-       adapter->vfinfo = NULL;
-
-       adapter->num_vfs = 0;
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
-static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
-                                  int entries, u16 *hash_list, u32 vf)
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+                           int entries, u16 *hash_list, u32 vf)
 {
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        struct ixgbe_hw *hw = &adapter->hw;
@@ -218,24 +334,21 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;
+       u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
        /* only so many hash values supported */
        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
 
-       /*
-        * salt away the number of multi cast addresses assigned
+       /* salt away the number of multi cast addresses assigned
         * to this VF for later use to restore when the PF multi cast
         * list changes
         */
        vfinfo->num_vf_mc_hashes = entries;
 
-       /*
-        * VFs are limited to using the MTA hash table for their multicast
-        * addresses
-        */
-       for (i = 0; i < entries; i++) {
+       /* VFs are limited to using the MTA hash table for their multicast
+        * addresses */
+       for (i = 0; i < entries; i++)
                vfinfo->vf_mc_hashes[i] = hash_list[i];
-       }
 
        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
@@ -244,25 +357,12 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                mta_reg |= (1 << vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }
+       vmolr |= IXGBE_VMOLR_ROMPE;
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 
        return 0;
 }
 
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct list_head *pos;
-       struct vf_macvlans *entry;
-
-       list_for_each(pos, &adapter->vf_mvs.l) {
-               entry = list_entry(pos, struct vf_macvlans, l);
-               if (entry->free == false)
-                       hw->mac.ops.set_rar(hw, entry->rar_entry,
-                                           entry->vf_macvlan,
-                                           entry->vf, IXGBE_RAH_AV);
-       }
-}
-
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -273,6 +373,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
        u32 mta_reg;
 
        for (i = 0; i < adapter->num_vfs; i++) {
+               u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
                vfinfo = &adapter->vfinfo[i];
                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
                        hw->addr_ctrl.mta_in_use++;
@@ -282,19 +383,27 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
                        mta_reg |= (1 << vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }
+               if (vfinfo->num_vf_mc_hashes)
+                       vmolr |= IXGBE_VMOLR_ROMPE;
+               else
+                       vmolr &= ~IXGBE_VMOLR_ROMPE;
+               IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
        }
 
        /* Restore any VF macvlans */
-       ixgbe_restore_vf_macvlans(adapter);
+       ixgbe_full_sync_mac_table(adapter);
 }
 
-static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
-                            u32 vf)
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
 {
-       return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+       /* VLAN 0 is a special case, don't allow it to be removed */
+       if (!vid && !add)
+               return 0;
+
+       return ixgbe_set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        int new_mtu = msgbuf[1];
@@ -318,14 +427,13 @@ static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }
 
-       e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+       e_info(drv, "VF requests change max MTU to %d\n", new_mtu);
 }
 
-static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-       vmolr |= (IXGBE_VMOLR_ROMPE |
-                 IXGBE_VMOLR_BAM);
+       vmolr |=  IXGBE_VMOLR_BAM;
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
        else
@@ -344,10 +452,9 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
                IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 }
 
-static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
        /* reset offloads to defaults */
        if (adapter->vfinfo[vf].pf_vlan) {
@@ -359,6 +466,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                                  VLAN_PRIO_SHIFT)), vf);
                ixgbe_set_vmolr(hw, vf, false);
        } else {
+               ixgbe_set_vf_vlan(adapter, true, 0, vf);
                ixgbe_set_vmvir(adapter, 0, vf);
                ixgbe_set_vmolr(hw, vf, true);
        }
@@ -369,27 +477,29 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        /* Flush and reset the mta with the new values */
        ixgbe_set_rx_mode(adapter->netdev);
 
-       hw->mac.ops.clear_rar(hw, rar_entry);
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 }
 
-static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
-                           int vf, unsigned char *mac_addr)
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+                    int vf, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
-       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
-       hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       s32 retval = 0;
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+       retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
+       if (retval >= 0)
+               memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+       else
+               memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
 
-       return 0;
+       return retval;
 }
 
 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *pos;
        struct vf_macvlans *entry;
+       s32 retval = 0;
 
        if (index <= 1) {
                list_for_each(pos, &adapter->vf_mvs.l) {
@@ -398,7 +508,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                entry->vf = -1;
                                entry->free = true;
                                entry->is_macvlan = false;
-                               hw->mac.ops.clear_rar(hw, entry->rar_entry);
+                               ixgbe_del_mac_filter(adapter,
+                                                    entry->vf_macvlan, vf);
                        }
                }
        }
@@ -429,85 +540,50 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
        if (!entry || !entry->free)
                return -ENOSPC;
 
-       entry->free = false;
-       entry->is_macvlan = true;
-       entry->vf = vf;
-       memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
-
-       hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
+       if (retval >= 0) {
+               entry->free = false;
+               entry->is_macvlan = true;
+               entry->vf = vf;
+               memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+       }
 
-       return 0;
+       return retval;
 }
 
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
-{
 #ifdef CONFIG_PCI_IOV
-       int i;
-       for (i = 0; i < adapter->num_vfs; i++) {
-               if (adapter->vfinfo[i].vfdev->dev_flags &
-                               PCI_DEV_FLAGS_ASSIGNED)
-                       return true;
-       }
-#endif
-       return false;
-}
-
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
        unsigned char vf_mac_addr[6];
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
-       struct pci_dev *pvfdev;
-       unsigned int device_id;
-       u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
-                               (pdev->devfn & 1);
-
        bool enable = ((event_mask & 0x10000000U) != 0);
 
        if (enable) {
                random_ether_addr(vf_mac_addr);
-               e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
-                      vfn, vf_mac_addr);
-               /*
-                * Store away the VF "permananet" MAC address, it will ask
+               e_info(probe, "IOV: VF %d is enabled "
+                      "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+                      vfn,
+                      vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+                      vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+               /* Store away the VF "permanent" MAC address, it will ask
                 * for it later.
                 */
                memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-
-               switch (adapter->hw.mac.type) {
-               case ixgbe_mac_82599EB:
-                       device_id = IXGBE_DEV_ID_82599_VF;
-                       break;
-               case ixgbe_mac_X540:
-                       device_id = IXGBE_DEV_ID_X540_VF;
-                       break;
-               default:
-                       device_id = 0;
-                       break;
-               }
-
-               pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
-               while (pvfdev) {
-                       if (pvfdev->devfn == thisvf_devfn)
-                               break;
-                       pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
-                                               device_id, pvfdev);
-               }
-               if (pvfdev)
-                       adapter->vfinfo[vfn].vfdev = pvfdev;
-               else
-                       e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
-                             thisvf_devfn);
        }
 
        return 0;
 }
+#endif /* CONFIG_PCI_IOV */
 
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 reg;
        u32 reg_offset, vf_shift;
+        /* q_per_pool assumes that DCB is not enabled, hence in 64 pool mode */
+        u32 q_per_pool = 2;
+        int i;
 
        vf_shift = vf % 32;
        reg_offset = vf / 32;
@@ -521,18 +597,26 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
        reg |= (reg | (1 << vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
-       /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
+       /*
+        * Reset the VFs TDWBAL and TDWBAH registers
+        * which are not cleared by an FLR
+        */
+       for (i = 0; i < q_per_pool; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+       }
+
        ixgbe_vf_reset_event(adapter, vf);
 }
 
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
-       u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+       u32 msgbuf[mbx_size];
        struct ixgbe_hw *hw = &adapter->hw;
        s32 retval;
        int entries;
@@ -543,7 +627,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
        if (retval)
-               pr_err("Error receiving message from VF\n");
+               printk(KERN_ERR "Error receiving message from VF\n");
 
        /* this is a message we already processed, do nothing */
        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -557,27 +641,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        if (msgbuf[0] == IXGBE_VF_RESET) {
                unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
                new_mac = (u8 *)(&msgbuf[1]);
-               e_info(probe, "VF Reset msg received from vf %d\n", vf);
                adapter->vfinfo[vf].clear_to_send = false;
                ixgbe_vf_reset_msg(adapter, vf);
                adapter->vfinfo[vf].clear_to_send = true;
-
-               if (is_valid_ether_addr(new_mac) &&
-                   !adapter->vfinfo[vf].pf_set_mac)
-                       ixgbe_set_vf_mac(adapter, vf, vf_mac);
-               else
-                       ixgbe_set_vf_mac(adapter,
-                                vf, adapter->vfinfo[vf].vf_mac_addresses);
+               ixgbe_set_vf_mac(adapter, vf, vf_mac);
 
                /* reply to reset with ack and vf mac address */
                msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-               memcpy(new_mac, vf_mac, ETH_ALEN);
-               /*
-                * Piggyback the multicast filter type so VF can compute the
-                * correct vectors
-                */
+               memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+               /* Piggyback the multicast filter type so VF can compute the
+                * correct vectors */
                msgbuf[3] = hw->mac.mc_filter_type;
-               ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+               retval = ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
 
                return retval;
        }
@@ -593,7 +668,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                new_mac = ((u8 *)(&msgbuf[1]));
                if (is_valid_ether_addr(new_mac) &&
                    !adapter->vfinfo[vf].pf_set_mac) {
-                       ixgbe_set_vf_mac(adapter, vf, new_mac);
+                       e_info(probe, "Set MAC msg received from VF %d\n", vf);
+                       if (ixgbe_set_vf_mac(adapter, vf, new_mac) >= 0)
+                               retval = 0;
+                       else
+                               retval = -1;
                } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
                                  new_mac, ETH_ALEN)) {
                        e_warn(drv, "VF %d attempted to override "
@@ -604,17 +683,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                break;
        case IXGBE_VF_SET_MULTICAST:
                entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-                         >> IXGBE_VT_MSGINFO_SHIFT;
+                                       >> IXGBE_VT_MSGINFO_SHIFT;
                hash_list = (u16 *)&msgbuf[1];
                retval = ixgbe_set_vf_multicasts(adapter, entries,
-                                                hash_list, vf);
+                                                hash_list, vf);
                break;
        case IXGBE_VF_SET_LPE:
+               e_info(probe, "Set LPE msg received from vf %d\n", vf);
                ixgbe_set_vf_lpe(adapter, msgbuf);
                break;
        case IXGBE_VF_SET_VLAN:
                add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-                     >> IXGBE_VT_MSGINFO_SHIFT;
+                                       >> IXGBE_VT_MSGINFO_SHIFT;
                vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
                if (adapter->vfinfo[vf].pf_vlan) {
                        e_warn(drv, "VF %d attempted to override "
@@ -623,24 +703,35 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                               vf);
                        retval = -1;
                } else {
+                       if (add)
+                               adapter->vfinfo[vf].vlan_count++;
+                       else if (adapter->vfinfo[vf].vlan_count)
+                               adapter->vfinfo[vf].vlan_count--;
                        retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+                       if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
+                               hw->mac.ops.set_vlan_anti_spoofing(hw,
+                                                               true, vf);
                }
                break;
        case IXGBE_VF_SET_MACVLAN:
                index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                        IXGBE_VT_MSGINFO_SHIFT;
+               if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+                       e_warn(drv, "VF %d requested MACVLAN filter but is "
+                                   "administratively denied\n", vf);
+                       retval = -1;
+                       break;
+               }
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
                /*
                 * If the VF is allowed to set MAC filters then turn off
                 * anti-spoofing to avoid false positives.  An index
                 * greater than 0 will indicate the VF is setting a
                 * macvlan MAC filter.
                 */
-               if (index > 0 && adapter->antispoofing_enabled) {
-                       hw->mac.ops.set_mac_anti_spoofing(hw, false,
-                                                         adapter->num_vfs);
-                       hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-                       adapter->antispoofing_enabled = false;
-               }
+               if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
+                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+#endif
                retval = ixgbe_set_vf_macvlan(adapter, vf, index,
                                              (unsigned char *)(&msgbuf[1]));
                if (retval == -ENOSPC)
@@ -722,68 +813,107 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
        }
 }
 
+#ifdef IFLA_VF_MAX
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
+       s32 retval = 0;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
                return -EINVAL;
        adapter->vfinfo[vf].pf_set_mac = true;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
-       dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
-                                     " change effective.");
+       dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n");
+       retval = ixgbe_set_vf_mac(adapter, vf, mac);
+       if (retval >= 0) {
+               adapter->vfinfo[vf].pf_set_mac = true;
+               if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+                       dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
+                       dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+               }
+       } else {
+               dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
+       }
+       return retval;
+}
+
+static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter,
+                                  int vf, u16 vlan, u8 qos)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+       if (err)
+               goto out;
+       ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+       ixgbe_set_vmolr(hw, vf, false);
+       if (adapter->vfinfo[vf].spoofchk_enabled)
+               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+       adapter->vfinfo[vf].vlan_count++;
+       adapter->vfinfo[vf].pf_vlan = vlan;
+       adapter->vfinfo[vf].pf_qos = qos;
+       dev_info(&adapter->pdev->dev,
+                "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
-               dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
-                        " but the PF device is not up.\n");
-               dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
-                        " attempting to use the VF device.\n");
+               dev_warn(&adapter->pdev->dev, "The VF VLAN has been set, but the PF device is not up.\n");
+               dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
        }
-       return ixgbe_set_vf_mac(adapter, vf, mac);
+
+out:
+       return err;
+}
+
+static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       err = ixgbe_set_vf_vlan(adapter, false,
+                               adapter->vfinfo[vf].pf_vlan, vf);
+       ixgbe_set_vmvir(adapter, 0, vf);
+       ixgbe_set_vmolr(hw, vf, true);
+       hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+       if (adapter->vfinfo[vf].vlan_count)
+               adapter->vfinfo[vf].vlan_count--;
+       adapter->vfinfo[vf].pf_vlan = 0;
+       adapter->vfinfo[vf].pf_qos = 0;
+
+       return err;
 }
 
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 {
        int err = 0;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
 
-       if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+       /* VLAN IDs accepted range 0-4094 */
+       if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7))
                return -EINVAL;
        if (vlan || qos) {
-               err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+               /*
+                * Check if there is already a port VLAN set, if so
+                * we have to delete the old one first before we
+                * can set the new one.  The usage model had
+                * previously assumed the user would delete the
+                * old port VLAN before setting a new one but this
+                * is not necessarily the case.
+                */
+               if (adapter->vfinfo[vf].pf_vlan)
+                       err = ixgbe_disable_port_vlan(adapter, vf);
                if (err)
                        goto out;
-               ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
-               ixgbe_set_vmolr(hw, vf, false);
-               if (adapter->antispoofing_enabled)
-                       hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-               adapter->vfinfo[vf].pf_vlan = vlan;
-               adapter->vfinfo[vf].pf_qos = qos;
-               dev_info(&adapter->pdev->dev,
-                        "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
-               if (test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       dev_warn(&adapter->pdev->dev,
-                                "The VF VLAN has been set,"
-                                " but the PF device is not up.\n");
-                       dev_warn(&adapter->pdev->dev,
-                                "Bring the PF device up before"
-                                " attempting to use the VF device.\n");
-               }
+               err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
+
        } else {
-               err = ixgbe_set_vf_vlan(adapter, false,
-                                       adapter->vfinfo[vf].pf_vlan, vf);
-               ixgbe_set_vmvir(adapter, vlan, vf);
-               ixgbe_set_vmolr(hw, vf, true);
-               hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-               adapter->vfinfo[vf].pf_vlan = 0;
-               adapter->vfinfo[vf].pf_qos = 0;
-       }
+               err = ixgbe_disable_port_vlan(adapter, vf);
+       }
 out:
-       return err;
+       return err;
 }
 
-static int ixgbe_link_mbps(int internal_link_speed)
+static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
 {
-       switch (internal_link_speed) {
+       switch (adapter->link_speed) {
        case IXGBE_LINK_SPEED_100_FULL:
                return 100;
        case IXGBE_LINK_SPEED_1GB_FULL:
@@ -795,27 +925,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
        }
 }
 
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
-                                   int link_speed)
+static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
 {
-       int rf_dec, rf_int;
-       u32 bcnrc_val;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bcnrc_val = 0;
+       u16 queue, queues_per_pool;
+       u16 tx_rate = adapter->vfinfo[vf].tx_rate;
+
+       if (tx_rate) {
+               /* start with base link speed value */
+               bcnrc_val = adapter->vf_rate_link_speed;
 
-       if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
-               rf_int = link_speed / tx_rate;
-               rf_dec = (link_speed - (rf_int * tx_rate));
-               rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
-               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-               bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-                              IXGBE_RTTBCNRC_RF_INT_MASK);
-               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-       } else {
-               bcnrc_val = 0;
+               bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+               bcnrc_val /= tx_rate;
+
+               /* clear everything but the rate factor */
+               bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+                            IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+               /* enable the rate scheduler */
+               bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
        /*
         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -832,57 +965,99 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       /* determine how many queues per pool based on VMDq mask */
+       queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+
+       /* write value for all Tx queues belonging to VF */
+       for (queue = 0; queue < queues_per_pool; queue++) {
+               unsigned int reg_idx = (vf * queues_per_pool) + queue;
+
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       }
 }
 
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
 {
-       int actual_link_speed, i;
-       bool reset_rate = false;
+       int i;
 
        /* VF Tx rate limit was not set */
-       if (adapter->vf_rate_link_speed == 0)
+       if (!adapter->vf_rate_link_speed)
                return;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if (actual_link_speed != adapter->vf_rate_link_speed) {
-               reset_rate = true;
+       if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
-                        "Link speed has been changed. VF Transmit rate "
-                        "is disabled\n");
+                        "Link speed has been changed. VF Transmit rate is disabled\n");
        }
 
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (reset_rate)
+               if (!adapter->vf_rate_link_speed)
                        adapter->vfinfo[i].tx_rate = 0;
 
-               ixgbe_set_vf_rate_limit(&adapter->hw, i,
-                                       adapter->vfinfo[i].tx_rate,
-                                       actual_link_speed);
+               ixgbe_set_vf_rate_limit(adapter, i);
        }
 }
 
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       int actual_link_speed;
+       int link_speed;
+
+       /* verify VF is active */
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
+
+       /* verify link is up */
+       if (!adapter->link_up)
+               return -EINVAL;
+
+       /* verify we are linked at 10Gbps */
+       link_speed = ixgbe_link_mbps(adapter);
+       if (link_speed != 10000)
+               return -EINVAL;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
-           (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
-           ((tx_rate != 0) && (tx_rate <= 10)))
-           /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+       /* rate limit cannot be less than 10Mbs or greater than link speed */
+       if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
                return -EINVAL;
 
-       adapter->vf_rate_link_speed = actual_link_speed;
-       adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
-       ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+       /* store values */
+       adapter->vf_rate_link_speed = link_speed;
+       adapter->vfinfo[vf].tx_rate = tx_rate;
+
+       /* update hardware configuration */
+       ixgbe_set_vf_rate_limit(adapter, vf);
 
        return 0;
 }
 
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int vf_target_reg = vf >> 3;
+       int vf_target_shift = vf % 8;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 regval;
+
+       adapter->vfinfo[vf].spoofchk_enabled = setting;
+
+       regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+       regval &= ~(1 << vf_target_shift);
+       regval |= (setting << vf_target_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+
+       if (adapter->vfinfo[vf].vlan_count) {
+               vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
+               regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+               regval &= ~(1 << vf_target_shift);
+               regval |= (setting << vf_target_shift);
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+       }
+
+       return 0;
+}
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi)
 {
@@ -894,5 +1069,9 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
        ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
        ivi->qos = adapter->vfinfo[vf].pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+#endif
        return 0;
 }
+#endif /* IFLA_VF_MAX */
index 991c5d7cd0c399eb0be63ecbc35c78d8c3c54d82..8dc6a1145ea7d3d4a70c2155afd4a1ec1aa01f0d 100644 (file)
 
 *******************************************************************************/
 
+
 #ifndef _IXGBE_SRIOV_H_
 #define _IXGBE_SRIOV_H_
 
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+                           int entries, u16 *hash_list, u32 vf);
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
-int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+                    int vf, unsigned char *mac_addr);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+#ifdef IFLA_VF_MAX
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
-                          u8 qos);
+                         u8 qos);
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+#endif /* IFLA_VF_MAX */
 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
 #ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
-                       const struct ixgbe_info *ii);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
 #endif
+#ifdef IFLA_VF_MAX
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+#endif /* IFLA_VF_MAX */
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
 
+/*
+ * These are defined in ixgbe_type.h on behalf of the VF driver
+ * but we need them here unwrapped for the PF driver.
+ */
+#define IXGBE_DEV_ID_82599_VF                  0x10ED
+#define IXGBE_DEV_ID_X540_VF                   0x1515
 
 #endif /* _IXGBE_SRIOV_H_ */
 
diff --git a/drivers/net/ixgbe/ixgbe_sysfs.c b/drivers/net/ixgbe/ixgbe_sysfs.c
new file mode 100644 (file)
index 0000000..8e3c18e
--- /dev/null
@@ -0,0 +1,1026 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_type.h"
+
+#ifdef IXGBE_SYSFS
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+/*
+ * This file provides a sysfs interface to export information from the
+ * driver.  The information presented is READ-ONLY.
+ */
+
+static struct net_device_stats *sysfs_get_stats(struct net_device *netdev)
+{
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+       struct ixgbe_adapter *adapter;
+#endif
+       if (netdev == NULL)
+               return NULL;
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+       /* only return the current stats */
+       return &netdev->stats;
+#else
+       adapter = netdev_priv(netdev);
+
+       /* only return the current stats */
+       return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+static struct net_device *ixgbe_get_netdev(struct kobject *kobj)
+{
+       struct net_device *netdev;
+       struct kobject *parent = kobj->parent;
+       struct device *device_info_kobj;
+
+       if (kobj == NULL)
+               return NULL;
+
+       device_info_kobj = container_of(parent, struct device, kobj);
+       if (device_info_kobj == NULL)
+               return NULL;
+
+       netdev = container_of(device_info_kobj, struct net_device, dev);
+       return netdev;
+}
+
+static struct ixgbe_adapter *ixgbe_get_adapter(struct kobject *kobj)
+{
+       struct ixgbe_adapter *adapter;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return NULL;
+       adapter = netdev_priv(netdev);
+       return adapter;
+}
+
+
+static bool ixgbe_thermal_present(struct kobject *kobj)
+{
+       s32 status;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+
+       if (adapter == NULL)
+               return false;
+
+       status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw));
+       if (status != 0)
+               return false;
+
+       return true;
+}
+
/*
 * ixgbe_name_to_idx - Convert the directory name to the sensor offset.
 * @c: pointer to the directory name string
 *
 * The directory name is in the form "sensor_n" where n is '0' -
 * 'IXGBE_MAX_SENSORS'.  IXGBE_MAX_SENSORS will never be greater than
 * 9.  This function takes advantage of that to keep it simple.
 *
 * Returns the digit's value, or -1 if the name contains no digit.
 *
 * Fix: the scan loop only stopped on '\n'; a digit-free, newline-free
 * name (the normal case for a kobject name) walked past the '\0'
 * terminator — an out-of-bounds read.  Stop at '\0' as well.
 */
static int ixgbe_name_to_idx(const char *c)
{
	/* find first digit, bailing out at end of string */
	while (*c < '0' || *c > '9') {
		if (*c == '\n' || *c == '\0')
			return -1;
		c++;
	}

	return ((int)(*c - '0'));
}
+
+static ssize_t ixgbe_fwbanner(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adapter->eeprom_id);
+}
+
+static ssize_t ixgbe_porttype(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       test_bit(__IXGBE_DOWN, &adapter->state));
+}
+
+static ssize_t ixgbe_portspeed(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       int speed = 0;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       switch (adapter->link_speed) {
+       case IXGBE_LINK_SPEED_100_FULL:
+               speed = 1;
+               break;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               speed = 10;
+               break;
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               speed = 100;
+               break;
+       }
+       return snprintf(buf, PAGE_SIZE, "%d\n", speed);
+}
+
+static ssize_t ixgbe_wqlflag(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adapter->wol);
+}
+
+static ssize_t ixgbe_xflowctl(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       struct ixgbe_hw *hw;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", hw->fc.current_mode);
+}
+
+static ssize_t ixgbe_rxdrops(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->rx_dropped);
+}
+
+static ssize_t ixgbe_rxerrors(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+       return snprintf(buf, PAGE_SIZE, "%lu\n", net_stats->rx_errors);
+}
+
+static ssize_t ixgbe_rxupacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR));
+}
+
+static ssize_t ixgbe_rxmpacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPRC));
+}
+
+static ssize_t ixgbe_rxbpacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC));
+}
+
+static ssize_t ixgbe_txupacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT));
+}
+
+static ssize_t ixgbe_txmpacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC));
+}
+
+static ssize_t ixgbe_txbpacks(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC));
+}
+
+static ssize_t ixgbe_txerrors(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->tx_errors);
+}
+
+static ssize_t ixgbe_txdrops(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->tx_dropped);
+}
+
+static ssize_t ixgbe_rxframes(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->rx_packets);
+}
+
+static ssize_t ixgbe_rxbytes(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->rx_bytes);
+}
+
+static ssize_t ixgbe_txframes(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->tx_packets);
+}
+
+static ssize_t ixgbe_txbytes(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct net_device_stats *net_stats;
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       net_stats  = sysfs_get_stats(netdev);
+       if (net_stats == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       net_stats->tx_bytes);
+}
+
+static ssize_t ixgbe_linkstat(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       u32 link_speed;
+       bool link_up = false;
+       int bitmask = 0;
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               bitmask |= 1;
+
+       if (hw->mac.ops.check_link)
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+       else
+               /* always assume link is up, if no check link function */
+               link_up = true;
+       if (link_up)
+               bitmask |= 2;
+       return snprintf(buf, PAGE_SIZE, "0x%X\n", bitmask);
+}
+
+static ssize_t ixgbe_funcid(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       struct ixgbe_hw *hw;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "0x%X\n", hw->bus.func);
+}
+
+static ssize_t ixgbe_funcvers(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%s\n", ixgbe_driver_version);
+}
+
+static ssize_t ixgbe_macburn(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "0x%X%X%X%X%X%X\n",
+                      (unsigned int)hw->mac.perm_addr[0],
+                      (unsigned int)hw->mac.perm_addr[1],
+                      (unsigned int)hw->mac.perm_addr[2],
+                      (unsigned int)hw->mac.perm_addr[3],
+                      (unsigned int)hw->mac.perm_addr[4],
+                      (unsigned int)hw->mac.perm_addr[5]);
+}
+
+static ssize_t ixgbe_macadmn(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       return snprintf(buf, PAGE_SIZE, "0x%X%X%X%X%X%X\n",
+                      (unsigned int)hw->mac.addr[0],
+                      (unsigned int)hw->mac.addr[1],
+                      (unsigned int)hw->mac.addr[2],
+                      (unsigned int)hw->mac.addr[3],
+                      (unsigned int)hw->mac.addr[4],
+                      (unsigned int)hw->mac.addr[5]);
+}
+
+static ssize_t ixgbe_maclla1(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_hw *hw;
+       u16 eeprom_buff[6];
+       int first_word = 0x37;
+       int word_count = 6;
+       int rc;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       hw = &adapter->hw;
+       if (hw == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+       rc = ixgbe_read_eeprom_buffer(hw, first_word, word_count,
+                                     eeprom_buff);
+       if (rc != 0)
+               return snprintf(buf, PAGE_SIZE, "error: reading buffer\n");
+
+       switch (hw->bus.func) {
+       case 0:
+               return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
+                               eeprom_buff[0],
+                               eeprom_buff[1],
+                               eeprom_buff[2]);
+       case 1:
+               return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
+                               eeprom_buff[3],
+                               eeprom_buff[4],
+                               eeprom_buff[5]);
+       }
+       return snprintf(buf, PAGE_SIZE, "unexpected port %d\n", hw->bus.func);
+}
+
+static ssize_t ixgbe_mtusize(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", netdev->mtu);
+}
+
+static ssize_t ixgbe_featflag(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       int bitmask = 0;
+#ifndef HAVE_NDO_SET_FEATURES
+       struct ixgbe_ring *ring;
+#endif
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+#ifndef HAVE_NDO_SET_FEATURES
+       /* ixgbe_get_rx_csum(netdev) doesn't compile so hard code */
+       ring = adapter->rx_ring[0];
+       bitmask = test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bitmask);
+#else
+       if (netdev->features & NETIF_F_RXCSUM)
+               bitmask |= 1;
+       return snprintf(buf, PAGE_SIZE, "%d\n", bitmask);
+#endif
+}
+
+static ssize_t ixgbe_lsominct(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", 1);
+}
+
+static ssize_t ixgbe_prommode(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct net_device *netdev = ixgbe_get_netdev(kobj);
+       if (netdev == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       netdev->flags & IFF_PROMISC);
+}
+
+static ssize_t ixgbe_txdscqsz(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adapter->tx_ring[0]->count);
+}
+
+static ssize_t ixgbe_rxdscqsz(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adapter->rx_ring[0]->count);
+}
+
+static ssize_t ixgbe_rxqavg(struct kobject *kobj,
+                           struct kobj_attribute *attr, char *buf)
+{
+       int index;
+       int diff = 0;
+       u16 ntc;
+       u16 ntu;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       for (index = 0; index < adapter->num_rx_queues; index++) {
+               ntc = adapter->rx_ring[index]->next_to_clean;
+               ntu = adapter->rx_ring[index]->next_to_use;
+
+               if (ntc >= ntu)
+                       diff += (ntc - ntu);
+               else
+                       diff += (adapter->rx_ring[index]->count - ntu + ntc);
+       }
+       if (adapter->num_rx_queues <= 0)
+               return snprintf(buf, PAGE_SIZE,
+                               "can't calculate, number of queues %d\n",
+                               adapter->num_rx_queues);
+       return snprintf(buf, PAGE_SIZE, "%d\n", diff/adapter->num_rx_queues);
+}
+
+static ssize_t ixgbe_txqavg(struct kobject *kobj,
+                           struct kobj_attribute *attr, char *buf)
+{
+       int index;
+       int diff = 0;
+       u16 ntc;
+       u16 ntu;
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       for (index = 0; index < adapter->num_tx_queues; index++) {
+               ntc = adapter->tx_ring[index]->next_to_clean;
+               ntu = adapter->tx_ring[index]->next_to_use;
+
+               if (ntc >= ntu)
+                       diff += (ntc - ntu);
+               else
+                       diff += (adapter->tx_ring[index]->count - ntu + ntc);
+       }
+       if (adapter->num_tx_queues <= 0)
+               return snprintf(buf, PAGE_SIZE,
+                               "can't calculate, number of queues %d\n",
+                               adapter->num_tx_queues);
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       diff/adapter->num_tx_queues);
+}
+
+static ssize_t ixgbe_iovotype(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "2\n");
+}
+
+static ssize_t ixgbe_funcnbr(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adapter->num_vfs);
+}
+
+static ssize_t ixgbe_pciebnbr(struct kobject *kobj,
+                            struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj);
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adapter->pdev->bus->number);
+}
+
+static s32 ixgbe_sysfs_get_thermal_data(struct kobject *kobj, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj->parent);
+       s32 status;
+
+       if (adapter == NULL) {
+               snprintf(buf, PAGE_SIZE, "error: missing adapter\n");
+               return 0;
+       }
+
+       if (&adapter->hw == NULL) {
+               snprintf(buf, PAGE_SIZE, "error: missing hw\n");
+               return 0;
+       }
+
+       status = ixgbe_get_thermal_sensor_data_generic(&adapter->hw);
+
+       return status;
+}
+
+static ssize_t ixgbe_sysfs_location(struct kobject *kobj,
+                                   struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj->parent);
+       int idx;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       idx = ixgbe_name_to_idx(kobj->name);
+       if (idx == -1)
+               return snprintf(buf, PAGE_SIZE,
+                               "error: invalid sensor name %s\n", kobj->name);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+               adapter->hw.mac.thermal_sensor_data.sensor[idx].location);
+}
+
+static ssize_t ixgbe_sysfs_temp(struct kobject *kobj,
+                               struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj->parent);
+       int idx;
+
+       s32 status = ixgbe_sysfs_get_thermal_data(kobj, buf);
+
+       if (status != 0)
+               return snprintf(buf, PAGE_SIZE, "error: status %d returned",
+                               status);
+
+       idx = ixgbe_name_to_idx(kobj->name);
+       if (idx == -1)
+               return snprintf(buf, PAGE_SIZE,
+                               "error: invalid sensor name %s\n", kobj->name);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+               adapter->hw.mac.thermal_sensor_data.sensor[idx].temp);
+}
+
+static ssize_t ixgbe_sysfs_maxopthresh(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj->parent);
+       int idx;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       idx = ixgbe_name_to_idx(kobj->name);
+       if (idx == -1)
+               return snprintf(buf, PAGE_SIZE,
+                               "error: invalid sensor name %s\n", kobj->name);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+               adapter->hw.mac.thermal_sensor_data.sensor[idx].max_op_thresh);
+}
+
+static ssize_t ixgbe_sysfs_cautionthresh(struct kobject *kobj,
+                                        struct kobj_attribute *attr, char *buf)
+{
+       struct ixgbe_adapter *adapter = ixgbe_get_adapter(kobj->parent);
+       int idx;
+
+       if (adapter == NULL)
+               return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+       idx = ixgbe_name_to_idx(kobj->name);
+       if (idx == -1)
+               return snprintf(buf, PAGE_SIZE,
+                               "error: invalid sensor name %s\n", kobj->name);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+               adapter->hw.mac.thermal_sensor_data.sensor[idx].caution_thresh);
+}
+
+/* Initialize the attributes */
+
+/*
+ * All attributes below are read-only (mode 0444, show handler only,
+ * store handler NULL).  The first four live in each per-sensor
+ * "sensor_<i>" directory; the rest populate the adapter-wide "info"
+ * directory created in ixgbe_sysfs_init().
+ */
+
+/* per-sensor thermal attributes */
+static struct kobj_attribute ixgbe_sysfs_location_attr =
+       __ATTR(location, 0444, ixgbe_sysfs_location, NULL);
+static struct kobj_attribute ixgbe_sysfs_temp_attr =
+       __ATTR(temp, 0444, ixgbe_sysfs_temp, NULL);
+static struct kobj_attribute ixgbe_sysfs_cautionthresh_attr =
+       __ATTR(cautionthresh, 0444, ixgbe_sysfs_cautionthresh, NULL);
+static struct kobj_attribute ixgbe_sysfs_maxopthresh_attr =
+       __ATTR(maxopthresh, 0444, ixgbe_sysfs_maxopthresh, NULL);
+
+/* adapter-wide informational attributes */
+static struct kobj_attribute ixgbe_sysfs_fwbanner_attr =
+       __ATTR(fwbanner, 0444, ixgbe_fwbanner, NULL);
+static struct kobj_attribute ixgbe_sysfs_porttype_attr =
+       __ATTR(porttype, 0444, ixgbe_porttype, NULL);
+static struct kobj_attribute ixgbe_sysfs_portspeed_attr =
+       __ATTR(portspeed, 0444, ixgbe_portspeed, NULL);
+static struct kobj_attribute ixgbe_sysfs_wqlflag_attr =
+       __ATTR(wqlflag, 0444, ixgbe_wqlflag, NULL);
+static struct kobj_attribute ixgbe_sysfs_xflowctl_attr =
+       __ATTR(xflowctl, 0444, ixgbe_xflowctl, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxdrops_attr =
+       __ATTR(rxdrops, 0444, ixgbe_rxdrops, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxerrors_attr =
+       __ATTR(rxerrors, 0444, ixgbe_rxerrors, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxupacks_attr =
+       __ATTR(rxupacks, 0444, ixgbe_rxupacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxmpacks_attr =
+       __ATTR(rxmpacks, 0444, ixgbe_rxmpacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxbpacks_attr =
+       __ATTR(rxbpacks, 0444, ixgbe_rxbpacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_txupacks_attr =
+       __ATTR(txupacks, 0444, ixgbe_txupacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_txmpacks_attr =
+       __ATTR(txmpacks, 0444, ixgbe_txmpacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_txbpacks_attr =
+       __ATTR(txbpacks, 0444, ixgbe_txbpacks, NULL);
+static struct kobj_attribute ixgbe_sysfs_txerrors_attr =
+       __ATTR(txerrors, 0444, ixgbe_txerrors, NULL);
+static struct kobj_attribute ixgbe_sysfs_txdrops_attr =
+       __ATTR(txdrops, 0444, ixgbe_txdrops, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxframes_attr =
+       __ATTR(rxframes, 0444, ixgbe_rxframes, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxbytes_attr =
+       __ATTR(rxbytes, 0444, ixgbe_rxbytes, NULL);
+static struct kobj_attribute ixgbe_sysfs_txframes_attr =
+       __ATTR(txframes, 0444, ixgbe_txframes, NULL);
+static struct kobj_attribute ixgbe_sysfs_txbytes_attr =
+       __ATTR(txbytes, 0444, ixgbe_txbytes, NULL);
+static struct kobj_attribute ixgbe_sysfs_linkstat_attr =
+       __ATTR(linkstat, 0444, ixgbe_linkstat, NULL);
+static struct kobj_attribute ixgbe_sysfs_funcid_attr =
+       __ATTR(funcid, 0444, ixgbe_funcid, NULL);
+/* note: variable is "funvers" but the sysfs file is named "funcvers" */
+static struct kobj_attribute ixgbe_sysfs_funvers_attr =
+       __ATTR(funcvers, 0444, ixgbe_funcvers, NULL);
+static struct kobj_attribute ixgbe_sysfs_macburn_attr =
+       __ATTR(macburn, 0444, ixgbe_macburn, NULL);
+static struct kobj_attribute ixgbe_sysfs_macadmn_attr =
+       __ATTR(macadmn, 0444, ixgbe_macadmn, NULL);
+static struct kobj_attribute ixgbe_sysfs_maclla1_attr =
+       __ATTR(maclla1, 0444, ixgbe_maclla1, NULL);
+static struct kobj_attribute ixgbe_sysfs_mtusize_attr =
+       __ATTR(mtusize, 0444, ixgbe_mtusize, NULL);
+static struct kobj_attribute ixgbe_sysfs_featflag_attr =
+       __ATTR(featflag, 0444, ixgbe_featflag, NULL);
+static struct kobj_attribute ixgbe_sysfs_lsominct_attr =
+       __ATTR(lsominct, 0444, ixgbe_lsominct, NULL);
+static struct kobj_attribute ixgbe_sysfs_prommode_attr =
+       __ATTR(prommode, 0444, ixgbe_prommode, NULL);
+static struct kobj_attribute ixgbe_sysfs_txdscqsz_attr =
+       __ATTR(txdscqsz, 0444, ixgbe_txdscqsz, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxdscqsz_attr =
+       __ATTR(rxdscqsz, 0444, ixgbe_rxdscqsz, NULL);
+static struct kobj_attribute ixgbe_sysfs_txqavg_attr =
+       __ATTR(txqavg, 0444, ixgbe_txqavg, NULL);
+static struct kobj_attribute ixgbe_sysfs_rxqavg_attr =
+       __ATTR(rxqavg, 0444, ixgbe_rxqavg, NULL);
+static struct kobj_attribute ixgbe_sysfs_iovotype_attr =
+       __ATTR(iovotype, 0444, ixgbe_iovotype, NULL);
+static struct kobj_attribute ixgbe_sysfs_funcnbr_attr =
+       __ATTR(funcnbr, 0444, ixgbe_funcnbr, NULL);
+static struct kobj_attribute ixgbe_sysfs_pciebnbr_attr =
+       __ATTR(pciebnbr, 0444, ixgbe_pciebnbr, NULL);
+
+/* Add the attributes into an array, to be added to a group */
+
+/*
+ * Attributes exposed in each per-sensor "sensor_<i>" directory.
+ * The trailing NULL terminates the list, as required by
+ * sysfs_create_group().
+ */
+static struct attribute *therm_attrs[] = {
+       &ixgbe_sysfs_location_attr.attr,
+       &ixgbe_sysfs_temp_attr.attr,
+       &ixgbe_sysfs_cautionthresh_attr.attr,
+       &ixgbe_sysfs_maxopthresh_attr.attr,
+       NULL
+};
+
+/*
+ * Attributes exposed in the adapter-wide "info" directory.
+ * NULL-terminated, as required by sysfs_create_group().
+ */
+static struct attribute *attrs[] = {
+       &ixgbe_sysfs_fwbanner_attr.attr,
+       &ixgbe_sysfs_porttype_attr.attr,
+       &ixgbe_sysfs_portspeed_attr.attr,
+       &ixgbe_sysfs_wqlflag_attr.attr,
+       &ixgbe_sysfs_xflowctl_attr.attr,
+       &ixgbe_sysfs_rxdrops_attr.attr,
+       &ixgbe_sysfs_rxerrors_attr.attr,
+       &ixgbe_sysfs_rxupacks_attr.attr,
+       &ixgbe_sysfs_rxmpacks_attr.attr,
+       &ixgbe_sysfs_rxbpacks_attr.attr,
+       &ixgbe_sysfs_txdrops_attr.attr,
+       &ixgbe_sysfs_txerrors_attr.attr,
+       &ixgbe_sysfs_txupacks_attr.attr,
+       &ixgbe_sysfs_txmpacks_attr.attr,
+       &ixgbe_sysfs_txbpacks_attr.attr,
+       &ixgbe_sysfs_rxframes_attr.attr,
+       &ixgbe_sysfs_rxbytes_attr.attr,
+       &ixgbe_sysfs_txframes_attr.attr,
+       &ixgbe_sysfs_txbytes_attr.attr,
+       &ixgbe_sysfs_linkstat_attr.attr,
+       &ixgbe_sysfs_funcid_attr.attr,
+       &ixgbe_sysfs_funvers_attr.attr,
+       &ixgbe_sysfs_macburn_attr.attr,
+       &ixgbe_sysfs_macadmn_attr.attr,
+       &ixgbe_sysfs_maclla1_attr.attr,
+       &ixgbe_sysfs_mtusize_attr.attr,
+       &ixgbe_sysfs_featflag_attr.attr,
+       &ixgbe_sysfs_lsominct_attr.attr,
+       &ixgbe_sysfs_prommode_attr.attr,
+       &ixgbe_sysfs_txdscqsz_attr.attr,
+       &ixgbe_sysfs_rxdscqsz_attr.attr,
+       &ixgbe_sysfs_txqavg_attr.attr,
+       &ixgbe_sysfs_rxqavg_attr.attr,
+       &ixgbe_sysfs_iovotype_attr.attr,
+       &ixgbe_sysfs_funcnbr_attr.attr,
+       &ixgbe_sysfs_pciebnbr_attr.attr,
+       NULL
+};
+
+/* add attributes to a group */
+
+/* group registered under each "sensor_<i>" kobject */
+static struct attribute_group therm_attr_group = {
+       .attrs = therm_attrs,
+};
+
+/* add attributes to a group */
+/* group registered under the "info" kobject */
+static struct attribute_group attr_group = {
+       .attrs = attrs,
+};
+
+/*
+ * ixgbe_del_adapter - tear down the sysfs hierarchy built by
+ * ixgbe_sysfs_init(): remove the attribute group from each per-sensor
+ * kobject and drop its reference, then do the same for the "info"
+ * kobject.  NULL adapter and NULL kobject entries are tolerated, so
+ * this also serves as the error-path cleanup for a partially
+ * completed ixgbe_sysfs_init().
+ */
+static void ixgbe_del_adapter(struct ixgbe_adapter *adapter)
+{
+       int i;
+
+       if (adapter == NULL)
+               return;
+
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+               /* skip sensors whose kobject was never created */
+               if (adapter->therm_kobj[i] == NULL)
+                       continue;
+               sysfs_remove_group(adapter->therm_kobj[i], &therm_attr_group);
+               kobject_put(adapter->therm_kobj[i]);
+       }
+       if (adapter->info_kobj != NULL) {
+               sysfs_remove_group(adapter->info_kobj, &attr_group);
+               kobject_put(adapter->info_kobj);
+       }
+}
+
+/* called from ixgbe_main.c */
+/*
+ * ixgbe_sysfs_exit - public teardown entry point; thin wrapper around
+ * ixgbe_del_adapter() so the cleanup helper can stay static.
+ */
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
+{
+       ixgbe_del_adapter(adapter);
+}
+
+/* called from ixgbe_main.c */
+/*
+ * ixgbe_sysfs_init - create the "info" directory under the netdev's
+ * sysfs kobject, attach the adapter-wide attribute group, and -- when
+ * thermal data is available -- create one "sensor_<i>" subdirectory
+ * (with the thermal attribute group) for each sensor whose location
+ * field is non-zero.
+ *
+ * Returns 0 on success (including the no-thermal-data case) and -1 on
+ * any failure, after undoing partial setup via ixgbe_del_adapter().
+ *
+ * NOTE(review): returning -1 instead of a negative errno (-ENOMEM,
+ * ...) discards the failure reason; confirm callers only check for
+ * non-zero before relying on the exact value.
+ */
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
+{
+       struct net_device *netdev;
+       int rc = 0;
+       int i;
+
+       if (adapter == NULL)
+               goto err;
+       netdev = adapter->netdev;
+       if (netdev == NULL)
+               goto err;
+
+       /* clear all kobject pointers so error cleanup can tell what exists */
+       adapter->info_kobj = NULL;
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++)
+               adapter->therm_kobj[i] = NULL;
+
+       /* create info kobj and attribute listings in kobj */
+       adapter->info_kobj = kobject_create_and_add("info",
+                                       &(netdev->dev.kobj));
+       if (adapter->info_kobj == NULL)
+               goto err;
+       if (sysfs_create_group(adapter->info_kobj, &attr_group))
+               goto err;
+
+       /* Don't create thermal subkobjs if no data present */
+       if (ixgbe_thermal_present(adapter->info_kobj) != true)
+               goto exit;
+
+       for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+
+               char buf[16];
+
+               /*
+                * Likewise only create individual kobjs that have
+                * meaningful data.
+                */
+               if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+                       continue;
+
+               /* directory named after sensor offset */
+               snprintf(buf, sizeof(buf), "sensor_%d", i);
+               adapter->therm_kobj[i] =
+                       kobject_create_and_add(buf, adapter->info_kobj);
+               if (adapter->therm_kobj[i] == NULL)
+                       goto err;
+               if (sysfs_create_group(adapter->therm_kobj[i],
+                                      &therm_attr_group))
+                       goto err;
+       }
+
+       goto exit;
+
+err:
+       /* safe on partial setup: NULL kobject entries are skipped */
+       ixgbe_del_adapter(adapter);
+       rc = -1;
+exit:
+       return rc;
+}
+
+#endif /* IXGBE_SYSFS */
index 8636e8344fc943bbafe10eae48be83eff760f6cc..6e8b2fa33d4c8bbe6351a424dc2b537be7cfa127 100644 (file)
 #ifndef _IXGBE_TYPE_H_
 #define _IXGBE_TYPE_H_
 
-#include <linux/types.h>
-#include <linux/mdio.h>
-#include <linux/netdevice.h>
+#include "ixgbe_osdep.h"
+
 
 /* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID   0x8086
+#define IXGBE_INTEL_VENDOR_ID                  0x8086
 
 /* Device IDs */
-#define IXGBE_DEV_ID_82598               0x10B6
-#define IXGBE_DEV_ID_82598_BX            0x1508
-#define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598EB_SFP_LOM     0x10DB
-#define IXGBE_DEV_ID_82598AT             0x10C8
-#define IXGBE_DEV_ID_82598AT2            0x150B
-#define IXGBE_DEV_ID_82598EB_CX4         0x10DD
-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT  0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
-#define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
-#define IXGBE_DEV_ID_82599_KX4           0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
-#define IXGBE_DEV_ID_82599_KR            0x1517
-#define IXGBE_DEV_ID_82599_T3_LOM        0x151C
-#define IXGBE_DEV_ID_82599_CX4           0x10F9
-#define IXGBE_DEV_ID_82599_SFP           0x10FB
-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a
-#define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
-#define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
-#define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
-#define IXGBE_DEV_ID_82599_SFP_EM        0x1507
-#define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
-#define IXGBE_DEV_ID_82599EN_SFP         0x1557
-#define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C
-#define IXGBE_DEV_ID_82599_LS            0x154F
-#define IXGBE_DEV_ID_X540T               0x1528
-#define IXGBE_DEV_ID_82599_SFP_SF_QP     0x154A
-
-/* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF           0x10ED
-#define IXGBE_DEV_ID_X540_VF            0x1515
+#define IXGBE_DEV_ID_82598                     0x10B6
+#define IXGBE_DEV_ID_82598_BX                  0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT         0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT       0x10C7
+#define IXGBE_DEV_ID_82598AT                   0x10C8
+#define IXGBE_DEV_ID_82598AT2                  0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM           0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4               0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT       0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT                0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM     0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR             0x10F4
+#define IXGBE_DEV_ID_82599_KX4                 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ            0x1514
+#define IXGBE_DEV_ID_82599_KR                  0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE     0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ      0x000C
+#define IXGBE_DEV_ID_82599_CX4                 0x10F9
+#define IXGBE_DEV_ID_82599_SFP                 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP              0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR           0x17D0
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE      0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE            0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM              0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2             0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP           0x154A
+#define IXGBE_DEV_ID_82599EN_SFP               0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM            0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM              0x151C
+#define IXGBE_DEV_ID_82599_LS                  0x154F
+#define IXGBE_DEV_ID_X540T                     0x1528
 
 /* General Registers */
-#define IXGBE_CTRL      0x00000
-#define IXGBE_STATUS    0x00008
-#define IXGBE_CTRL_EXT  0x00018
-#define IXGBE_ESDP      0x00020
-#define IXGBE_EODSDP    0x00028
-#define IXGBE_I2CCTL    0x00028
-#define IXGBE_LEDCTL    0x00200
-#define IXGBE_FRTIMER   0x00048
-#define IXGBE_TCPTIMER  0x0004C
-#define IXGBE_CORESPARE 0x00600
-#define IXGBE_EXVET     0x05078
+#define IXGBE_CTRL             0x00000
+#define IXGBE_STATUS           0x00008
+#define IXGBE_CTRL_EXT         0x00018
+#define IXGBE_ESDP             0x00020
+#define IXGBE_EODSDP           0x00028
+#define IXGBE_I2CCTL           0x00028
+#define IXGBE_PHY_GPIO         0x00028
+#define IXGBE_MAC_GPIO         0x00030
+#define IXGBE_PHYINT_STATUS0   0x00100
+#define IXGBE_PHYINT_STATUS1   0x00104
+#define IXGBE_PHYINT_STATUS2   0x00108
+#define IXGBE_LEDCTL           0x00200
+#define IXGBE_FRTIMER          0x00048
+#define IXGBE_TCPTIMER         0x0004C
+#define IXGBE_CORESPARE                0x00600
+#define IXGBE_EXVET            0x05078
 
 /* NVM Registers */
-#define IXGBE_EEC       0x10010
-#define IXGBE_EERD      0x10014
-#define IXGBE_EEWR      0x10018
-#define IXGBE_FLA       0x1001C
-#define IXGBE_EEMNGCTL  0x10110
-#define IXGBE_EEMNGDATA 0x10114
-#define IXGBE_FLMNGCTL  0x10118
-#define IXGBE_FLMNGDATA 0x1011C
-#define IXGBE_FLMNGCNT  0x10120
-#define IXGBE_FLOP      0x1013C
-#define IXGBE_GRC       0x10200
+#define IXGBE_EEC      0x10010
+#define IXGBE_EERD     0x10014
+#define IXGBE_EEWR     0x10018
+#define IXGBE_FLA      0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA        0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA        0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP     0x1013C
+#define IXGBE_GRC      0x10200
+#define IXGBE_SRAMREL  0x10210
+#define IXGBE_PHYDBG   0x10218
 
 /* General Receive Control */
-#define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+#define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
 
-#define IXGBE_VPDDIAG0  0x10204
-#define IXGBE_VPDDIAG1  0x10208
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
 
 /* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN    0x00000001
-#define IXGBE_I2C_CLK_OUT   0x00000002
-#define IXGBE_I2C_DATA_IN   0x00000004
-#define IXGBE_I2C_DATA_OUT  0x00000008
+#define IXGBE_I2C_CLK_IN       0x00000001
+#define IXGBE_I2C_CLK_OUT      0x00000002
+#define IXGBE_I2C_DATA_IN      0x00000004
+#define IXGBE_I2C_DATA_OUT     0x00000008
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT     500
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR  0xF8
+#define IXGBE_EMC_INTERNAL_DATA                0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA          0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT   0x19
+#define IXGBE_EMC_DIODE2_DATA          0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT   0x1A
+
+#define IXGBE_MAX_SENSORS              3
+
+/*
+ * Per-diode thermal sensor state read over the I2C thermal interface
+ * (see the IXGBE_EMC_* register offsets above).  All fields are raw
+ * 8-bit register values; presumably degrees Celsius -- TODO confirm
+ * against the EMC sensor datasheet.
+ */
+struct ixgbe_thermal_diode_data {
+       u8 location;            /* 0 means sensor not present/populated */
+       u8 temp;                /* current temperature reading */
+       u8 caution_thresh;      /* caution (warning) threshold */
+       u8 max_op_thresh;       /* maximum operating threshold */
+};
+
+/* Container for all thermal diodes supported by the device. */
+struct ixgbe_thermal_sensor_data {
+       struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
 
 /* Interrupt Registers */
-#define IXGBE_EICR      0x00800
-#define IXGBE_EICS      0x00808
-#define IXGBE_EIMS      0x00880
-#define IXGBE_EIMC      0x00888
-#define IXGBE_EIAC      0x00810
-#define IXGBE_EIAM      0x00890
-#define IXGBE_EICS_EX(_i)   (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i)   (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i)   (0x00AD0 + (_i) * 4)
+#define IXGBE_EICR             0x00800
+#define IXGBE_EICS             0x00808
+#define IXGBE_EIMS             0x00880
+#define IXGBE_EIMC             0x00888
+#define IXGBE_EIAC             0x00810
+#define IXGBE_EIAM             0x00890
+#define IXGBE_EICS_EX(_i)      (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i)      (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i)      (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i)      (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
 /*
  * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware.  82599 EITR is only 12 bits,
- * with the lower 3 always zero.
+ * supported by all ixgbe hardware
  */
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR     0x00000FF8
-#define IXGBE_MIN_EITR     8
-#define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
-                         (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
-#define IXGBE_EITR_LLI_MOD      0x00008000
-#define IXGBE_EITR_CNT_WDIS     0x80000000
-#define IXGBE_IVAR(_i)  (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL   0x00894
-#define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */
-#define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
-#define IXGBE_GPIE      0x00898
+#define IXGBE_MAX_INT_RATE     488281
+#define IXGBE_MIN_INT_RATE     956
+#define IXGBE_MAX_EITR         0x00000FF8
+#define IXGBE_MIN_EITR         8
+#define IXGBE_EITR(_i)         (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+                                (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK        0x00000FF8
+#define IXGBE_EITR_LLI_MOD     0x00008000
+#define IXGBE_EITR_CNT_WDIS    0x80000000
+#define IXGBE_IVAR(_i)         (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC                0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL          0x00894
+#define IXGBE_MSIXT            0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA          0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i)        (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE             0x00898
 
 /* Flow Control Registers */
-#define IXGBE_FCADBUL   0x03210
-#define IXGBE_FCADBUH   0x03214
-#define IXGBE_FCAMACL   0x04328
-#define IXGBE_FCAMACH   0x0432C
-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_PFCTOP    0x03008
-#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTV     0x032A0
-#define IXGBE_FCCFG     0x03D00
-#define IXGBE_TFCS      0x0CE00
+#define IXGBE_FCADBUL          0x03210
+#define IXGBE_FCADBUH          0x03214
+#define IXGBE_FCAMACL          0x04328
+#define IXGBE_FCAMACH          0x0432C
+#define IXGBE_FCRTH_82599(_i)  (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i)  (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP           0x03008
+#define IXGBE_FCTTV(_i)                (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i)                (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i)                (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV            0x032A0
+#define IXGBE_FCCFG            0x03D00
+#define IXGBE_TFCS             0x0CE00
 
 /* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+#define IXGBE_RDBAL(_i)        (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
                         (0x0D000 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+#define IXGBE_RDBAH(_i)        (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
                         (0x0D004 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+#define IXGBE_RDLEN(_i)        (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
                         (0x0D008 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+#define IXGBE_RDH(_i)  (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
                         (0x0D010 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+#define IXGBE_RDT(_i)  (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
                         (0x0D018 + (((_i) - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
-                        (0x0D028 + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
-                        (0x0D02C + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCDBU     0x03028
-#define IXGBE_RDDCC      0x02F20
-#define IXGBE_RXMEMWRAP  0x03190
-#define IXGBE_STARCTRL   0x03024
+#define IXGBE_RXDCTL(_i)       (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+                                (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i)       (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+                                (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU   0x03028
+#define IXGBE_RDDCC    0x02F20
+#define IXGBE_RXMEMWRAP        0x03190
+#define IXGBE_STARCTRL 0x03024
 /*
  * Split and Replication Receive Control Registers
  * 00-15 : 0x02100 + n*4
  * 16-64 : 0x01014 + n*0x40
  * 64-127: 0x0D014 + (n-64)*0x40
  */
-#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
-                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
-                         (0x0D014 + (((_i) - 64) * 0x40))))
+#define IXGBE_SRRCTL(_i)       (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+                                (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+                                (0x0D014 + (((_i) - 64) * 0x40))))
 /*
  * Rx DCA Control Register:
  * 00-15 : 0x02200 + n*4
  * 16-64 : 0x0100C + n*0x40
  * 64-127: 0x0D00C + (n-64)*0x40
  */
-#define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
-                                 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+#define IXGBE_DCA_RXCTRL(_i)   (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+                                (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
                                 (0x0D00C + (((_i) - 64) * 0x40))))
-#define IXGBE_RDRXCTL           0x02F00
-#define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
-                                             /* 8 of these 0x03C00 - 0x03C1C */
-#define IXGBE_RXCTRL    0x03000
-#define IXGBE_DROPEN    0x03D04
-#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_RDRXCTL          0x02F00
+#define IXGBE_RDRXCTL_RSC_PUSH 0x80
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i)     (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL           0x03000
+#define IXGBE_DROPEN           0x03D04
+#define IXGBE_RXPBSIZE_SHIFT   10
 
 /* Receive Registers */
-#define IXGBE_RXCSUM    0x05000
-#define IXGBE_RFCTL     0x05008
-#define IXGBE_DRECCCTL  0x02F08
-#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_RXCSUM           0x05000
+#define IXGBE_RFCTL            0x05008
+#define IXGBE_DRECCCTL         0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2                0x02F8C
+
 /* Multicast Table Array - 128 entries */
-#define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i)   (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                         (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                         (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+#define IXGBE_MTA(_i)          (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i)          (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                                (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i)          (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                                (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i)     (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i)     (0x0A604 + ((_i) * 8))
 /* Packet split receive type */
-#define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
-                              (0x0EA00 + ((_i) * 4)))
+#define IXGBE_PSRTYPE(_i)      (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+                                (0x0EA00 + ((_i) * 4)))
 /* array of 4096 1-bit vlan filters */
-#define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4))
+#define IXGBE_VFTA(_i)         (0x0A000 + ((_i) * 4))
 /*array of 4096 4-bit vlan vmdq indices */
-#define IXGBE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define IXGBE_FCTRL     0x05080
-#define IXGBE_VLNCTRL   0x05088
-#define IXGBE_MCSTCTRL  0x05090
-#define IXGBE_MRQC      0x05818
-#define IXGBE_SAQF(_i)  (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i)  (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i)  (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i)  (0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF     0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC      0x0EC70
-#define IXGBE_MTQC      0x08120
-#define IXGBE_VLVF(_i)  (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
-#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
-#define IXGBE_VT_CTL         0x051B0
-#define IXGBE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i))) /* 64 total */
-#define IXGBE_PFMBMEM(_i)    (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
-#define IXGBE_PFMBICR(_i)    (0x00710 + (4 * (_i))) /* 4 total */
-#define IXGBE_PFMBIMR(_i)    (0x00720 + (4 * (_i))) /* 4 total */
-#define IXGBE_VFRE(_i)       (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i)       (0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i)      (0x08790 + ((_i) * 4))
-#define IXGBE_QDE            0x2F04
-#define IXGBE_VMTXSW(_i)     (0x05180 + ((_i) * 4)) /* 2 total */
-#define IXGBE_VMOLR(_i)      (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i)        (0x0F400 + ((_i) * 4))
-#define IXGBE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
-#define IXGBE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_RXFECCERR0         0x051B8
-#define IXGBE_LLITHRESH 0x0EC90
-#define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
-#define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
-#define IXGBE_IMIRVP    0x05AC0
-#define IXGBE_VMD_CTL   0x0581C
-#define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
-#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL            0x05080
+#define IXGBE_VLNCTRL          0x05088
+#define IXGBE_MCSTCTRL         0x05090
+#define IXGBE_MRQC             0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i)        (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF    0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC     0x0EC70
+#define IXGBE_MTQC     0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i)        (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i)        (0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_VT_CTL           0x051B0
+#define IXGBE_PFMAILBOX(_i)    (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i)      (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i)      (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i)      (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i)         (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i)         (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i)                (0x08790 + ((_i) * 4))
+#define IXGBE_QDE              0x2F04
+#define IXGBE_VMTXSW(_i)       (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i)                (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i)          (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i)                (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i)      (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i)                (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i)    (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0       0x051B8
+#define IXGBE_LLITHRESH                0x0EC90
+#define IXGBE_IMIR(_i)         (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i)      (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIRVP           0x05AC0
+#define IXGBE_VMD_CTL          0x0581C
+#define IXGBE_RETA(_i)         (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i)                (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
 /* Flow Director registers */
-#define IXGBE_FDIRCTRL  0x0EE00
-#define IXGBE_FDIRHKEY  0x0EE68
-#define IXGBE_FDIRSKEY  0x0EE6C
-#define IXGBE_FDIRDIP4M 0x0EE3C
-#define IXGBE_FDIRSIP4M 0x0EE40
-#define IXGBE_FDIRTCPM  0x0EE44
-#define IXGBE_FDIRUDPM  0x0EE48
-#define IXGBE_FDIRIP6M  0x0EE74
-#define IXGBE_FDIRM     0x0EE70
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M        0x0EE3C
+#define IXGBE_FDIRSIP4M        0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM    0x0EE70
 
 /* Flow Director Stats registers */
-#define IXGBE_FDIRFREE  0x0EE38
-#define IXGBE_FDIRLEN   0x0EE4C
-#define IXGBE_FDIRUSTAT 0x0EE50
-#define IXGBE_FDIRFSTAT 0x0EE54
-#define IXGBE_FDIRMATCH 0x0EE58
-#define IXGBE_FDIRMISS  0x0EE5C
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN  0x0EE4C
+#define IXGBE_FDIRUSTAT        0x0EE50
+#define IXGBE_FDIRFSTAT        0x0EE54
+#define IXGBE_FDIRMATCH        0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
 
 /* Flow Director Programming registers */
 #define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA      0x0EE18
-#define IXGBE_FDIRIPDA      0x0EE1C
-#define IXGBE_FDIRPORT      0x0EE20
-#define IXGBE_FDIRVLAN      0x0EE24
-#define IXGBE_FDIRHASH      0x0EE28
-#define IXGBE_FDIRCMD       0x0EE2C
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD  0x0EE2C
 
 /* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
-#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
-#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
-#define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40))
-#define IXGBE_TDT(_i)   (0x06018 + ((_i) * 0x40))
-#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
-#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
-#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
-#define IXGBE_DTXCTL    0x07E00
-
-#define IXGBE_DMATXCTL      0x04A80
-#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_PFDTXGSWC     0x08220
-#define IXGBE_DTXMXSZRQ     0x08100
-#define IXGBE_DTXTCPFLGL    0x04A88
-#define IXGBE_DTXTCPFLGH    0x04A8C
-#define IXGBE_LBDRPEN       0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-
-#define IXGBE_DMATXCTL_TE       0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */
-
-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+#define IXGBE_TDBAL(_i)                (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i)                (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i)                (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i)          (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i)          (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i)       (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i)       (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i)       (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL           0x07E00
+
+#define IXGBE_DMATXCTL         0x04A80
+#define IXGBE_PFVFSPOOF(_i)    (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC                0x08220
+#define IXGBE_DTXMXSZRQ                0x08100
+#define IXGBE_DTXTCPFLGL       0x04A88
+#define IXGBE_DTXTCPFLGH       0x04A8C
+#define IXGBE_LBDRPEN          0x0CA00
+#define IXGBE_TXPBTHRESH(_i)   (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE      0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS      0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV     0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_VT_SHIFT        16  /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN        0x1 /* Local L2 VT switch enable */
 
 /* Anti-spoofing defines */
-#define IXGBE_SPOOF_MACAS_MASK          0xFF
-#define IXGBE_SPOOF_VLANAS_MASK         0xFF00
-#define IXGBE_SPOOF_VLANAS_SHIFT        8
-#define IXGBE_PFVFSPOOF_REG_COUNT       8
-
-#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+#define IXGBE_SPOOF_MACAS_MASK         0xFF
+#define IXGBE_SPOOF_VLANAS_MASK                0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT       8
+#define IXGBE_PFVFSPOOF_REG_COUNT      8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i)           (0x07200 + ((_i) * 4))
 /* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
-#define IXGBE_TIPG      0x0CB00
-#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_MNGTXMAP  0x0CD10
-#define IXGBE_TIPG_FIBER_DEFAULT 3
-#define IXGBE_TXPBSIZE_SHIFT    10
+#define IXGBE_DCA_TXCTRL_82599(_i)     (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG                     0x0CB00
+#define IXGBE_TXPBSIZE(_i)             (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP                 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT       3
+#define IXGBE_TXPBSIZE_SHIFT           10
 
 /* Wake up registers */
-#define IXGBE_WUC       0x05800
-#define IXGBE_WUFC      0x05808
-#define IXGBE_WUS       0x05810
-#define IXGBE_IPAV      0x05838
-#define IXGBE_IP4AT     0x05840 /* IPv4 table 0x5840-0x5858 */
-#define IXGBE_IP6AT     0x05880 /* IPv6 table 0x5880-0x588F */
-
-#define IXGBE_WUPL      0x05900
-#define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
-#define IXGBE_FHFT_EXT(_n)     (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
-                                                           * Filter Table */
-
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX         4
-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
+#define IXGBE_WUC      0x05800
+#define IXGBE_WUFC     0x05808
+#define IXGBE_WUS      0x05810
+#define IXGBE_IPAV     0x05838
+#define IXGBE_IP4AT    0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT    0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL     0x05900
+#define IXGBE_WUPM     0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n)     (0x09800 + ((_n) * 0x100))
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX                4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX    2
 
 /* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX  128
-#define IXGBE_FHFT_LENGTH_OFFSET        0xFC  /* Length byte in FHFT */
-#define IXGBE_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX         128
+#define IXGBE_FHFT_LENGTH_OFFSET               0xFC  /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK                 0x0FF /* Length in lower byte */
 
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
-#define IXGBE_WUC_PME_EN     0x00000002 /* PME Enable */
-#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_WKEN       0x00000010 /* Enable PE_WAKE_N pin assertion  */
+#define IXGBE_WUC_PME_EN       0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS   0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN         0x00000010 /* Enable PE_WAKE_N pin assertion  */
 
 /* Wake Up Filter Control */
-#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IXGBE_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
-#define IXGBE_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
-#define IXGBE_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
-#define IXGBE_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
-#define IXGBE_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define IXGBE_WUFC_MNG  0x00000100 /* Directed Mgmt Packet Wakeup Enable */
-
-#define IXGBE_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
-#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
-#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS     0x000F0000 /* Mask for 4 flex filters */
-#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
-#define IXGBE_WUFC_ALL_FILTERS     0x003F00FF /* Mask for all wakeup filters */
-#define IXGBE_WUFC_FLX_OFFSET      16 /* Offset to the Flexible Filters bits */
+#define IXGBE_WUFC_LNKC        0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX  0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC  0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC  0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4        0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6        0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO  0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0        0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1        0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2        0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3        0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4        0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5        0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS     0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_FLX_OFFSET  16 /* Offset to the Flexible Filters bits */
 
 /* Wake Up Status */
-#define IXGBE_WUS_LNKC  IXGBE_WUFC_LNKC
-#define IXGBE_WUS_MAG   IXGBE_WUFC_MAG
-#define IXGBE_WUS_EX    IXGBE_WUFC_EX
-#define IXGBE_WUS_MC    IXGBE_WUFC_MC
-#define IXGBE_WUS_BC    IXGBE_WUFC_BC
-#define IXGBE_WUS_ARP   IXGBE_WUFC_ARP
-#define IXGBE_WUS_IPV4  IXGBE_WUFC_IPV4
-#define IXGBE_WUS_IPV6  IXGBE_WUFC_IPV6
-#define IXGBE_WUS_MNG   IXGBE_WUFC_MNG
-#define IXGBE_WUS_FLX0  IXGBE_WUFC_FLX0
-#define IXGBE_WUS_FLX1  IXGBE_WUFC_FLX1
-#define IXGBE_WUS_FLX2  IXGBE_WUFC_FLX2
-#define IXGBE_WUS_FLX3  IXGBE_WUFC_FLX3
-#define IXGBE_WUS_FLX4  IXGBE_WUFC_FLX4
-#define IXGBE_WUS_FLX5  IXGBE_WUFC_FLX5
-#define IXGBE_WUS_FLX_FILTERS  IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_LNKC         IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG          IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX           IXGBE_WUFC_EX
+#define IXGBE_WUS_MC           IXGBE_WUFC_MC
+#define IXGBE_WUS_BC           IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP          IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4         IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6         IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG          IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0         IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1         IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2         IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3         IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4         IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5         IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS  IXGBE_WUFC_FLX_FILTERS
 
 /* Wake Up Packet Length */
-#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
 
 /* DCB registers */
-#define MAX_TRAFFIC_CLASS        8
-#define X540_TRAFFIC_CLASS       4
-#define IXGBE_RMCS      0x03D00
-#define IXGBE_DPMCS     0x07F40
-#define IXGBE_PDPMCS    0x0CD00
-#define IXGBE_RUPPBMR   0x050A0
-#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCCR(_i)     (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCSR(_i)     (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS    8
+#define IXGBE_RMCS             0x03D00
+#define IXGBE_DPMCS            0x07F40
+#define IXGBE_PDPMCS           0x0CD00
+#define IXGBE_RUPPBMR          0x050A0
+#define IXGBE_RT2CR(_i)                (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i)                (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i)    (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i)    (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i)    (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i)    (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
 
 /* Security Control Registers */
-#define IXGBE_SECTXCTRL         0x08800
-#define IXGBE_SECTXSTAT         0x08804
-#define IXGBE_SECTXBUFFAF       0x08808
-#define IXGBE_SECTXMINIFG       0x08810
-#define IXGBE_SECRXCTRL         0x08D00
-#define IXGBE_SECRXSTAT         0x08D04
+#define IXGBE_SECTXCTRL                0x08800
+#define IXGBE_SECTXSTAT                0x08804
+#define IXGBE_SECTXBUFFAF      0x08808
+#define IXGBE_SECTXMINIFG      0x08810
+#define IXGBE_SECRXCTRL                0x08D00
+#define IXGBE_SECRXSTAT                0x08D04
 
 /* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS       0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS          0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD   0x00000004
+#define IXGBE_SECTXCTRL_SECTX_DIS      0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS         0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD  0x00000004
 
-#define IXGBE_SECTXSTAT_SECTX_RDY       0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000002
+#define IXGBE_SECTXSTAT_SECTX_RDY      0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR      0x00000002
 
-#define IXGBE_SECRXCTRL_SECRX_DIS       0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS          0x00000002
+#define IXGBE_SECRXCTRL_SECRX_DIS      0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS         0x00000002
 
-#define IXGBE_SECRXSTAT_SECRX_RDY       0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000002
+#define IXGBE_SECRXSTAT_SECRX_RDY      0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR      0x00000002
 
 /* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP         0x08A00
-#define IXGBE_LSECRXCAP         0x08F00
-#define IXGBE_LSECTXCTRL        0x08A04
-#define IXGBE_LSECTXSCL         0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH         0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA          0x08A10
-#define IXGBE_LSECTXPN0         0x08A14
-#define IXGBE_LSECTXPN1         0x08A18
-#define IXGBE_LSECTXKEY0(_n)    (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n)    (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL        0x08F04
-#define IXGBE_LSECRXSCL         0x08F08
-#define IXGBE_LSECRXSCH         0x08F0C
-#define IXGBE_LSECRXSA(_i)      (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i)      (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT          0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE        0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP        0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE        0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP        0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT          0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD        0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV        0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD         0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI       0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI       0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH        0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY       0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE        0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n)      (0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n)     (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n)      (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA        0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA        0x08F80 /* InPktsNotUsingSa */
+#define IXGBE_LSECTXCAP                0x08A00
+#define IXGBE_LSECRXCAP                0x08F00
+#define IXGBE_LSECTXCTRL       0x08A04
+#define IXGBE_LSECTXSCL                0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH                0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA         0x08A10
+#define IXGBE_LSECTXPN0                0x08A14
+#define IXGBE_LSECTXPN1                0x08A18
+#define IXGBE_LSECTXKEY0(_n)   (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n)   (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL       0x08F04
+#define IXGBE_LSECRXSCL                0x08F08
+#define IXGBE_LSECRXSCH                0x08F0C
+#define IXGBE_LSECRXSA(_i)     (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i)     (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m)        (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT         0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE       0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP       0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE       0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP       0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT         0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD       0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV       0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD                0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI      0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI      0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH       0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY      0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE       0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n)     (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n)    (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n)     (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA       0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA       0x08F80 /* InPktsNotUsingSa */
 
 /* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK        0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT       16
-#define IXGBE_LSECRXCAP_SUM_MASK        0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT       16
-
-#define IXGBE_LSECTXCTRL_EN_MASK        0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE        0x0
-#define IXGBE_LSECTXCTRL_AUTH           0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT   0x2
-#define IXGBE_LSECTXCTRL_AISCI          0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK       0x000000D8
-
-#define IXGBE_LSECRXCTRL_EN_MASK        0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT       2
-#define IXGBE_LSECRXCTRL_DISABLE        0x0
-#define IXGBE_LSECRXCTRL_CHECK          0x1
-#define IXGBE_LSECRXCTRL_STRICT         0x2
-#define IXGBE_LSECRXCTRL_DROP           0x3
-#define IXGBE_LSECRXCTRL_PLSH           0x00000040
-#define IXGBE_LSECRXCTRL_RP             0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+#define IXGBE_LSECTXCAP_SUM_MASK       0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT      16
+#define IXGBE_LSECRXCAP_SUM_MASK       0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT      16
+
+#define IXGBE_LSECTXCTRL_EN_MASK       0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE       0x0
+#define IXGBE_LSECTXCTRL_AUTH          0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT  0x2
+#define IXGBE_LSECTXCTRL_AISCI         0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK  0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK      0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK       0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT      2
+#define IXGBE_LSECRXCTRL_DISABLE       0x0
+#define IXGBE_LSECRXCTRL_CHECK         0x1
+#define IXGBE_LSECRXCTRL_STRICT                0x2
+#define IXGBE_LSECRXCTRL_DROP          0x3
+#define IXGBE_LSECRXCTRL_PLSH          0x00000040
+#define IXGBE_LSECRXCTRL_RP            0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK      0xFFFFFF33
 
 /* IpSec Registers */
-#define IXGBE_IPSTXIDX          0x08900
-#define IXGBE_IPSTXSALT         0x08904
-#define IXGBE_IPSTXKEY(_i)      (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX          0x08E00
-#define IXGBE_IPSRXIPADDR(_i)   (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI          0x08E14
-#define IXGBE_IPSRXIPIDX        0x08E18
-#define IXGBE_IPSRXKEY(_i)      (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT         0x08E2C
-#define IXGBE_IPSRXMOD          0x08E30
-
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
+#define IXGBE_IPSTXIDX         0x08900
+#define IXGBE_IPSTXSALT                0x08904
+#define IXGBE_IPSTXKEY(_i)     (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX         0x08E00
+#define IXGBE_IPSRXIPADDR(_i)  (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI         0x08E14
+#define IXGBE_IPSRXIPIDX       0x08E18
+#define IXGBE_IPSRXKEY(_i)     (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT                0x08E2C
+#define IXGBE_IPSRXMOD         0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE   0x4
 
 /* DCB registers */
-#define IXGBE_RTRPCS      0x02430
-#define IXGBE_RTTDCS      0x04900
-#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS      0x0CD00
-#define IXGBE_RTRUP2TC    0x03020
-#define IXGBE_RTTUP2TC    0x0C800
-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TXLLQ(_i)   (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL    0x04904
-#define IXGBE_RTTDT1C     0x04908
-#define IXGBE_RTTDT1S     0x0490C
-#define IXGBE_RTTDTECC    0x04990
-#define IXGBE_RTTDTECC_NO_BCN   0x00000100
-#define IXGBE_RTTBCNRC    0x04984
-#define IXGBE_RTTBCNRC_RS_ENA  0x80000000
+#define IXGBE_RTRPCS           0x02430
+#define IXGBE_RTTDCS           0x04900
+#define IXGBE_RTTDCS_ARBDIS    0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS           0x0CD00
+#define IXGBE_RTRUP2TC         0x03020
+#define IXGBE_RTTUP2TC         0x0C800
+#define IXGBE_RTRPT4C(_i)      (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i)                (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i)      (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i)      (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i)      (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i)      (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i)      (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL         0x04904
+#define IXGBE_RTTDT1C          0x04908
+#define IXGBE_RTTDT1S          0x0490C
+#define IXGBE_RTTDTECC         0x04990
+#define IXGBE_RTTDTECC_NO_BCN  0x00000100
+
+#define IXGBE_RTTBCNRC                 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA          0x80000000
 #define IXGBE_RTTBCNRC_RF_DEC_MASK     0x00003FFF
 #define IXGBE_RTTBCNRC_RF_INT_SHIFT    14
-#define IXGBE_RTTBCNRC_RF_INT_MASK     \
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
        (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-#define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTBCNRM 0x04980
 
 /* FCoE DMA Context Registers */
-#define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH    0x02414 /* FC USer Desc. PTR High */
-#define IXGBE_FCBUFF    0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
-#define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
-#define IXGBE_FCBUFF_OFFSET_SHIFT    16
-#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
-#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
-#define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
-
+#define IXGBE_FCPTRL           0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH           0x02414 /* FC USer Desc. PTR High */
+#define IXGBE_FCBUFF           0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW          0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0         0x03FC0 /* FC Invalid DMA Context Status Reg 0*/
+#define IXGBE_FCINVST(_i)      (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID     (1 << 0)   /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE  (3 << 3)   /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX   (1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT   0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET    0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT    3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT     8
+#define IXGBE_FCBUFF_OFFSET_SHIFT      16
+#define IXGBE_FCDMARW_WE               (1 << 14)   /* Write enable */
+#define IXGBE_FCDMARW_RE               (1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL          0x000001ff  /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE         0xffff0000  /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT   16
 /* FCoE SOF/EOF */
-#define IXGBE_TEOFF     0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF     0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */
+#define IXGBE_TEOFF            0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF            0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF            0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF            0x051F8 /* Rx FC SOF */
 /* FCoE Filter Context Registers */
-#define IXGBE_FCFLT     0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
-#define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
-#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
+#define IXGBE_FCFLT            0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW          0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM          0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID      (1 << 0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST      (1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_SEQID      0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT     0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT   (1 << 13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE       (1 << 14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE       (1 << 15)  /* Read Enable */
 /* FCoE Receive Control */
-#define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+#define IXGBE_FCRXCTRL         0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD  (1 << 1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH        (1 << 3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH    (1 << 4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH        (1 << 5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC    (1 << 6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT   8
 /* FCoE Redirection */
-#define IXGBE_FCRECTL   0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0   0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i)        (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */
-#define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRECTL          0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0          0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i)       (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA      0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA    0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE      8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK        0x0000007f /* 7 bits for the queue index */
 
 /* Stats registers */
-#define IXGBE_CRCERRS   0x04000
-#define IXGBE_ILLERRC   0x04004
-#define IXGBE_ERRBC     0x04008
-#define IXGBE_MSPDC     0x04010
-#define IXGBE_MPC(_i)   (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
-#define IXGBE_MLFC      0x04034
-#define IXGBE_MRFC      0x04038
-#define IXGBE_RLEC      0x04040
-#define IXGBE_LXONTXC   0x03F60
-#define IXGBE_LXONRXC   0x0CF60
-#define IXGBE_LXOFFTXC  0x03F68
-#define IXGBE_LXOFFRXC  0x0CF68
-#define IXGBE_LXONRXCNT 0x041A4
-#define IXGBE_LXOFFRXCNT 0x041A8
-#define IXGBE_PXONRXCNT(_i)     (0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i)    (0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i)   (0x03240 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXONTXC(_i)       (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
-#define IXGBE_PXONRXC(_i)       (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
-#define IXGBE_PXOFFTXC(_i)      (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
-#define IXGBE_PXOFFRXC(_i)      (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
-#define IXGBE_PRC64     0x0405C
-#define IXGBE_PRC127    0x04060
-#define IXGBE_PRC255    0x04064
-#define IXGBE_PRC511    0x04068
-#define IXGBE_PRC1023   0x0406C
-#define IXGBE_PRC1522   0x04070
-#define IXGBE_GPRC      0x04074
-#define IXGBE_BPRC      0x04078
-#define IXGBE_MPRC      0x0407C
-#define IXGBE_GPTC      0x04080
-#define IXGBE_GORCL     0x04088
-#define IXGBE_GORCH     0x0408C
-#define IXGBE_GOTCL     0x04090
-#define IXGBE_GOTCH     0x04094
-#define IXGBE_RNBC(_i)  (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
-#define IXGBE_RUC       0x040A4
-#define IXGBE_RFC       0x040A8
-#define IXGBE_ROC       0x040AC
-#define IXGBE_RJC       0x040B0
-#define IXGBE_MNGPRC    0x040B4
-#define IXGBE_MNGPDC    0x040B8
-#define IXGBE_MNGPTC    0x0CF90
-#define IXGBE_TORL      0x040C0
-#define IXGBE_TORH      0x040C4
-#define IXGBE_TPR       0x040D0
-#define IXGBE_TPT       0x040D4
-#define IXGBE_PTC64     0x040D8
-#define IXGBE_PTC127    0x040DC
-#define IXGBE_PTC255    0x040E0
-#define IXGBE_PTC511    0x040E4
-#define IXGBE_PTC1023   0x040E8
-#define IXGBE_PTC1522   0x040EC
-#define IXGBE_MPTC      0x040F0
-#define IXGBE_BPTC      0x040F4
-#define IXGBE_XEC       0x04120
-#define IXGBE_SSVPC     0x08780
-
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
-                         (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i)  (0x08600 + ((_i) * 4))
-
-#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC     0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC  0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST    0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC   0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_O2BGPTC   0x041C4
-#define IXGBE_O2BSPC    0x087B0
-#define IXGBE_B2OSPC    0x041C0
-#define IXGBE_B2OGPRC   0x02F90
-#define IXGBE_PCRC8ECL  0x0E810
-#define IXGBE_PCRC8ECH  0x0E811
-#define IXGBE_PCRC8ECH_MASK     0x1F
-#define IXGBE_LDPCECL   0x0E820
-#define IXGBE_LDPCECH   0x0E821
+#define IXGBE_CRCERRS  0x04000
+#define IXGBE_ILLERRC  0x04004
+#define IXGBE_ERRBC    0x04008
+#define IXGBE_MSPDC    0x04010
+#define IXGBE_MPC(_i)  (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC     0x04034
+#define IXGBE_MRFC     0x04038
+#define IXGBE_RLEC     0x04040
+#define IXGBE_LXONTXC  0x03F60
+#define IXGBE_LXONRXC  0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT                0x041A4
+#define IXGBE_LXOFFRXCNT       0x041A8
+#define IXGBE_PXONRXCNT(_i)    (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i)   (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i)  (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i)      (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i)      (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i)     (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i)     (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64            0x0405C
+#define IXGBE_PRC127           0x04060
+#define IXGBE_PRC255           0x04064
+#define IXGBE_PRC511           0x04068
+#define IXGBE_PRC1023          0x0406C
+#define IXGBE_PRC1522          0x04070
+#define IXGBE_GPRC             0x04074
+#define IXGBE_BPRC             0x04078
+#define IXGBE_MPRC             0x0407C
+#define IXGBE_GPTC             0x04080
+#define IXGBE_GORCL            0x04088
+#define IXGBE_GORCH            0x0408C
+#define IXGBE_GOTCL            0x04090
+#define IXGBE_GOTCH            0x04094
+#define IXGBE_RNBC(_i)         (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC              0x040A4
+#define IXGBE_RFC              0x040A8
+#define IXGBE_ROC              0x040AC
+#define IXGBE_RJC              0x040B0
+#define IXGBE_MNGPRC           0x040B4
+#define IXGBE_MNGPDC           0x040B8
+#define IXGBE_MNGPTC           0x0CF90
+#define IXGBE_TORL             0x040C0
+#define IXGBE_TORH             0x040C4
+#define IXGBE_TPR              0x040D0
+#define IXGBE_TPT              0x040D4
+#define IXGBE_PTC64            0x040D8
+#define IXGBE_PTC127           0x040DC
+#define IXGBE_PTC255           0x040E0
+#define IXGBE_PTC511           0x040E4
+#define IXGBE_PTC1023          0x040E8
+#define IXGBE_PTC1522          0x040EC
+#define IXGBE_MPTC             0x040F0
+#define IXGBE_BPTC             0x040F4
+#define IXGBE_XEC              0x04120
+#define IXGBE_SSVPC            0x08780
+
+#define IXGBE_RQSMR(_i)        (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i)        (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+                        (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i)       (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i)       (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i)                (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i)       (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i)       (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC            0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC         0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST           0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC          0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC         0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC          0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC         0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK   0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK  0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC          0x041C4
+#define IXGBE_O2BSPC           0x087B0
+#define IXGBE_B2OSPC           0x041C0
+#define IXGBE_B2OGPRC          0x02F90
+#define IXGBE_BUPRC            0x04180
+#define IXGBE_BMPRC            0x04184
+#define IXGBE_BBPRC            0x04188
+#define IXGBE_BUPTC            0x0418C
+#define IXGBE_BMPTC            0x04190
+#define IXGBE_BBPTC            0x04194
+#define IXGBE_BCRCERRS         0x04198
+#define IXGBE_BXONRXC          0x0419C
+#define IXGBE_BXOFFRXC         0x041E0
+#define IXGBE_BXONTXC          0x041E4
+#define IXGBE_BXOFFTXC         0x041E8
+#define IXGBE_PCRC8ECL         0x0E810
+#define IXGBE_PCRC8ECH         0x0E811
+#define IXGBE_PCRC8ECH_MASK    0x1F
+#define IXGBE_LDPCECL          0x0E820
+#define IXGBE_LDPCECH          0x0E821
 
 /* Management */
-#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MANC      0x05820
-#define IXGBE_MFVAL     0x05824
-#define IXGBE_MANC2H    0x05860
-#define IXGBE_MDEF(_i)  (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MIPAF     0x058B0
-#define IXGBE_MMAL(_i)  (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_MMAH(_i)  (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_FTFT      0x09400 /* 0x9400-0x97FC */
-#define IXGBE_METF(_i)  (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW     0x15014
+#define IXGBE_MAVTV(_i)                (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i)                (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC             0x05820
+#define IXGBE_MFVAL            0x05824
+#define IXGBE_MANC2H           0x05860
+#define IXGBE_MDEF(_i)         (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF            0x058B0
+#define IXGBE_MMAL(_i)         (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i)         (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT             0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i)         (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i)     (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW            0x15014
+#define IXGBE_BMCIP(_i)                (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL         0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE        0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID       0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_EN_BMC2OS   0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT     28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK   0xE
 
 /* ARC Subsystem registers */
-#define IXGBE_HICR      0x15F00
-#define IXGBE_FWSTS     0x15F0C
-#define IXGBE_HSMC0R    0x15F04
-#define IXGBE_HSMC1R    0x15F08
-#define IXGBE_SWSR      0x15F10
-#define IXGBE_HFDR      0x15FE8
-#define IXGBE_FLEX_MNG  0x15800 /* 0x15800 - 0x15EFC */
-
-#define IXGBE_HICR_EN              0x01  /* Enable bit - RO */
+#define IXGBE_HICR             0x15F00
+#define IXGBE_FWSTS            0x15F0C
+#define IXGBE_HSMC0R           0x15F04
+#define IXGBE_HSMC1R           0x15F08
+#define IXGBE_SWSR             0x15F10
+#define IXGBE_HFDR             0x15FE8
+#define IXGBE_FLEX_MNG         0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN          0x01  /* Enable bit - RO */
 /* Driver sets this bit when done to put command in RAM */
-#define IXGBE_HICR_C               0x02
-#define IXGBE_HICR_SV              0x04  /* Status Validity */
-#define IXGBE_HICR_FW_RESET_ENABLE 0x40
-#define IXGBE_HICR_FW_RESET        0x80
+#define IXGBE_HICR_C           0x02
+#define IXGBE_HICR_SV          0x04  /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE     0x40
+#define IXGBE_HICR_FW_RESET    0x80
 
 /* PCI-E registers */
-#define IXGBE_GCR       0x11000
-#define IXGBE_GTV       0x11004
-#define IXGBE_FUNCTAG   0x11008
-#define IXGBE_GLT       0x1100C
-#define IXGBE_GSCL_1    0x11010
-#define IXGBE_GSCL_2    0x11014
-#define IXGBE_GSCL_3    0x11018
-#define IXGBE_GSCL_4    0x1101C
-#define IXGBE_GSCN_0    0x11020
-#define IXGBE_GSCN_1    0x11024
-#define IXGBE_GSCN_2    0x11028
-#define IXGBE_GSCN_3    0x1102C
-#define IXGBE_FACTPS    0x10150
-#define IXGBE_PCIEANACTL  0x11040
-#define IXGBE_SWSM      0x10140
-#define IXGBE_FWSM      0x10148
-#define IXGBE_GSSR      0x10160
-#define IXGBE_MREVID    0x11064
-#define IXGBE_DCA_ID    0x11070
-#define IXGBE_DCA_CTRL  0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
-
-/* PCIe registers 82599-specific */
-#define IXGBE_GCR_EXT           0x11050
-#define IXGBE_GSCL_5_82599      0x11030
-#define IXGBE_GSCL_6_82599      0x11034
-#define IXGBE_GSCL_7_82599      0x11038
-#define IXGBE_GSCL_8_82599      0x1103C
-#define IXGBE_PHYADR_82599      0x11040
-#define IXGBE_PHYDAT_82599      0x11044
-#define IXGBE_PHYCTL_82599      0x11048
-#define IXGBE_PBACLR_82599      0x11068
-#define IXGBE_CIAA_82599        0x11088
-#define IXGBE_CIAD_82599        0x1108C
-#define IXGBE_PICAUSE           0x110B0
-#define IXGBE_PIENA             0x110B8
-#define IXGBE_CDQ_MBR_82599     0x110B4
-#define IXGBE_PCIESPARE         0x110BC
-#define IXGBE_MISC_REG_82599    0x110F0
-#define IXGBE_ECC_CTRL_0_82599  0x11100
-#define IXGBE_ECC_CTRL_1_82599  0x11104
-#define IXGBE_ECC_STATUS_82599  0x110E0
-#define IXGBE_BAR_CTRL_82599    0x110F4
+#define IXGBE_GCR              0x11000
+#define IXGBE_GTV              0x11004
+#define IXGBE_FUNCTAG          0x11008
+#define IXGBE_GLT              0x1100C
+#define IXGBE_PCIEPIPEADR      0x11004
+#define IXGBE_PCIEPIPEDAT      0x11008
+#define IXGBE_GSCL_1           0x11010
+#define IXGBE_GSCL_2           0x11014
+#define IXGBE_GSCL_3           0x11018
+#define IXGBE_GSCL_4           0x1101C
+#define IXGBE_GSCN_0           0x11020
+#define IXGBE_GSCN_1           0x11024
+#define IXGBE_GSCN_2           0x11028
+#define IXGBE_GSCN_3           0x1102C
+#define IXGBE_FACTPS           0x10150
+#define IXGBE_PCIEANACTL       0x11040
+#define IXGBE_SWSM             0x10140
+#define IXGBE_FWSM             0x10148
+#define IXGBE_GSSR             0x10160
+#define IXGBE_MREVID           0x11064
+#define IXGBE_DCA_ID           0x11070
+#define IXGBE_DCA_CTRL         0x11074
+#define IXGBE_SWFW_SYNC                IXGBE_GSSR
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT          0x11050
+#define IXGBE_GSCL_5_82599     0x11030
+#define IXGBE_GSCL_6_82599     0x11034
+#define IXGBE_GSCL_7_82599     0x11038
+#define IXGBE_GSCL_8_82599     0x1103C
+#define IXGBE_PHYADR_82599     0x11040
+#define IXGBE_PHYDAT_82599     0x11044
+#define IXGBE_PHYCTL_82599     0x11048
+#define IXGBE_PBACLR_82599     0x11068
+#define IXGBE_CIAA_82599       0x11088
+#define IXGBE_CIAD_82599       0x1108C
+#define IXGBE_PICAUSE          0x110B0
+#define IXGBE_PIENA            0x110B8
+#define IXGBE_CDQ_MBR_82599    0x110B4
+#define IXGBE_PCIESPARE                0x110BC
+#define IXGBE_MISC_REG_82599   0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599   0x110F4
 
 /* PCI Express Control */
-#define IXGBE_GCR_CMPL_TMOUT_MASK       0x0000F000
-#define IXGBE_GCR_CMPL_TMOUT_10ms       0x00001000
-#define IXGBE_GCR_CMPL_TMOUT_RESEND     0x00010000
-#define IXGBE_GCR_CAP_VER2              0x00040000
-
-#define IXGBE_GCR_EXT_MSIX_EN           0x80000000
-#define IXGBE_GCR_EXT_BUFFERS_CLEAR     0x40000000
-#define IXGBE_GCR_EXT_VT_MODE_16        0x00000001
-#define IXGBE_GCR_EXT_VT_MODE_32        0x00000002
-#define IXGBE_GCR_EXT_VT_MODE_64        0x00000003
-#define IXGBE_GCR_EXT_SRIOV             (IXGBE_GCR_EXT_MSIX_EN | \
-                                         IXGBE_GCR_EXT_VT_MODE_64)
-
+#define IXGBE_GCR_CMPL_TMOUT_MASK      0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms      0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND    0x00010000
+#define IXGBE_GCR_CAP_VER2             0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN          0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR    0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16       0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32       0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64       0x00000003
+#define IXGBE_GCR_EXT_SRIOV            (IXGBE_GCR_EXT_MSIX_EN | \
+                                        IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_EXT_VT_MODE_MASK     0x00000003
 /* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL    0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH    0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL    0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH    0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL     0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL    0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH    0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML    0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH    0x08C10 /* System time register High - RO */
-#define IXGBE_TIMINCA    0x08C14 /* Increment attributes register - RW */
-#define IXGBE_TIMADJL    0x08C18 /* Time Adjustment Offset register Low - RW */
-#define IXGBE_TIMADJH    0x08C1C /* Time Adjustment Offset register High - RW */
-#define IXGBE_TSAUXC     0x08C20 /* TimeSync Auxiliary Control register - RW */
-#define IXGBE_TRGTTIML0  0x08C24 /* Target Time Register 0 Low - RW */
-#define IXGBE_TRGTTIMH0  0x08C28 /* Target Time Register 0 High - RW */
-#define IXGBE_TRGTTIML1  0x08C2C /* Target Time Register 1 Low - RW */
-#define IXGBE_TRGTTIMH1  0x08C30 /* Target Time Register 1 High - RW */
-#define IXGBE_FREQOUT0   0x08C34 /* Frequency Out 0 Control register - RW */
-#define IXGBE_FREQOUT1   0x08C38 /* Frequency Out 1 Control register - RW */
-#define IXGBE_AUXSTMPL0  0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
-#define IXGBE_AUXSTMPH0  0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
-#define IXGBE_AUXSTMPL1  0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
-#define IXGBE_AUXSTMPH1  0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSYNCRXCTL       0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL       0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL  0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH  0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL  0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH  0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL   0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL  0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH  0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML  0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH  0x08C10 /* System time register High - RO */
+#define IXGBE_TIMINCA  0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL  0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH  0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC   0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0        0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0        0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1        0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1        0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML  0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH  0x08C38 /* Clock Out Time Register High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0        0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0        0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1        0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1        0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
 
 /* Diagnostic Registers */
-#define IXGBE_RDSTATCTL   0x02C20
-#define IXGBE_RDSTAT(_i)  (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN      0x02F08
-#define IXGBE_RIC_DW(_i)  (0x02F10 + ((_i) * 4))
-#define IXGBE_RDPROBE     0x02F20
-#define IXGBE_RDMAM       0x02F30
-#define IXGBE_RDMAD       0x02F34
-#define IXGBE_TDSTATCTL   0x07C20
-#define IXGBE_TDSTAT(_i)  (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN      0x07F08
-#define IXGBE_TDHMPN2     0x082FC
-#define IXGBE_TXDESCIC    0x082CC
-#define IXGBE_TIC_DW(_i)  (0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
-#define IXGBE_TDPROBE     0x07F20
-#define IXGBE_TXBUFCTRL   0x0C600
-#define IXGBE_TXBUFDATA0  0x0C610
-#define IXGBE_TXBUFDATA1  0x0C614
-#define IXGBE_TXBUFDATA2  0x0C618
-#define IXGBE_TXBUFDATA3  0x0C61C
-#define IXGBE_RXBUFCTRL   0x03600
-#define IXGBE_RXBUFDATA0  0x03610
-#define IXGBE_RXBUFDATA1  0x03614
-#define IXGBE_RXBUFDATA2  0x03618
-#define IXGBE_RXBUFDATA3  0x0361C
-#define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_RFVAL     0x050A4
-#define IXGBE_MDFTC1    0x042B8
-#define IXGBE_MDFTC2    0x042C0
-#define IXGBE_MDFTFIFO1 0x042C4
-#define IXGBE_MDFTFIFO2 0x042C8
-#define IXGBE_MDFTS     0x042CC
-#define IXGBE_RXDATAWRPTR(_i)   (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
-#define IXGBE_RXDESCWRPTR(_i)   (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
-#define IXGBE_RXDATARDPTR(_i)   (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
-#define IXGBE_RXDESCRDPTR(_i)   (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
-#define IXGBE_TXDATAWRPTR(_i)   (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
-#define IXGBE_TXDESCWRPTR(_i)   (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
-#define IXGBE_TXDATARDPTR(_i)   (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
-#define IXGBE_TXDESCRDPTR(_i)   (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
-#define IXGBE_PCIEECCCTL 0x1106C
-#define IXGBE_RXWRPTR(_i)       (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
-#define IXGBE_RXUSED(_i)        (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
-#define IXGBE_RXRDPTR(_i)       (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
-#define IXGBE_RXRDWRPTR(_i)     (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
-#define IXGBE_TXWRPTR(_i)       (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
-#define IXGBE_TXUSED(_i)        (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
-#define IXGBE_TXRDPTR(_i)       (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
-#define IXGBE_TXRDWRPTR(_i)     (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
-#define IXGBE_PCIEECCCTL0 0x11100
-#define IXGBE_PCIEECCCTL1 0x11104
-#define IXGBE_RXDBUECC  0x03F70
-#define IXGBE_TXDBUECC  0x0CF70
-#define IXGBE_RXDBUEST 0x03F74
-#define IXGBE_TXDBUEST 0x0CF74
-#define IXGBE_PBTXECC   0x0C300
-#define IXGBE_PBRXECC   0x03300
-#define IXGBE_GHECCR    0x110B0
+#define IXGBE_RDSTATCTL                0x02C20
+#define IXGBE_RDSTAT(_i)       (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN           0x02F08
+#define IXGBE_RIC_DW(_i)       (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE          0x02F20
+#define IXGBE_RDMAM            0x02F30
+#define IXGBE_RDMAD            0x02F34
+#define IXGBE_TDSTATCTL                0x07C20
+#define IXGBE_TDSTAT(_i)       (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN           0x07F08
+#define IXGBE_TDHMPN2          0x082FC
+#define IXGBE_TXDESCIC         0x082CC
+#define IXGBE_TIC_DW(_i)       (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i)      (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE          0x07F20
+#define IXGBE_TXBUFCTRL                0x0C600
+#define IXGBE_TXBUFDATA0       0x0C610
+#define IXGBE_TXBUFDATA1       0x0C614
+#define IXGBE_TXBUFDATA2       0x0C618
+#define IXGBE_TXBUFDATA3       0x0C61C
+#define IXGBE_RXBUFCTRL                0x03600
+#define IXGBE_RXBUFDATA0       0x03610
+#define IXGBE_RXBUFDATA1       0x03614
+#define IXGBE_RXBUFDATA2       0x03618
+#define IXGBE_RXBUFDATA3       0x0361C
+#define IXGBE_PCIE_DIAG(_i)    (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL            0x050A4
+#define IXGBE_MDFTC1           0x042B8
+#define IXGBE_MDFTC2           0x042C0
+#define IXGBE_MDFTFIFO1                0x042C4
+#define IXGBE_MDFTFIFO2                0x042C8
+#define IXGBE_MDFTS            0x042CC
+#define IXGBE_RXDATAWRPTR(_i)  (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i)  (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i)  (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i)  (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i)  (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i)  (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i)  (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i)  (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL       0x1106C
+#define IXGBE_RXWRPTR(_i)      (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i)       (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i)      (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i)    (0x03160 + ((_i) * 4)) /* 8 of these 3160-316C*/
+#define IXGBE_TXWRPTR(_i)      (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i)       (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i)      (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i)    (0x0C160 + ((_i) * 4)) /* 8 of these C160-C16C*/
+#define IXGBE_PCIEECCCTL0      0x11100
+#define IXGBE_PCIEECCCTL1      0x11104
+#define IXGBE_RXDBUECC         0x03F70
+#define IXGBE_TXDBUECC         0x0CF70
+#define IXGBE_RXDBUEST         0x03F74
+#define IXGBE_TXDBUEST         0x0CF74
+#define IXGBE_PBTXECC          0x0C300
+#define IXGBE_PBRXECC          0x03300
+#define IXGBE_GHECCR           0x110B0
 
 /* MAC Registers */
-#define IXGBE_PCS1GCFIG 0x04200
-#define IXGBE_PCS1GLCTL 0x04208
-#define IXGBE_PCS1GLSTA 0x0420C
-#define IXGBE_PCS1GDBG0 0x04210
-#define IXGBE_PCS1GDBG1 0x04214
-#define IXGBE_PCS1GANA  0x04218
-#define IXGBE_PCS1GANLP 0x0421C
-#define IXGBE_PCS1GANNP 0x04220
-#define IXGBE_PCS1GANLPNP 0x04224
-#define IXGBE_HLREG0    0x04240
-#define IXGBE_HLREG1    0x04244
-#define IXGBE_PAP       0x04248
-#define IXGBE_MACA      0x0424C
-#define IXGBE_APAE      0x04250
-#define IXGBE_ARD       0x04254
-#define IXGBE_AIS       0x04258
-#define IXGBE_MSCA      0x0425C
-#define IXGBE_MSRWD     0x04260
-#define IXGBE_MLADD     0x04264
-#define IXGBE_MHADD     0x04268
-#define IXGBE_MAXFRS    0x04268
-#define IXGBE_TREG      0x0426C
-#define IXGBE_PCSS1     0x04288
-#define IXGBE_PCSS2     0x0428C
-#define IXGBE_XPCSS     0x04290
-#define IXGBE_MFLCN     0x04294
-#define IXGBE_SERDESC   0x04298
-#define IXGBE_MACS      0x0429C
-#define IXGBE_AUTOC     0x042A0
-#define IXGBE_LINKS     0x042A4
-#define IXGBE_LINKS2    0x04324
-#define IXGBE_AUTOC2    0x042A8
-#define IXGBE_AUTOC3    0x042AC
-#define IXGBE_ANLP1     0x042B0
-#define IXGBE_ANLP2     0x042B4
-#define IXGBE_MACC      0x04330
-#define IXGBE_ATLASCTL  0x04800
-#define IXGBE_MMNGC     0x042D0
-#define IXGBE_ANLPNP1   0x042D4
-#define IXGBE_ANLPNP2   0x042D8
-#define IXGBE_KRPCSFC   0x042E0
-#define IXGBE_KRPCSS    0x042E4
-#define IXGBE_FECS1     0x042E8
-#define IXGBE_FECS2     0x042EC
-#define IXGBE_SMADARCTL 0x14F10
-#define IXGBE_MPVC      0x04318
-#define IXGBE_SGMIIC    0x04314
+#define IXGBE_PCS1GCFIG                0x04200
+#define IXGBE_PCS1GLCTL                0x04208
+#define IXGBE_PCS1GLSTA                0x0420C
+#define IXGBE_PCS1GDBG0                0x04210
+#define IXGBE_PCS1GDBG1                0x04214
+#define IXGBE_PCS1GANA         0x04218
+#define IXGBE_PCS1GANLP                0x0421C
+#define IXGBE_PCS1GANNP                0x04220
+#define IXGBE_PCS1GANLPNP      0x04224
+#define IXGBE_HLREG0           0x04240
+#define IXGBE_HLREG1           0x04244
+#define IXGBE_PAP              0x04248
+#define IXGBE_MACA             0x0424C
+#define IXGBE_APAE             0x04250
+#define IXGBE_ARD              0x04254
+#define IXGBE_AIS              0x04258
+#define IXGBE_MSCA             0x0425C
+#define IXGBE_MSRWD            0x04260
+#define IXGBE_MLADD            0x04264
+#define IXGBE_MHADD            0x04268
+#define IXGBE_MAXFRS           0x04268
+#define IXGBE_TREG             0x0426C
+#define IXGBE_PCSS1            0x04288
+#define IXGBE_PCSS2            0x0428C
+#define IXGBE_XPCSS            0x04290
+#define IXGBE_MFLCN            0x04294
+#define IXGBE_SERDESC          0x04298
+#define IXGBE_MACS             0x0429C
+#define IXGBE_AUTOC            0x042A0
+#define IXGBE_LINKS            0x042A4
+#define IXGBE_LINKS2           0x04324
+#define IXGBE_AUTOC2           0x042A8
+#define IXGBE_AUTOC3           0x042AC
+#define IXGBE_ANLP1            0x042B0
+#define IXGBE_ANLP2            0x042B4
+#define IXGBE_MACC             0x04330
+#define IXGBE_ATLASCTL         0x04800
+#define IXGBE_MMNGC            0x042D0
+#define IXGBE_ANLPNP1          0x042D4
+#define IXGBE_ANLPNP2          0x042D8
+#define IXGBE_KRPCSFC          0x042E0
+#define IXGBE_KRPCSS           0x042E4
+#define IXGBE_FECS1            0x042E8
+#define IXGBE_FECS2            0x042EC
+#define IXGBE_SMADARCTL                0x14F10
+#define IXGBE_MPVC             0x04318
+#define IXGBE_SGMIIC           0x04314
 
 /* Statistics Registers */
-#define IXGBE_RXNFGPC      0x041B0
-#define IXGBE_RXNFGBCL     0x041B4
-#define IXGBE_RXNFGBCH     0x041B8
-#define IXGBE_RXDGPC       0x02F50
-#define IXGBE_RXDGBCL      0x02F54
-#define IXGBE_RXDGBCH      0x02F58
-#define IXGBE_RXDDGPC      0x02F5C
-#define IXGBE_RXDDGBCL     0x02F60
-#define IXGBE_RXDDGBCH     0x02F64
-#define IXGBE_RXLPBKGPC    0x02F68
-#define IXGBE_RXLPBKGBCL   0x02F6C
-#define IXGBE_RXLPBKGBCH   0x02F70
-#define IXGBE_RXDLPBKGPC   0x02F74
-#define IXGBE_RXDLPBKGBCL  0x02F78
-#define IXGBE_RXDLPBKGBCH  0x02F7C
-#define IXGBE_TXDGPC       0x087A0
-#define IXGBE_TXDGBCL      0x087A4
-#define IXGBE_TXDGBCH      0x087A8
-
-#define IXGBE_RXDSTATCTRL 0x02F40
+#define IXGBE_RXNFGPC          0x041B0
+#define IXGBE_RXNFGBCL         0x041B4
+#define IXGBE_RXNFGBCH         0x041B8
+#define IXGBE_RXDGPC           0x02F50
+#define IXGBE_RXDGBCL          0x02F54
+#define IXGBE_RXDGBCH          0x02F58
+#define IXGBE_RXDDGPC          0x02F5C
+#define IXGBE_RXDDGBCL         0x02F60
+#define IXGBE_RXDDGBCH         0x02F64
+#define IXGBE_RXLPBKGPC                0x02F68
+#define IXGBE_RXLPBKGBCL       0x02F6C
+#define IXGBE_RXLPBKGBCH       0x02F70
+#define IXGBE_RXDLPBKGPC       0x02F74
+#define IXGBE_RXDLPBKGBCL      0x02F78
+#define IXGBE_RXDLPBKGBCH      0x02F7C
+#define IXGBE_TXDGPC           0x087A0
+#define IXGBE_TXDGBCL          0x087A4
+#define IXGBE_TXDGBCH          0x087A8
+
+#define IXGBE_RXDSTATCTRL      0x02F40
 
 /* Copper Pond 2 link timeout */
 #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
 
 /* Omer CORECTL */
-#define IXGBE_CORECTL           0x014F00
+#define IXGBE_CORECTL                  0x014F00
 /* BARCTRL */
-#define IXGBE_BARCTRL               0x110F4
-#define IXGBE_BARCTRL_FLSIZE        0x0700
-#define IXGBE_BARCTRL_FLSIZE_SHIFT  8
-#define IXGBE_BARCTRL_CSRSIZE       0x2000
+#define IXGBE_BARCTRL                  0x110F4
+#define IXGBE_BARCTRL_FLSIZE           0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT     8
+#define IXGBE_BARCTRL_CSRSIZE          0x2000
 
 /* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN          0x01
-#define IXGBE_RSCCTL_MAXDESC_1      0x00
-#define IXGBE_RSCCTL_MAXDESC_4      0x04
-#define IXGBE_RSCCTL_MAXDESC_8      0x08
-#define IXGBE_RSCCTL_MAXDESC_16     0x0C
+#define IXGBE_RSCCTL_RSCEN     0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16        0x0C
 
 /* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS          0x00000080
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK   0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS         0x00000080
 
 /* RDRXCTL Bit Masks */
-#define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
-#define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_MVMEN         0x00000020
-#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE   0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS     0x00800000 /* Disable RSC compl on LLI */
-#define IXGBE_RDRXCTL_RSCACKC       0x02000000 /* must set 1 when RSC enabled */
-#define IXGBE_RDRXCTL_FCOE_WRFIX    0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_RDMTS_1_2                0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP         0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_MVMEN            0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE         0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_AGGDIS           0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE      0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS                0x00800000 /* Disabl RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC          0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX       0x04000000 /* must set 1 when RSC ena */
 
 /* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i)     ((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK         (0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK         (0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK         (0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK         (0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK         (0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK         (0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK         (0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK         (0x7 << 28)
+#define IXGBE_RQTC_SHIFT_TC(_i)        ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK    (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK    (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK    (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK    (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK    (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK    (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK    (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK    (0x7 << 28)
 
 /* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK     0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT    29
+#define IXGBE_PSRTYPE_RQPL_MASK                0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT       29
 
 /* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS      0x00000004 /* Global IO Master Disable bit */
-#define IXGBE_CTRL_LNK_RST      0x00000008 /* Link Reset. Resets everything. */
-#define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */
-#define IXGBE_CTRL_RST_MASK     (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+#define IXGBE_CTRL_GIO_DIS     0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST     0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST         0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK    (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
 
 /* FACTPS */
-#define IXGBE_FACTPS_LFS        0x40000000 /* LAN Function Select */
+#define IXGBE_FACTPS_LFS       0x40000000 /* LAN Function Select */
 
 /* MHADD Bit Masks */
-#define IXGBE_MHADD_MFS_MASK    0xFFFF0000
-#define IXGBE_MHADD_MFS_SHIFT   16
+#define IXGBE_MHADD_MFS_MASK   0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT  16
 
 /* Extended Device Control */
-#define IXGBE_CTRL_EXT_PFRSTD   0x00004000 /* Physical Function Reset Done */
-#define IXGBE_CTRL_EXT_NS_DIS   0x00010000 /* No Snoop disable */
-#define IXGBE_CTRL_EXT_RO_DIS   0x00020000 /* Relaxed Ordering disable */
-#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define IXGBE_CTRL_EXT_PFRSTD  0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS  0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS  0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD        0x10000000 /* Driver loaded bit for FW */
 
 /* Direct Cache Access (DCA) definitions */
-#define IXGBE_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
-#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
-#define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
+#define IXGBE_DCA_CTRL_DCA_ENABLE      0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE     0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1    0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2    0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK    0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599      0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599     24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN   (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN   (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN   (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN   (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN   (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN   (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK    0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599      0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599     24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN   (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN   (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN   (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN   (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598     16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
-#define IXGBE_MSCA_NP_ADDR_MASK      0x0000FFFF /* MDI Address (new protocol) */
-#define IXGBE_MSCA_NP_ADDR_SHIFT     0
-#define IXGBE_MSCA_DEV_TYPE_MASK     0x001F0000 /* Device Type (new protocol) */
-#define IXGBE_MSCA_DEV_TYPE_SHIFT    16 /* Register Address (old protocol */
-#define IXGBE_MSCA_PHY_ADDR_MASK     0x03E00000 /* PHY Address mask */
-#define IXGBE_MSCA_PHY_ADDR_SHIFT    21 /* PHY Address shift*/
-#define IXGBE_MSCA_OP_CODE_MASK      0x0C000000 /* OP CODE mask */
-#define IXGBE_MSCA_OP_CODE_SHIFT     26 /* OP CODE shift */
-#define IXGBE_MSCA_ADDR_CYCLE        0x00000000 /* OP CODE 00 (addr cycle) */
-#define IXGBE_MSCA_WRITE             0x04000000 /* OP CODE 01 (write) */
-#define IXGBE_MSCA_READ              0x0C000000 /* OP CODE 11 (read) */
-#define IXGBE_MSCA_READ_AUTOINC      0x08000000 /* OP CODE 10 (read, auto inc)*/
-#define IXGBE_MSCA_ST_CODE_MASK      0x30000000 /* ST Code mask */
-#define IXGBE_MSCA_ST_CODE_SHIFT     28 /* ST Code shift */
-#define IXGBE_MSCA_NEW_PROTOCOL      0x00000000 /* ST CODE 00 (new protocol) */
-#define IXGBE_MSCA_OLD_PROTOCOL      0x10000000 /* ST CODE 01 (old protocol) */
-#define IXGBE_MSCA_MDI_COMMAND       0x40000000 /* Initiate MDI command */
-#define IXGBE_MSCA_MDI_IN_PROG_EN    0x80000000 /* MDI in progress enable */
+#define IXGBE_MSCA_NP_ADDR_MASK                0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT       0
+#define IXGBE_MSCA_DEV_TYPE_MASK       0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT      16 /* Register Address (old prot) */
+#define IXGBE_MSCA_PHY_ADDR_MASK       0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT      21 /* PHY Address shift */
+#define IXGBE_MSCA_OP_CODE_MASK                0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT       26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE          0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE               0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ                        0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC                0x08000000 /* OP CODE 10 (rd auto inc) */
+#define IXGBE_MSCA_ST_CODE_MASK                0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT       28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL                0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL                0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND         0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN      0x80000000 /* MDI in progress ena */
 
 /* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK     0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT    0
-#define IXGBE_MSRWD_READ_DATA_MASK      0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT     16
+#define IXGBE_MSRWD_WRITE_DATA_MASK    0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT   0
+#define IXGBE_MSRWD_READ_DATA_MASK     0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT    16
 
 /* Atlas registers */
-#define IXGBE_ATLAS_PDN_LPBK    0x24
-#define IXGBE_ATLAS_PDN_10G     0xB
-#define IXGBE_ATLAS_PDN_1G      0xC
-#define IXGBE_ATLAS_PDN_AN      0xD
+#define IXGBE_ATLAS_PDN_LPBK           0x24
+#define IXGBE_ATLAS_PDN_10G            0xB
+#define IXGBE_ATLAS_PDN_1G             0xC
+#define IXGBE_ATLAS_PDN_AN             0xD
 
 /* Atlas bit masks */
-#define IXGBE_ATLASCTL_WRITE_CMD        0x00010000
-#define IXGBE_ATLAS_PDN_TX_REG_EN       0x10
-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL   0xF0
-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0
-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0
+#define IXGBE_ATLASCTL_WRITE_CMD       0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN      0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL  0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL   0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL   0xF0
 
 /* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD         0x00010000
-
-/* MDIO definitions */
-
-#define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */
-
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS       0x1    /* VS1 Status Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS  0x0008 /* 1 = Link Up */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED    0x0018
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED     0x0010
-
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_CORECTL_WRITE_CMD                0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE            0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE                        0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE             0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE           0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE  0x1E   /* Device 30 */
+#define IXGBE_TWINAX_DEV                       1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL           0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS            0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS       0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS      0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED         0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED          0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL    0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS     0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT       0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP         0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL      0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET                0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH         0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW          0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G       0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G                0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M      0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY     0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY                0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY       0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY       0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE      0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR        0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR        0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA        0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT        0xC30C /* PHY_XS SDA/SCL Status Reg */
 
 /* MII clause 22/28 definitions */
-#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG             0x17   /* 1G XNP Transmit */
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX      0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE             0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_AUTONEG_REG                    0x0
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE  0x0800
 
-#define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0
-#define IXGBE_MAX_PHY_ADDR             32
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG   0x20   /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG           0x17   /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG                0x10   /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE          0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX    0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE           0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_100BASE_T_ADVERTISE          0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF     0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART                      0x200
+#define IXGBE_MII_AUTONEG_COMPLETE             0x20
+#define IXGBE_MII_AUTONEG_LINK_UP              0x04
+#define IXGBE_MII_AUTONEG_REG                  0x0
+
+#define IXGBE_PHY_REVISION_MASK                0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR             32
 
 /* PHY IDs*/
-#define TN1010_PHY_ID    0x00A19410
-#define TNX_FW_REV       0xB
-#define X540_PHY_ID      0x01540200
-#define QT2022_PHY_ID    0x0043A400
-#define ATH_PHY_ID       0x03429050
-#define AQ_FW_REV        0x20
+#define TN1010_PHY_ID  0x00A19410
+#define TNX_FW_REV     0xB
+#define X540_PHY_ID    0x01540200
+#define AQ_FW_REV      0x20
+#define QT2022_PHY_ID  0x0043A400
+#define ATH_PHY_ID     0x03429050
 
 /* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
+#define IXGBE_M88E1145_E_PHY_ID        0x01410CD0
 
 /* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
-#define IXGBE_PHY_INIT_END_NL    0xFFFF
-#define IXGBE_CONTROL_MASK_NL    0xF000
-#define IXGBE_DATA_MASK_NL       0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL   12
-#define IXGBE_DELAY_NL           0
-#define IXGBE_DATA_NL            1
-#define IXGBE_CONTROL_NL         0x000F
-#define IXGBE_CONTROL_EOL_NL     0x0FFF
-#define IXGBE_CONTROL_SOL_NL     0x0000
+#define IXGBE_PHY_INIT_OFFSET_NL       0x002B
+#define IXGBE_PHY_INIT_END_NL          0xFFFF
+#define IXGBE_CONTROL_MASK_NL          0xF000
+#define IXGBE_DATA_MASK_NL             0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL         12
+#define IXGBE_DELAY_NL                 0
+#define IXGBE_DATA_NL                  1
+#define IXGBE_CONTROL_NL               0x000F
+#define IXGBE_CONTROL_EOL_NL           0x0FFF
+#define IXGBE_CONTROL_SOL_NL           0x0000
 
 /* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN         0x00000004 /* SDP2 */
-#define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME         0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT   0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_VTMODE_MASK   0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16     0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32     0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64     0x0000C000 /* 64 VFs 2 queues per VF */
+#define IXGBE_SDP0_GPIEN       0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN       0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN       0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE   0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD         0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN       0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME       0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT     11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16   0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32   0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64   0x0000C000 /* 64 VFs 2 queues per VF */
 
 /* Packet Buffer Initialization */
-#define IXGBE_TXPBSIZE_20KB     0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB    0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_RXPBSIZE_MAX      0x00080000 /* 512KB Packet Buffer*/
-#define IXGBE_TXPBSIZE_MAX      0x00028000 /* 160KB Packet Buffer*/
-
-#define IXGBE_TXPKT_SIZE_MAX    0xA        /* Max Tx Packet size  */
+#define IXGBE_MAX_PACKET_BUFFERS       8
+
+#define IXGBE_TXPBSIZE_20KB    0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB    0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB    0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB    0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB    0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB   0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX     0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX     0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX   0xA /* Max Tx Packet size */
 #define IXGBE_MAX_PB           8
 
 /* Packet buffer allocation strategies */
 enum {
-       PBA_STRATEGY_EQUAL      = 0,    /* Distribute PB space equally */
+       PBA_STRATEGY_EQUAL      = 0, /* Distribute PB space equally */
 #define PBA_STRATEGY_EQUAL     PBA_STRATEGY_EQUAL
-       PBA_STRATEGY_WEIGHTED   = 1,    /* Weight front half of TCs */
+       PBA_STRATEGY_WEIGHTED   = 1, /* Weight front half of TCs */
 #define PBA_STRATEGY_WEIGHTED  PBA_STRATEGY_WEIGHTED
 };
 
 /* Transmit Flow Control status */
-#define IXGBE_TFCS_TXOFF         0x00000001
-#define IXGBE_TFCS_TXOFF0        0x00000100
-#define IXGBE_TFCS_TXOFF1        0x00000200
-#define IXGBE_TFCS_TXOFF2        0x00000400
-#define IXGBE_TFCS_TXOFF3        0x00000800
-#define IXGBE_TFCS_TXOFF4        0x00001000
-#define IXGBE_TFCS_TXOFF5        0x00002000
-#define IXGBE_TFCS_TXOFF6        0x00004000
-#define IXGBE_TFCS_TXOFF7        0x00008000
+#define IXGBE_TFCS_TXOFF       0x00000001
+#define IXGBE_TFCS_TXOFF0      0x00000100
+#define IXGBE_TFCS_TXOFF1      0x00000200
+#define IXGBE_TFCS_TXOFF2      0x00000400
+#define IXGBE_TFCS_TXOFF3      0x00000800
+#define IXGBE_TFCS_TXOFF4      0x00001000
+#define IXGBE_TFCS_TXOFF5      0x00002000
+#define IXGBE_TFCS_TXOFF6      0x00004000
+#define IXGBE_TFCS_TXOFF7      0x00008000
 
 /* TCP Timer */
-#define IXGBE_TCPTIMER_KS            0x00000100
-#define IXGBE_TCPTIMER_COUNT_ENABLE  0x00000200
-#define IXGBE_TCPTIMER_COUNT_FINISH  0x00000400
-#define IXGBE_TCPTIMER_LOOP          0x00000800
-#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+#define IXGBE_TCPTIMER_KS              0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE    0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH    0x00000400
+#define IXGBE_TCPTIMER_LOOP            0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK   0x000000FF
 
 /* HLREG0 Bit Masks */
-#define IXGBE_HLREG0_TXCRCEN      0x00000001   /* bit  0 */
-#define IXGBE_HLREG0_RXCRCSTRP    0x00000002   /* bit  1 */
-#define IXGBE_HLREG0_JUMBOEN      0x00000004   /* bit  2 */
-#define IXGBE_HLREG0_TXPADEN      0x00000400   /* bit 10 */
-#define IXGBE_HLREG0_TXPAUSEEN    0x00001000   /* bit 12 */
-#define IXGBE_HLREG0_RXPAUSEEN    0x00004000   /* bit 14 */
-#define IXGBE_HLREG0_LPBK         0x00008000   /* bit 15 */
-#define IXGBE_HLREG0_MDCSPD       0x00010000   /* bit 16 */
-#define IXGBE_HLREG0_CONTMDC      0x00020000   /* bit 17 */
-#define IXGBE_HLREG0_CTRLFLTR     0x00040000   /* bit 18 */
-#define IXGBE_HLREG0_PREPEND      0x00F00000   /* bits 20-23 */
-#define IXGBE_HLREG0_PRIPAUSEEN   0x01000000   /* bit 24 */
-#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000   /* bits 25-26 */
-#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000   /* bit 27 */
-#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000   /* bit 28 */
+#define IXGBE_HLREG0_TXCRCEN           0x00000001 /* bit  0 */
+#define IXGBE_HLREG0_RXCRCSTRP         0x00000002 /* bit  1 */
+#define IXGBE_HLREG0_JUMBOEN           0x00000004 /* bit  2 */
+#define IXGBE_HLREG0_TXPADEN           0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN         0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN         0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK              0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD            0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC           0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR          0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND           0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN                0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA      0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN      0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN      0x10000000 /* bit 28 */
 
 /* VMD_CTL bitmasks */
-#define IXGBE_VMD_CTL_VMDQ_EN     0x00000001
-#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+#define IXGBE_VMD_CTL_VMDQ_EN          0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER      0x00000002
 
 /* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL  0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN     0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE  0x00000001  /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT 7
-#define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+#define IXGBE_VT_CTL_DIS_DEFPL         0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN            0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE         0x00000001  /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT                7
+#define IXGBE_VT_CTL_POOL_MASK         (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
 
 /* VMOLR bitmasks */
-#define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE       0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE        0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM         0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE         0x10000000 /* multicast promiscuous */
+#define IXGBE_VMOLR_AUPE       0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE      0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE       0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM                0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE                0x10000000 /* multicast promiscuous */
 
 /* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL   0xFFFFFFFF
+#define IXGBE_VFRE_ENABLE_ALL  0xFFFFFFFF
 
-#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT  200 /* Number of retries to clear RSTI */
 
 /* RDHMPN and TDHMPN bitmasks */
-#define IXGBE_RDHMPN_RDICADDR       0x007FF800
-#define IXGBE_RDHMPN_RDICRDREQ      0x00800000
-#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
-#define IXGBE_TDHMPN_TDICADDR       0x003FF800
-#define IXGBE_TDHMPN_TDICRDREQ      0x00800000
-#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
-
-#define IXGBE_RDMAM_MEM_SEL_SHIFT   13
-#define IXGBE_RDMAM_DWORD_SHIFT     9
-#define IXGBE_RDMAM_DESC_COMP_FIFO  1
-#define IXGBE_RDMAM_DFC_CMD_FIFO    2
-#define IXGBE_RDMAM_TCN_STATUS_RAM  4
-#define IXGBE_RDMAM_WB_COLL_FIFO    5
-#define IXGBE_RDMAM_QSC_CNT_RAM     6
-#define IXGBE_RDMAM_QSC_QUEUE_CNT   8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM   0xA
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE     135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT     4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE      48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT      7
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE    256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT    9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE      8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT      4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE       64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT       4
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE     32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT     4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE     128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT     8
-
-#define IXGBE_TXDESCIC_READY        0x80000000
+#define IXGBE_RDHMPN_RDICADDR          0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ         0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT    11
+#define IXGBE_TDHMPN_TDICADDR          0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ         0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT    11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT              13
+#define IXGBE_RDMAM_DWORD_SHIFT                        9
+#define IXGBE_RDMAM_DESC_COMP_FIFO             1
+#define IXGBE_RDMAM_DFC_CMD_FIFO               2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR            3
+#define IXGBE_RDMAM_TCN_STATUS_RAM             4
+#define IXGBE_RDMAM_WB_COLL_FIFO               5
+#define IXGBE_RDMAM_QSC_CNT_RAM                        6
+#define IXGBE_RDMAM_QSC_FCOE_RAM               7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT              8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM              0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM                        0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE                135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT                4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE         48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT         7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE      32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT      4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE       256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT       9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE         8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT         4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE          64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT          4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE         512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT         5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE                32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT                4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE                128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT                8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE          32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT          8
+
+#define IXGBE_TXDESCIC_READY   0x80000000
 
 /* Receive Checksum Control */
-#define IXGBE_RXCSUM_IPPCSE     0x00001000   /* IP payload checksum enable */
-#define IXGBE_RXCSUM_PCSD       0x00002000   /* packet checksum disabled */
+#define IXGBE_RXCSUM_IPPCSE    0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD      0x00002000 /* packet checksum disabled */
 
 /* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE        0x80000000  /* XON enable */
-#define IXGBE_FCRTH_FCEN        0x80000000  /* Packet buffer fc enable */
+#define IXGBE_FCRTL_XONE       0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN       0x80000000 /* Packet buffer fc enable */
 
 /* PAP bit masks*/
-#define IXGBE_PAP_TXPAUSECNT_MASK   0x0000FFFF /* Pause counter mask */
+#define IXGBE_PAP_TXPAUSECNT_MASK      0x0000FFFF /* Pause counter mask */
 
 /* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recycle Mode enable */
+#define IXGBE_RMCS_RRM                 0x00000002 /* Rx Recycle Mode enable */
 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RMCS_RAC          0x00000004
-#define IXGBE_RMCS_DFP          IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
-#define IXGBE_RMCS_TFCE_802_3X         0x00000008 /* Tx Priority FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY       0x00000010 /* Tx Priority FC ena */
-#define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */
+#define IXGBE_RMCS_RAC                 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP                 IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X         0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY       0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS              0x00000040 /* Arbitration disable bit */
 
 /* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X         0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY       0x00000010 /* Tx priority FC enable */
+#define IXGBE_FCCFG_TFCE_802_3X                0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY      0x00000010 /* Tx priority FC enable */
 
 /* Interrupt register bitmasks */
 
 /* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR     0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS      0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI          0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX      0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC      0x00200000 /* PN Threshold */
-#define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
-#define IXGBE_EICR_TS           0x00800000 /* Thermal Sensor Event */
-#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_ECC          0x10000000 /* ECC Error */
-#define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
-#define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
-#define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
-#define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+#define IXGBE_EICR_RTX_QUEUE   0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR    0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS     0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI         0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX     0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC         0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC     0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG         0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS          0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC    0x01000000 /* Timesync Event */
+#define IXGBE_EICR_GPI_SDP0    0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1    0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2    0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC         0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR                0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER                0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER   0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER       0x80000000 /* Interrupt Cause Active */
 
 /* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EICS_RX_MISS      IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
-#define IXGBE_EICS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
-#define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
-#define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
-#define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */
-#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
-#define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
-#define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+#define IXGBE_EICS_RTX_QUEUE   IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR    IXGBE_EICR_FLOW_DIR  /* FDir Exception */
+#define IXGBE_EICS_RX_MISS     IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI         IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX     IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC         IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG         IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC    IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EICS_GPI_SDP0    IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1    IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2    IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC         IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_PBUR                IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER                IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER   IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER       IXGBE_EICR_OTHER /* INT Cause Active */
 
 /* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EIMS_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
-#define IXGBE_EIMS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EIMS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
-#define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
-#define IXGBE_EIMS_TS           IXGBE_EICR_TS        /* Thermel Sensor Event */
-#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_ECC          IXGBE_EICR_ECC       /* ECC Error */
-#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
-#define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
-#define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+#define IXGBE_EIMS_RTX_QUEUE   IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR    IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS     IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI         IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX     IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC         IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG         IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS          IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_TIMESYNC    IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMS_GPI_SDP0    IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1    IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2    IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC         IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_PBUR                IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER                IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER   IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER       IXGBE_EICR_OTHER /* INT Cause Active */
 
 /* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EIMC_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
-#define IXGBE_EIMC_PCI          IXGBE_EICR_PCI       /* PCI Exception */
-#define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
-#define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
-#define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_ECC          IXGBE_EICR_ECC       /* ECC Error */
-#define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
-#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */
-#define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+#define IXGBE_EIMC_RTX_QUEUE   IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR    IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS     IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI         IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX     IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC         IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG         IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC    IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMC_GPI_SDP0    IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1    IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2    IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC         IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_PBUR                IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER                IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER   IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER       IXGBE_EICR_OTHER /* INT Cause Active */
 
 #define IXGBE_EIMS_ENABLE_MASK ( \
-                                IXGBE_EIMS_RTX_QUEUE       | \
-                                IXGBE_EIMS_LSC             | \
-                                IXGBE_EIMS_TCP_TIMER       | \
-                                IXGBE_EIMS_OTHER)
+                               IXGBE_EIMS_RTX_QUEUE    | \
+                               IXGBE_EIMS_LSC          | \
+                               IXGBE_EIMS_TCP_TIMER    | \
+                               IXGBE_EIMS_OTHER)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
-#define IXGBE_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
-#define IXGBE_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
-#define IXGBE_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
-#define IXGBE_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
-#define IXGBE_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
-#define IXGBE_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
-#define IXGBE_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
-#define IXGBE_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
-#define IXGBE_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */
-#define IXGBE_IMIR_SIZE_BP_82599  0x00001000 /* Packet size bypass */
-#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIR_CTRL_BP_82599  0x00080000 /* Bypass check of control bits */
-#define IXGBE_IMIR_LLI_EN_82599   0x00100000 /* Enables low latency Int */
-#define IXGBE_IMIR_RX_QUEUE_MASK_82599  0x0000007F /* Rx Queue Mask */
-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
-#define IXGBE_IMIRVP_PRIORITY_MASK      0x00000007 /* VLAN priority mask */
-#define IXGBE_IMIRVP_PRIORITY_EN        0x00000008 /* VLAN priority enable */
-
-#define IXGBE_MAX_FTQF_FILTERS          128
-#define IXGBE_FTQF_PROTOCOL_MASK        0x00000003
-#define IXGBE_FTQF_PROTOCOL_TCP         0x00000000
-#define IXGBE_FTQF_PROTOCOL_UDP         0x00000001
-#define IXGBE_FTQF_PROTOCOL_SCTP        2
-#define IXGBE_FTQF_PRIORITY_MASK        0x00000007
-#define IXGBE_FTQF_PRIORITY_SHIFT       2
-#define IXGBE_FTQF_POOL_MASK            0x0000003F
-#define IXGBE_FTQF_POOL_SHIFT           8
-#define IXGBE_FTQF_5TUPLE_MASK_MASK     0x0000001F
-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT    25
-#define IXGBE_FTQF_SOURCE_ADDR_MASK     0x1E
-#define IXGBE_FTQF_DEST_ADDR_MASK       0x1D
-#define IXGBE_FTQF_SOURCE_PORT_MASK     0x1B
-#define IXGBE_FTQF_DEST_PORT_MASK       0x17
-#define IXGBE_FTQF_PROTOCOL_COMP_MASK   0x0F
-#define IXGBE_FTQF_POOL_MASK_EN         0x40000000
-#define IXGBE_FTQF_QUEUE_ENABLE         0x80000000
+#define IXGBE_IMIR_PORT_IM_EN  0x00010000  /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP     0x00020000  /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP  0x00001000  /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000  /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000  /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000  /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000  /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000  /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000  /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP  0x00080000  /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599       0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599      0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599      0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599      0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599      0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599      0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599      0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599       0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599                0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599        21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK     0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN       0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS         128
+#define IXGBE_FTQF_PROTOCOL_MASK       0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP                0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP                0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP       2
+#define IXGBE_FTQF_PRIORITY_MASK       0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT      2
+#define IXGBE_FTQF_POOL_MASK           0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT          8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK    0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT   25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK    0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK      0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK    0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK      0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK  0x0F
+#define IXGBE_FTQF_POOL_MASK_EN                0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE                0x80000000
 
 /* Interrupt clear mask */
-#define IXGBE_IRQ_CLEAR_MASK    0xFFFFFFFF
+#define IXGBE_IRQ_CLEAR_MASK   0xFFFFFFFF
 
 /* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_REG_NUM      25
-#define IXGBE_IVAR_REG_NUM_82599       64
-#define IXGBE_IVAR_TXRX_ENTRY   96
-#define IXGBE_IVAR_RX_ENTRY     64
-#define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i))
-#define IXGBE_IVAR_TX_QUEUE(_i)    (64 + (_i))
-#define IXGBE_IVAR_TX_ENTRY     32
+#define IXGBE_IVAR_REG_NUM             25
+#define IXGBE_IVAR_REG_NUM_82599       64
+#define IXGBE_IVAR_TXRX_ENTRY          96
+#define IXGBE_IVAR_RX_ENTRY            64
+#define IXGBE_IVAR_RX_QUEUE(_i)                (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i)                (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY            32
 
-#define IXGBE_IVAR_TCP_TIMER_INDEX       96 /* 0 based index */
-#define IXGBE_IVAR_OTHER_CAUSES_INDEX    97 /* 0 based index */
+#define IXGBE_IVAR_TCP_TIMER_INDEX     96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX  97 /* 0 based index */
 
-#define IXGBE_MSIX_VECTOR(_i)   (0 + (_i))
+#define IXGBE_MSIX_VECTOR(_i)          (0 + (_i))
 
-#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
+#define IXGBE_IVAR_ALLOC_VAL           0x80 /* Interrupt Allocation valid */
 
 /* ETYPE Queue Filter/Select Bit Masks */
-#define IXGBE_MAX_ETQF_FILTERS  8
-#define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */
-#define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */
-#define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
-#define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
-
-#define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */
-#define IXGBE_ETQS_RX_QUEUE_SHIFT       16
-#define IXGBE_ETQS_LLI          0x20000000 /* bit 29 */
-#define IXGBE_ETQS_QUEUE_EN     0x80000000 /* bit 31 */
+#define IXGBE_MAX_ETQF_FILTERS         8
+#define IXGBE_ETQF_FCOE                        0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN                 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_1588                        0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN           0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE         (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT          20
+
+#define IXGBE_ETQS_RX_QUEUE            0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT      16
+#define IXGBE_ETQS_LLI                 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN            0x80000000 /* bit 31 */
 
 /*
  * ETQF filter list: one static filter per filter consumer. This is
- *                   to avoid filter collisions later. Add new filters
- *                   here!!
+ *                to avoid filter collisions later. Add new filters
+ *                here!!
  *
  * Current filters:
- *    EAPOL 802.1x (0x888e): Filter 0
- *    FCoE (0x8906):         Filter 2
- *    1588 (0x88f7):         Filter 3
- *    FIP  (0x8914):         Filter 4
+ *     EAPOL 802.1x (0x888e): Filter 0
+ *     FCoE (0x8906):   Filter 2
+ *     1588 (0x88f7):   Filter 3
+ *     FIP  (0x8914):   Filter 4
  */
-#define IXGBE_ETQF_FILTER_EAPOL          0
-#define IXGBE_ETQF_FILTER_FCOE           2
-#define IXGBE_ETQF_FILTER_1588           3
-#define IXGBE_ETQF_FILTER_FIP            4
+#define IXGBE_ETQF_FILTER_EAPOL                0
+#define IXGBE_ETQF_FILTER_FCOE         2
+#define IXGBE_ETQF_FILTER_1588         3
+#define IXGBE_ETQF_FILTER_FIP          4
 /* VLAN Control Bit Masks */
-#define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
-#define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
-#define IXGBE_VLNCTRL_CFIEN     0x20000000  /* bit 29 */
-#define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */
-#define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */
+#define IXGBE_VLNCTRL_VET              0x0000FFFF  /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI              0x10000000  /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN            0x20000000  /* bit 29 */
+#define IXGBE_VLNCTRL_VFE              0x40000000  /* bit 30 */
+#define IXGBE_VLNCTRL_VME              0x80000000  /* bit 31 */
 
 /* VLAN pool filtering masks */
-#define IXGBE_VLVF_VIEN         0x80000000  /* filter is valid */
-#define IXGBE_VLVF_ENTRIES      64
-#define IXGBE_VLVF_VLANID_MASK  0x00000FFF
-
+#define IXGBE_VLVF_VIEN                        0x80000000  /* filter is valid */
+#define IXGBE_VLVF_ENTRIES             64
+#define IXGBE_VLVF_VLANID_MASK         0x00000FFF
 /* Per VF Port VLAN insertion rules */
-#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define IXGBE_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */
+#define IXGBE_VMVIR_VLANA_DEFAULT      0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER                0x80000000 /* Never insert VLAN tag */
 
-#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE  0x8100  /* 802.1q protocol */
 
 /* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID         0x0000000C /* LAN ID */
-#define IXGBE_STATUS_LAN_ID_SHIFT   2          /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO            0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_LAN_ID            0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT      2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO               0x00080000 /* GIO Master Ena Status */
 
-#define IXGBE_STATUS_LAN_ID_0   0x00000000 /* LAN ID 0 */
-#define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
+#define IXGBE_STATUS_LAN_ID_0  0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1  0x00000004 /* LAN ID 1 */
 
 /* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
-#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
-#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
-#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
-#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
-#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
-#define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR     0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP0                0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1                0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2                0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3                0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4                0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5                0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6                0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP0_DIR    0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR    0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP4_DIR    0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR    0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
 
 /* LEDCTL Bit Masks */
-#define IXGBE_LED_IVRT_BASE      0x00000040
-#define IXGBE_LED_BLINK_BASE     0x00000080
-#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
-#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i))
-#define IXGBE_LED_IVRT(_i)       IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
-#define IXGBE_LED_BLINK(_i)      IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
-#define IXGBE_LED_MODE_MASK(_i)  IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_LED_IVRT_BASE            0x00000040
+#define IXGBE_LED_BLINK_BASE           0x00000080
+#define IXGBE_LED_MODE_MASK_BASE       0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i)    (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i)       (8*(_i))
+#define IXGBE_LED_IVRT(_i)     IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i)    IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i)        IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
 
 /* LED modes */
-#define IXGBE_LED_LINK_UP       0x0
-#define IXGBE_LED_LINK_10G      0x1
-#define IXGBE_LED_MAC           0x2
-#define IXGBE_LED_FILTER        0x3
-#define IXGBE_LED_LINK_ACTIVE   0x4
-#define IXGBE_LED_LINK_1G       0x5
-#define IXGBE_LED_ON            0xE
-#define IXGBE_LED_OFF           0xF
+#define IXGBE_LED_LINK_UP      0x0
+#define IXGBE_LED_LINK_10G     0x1
+#define IXGBE_LED_MAC          0x2
+#define IXGBE_LED_FILTER       0x3
+#define IXGBE_LED_LINK_ACTIVE  0x4
+#define IXGBE_LED_LINK_1G      0x5
+#define IXGBE_LED_ON           0xE
+#define IXGBE_LED_OFF          0xF
 
 /* AUTOC Bit Masks */
 #define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
-#define IXGBE_AUTOC_KX4_SUPP    0x80000000
-#define IXGBE_AUTOC_KX_SUPP     0x40000000
-#define IXGBE_AUTOC_PAUSE       0x30000000
-#define IXGBE_AUTOC_ASM_PAUSE   0x20000000
-#define IXGBE_AUTOC_SYM_PAUSE   0x10000000
-#define IXGBE_AUTOC_RF          0x08000000
-#define IXGBE_AUTOC_PD_TMR      0x06000000
-#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
-#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
-#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
-#define IXGBE_AUTOC_FECA        0x00040000
-#define IXGBE_AUTOC_FECR        0x00020000
-#define IXGBE_AUTOC_KR_SUPP     0x00010000
-#define IXGBE_AUTOC_AN_RESTART  0x00001000
-#define IXGBE_AUTOC_FLU         0x00000001
-#define IXGBE_AUTOC_LMS_SHIFT   13
-#define IXGBE_AUTOC_LMS_10G_SERIAL      (0x3 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR       (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_SGMII_1G_100M   (0x5 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_MASK            (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN   (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN  (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN           (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN          (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN    (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE     (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC_1G_PMA_PMD_MASK    0x00000200
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9
-#define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7
-#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-
-#define IXGBE_MACC_FLU       0x00000001
-#define IXGBE_MACC_FSV_10G   0x00030000
-#define IXGBE_MACC_FS        0x00040000
-#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+#define IXGBE_AUTOC_KX4_SUPP   0x80000000
+#define IXGBE_AUTOC_KX_SUPP    0x40000000
+#define IXGBE_AUTOC_PAUSE      0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE  0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE  0x10000000
+#define IXGBE_AUTOC_RF         0x08000000
+#define IXGBE_AUTOC_PD_TMR     0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE        0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT        0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN        0x007C0000
+#define IXGBE_AUTOC_FECA       0x00040000
+#define IXGBE_AUTOC_FECR       0x00020000
+#define IXGBE_AUTOC_KR_SUPP    0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU                0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT  13
+#define IXGBE_AUTOC_LMS_10G_SERIAL     (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR      (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M  (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN        (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII        (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK           (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN  (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN          (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN         (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN   (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE    (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK    0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7
+#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK        0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK   0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT  16
+#define IXGBE_AUTOC2_10G_KR    (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI   (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI   (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+
+#define IXGBE_MACC_FLU         0x00000001
+#define IXGBE_MACC_FSV_10G     0x00030000
+#define IXGBE_MACC_FS          0x00040000
+#define IXGBE_MAC_RX2TX_LPBK   0x00000002
 
 /* LINKS Bit Masks */
-#define IXGBE_LINKS_KX_AN_COMP  0x80000000
-#define IXGBE_LINKS_UP          0x40000000
-#define IXGBE_LINKS_SPEED       0x20000000
-#define IXGBE_LINKS_MODE        0x18000000
-#define IXGBE_LINKS_RX_MODE     0x06000000
-#define IXGBE_LINKS_TX_MODE     0x01800000
-#define IXGBE_LINKS_XGXS_EN     0x00400000
-#define IXGBE_LINKS_SGMII_EN    0x02000000
-#define IXGBE_LINKS_PCS_1G_EN   0x00200000
-#define IXGBE_LINKS_1G_AN_EN    0x00100000
-#define IXGBE_LINKS_KX_AN_IDLE  0x00080000
-#define IXGBE_LINKS_1G_SYNC     0x00040000
-#define IXGBE_LINKS_10G_ALIGN   0x00020000
-#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
-#define IXGBE_LINKS_TL_FAULT    0x00001000
-#define IXGBE_LINKS_SIGNAL      0x00000F00
-
-#define IXGBE_LINKS_SPEED_82599     0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599  0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
-#define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
-#define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
-
-#define IXGBE_LINKS2_AN_SUPPORTED   0x00000040
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP         0x40000000
+#define IXGBE_LINKS_SPEED      0x20000000
+#define IXGBE_LINKS_MODE       0x18000000
+#define IXGBE_LINKS_RX_MODE    0x06000000
+#define IXGBE_LINKS_TX_MODE    0x01800000
+#define IXGBE_LINKS_XGXS_EN    0x00400000
+#define IXGBE_LINKS_SGMII_EN   0x02000000
+#define IXGBE_LINKS_PCS_1G_EN  0x00200000
+#define IXGBE_LINKS_1G_AN_EN   0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC    0x00040000
+#define IXGBE_LINKS_10G_ALIGN  0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC      0x00017000
+#define IXGBE_LINKS_TL_FAULT           0x00001000
+#define IXGBE_LINKS_SIGNAL             0x00000F00
+
+#define IXGBE_LINKS_SPEED_82599                0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599    0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599     0x20000000
+#define IXGBE_LINKS_SPEED_100_82599    0x10000000
+#define IXGBE_LINK_UP_TIME             90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME            45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED      0x00000040
 
 /* PCS1GLSTA Bit Masks */
-#define IXGBE_PCS1GLSTA_LINK_OK         1
-#define IXGBE_PCS1GLSTA_SYNK_OK         0x10
-#define IXGBE_PCS1GLSTA_AN_COMPLETE     0x10000
-#define IXGBE_PCS1GLSTA_AN_PAGE_RX      0x20000
-#define IXGBE_PCS1GLSTA_AN_TIMED_OUT    0x40000
-#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
-#define IXGBE_PCS1GLSTA_AN_ERROR_RWS    0x100000
+#define IXGBE_PCS1GLSTA_LINK_OK                1
+#define IXGBE_PCS1GLSTA_SYNK_OK                0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE    0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX     0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT   0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT        0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS   0x100000
 
-#define IXGBE_PCS1GANA_SYM_PAUSE        0x80
-#define IXGBE_PCS1GANA_ASM_PAUSE        0x100
+#define IXGBE_PCS1GANA_SYM_PAUSE       0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE       0x100
 
 /* PCS1GLCTL Bit Masks */
-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN  0x00040000 /* PCS 1G autoneg to en */
-#define IXGBE_PCS1GLCTL_FLV_LINK_UP     1
-#define IXGBE_PCS1GLCTL_FORCE_LINK      0x20
-#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH  0x40
-#define IXGBE_PCS1GLCTL_AN_ENABLE       0x10000
-#define IXGBE_PCS1GLCTL_AN_RESTART      0x20000
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP    1
+#define IXGBE_PCS1GLCTL_FORCE_LINK     0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE      0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART     0x20000
 
 /* ANLP1 Bit Masks */
-#define IXGBE_ANLP1_PAUSE               0x0C00
-#define IXGBE_ANLP1_SYM_PAUSE           0x0400
-#define IXGBE_ANLP1_ASM_PAUSE           0x0800
-#define IXGBE_ANLP1_AN_STATE_MASK       0x000f0000
+#define IXGBE_ANLP1_PAUSE              0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE          0x0400
+#define IXGBE_ANLP1_ASM_PAUSE          0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK      0x000f0000
 
 /* SW Semaphore Register bitmasks */
-#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
-#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+#define IXGBE_SWSM_SMBI                0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI     0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG                0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP      0x80000000 /* Register Semaphore bit 31 */
 
 /* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM     0x0001
-#define IXGBE_GSSR_PHY0_SM    0x0002
-#define IXGBE_GSSR_PHY1_SM    0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM   0x0010
-#define IXGBE_GSSR_SW_MNG_SM  0x0400
+#define IXGBE_GSSR_EEP_SM      0x0001
+#define IXGBE_GSSR_PHY0_SM     0x0002
+#define IXGBE_GSSR_PHY1_SM     0x0004
+#define IXGBE_GSSR_MAC_CSR_SM  0x0008
+#define IXGBE_GSSR_FLASH_SM    0x0010
+#define IXGBE_GSSR_SW_MNG_SM   0x0400
 
 /* FW Status register bitmask */
-#define IXGBE_FWSTS_FWRI    0x00000200 /* Firmware Reset Indication */
+#define IXGBE_FWSTS_FWRI       0x00000200 /* Firmware Reset Indication */
 
 /* EEC Register */
-#define IXGBE_EEC_SK        0x00000001 /* EEPROM Clock */
-#define IXGBE_EEC_CS        0x00000002 /* EEPROM Chip Select */
-#define IXGBE_EEC_DI        0x00000004 /* EEPROM Data In */
-#define IXGBE_EEC_DO        0x00000008 /* EEPROM Data Out */
-#define IXGBE_EEC_FWE_MASK  0x00000030 /* FLASH Write Enable */
-#define IXGBE_EEC_FWE_DIS   0x00000010 /* Disable FLASH writes */
-#define IXGBE_EEC_FWE_EN    0x00000020 /* Enable FLASH writes */
-#define IXGBE_EEC_FWE_SHIFT 4
-#define IXGBE_EEC_REQ       0x00000040 /* EEPROM Access Request */
-#define IXGBE_EEC_GNT       0x00000080 /* EEPROM Access Grant */
-#define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */
-#define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */
-#define IXGBE_EEC_FLUP      0x00800000 /* Flash update command */
-#define IXGBE_EEC_SEC1VAL   0x02000000 /* Sector 1 Valid */
-#define IXGBE_EEC_FLUDONE   0x04000000 /* Flash update done */
+#define IXGBE_EEC_SK           0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS           0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI           0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO           0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK     0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS      0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN       0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT    4
+#define IXGBE_EEC_REQ          0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT          0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES         0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD          0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP         0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL      0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE      0x04000000 /* Flash update done */
 /* EEPROM Addressing bits based on type (0-small, 1-large) */
-#define IXGBE_EEC_ADDR_SIZE 0x00000400
-#define IXGBE_EEC_SIZE      0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
+#define IXGBE_EEC_ADDR_SIZE    0x00000400
+#define IXGBE_EEC_SIZE         0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR    0x00003FFF /* EERD allows 14 bits for addr. */
 
-#define IXGBE_EEC_SIZE_SHIFT          11
-#define IXGBE_EEPROM_WORD_SIZE_SHIFT  6
-#define IXGBE_EEPROM_OPCODE_BITS      8
+#define IXGBE_EEC_SIZE_SHIFT           11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT   6
+#define IXGBE_EEPROM_OPCODE_BITS       8
 
 /* Part Number String Length */
-#define IXGBE_PBANUM_LENGTH 11
+#define IXGBE_PBANUM_LENGTH    11
 
 /* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD  0xFAFA
-#define IXGBE_EEPROM_CHECKSUM   0x3F
-#define IXGBE_EEPROM_SUM        0xBABA
-#define IXGBE_PCIE_ANALOG_PTR   0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR           0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR    0x05
-#define IXGBE_PCIE_GENERAL_PTR  0x06
-#define IXGBE_PCIE_CONFIG0_PTR  0x07
-#define IXGBE_PCIE_CONFIG1_PTR  0x08
-#define IXGBE_CORE0_PTR         0x09
-#define IXGBE_CORE1_PTR         0x0A
-#define IXGBE_MAC0_PTR          0x0B
-#define IXGBE_MAC1_PTR          0x0C
-#define IXGBE_CSR0_CONFIG_PTR   0x0D
-#define IXGBE_CSR1_CONFIG_PTR   0x0E
-#define IXGBE_FW_PTR            0x0F
-#define IXGBE_PBANUM0_PTR       0x15
-#define IXGBE_PBANUM1_PTR       0x16
-#define IXGBE_FREE_SPACE_PTR    0X3E
-#define IXGBE_SAN_MAC_ADDR_PTR  0x28
-#define IXGBE_DEVICE_CAPS       0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
-#define IXGBE_PCIE_MSIX_82599_CAPS  0x72
-#define IXGBE_PCIE_MSIX_82598_CAPS  0x62
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM  0x3F
+#define IXGBE_EEPROM_SUM       0xBABA
+#define IXGBE_PCIE_ANALOG_PTR  0x03
+#define IXGBE_ATLAS0_CONFIG_PTR        0x04
+#define IXGBE_PHY_PTR          0x04
+#define IXGBE_ATLAS1_CONFIG_PTR        0x05
+#define IXGBE_OPTION_ROM_PTR   0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR                0x09
+#define IXGBE_CORE1_PTR                0x0A
+#define IXGBE_MAC0_PTR         0x0B
+#define IXGBE_MAC1_PTR         0x0C
+#define IXGBE_CSR0_CONFIG_PTR  0x0D
+#define IXGBE_CSR1_CONFIG_PTR  0x0E
+#define IXGBE_FW_PTR           0x0F
+#define IXGBE_PBANUM0_PTR      0x15
+#define IXGBE_PBANUM1_PTR      0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR   0X3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG                  0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK    0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT   6
+#define IXGBE_ETS_TYPE_MASK            0x0038
+#define IXGBE_ETS_TYPE_SHIFT           3
+#define IXGBE_ETS_TYPE_EMC             0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK     0x0007
+#define IXGBE_ETS_DATA_LOC_MASK                0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT       10
+#define IXGBE_ETS_DATA_INDEX_MASK      0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT     8
+#define IXGBE_ETS_DATA_HTHRESH_MASK    0x00FF
+
+#define IXGBE_SAN_MAC_ADDR_PTR         0x28
+#define IXGBE_DEVICE_CAPS              0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR   0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS     0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599   0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS     0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598   0x13
 
 /* MSI-X capability fields masks */
-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK    0x7FF
 
 /* Legacy EEPROM word offsets */
-#define IXGBE_ISCSI_BOOT_CAPS           0x0033
-#define IXGBE_ISCSI_SETUP_PORT_0        0x0030
-#define IXGBE_ISCSI_SETUP_PORT_1        0x0034
+#define IXGBE_ISCSI_BOOT_CAPS          0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0       0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1       0x0034
 
 /* EEPROM Commands - SPI */
-#define IXGBE_EEPROM_MAX_RETRY_SPI      5000 /* Max wait 5ms for RDY signal */
-#define IXGBE_EEPROM_STATUS_RDY_SPI     0x01
-#define IXGBE_EEPROM_READ_OPCODE_SPI    0x03  /* EEPROM read opcode */
-#define IXGBE_EEPROM_WRITE_OPCODE_SPI   0x02  /* EEPROM write opcode */
-#define IXGBE_EEPROM_A8_OPCODE_SPI      0x08  /* opcode bit-3 = addr bit-8 */
-#define IXGBE_EEPROM_WREN_OPCODE_SPI    0x06  /* EEPROM set Write Ena latch */
+#define IXGBE_EEPROM_MAX_RETRY_SPI     5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI    0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI   0x03  /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI  0x02  /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI     0x08  /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI   0x06  /* EEPROM set Write Ena latch */
 /* EEPROM reset Write Enable latch */
-#define IXGBE_EEPROM_WRDI_OPCODE_SPI    0x04
-#define IXGBE_EEPROM_RDSR_OPCODE_SPI    0x05  /* EEPROM read Status reg */
-#define IXGBE_EEPROM_WRSR_OPCODE_SPI    0x01  /* EEPROM write Status reg */
-#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20  /* EEPROM ERASE 4KB */
-#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
-#define IXGBE_EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI   0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI   0x05  /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI   0x01  /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI        0x20  /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI       0xD8  /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI       0xDB  /* EEPROM ERASE 256B */
 
 /* EEPROM Read Register */
-#define IXGBE_EEPROM_RW_REG_DATA   16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_RW_REG_DONE   2  /* Offset to READ done bit */
-#define IXGBE_EEPROM_RW_REG_START  1  /* First bit to start operation */
-#define IXGBE_EEPROM_RW_ADDR_SHIFT 2  /* Shift to the address bits */
-#define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
-#define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
+#define IXGBE_EEPROM_RW_REG_DATA       16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE       2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START      1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT     2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE           1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ            0 /* Flag for polling for rd complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS    6
 
-#define IXGBE_EEPROM_PAGE_SIZE_MAX       128
-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
-#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+#define IXGBE_EEPROM_PAGE_SIZE_MAX     128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT       512 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT       256 /* words wr in burst */
 
 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
-#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#define IXGBE_EEPROM_GRANT_ATTEMPTS    1000 /* EEPROM attempts to gain grant */
 #endif
 
 #ifndef IXGBE_EERD_EEWR_ATTEMPTS
 /* Number of 5 microseconds we wait for EERD read and
  * EERW write to complete */
-#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#define IXGBE_EERD_EEWR_ATTEMPTS       100000
 #endif
 
 #ifndef IXGBE_FLUDONE_ATTEMPTS
 /* # attempts we wait for flush update to complete */
-#define IXGBE_FLUDONE_ATTEMPTS 20000
+#define IXGBE_FLUDONE_ATTEMPTS         20000
 #endif
 
-#define IXGBE_PCIE_CTRL2                 0x5   /* PCIe Control 2 Offset */
-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE    0x8   /* Dummy Function Enable */
-#define IXGBE_PCIE_CTRL2_LAN_DISABLE     0x2   /* LAN PCI Disable */
-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT  0x1   /* LAN Disable Select */
-
-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET  0x0
-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
-#define IXGBE_FW_LESM_PARAMETERS_PTR     0x2
-#define IXGBE_FW_LESM_STATE_1            0x1
-#define IXGBE_FW_LESM_STATE_ENABLED      0x8000 /* LESM Enable bit */
-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
-#define IXGBE_FW_PATCH_VERSION_4         0x7
-#define IXGBE_FCOE_IBA_CAPS_BLK_PTR         0x33 /* iSCSI/FCOE block */
-#define IXGBE_FCOE_IBA_CAPS_FCOE            0x20 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_BLK_PTR            0x17 /* iSCSI/FCOE block */
-#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET       0x0  /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE       0x1  /* FCOE flags enable bit */
-#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0 /* Alt. SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7 /* Alt. WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8 /* Alt. WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
-
-#define IXGBE_DEVICE_CAPS_WOL_PORT0_1  0x4 /* WoL supported on ports 0 & 1 */
-#define IXGBE_DEVICE_CAPS_WOL_PORT0    0x8 /* WoL supported on port 0 */
-#define IXGBE_DEVICE_CAPS_WOL_MASK     0xC /* Mask for WoL capabilities */
+#define IXGBE_PCIE_CTRL2               0x5   /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE  0x8   /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE   0x2   /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT        0x1   /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET                0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET                0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP                0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS                0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR           0x2
+#define IXGBE_FW_LESM_STATE_1                  0x1
+#define IXGBE_FW_LESM_STATE_ENABLED            0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR  0x4
+#define IXGBE_FW_PATCH_VERSION_4               0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR            0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE               0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR               0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET          0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE          0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR         0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET     0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET    0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET    0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET     0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET     0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC     0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN     0x1 /* Alt WWN base exists */
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1  0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0    0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK     0xC /* Mask for WoL capabilities */
 
 /* PCI Bus Info */
-#define IXGBE_PCI_DEVICE_STATUS   0xAA
-#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
-#define IXGBE_PCI_LINK_STATUS     0xB2
-#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
-#define IXGBE_PCI_LINK_WIDTH      0x3F0
-#define IXGBE_PCI_LINK_WIDTH_1    0x10
-#define IXGBE_PCI_LINK_WIDTH_2    0x20
-#define IXGBE_PCI_LINK_WIDTH_4    0x40
-#define IXGBE_PCI_LINK_WIDTH_8    0x80
-#define IXGBE_PCI_LINK_SPEED      0xF
-#define IXGBE_PCI_LINK_SPEED_2500 0x1
-#define IXGBE_PCI_LINK_SPEED_5000 0x2
-#define IXGBE_PCI_HEADER_TYPE_REGISTER  0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
+#define IXGBE_PCI_DEVICE_STATUS                0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING    0x0020
+#define IXGBE_PCI_LINK_STATUS          0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2      0xC8
+#define IXGBE_PCI_LINK_WIDTH           0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1         0x10
+#define IXGBE_PCI_LINK_WIDTH_2         0x20
+#define IXGBE_PCI_LINK_WIDTH_4         0x40
+#define IXGBE_PCI_LINK_WIDTH_8         0x80
+#define IXGBE_PCI_LINK_SPEED           0xF
+#define IXGBE_PCI_LINK_SPEED_2500      0x1
+#define IXGBE_PCI_LINK_SPEED_5000      0x2
+#define IXGBE_PCI_LINK_SPEED_8000      0x3
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC        0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
 
 /* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT       800
 
-/* Check whether address is multicast.  This is little-endian specific check.*/
+/* Check whether address is multicast. This is a little-endian-specific check. */
 #define IXGBE_IS_MULTICAST(Address) \
-                (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+               (bool)(((u8 *)(Address))[0] & ((u8)0x01))
 
 /* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address)                      \
-                ((((u8 *)(Address))[0] == ((u8)0xff)) && \
-                (((u8 *)(Address))[1] == ((u8)0xff)))
+#define IXGBE_IS_BROADCAST(Address) \
+               ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+               (((u8 *)(Address))[1] == ((u8)0xff)))
 
 /* RAH */
-#define IXGBE_RAH_VIND_MASK     0x003C0000
-#define IXGBE_RAH_VIND_SHIFT    18
-#define IXGBE_RAH_AV            0x80000000
-#define IXGBE_CLEAR_VMDQ_ALL    0xFFFFFFFF
+#define IXGBE_RAH_VIND_MASK    0x003C0000
+#define IXGBE_RAH_VIND_SHIFT   18
+#define IXGBE_RAH_AV           0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL   0xFFFFFFFF
 
 /* Header split receive */
-#define IXGBE_RFCTL_ISCSI_DIS       0x00000001
-#define IXGBE_RFCTL_ISCSI_DWC_MASK  0x0000003E
-#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
-#define IXGBE_RFCTL_NFSW_DIS        0x00000040
-#define IXGBE_RFCTL_NFSR_DIS        0x00000080
-#define IXGBE_RFCTL_NFS_VER_MASK    0x00000300
-#define IXGBE_RFCTL_NFS_VER_SHIFT   8
-#define IXGBE_RFCTL_NFS_VER_2       0
-#define IXGBE_RFCTL_NFS_VER_3       1
-#define IXGBE_RFCTL_NFS_VER_4       2
-#define IXGBE_RFCTL_IPV6_DIS        0x00000400
-#define IXGBE_RFCTL_IPV6_XSUM_DIS   0x00000800
-#define IXGBE_RFCTL_IPFRSP_DIS      0x00004000
-#define IXGBE_RFCTL_IPV6_EX_DIS     0x00010000
-#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define IXGBE_RFCTL_ISCSI_DIS          0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK     0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT    1
+#define IXGBE_RFCTL_RSC_DIS            0x00000010
+#define IXGBE_RFCTL_NFSW_DIS           0x00000040
+#define IXGBE_RFCTL_NFSR_DIS           0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK       0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT      8
+#define IXGBE_RFCTL_NFS_VER_2          0
+#define IXGBE_RFCTL_NFS_VER_3          1
+#define IXGBE_RFCTL_NFS_VER_4          2
+#define IXGBE_RFCTL_IPV6_DIS           0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS      0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS         0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS                0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS   0x00020000
 
 /* Transmit Config masks */
-#define IXGBE_TXDCTL_ENABLE     0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_TXDCTL_SWFLSH     0x04000000 /* Tx Desc. write-back flushing */
-#define IXGBE_TXDCTL_WTHRESH_SHIFT      16 /* shift to WTHRESH bits */
+#define IXGBE_TXDCTL_ENABLE            0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH            0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT     16 /* shift to WTHRESH bits */
 /* Enable short packet padding to 64 bytes */
-#define IXGBE_TX_PAD_ENABLE     0x00000400
-#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004  /* Allow jumbo frames */
+#define IXGBE_TX_PAD_ENABLE            0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE       0x00000004  /* Allow jumbo frames */
 /* This allows for 16K packets + 4k for vlan */
-#define IXGBE_MAX_FRAME_SZ      0x40040000
+#define IXGBE_MAX_FRAME_SZ             0x40040000
 
-#define IXGBE_TDWBAL_HEAD_WB_ENABLE   0x1      /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq# write-back enable */
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE    0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE  0x2 /* Tx seq# write-back enable */
 
 /* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_SWFLSH     0x04000000  /* Rx Desc. write-back flushing */
-#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
-#define IXGBE_RXDCTL_RLPML_EN   0x00008000
-#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
-
-#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
-#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
-#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
-#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
-#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
-#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+#define IXGBE_RXCTRL_RXEN              0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS            0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE            0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH            0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK         0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN          0x00008000
+#define IXGBE_RXDCTL_VME               0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSAUXC_EN_CLK            0x00000004
+#define IXGBE_TSAUXC_SYNCLK            0x00000008
+#define IXGBE_TSAUXC_SDP0_INT          0x00000040
+
+#define IXGBE_TSYNCTXCTL_VALID         0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED       0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID         0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK     0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2    0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1    0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED       0x00000010 /* Rx Timestamping enabled */
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK     0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG       0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG  0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG   0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG       0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK     0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG       0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG  0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG        0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG   0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG   0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG       0x0D00
+
+#define IXGBE_FCTRL_SBP                0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE                0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE                0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM                0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF       0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF                0x00002000 /* Discard Pause Frame */
 /* Receive Priority Flow Control Enable */
-#define IXGBE_FCTRL_RPFCE 0x00004000
-#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
-#define IXGBE_MFLCN_PMCF        0x00000001 /* Pass MAC Control Frames */
-#define IXGBE_MFLCN_DPF         0x00000002 /* Discard Pause Frame */
-#define IXGBE_MFLCN_RPFCE       0x00000004 /* Receive Priority FC Enable */
-#define IXGBE_MFLCN_RFCE        0x00000008 /* Receive FC Enable */
-#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */
-
-#define IXGBE_MFLCN_RPFCE_SHIFT                 4
+#define IXGBE_FCTRL_RPFCE      0x00004000
+#define IXGBE_FCTRL_RFCE       0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF       0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF                0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE      0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE       0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT        4 /* Rx Priority FC bitmap shift */
 
 /* Multiple Receive Queue Control */
-#define IXGBE_MRQC_RSSEN                 0x00000001  /* RSS Enable */
-#define IXGBE_MRQC_MRQE_MASK                    0xF /* Bits 3:0 */
-#define IXGBE_MRQC_RT8TCEN               0x00000002 /* 8 TC no RSS */
-#define IXGBE_MRQC_RT4TCEN               0x00000003 /* 4 TC no RSS */
-#define IXGBE_MRQC_RTRSS8TCEN            0x00000004 /* 8 TC w/ RSS */
-#define IXGBE_MRQC_RTRSS4TCEN            0x00000005 /* 4 TC w/ RSS */
-#define IXGBE_MRQC_VMDQEN                0x00000008 /* VMDq2 64 pools no RSS */
-#define IXGBE_MRQC_VMDQRSS32EN           0x0000000A /* VMDq2 32 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRSS64EN           0x0000000B /* VMDq2 64 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRT8TCEN           0x0000000C /* VMDq2/RT 16 pool 8 TC */
-#define IXGBE_MRQC_VMDQRT4TCEN           0x0000000D /* VMDq2/RT 32 pool 4 TC */
-#define IXGBE_MRQC_RSS_FIELD_MASK        0xFFFF0000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
-#define IXGBE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define IXGBE_MRQC_RSSEN       0x00000001  /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK   0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN     0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN     0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN  0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN  0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN      0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK      0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP  0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4      0x00020000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
-#define IXGBE_MRQC_RSS_FIELD_IPV6        0x00100000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX   0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6      0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP  0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP  0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP  0x00800000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
-#define IXGBE_MRQC_L3L4TXSWEN            0x00008000
+#define IXGBE_MRQC_L3L4TXSWEN          0x00008000
 
 /* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE     0x00000001
-#define IXGBE_QDE_IDX_MASK   0x00007F00
-#define IXGBE_QDE_IDX_SHIFT           8
-
-#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
-
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP                  0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED            0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK               0x18000000
+#define IXGBE_QDE_ENABLE       0x00000001
+#define IXGBE_QDE_IDX_MASK     0x00007F00
+#define IXGBE_QDE_IDX_SHIFT    8
+#define IXGBE_QDE_WRITE                0x00010000
+#define IXGBE_QDE_READ         0x00020000
+
+#define IXGBE_TXD_POPTS_IXSM   0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM   0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP      0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS     0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC       0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS       0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT     0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE      0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD      0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP         0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH        0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED   0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK      0x18000000
 /* Multiple Transmit Queue Command Register */
-#define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */
-#define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */
-#define IXGBE_MTQC_64Q_1PB      0x0 /* 64 queues 1 pack buffer */
-#define IXGBE_MTQC_32VF         0x8 /* 4 TX Queues per pool w/32VF's */
-#define IXGBE_MTQC_64VF         0x4 /* 2 TX Queues per pool w/64VF's */
-#define IXGBE_MTQC_8TC_8TQ      0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
-#define IXGBE_MTQC_4TC_4TQ     0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */
+#define IXGBE_MTQC_RT_ENA      0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA      0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB     0x0 /* 64 queues 1 pack buffer */
+#define IXGBE_MTQC_32VF                0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF                0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ     0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ     0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
 
 /* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */
-#define IXGBE_RXD_STAT_FLM      0x04    /* FDir Match */
-#define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004
-#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF      0x80    /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_LLINT    0x800   /* Pkt caused Low Latency Interrupt */
-#define IXGBE_RXD_STAT_TS       0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP     0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB       0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE        0x01    /* CRC Error */
-#define IXGBE_RXD_ERR_LE        0x02    /* Length Error */
-#define IXGBE_RXD_ERR_PE        0x08    /* Packet Error */
-#define IXGBE_RXD_ERR_OSE       0x10    /* Oversize Error */
-#define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
-#define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
-#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
-#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */
-#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */
-#define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE    0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE    0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE   0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE    0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK  0x0FFF  /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK      0xE000  /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT     13
-#define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT     12
-
-#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */
-#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK          0x000fffff /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXD_STAT_DD      0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP     0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM     0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP      0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK        0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT       0x00000004
+#define IXGBE_RXD_STAT_UDPCS   0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS    0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS    0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF     0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV    0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT    0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV    0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT  0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT   0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TS      0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP    0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB      0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK     0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE       0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE       0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE       0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE      0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE      0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE     0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE      0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK          0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT         20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_RXE           0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE                0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR         0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN      0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP     0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL     0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO   0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE    0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE    0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE    0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE   0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE   0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE  0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE   0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK     0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT    13
+#define IXGBE_RXD_CFI_MASK     0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT    12
+
+#define IXGBE_RXDADV_STAT_DD           IXGBE_RXD_STAT_DD  /* Done */
+#define IXGBE_RXDADV_STAT_EOP          IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM          IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP           IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK         0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS       0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT       0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH        0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP        0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP   0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS           0x00010000 /* IEEE1588 Time Stamp */
 
 /* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR    0x00000010
-#define IXGBE_PSRTYPE_UDPHDR    0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR   0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR   0x00000200
-#define IXGBE_PSRTYPE_L2HDR     0x00001000
+#define IXGBE_PSRTYPE_TCPHDR   0x00000010
+#define IXGBE_PSRTYPE_UDPHDR   0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR  0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR  0x00000200
+#define IXGBE_PSRTYPE_L2HDR    0x00001000
 
 /* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT        22
-#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000
-#define IXGBE_SRRCTL_DROP_EN            0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT    10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT       22
+#define IXGBE_SRRCTL_RDMTS_MASK                0x01C00000
+#define IXGBE_SRRCTL_DROP_EN           0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK     0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK     0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY   0x00000000
 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT        0x04000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK     0x0E000000
 
-#define IXGBE_RXDPS_HDRSTAT_HDRSP       0x00008000
-#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+#define IXGBE_RXDPS_HDRSTAT_HDRSP      0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
 
-#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT       17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
-#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
-#define IXGBE_RXDADV_SPH                0x8000
+#define IXGBE_RXDADV_RSSTYPE_MASK      0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK      0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX   0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK    0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK       0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT      17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT   5
+#define IXGBE_RXDADV_SPLITHEADER_EN    0x00001000
+#define IXGBE_RXDADV_SPH               0x8000
 
 /* RSS Hash results */
-#define IXGBE_RXDADV_RSSTYPE_NONE       0x00000000
-#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP   0x00000001
-#define IXGBE_RXDADV_RSSTYPE_IPV4       0x00000002
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP   0x00000003
-#define IXGBE_RXDADV_RSSTYPE_IPV6_EX    0x00000004
-#define IXGBE_RXDADV_RSSTYPE_IPV6       0x00000005
+#define IXGBE_RXDADV_RSSTYPE_NONE      0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP  0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4      0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP  0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX   0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6      0x00000005
 #define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP   0x00000007
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP   0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP  0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP  0x00000008
 #define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
 
 /* RSS Packet Types as indicated in the receive descriptor. */
-#define IXGBE_RXDADV_PKTTYPE_NONE       0x00000000
-#define IXGBE_RXDADV_PKTTYPE_IPV4       0x00000010 /* IPv4 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 /* IPv4 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_IPV6       0x00000040 /* IPv6 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 /* IPv6 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_TCP        0x00000100 /* TCP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 /* IPSec ESP */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 /* IPSec AH */
-#define IXGBE_RXDADV_PKTTYPE_LINKSEC    0x00004000 /* LinkSec Encap */
-#define IXGBE_RXDADV_PKTTYPE_ETQF       0x00008000 /* PKTTYPE is ETQF index */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 /* ETQF has 8 indices */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4          /* Right-shift 4 bits */
+#define IXGBE_RXDADV_PKTTYPE_NONE      0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4      0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX   0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6      0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX   0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP       0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP       0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP      0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS       0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH  0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC   0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF      0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT        4 /* Right-shift 4 bits */
 
 /* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP         0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP                0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH  0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK     0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG      0x18000000
 
 /* Masks to determine if packets should be dropped due to frame errors */
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
-                                      IXGBE_RXD_ERR_CE | \
-                                      IXGBE_RXD_ERR_LE | \
-                                      IXGBE_RXD_ERR_PE | \
-                                      IXGBE_RXD_ERR_OSE | \
-                                      IXGBE_RXD_ERR_USE)
+                               IXGBE_RXD_ERR_CE | \
+                               IXGBE_RXD_ERR_LE | \
+                               IXGBE_RXD_ERR_PE | \
+                               IXGBE_RXD_ERR_OSE | \
+                               IXGBE_RXD_ERR_USE)
 
 #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
-                                      IXGBE_RXDADV_ERR_CE | \
-                                      IXGBE_RXDADV_ERR_LE | \
-                                      IXGBE_RXDADV_ERR_PE | \
-                                      IXGBE_RXDADV_ERR_OSE | \
-                                      IXGBE_RXDADV_ERR_USE)
+                               IXGBE_RXDADV_ERR_CE | \
+                               IXGBE_RXDADV_ERR_LE | \
+                               IXGBE_RXDADV_ERR_PE | \
+                               IXGBE_RXDADV_ERR_OSE | \
+                               IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599  IXGBE_RXDADV_ERR_RXE
 
 /* Multicast bit mask */
-#define IXGBE_MCSTCTRL_MFE      0x4
+#define IXGBE_MCSTCTRL_MFE     0x4
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE       8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE       8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY                1024
 
 /* Vlan-specific macros */
-#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK  0x0FFF /* VLAN ID in lower 12 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_MASK   0xE000 /* Priority in upper 3 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
-#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK        0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT        0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT        IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
 
 /* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number)   (vf_number >> 4)
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
 #define IXGBE_MBVFICR(_i)              (0x00710 + ((_i) * 4))
-#define IXGBE_VFLRE(_i)                ((((_i) & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i)               (0x00700 + ((_i) * 4))
+#define IXGBE_VFLRE(_i)                        (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i)                (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P)       (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P)     (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P)      (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P)     (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P)    (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P)  (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P)       (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P)       (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P)       (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P)       (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P)       (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P)       (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P)       (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+                                (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P)       (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P)  (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P)     (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P)       (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P)      ((P < 64) ? (0x01000 + (0x40 * (P))) \
+                                : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P)      ((P < 64) ? (0x01004 + (0x40 * (P))) \
+                                : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P)      ((P < 64) ? (0x01008 + (0x40 * (P))) \
+                                : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P)                ((P < 64) ? (0x01010 + (0x40 * (P))) \
+                                : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P)                ((P < 64) ? (0x01018 + (0x40 * (P))) \
+                                : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P)     ((P < 64) ? (0x01028 + (0x40 * (P))) \
+                                : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P)     ((P < 64) ? (0x01014 + (0x40 * (P))) \
+                                : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P)    (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P)      (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P)      (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTTDLEN(P)     (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P)                (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P)                (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)     (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P)     (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P)     (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+                                : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x)       (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x)       (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x)   (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x)   (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x)   (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x)   (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x)       (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+               (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+               (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+/* Little Endian defines */
+#ifndef __le16
+#define __le16  u16
+#endif
+#ifndef __le32
+#define __le32  u32
+#endif
+#ifndef __le64
+#define __le64  u64
 
+#endif
+#ifndef __be16
+/* Big Endian defines */
+#define __be16  u16
+#define __be32  u32
+#define __be64  u64
+
+#endif
 enum ixgbe_fdir_pballoc_type {
        IXGBE_FDIR_PBALLOC_NONE = 0,
        IXGBE_FDIR_PBALLOC_64K  = 1,
        IXGBE_FDIR_PBALLOC_128K = 2,
        IXGBE_FDIR_PBALLOC_256K = 3,
 };
-#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16
 
 /* Flow Director register values */
-#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001
-#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002
-#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003
-#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008
-#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010
-#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020
-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
-#define IXGBE_FDIRCTRL_FLEX_SHIFT               16
-#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28
-
-#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16
-#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16
-#define IXGBE_FDIRIP6M_DIPM_SHIFT               16
-#define IXGBE_FDIRM_VLANID                      0x00000001
-#define IXGBE_FDIRM_VLANP                       0x00000002
-#define IXGBE_FDIRM_POOL                        0x00000004
-#define IXGBE_FDIRM_L4P                         0x00000008
-#define IXGBE_FDIRM_FLEX                        0x00000010
-#define IXGBE_FDIRM_DIPv6                       0x00000020
-
-#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
-#define IXGBE_FDIRFREE_FREE_SHIFT               0
-#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000
-#define IXGBE_FDIRFREE_COLL_SHIFT               16
-#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F
-#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0
-#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000
-#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16
-#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF
-#define IXGBE_FDIRUSTAT_ADD_SHIFT               0
-#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000
-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16
-#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF
-#define IXGBE_FDIRFSTAT_FADD_SHIFT              0
-#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00
-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8
-#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16
-#define IXGBE_FDIRVLAN_FLEX_SHIFT               16
-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15
-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16
-
-#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003
-#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
-#define IXGBE_FDIRCMD_FILTER_VALID              0x00000004
-#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
-#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
-#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
-#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040
-#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060
-#define IXGBE_FDIRCMD_IPV6                      0x00000080
-#define IXGBE_FDIRCMD_CLEARHT                   0x00000100
-#define IXGBE_FDIRCMD_DROP                      0x00000200
-#define IXGBE_FDIRCMD_INT                       0x00000400
-#define IXGBE_FDIRCMD_LAST                      0x00000800
-#define IXGBE_FDIRCMD_COLLISION                 0x00001000
-#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
-#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
-#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
-#define IXGBE_FDIR_INIT_DONE_POLL               10
-#define IXGBE_FDIRCMD_CMD_POLL                  10
-
-#define IXGBE_FDIR_DROP_QUEUE                   127
+#define IXGBE_FDIRCTRL_PBALLOC_64K             0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K            0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K            0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE               0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH           0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS           0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS    0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT            8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT              16
+#define IXGBE_FDIRCTRL_SEARCHLIM               0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT                24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK                0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT       28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT            16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT            16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT              16
+#define IXGBE_FDIRM_VLANID                     0x00000001
+#define IXGBE_FDIRM_VLANP                      0x00000002
+#define IXGBE_FDIRM_POOL                       0x00000004
+#define IXGBE_FDIRM_L4P                                0x00000008
+#define IXGBE_FDIRM_FLEX                       0x00000010
+#define IXGBE_FDIRM_DIPv6                      0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK               0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT              0
+#define IXGBE_FDIRFREE_COLL_MASK               0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT              16
+#define IXGBE_FDIRLEN_MAXLEN_MASK              0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT             0
+#define IXGBE_FDIRLEN_MAXHASH_MASK             0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT            16
+#define IXGBE_FDIRUSTAT_ADD_MASK               0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT              0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK            0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT           16
+#define IXGBE_FDIRFSTAT_FADD_MASK              0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT             0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK           0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT          8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT       16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT              16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT      15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT      16
+
+#define IXGBE_FDIRCMD_CMD_MASK                 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW             0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW          0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT       0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID             0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE            0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH               0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP               0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP               0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP              0x00000060
+#define IXGBE_FDIRCMD_IPV6                     0x00000080
+#define IXGBE_FDIRCMD_CLEARHT                  0x00000100
+#define IXGBE_FDIRCMD_DROP                     0x00000200
+#define IXGBE_FDIRCMD_INT                      0x00000400
+#define IXGBE_FDIRCMD_LAST                     0x00000800
+#define IXGBE_FDIRCMD_COLLISION                        0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN                 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT          5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT           16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT            24
+#define IXGBE_FDIR_INIT_DONE_POLL              10
+#define IXGBE_FDIRCMD_CMD_POLL                 10
+
+#define IXGBE_FDIR_DROP_QUEUE                  127
+
+#define IXGBE_STATUS_OVERHEATING_BIT           20 /* STATUS overtemp bit num */
 
 /* Manageablility Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH        448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT       500 /* Process HI command limit */
 
 /* CEM Support */
-#define FW_CEM_HDR_LEN                0x4
-#define FW_CEM_CMD_DRIVER_INFO        0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN    0x5
-#define FW_CEM_CMD_RESERVED           0x0
-#define FW_CEM_UNUSED_VER             0x0
-#define FW_CEM_MAX_RETRIES            3
-#define FW_CEM_RESP_STATUS_SUCCESS    0x1
+#define FW_CEM_HDR_LEN                 0x4
+#define FW_CEM_CMD_DRIVER_INFO         0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN     0x5
+#define FW_CEM_CMD_RESERVED            0X0
+#define FW_CEM_UNUSED_VER              0x0
+#define FW_CEM_MAX_RETRIES             3
+#define FW_CEM_RESP_STATUS_SUCCESS     0x1
 
 /* Host Interface Command Structures */
+
 struct ixgbe_hic_hdr {
        u8 cmd;
        u8 buf_len;
@@ -2189,20 +2440,51 @@ struct ixgbe_hic_drv_info {
        u16 pad2; /* end spacing to ensure length is mult. of dword2 */
 };
 
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+       u64 buffer_addr; /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length; /* Data buffer length */
+                       u8 cso; /* Checksum offset */
+                       u8 cmd; /* Descriptor control */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status; /* Descriptor status */
+                       u8 css; /* Checksum start */
+                       __le16 vlan;
+               } fields;
+       } upper;
+};
+
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
        struct {
-               __le64 buffer_addr;      /* Address of descriptor's data buf */
+               __le64 buffer_addr; /* Address of descriptor's data buf */
                __le32 cmd_type_len;
                __le32 olinfo_status;
        } read;
        struct {
-               __le64 rsvd;       /* Reserved */
+               __le64 rsvd; /* Reserved */
                __le32 nxtseq_seed;
                __le32 status;
        } wb;
 };
 
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+       __le64 buffer_addr; /* Address of the descriptor's data buffer */
+       __le16 length; /* Length of data DMAed into data buffer */
+       __le16 csum; /* Packet checksum */
+       u8 status;   /* Descriptor status */
+       u8 errors;   /* Descriptor Errors */
+       __le16 vlan;
+};
+
 /* Receive Descriptor - Advanced */
 union ixgbe_adv_rx_desc {
        struct {
@@ -2243,100 +2525,103 @@ struct ixgbe_adv_tx_context_desc {
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */
-#define IXGBE_ADVTXD_MAC_LINKSEC      0x00040000 /* Insert LinkSec */
-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK   0x000003FF /* IPSec SA index */
-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK    0x000001FF /* IPSec ESP length */
-#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000    /* DDP hdr type or iSCSI */
-#define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
-#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED pres in WB */
-#define IXGBE_ADVTXD_STAT_RSV   0x0000000C /* STA Reserved */
-#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
-#define IXGBE_ADVTXD_CC         0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
-                                 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
-                                 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_RSV       0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_DTALEN_MASK       0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC       0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP                0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK        0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK         0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT         0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA         0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP          IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS         IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS           IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT         IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE          IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE          0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD           IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC       0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV          0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT         4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC                        0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT       8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM                (IXGBE_TXD_POPTS_IXSM << \
+                                        IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM                (IXGBE_TXD_POPTS_TXSM << \
+                                        IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST    0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL    0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST   0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL   0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV         0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT      14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT      9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT                16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4                0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6                0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP     0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP     0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP    0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ      0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC       0x00000400 /* IPSec offload request */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
-#define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
-#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXT_TUCMD_FCOE                0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK    (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF         ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC      ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE                ((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS                ((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N       (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T       (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI      (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A       (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT       8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT         16  /* Adv ctxt MSS shift */
 
 /* Autonegotiation advertised speeds */
 typedef u32 ixgbe_autoneg_advertised;
 /* Link speed */
 typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN   0
-#define IXGBE_LINK_SPEED_100_FULL  0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL  0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
-                                        IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
-                                        IXGBE_LINK_SPEED_1GB_FULL | \
-                                        IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_UNKNOWN       0
+#define IXGBE_LINK_SPEED_100_FULL      0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL      0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL     0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+                                        IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
 
 
 /* Physical layer type */
 typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN           0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T         0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T                0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX                0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU       0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR                0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM       0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR                0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4       0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4       0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX       0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX       0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR                0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI      0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA     0x2000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX       0x4000
 
 /* Flow Control Data Sheet defined values
  * Calculation and defines taken from 802.1bb Annex O
  */
 
 /* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
-#define IXGBE_B2BT(BT) (BT * 8)
+#define IXGBE_BT2KB(BT)                ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define IXGBE_B2BT(BT)         (BT * 8)
 
 /* Calculate Delay to respond to PFC */
 #define IXGBE_PFC_D    672
@@ -2346,8 +2631,8 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_CABLE_DO 5000 /* Delay Optical */
 
 /* Calculate Interface Delay X540 */
-#define IXGBE_PHY_DC   25600   /* Delay 10G BASET */
-#define IXGBE_MAC_DC   8192    /* Delay Copper XAUI interface */
+#define IXGBE_PHY_DC   25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC   8192  /* Delay Copper XAUI interface */
 #define IXGBE_XAUI_DC  (2 * 2048) /* Delay Copper Phy */
 
 #define IXGBE_ID_X540  (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
@@ -2366,45 +2651,52 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PCI_DELAY        10000
 
 /* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
-                                (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
-                                (2 * IXGBE_CABLE_DC) + \
-                                (2 * IXGBE_ID_X540) + \
-                                IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+                       ((36 * \
+                         (IXGBE_B2BT(_max_frame_link) + \
+                          IXGBE_PFC_D + \
+                          (2 * IXGBE_CABLE_DC) + \
+                          (2 * IXGBE_ID_X540) + \
+                          IXGBE_HD) / 25 + 1) + \
+                        2 * IXGBE_B2BT(_max_frame_tc))
 
 /* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
-                           (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
-                           (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
-                           IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+                       ((36 * \
+                         (IXGBE_B2BT(_max_frame_link) + \
+                          IXGBE_PFC_D + \
+                          (2 * IXGBE_CABLE_DC) + \
+                          (2 * IXGBE_ID) + \
+                          IXGBE_HD) / 25 + 1) + \
+                        2 * IXGBE_B2BT(_max_frame_tc))
 
 /* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
-                              (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC)      (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+                       (2 * IXGBE_B2BT(_max_frame_tc) + \
+                       (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+                       (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
 
 /* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+#define IXGBE_ATR_BUCKET_HASH_KEY      0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY   0x174D3614
 
 /* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK     0x7fff
-#define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_UDP       0x1
-#define IXGBE_ATR_L4TYPE_TCP       0x2
-#define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK            0x7fff
+#define IXGBE_ATR_L4TYPE_MASK          0x3
+#define IXGBE_ATR_L4TYPE_UDP           0x1
+#define IXGBE_ATR_L4TYPE_TCP           0x2
+#define IXGBE_ATR_L4TYPE_SCTP          0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK     0x4
 enum ixgbe_atr_flow_type {
-       IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
-       IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
-       IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2,
-       IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
-       IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4,
-       IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5,
-       IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6,
-       IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+       IXGBE_ATR_FLOW_TYPE_IPV4        = 0x0,
+       IXGBE_ATR_FLOW_TYPE_UDPV4       = 0x1,
+       IXGBE_ATR_FLOW_TYPE_TCPV4       = 0x2,
+       IXGBE_ATR_FLOW_TYPE_SCTPV4      = 0x3,
+       IXGBE_ATR_FLOW_TYPE_IPV6        = 0x4,
+       IXGBE_ATR_FLOW_TYPE_UDPV6       = 0x5,
+       IXGBE_ATR_FLOW_TYPE_TCPV6       = 0x6,
+       IXGBE_ATR_FLOW_TYPE_SCTPV6      = 0x7,
 };
 
 /* Flow Director ATR input struct. */
@@ -2412,19 +2704,19 @@ union ixgbe_atr_input {
        /*
         * Byte layout in order, all values with MSB first:
         *
-        * vm_pool    - 1 byte
-        * flow_type  - 1 byte
-        * vlan_id    - 2 bytes
-        * src_ip     - 16 bytes
-        * dst_ip     - 16 bytes
-        * src_port   - 2 bytes
-        * dst_port   - 2 bytes
-        * flex_bytes - 2 bytes
-        * bkt_hash   - 2 bytes
+        * vm_pool      - 1 byte
+        * flow_type    - 1 byte
+        * vlan_id      - 2 bytes
+        * src_ip       - 16 bytes
+        * dst_ip       - 16 bytes
+        * src_port     - 2 bytes
+        * dst_port     - 2 bytes
+        * flex_bytes   - 2 bytes
+        * bkt_hash     - 2 bytes
         */
        struct {
-               u8     vm_pool;
-               u8     flow_type;
+               u8 vm_pool;
+               u8 flow_type;
                __be16 vlan_id;
                __be32 dst_ip[4];
                __be32 src_ip[4];
@@ -2452,6 +2744,18 @@ union ixgbe_atr_hash_dword {
        __be32 dword;
 };
 
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+       ixgbe_fcoe_bootstatus_disabled = 0,
+       ixgbe_fcoe_bootstatus_enabled = 1,
+       ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
 enum ixgbe_eeprom_type {
        ixgbe_eeprom_uninitialized = 0,
        ixgbe_eeprom_spi,
@@ -2484,22 +2788,22 @@ enum ixgbe_phy_type {
        ixgbe_phy_sfp_ftl_active,
        ixgbe_phy_sfp_unknown,
        ixgbe_phy_sfp_intel,
-       ixgbe_phy_sfp_unsupported,
+       ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
        ixgbe_phy_generic
 };
 
 /*
  * SFP+ module type IDs:
  *
- * ID   Module Type
+ * ID  Module Type
  * =============
- * 0    SFP_DA_CU
- * 1    SFP_SR
- * 2    SFP_LR
- * 3    SFP_DA_CU_CORE0 - 82599-specific
- * 4    SFP_DA_CU_CORE1 - 82599-specific
- * 5    SFP_SR/LR_CORE0 - 82599-specific
- * 6    SFP_SR/LR_CORE1 - 82599-specific
+ * 0   SFP_DA_CU
+ * 1   SFP_SR
+ * 2   SFP_LR
+ * 3   SFP_DA_CU_CORE0 - 82599-specific
+ * 4   SFP_DA_CU_CORE1 - 82599-specific
+ * 5   SFP_SR/LR_CORE0 - 82599-specific
+ * 6   SFP_SR/LR_CORE1 - 82599-specific
  */
 enum ixgbe_sfp_type {
        ixgbe_sfp_type_da_cu = 0,
@@ -2513,6 +2817,8 @@ enum ixgbe_sfp_type {
        ixgbe_sfp_type_da_act_lmt_core1 = 8,
        ixgbe_sfp_type_1g_cu_core0 = 9,
        ixgbe_sfp_type_1g_cu_core1 = 10,
+       ixgbe_sfp_type_1g_sx_core0 = 11,
+       ixgbe_sfp_type_1g_sx_core1 = 12,
        ixgbe_sfp_type_not_present = 0xFFFE,
        ixgbe_sfp_type_unknown = 0xFFFF
 };
@@ -2533,9 +2839,6 @@ enum ixgbe_fc_mode {
        ixgbe_fc_rx_pause,
        ixgbe_fc_tx_pause,
        ixgbe_fc_full,
-#ifdef CONFIG_DCB
-       ixgbe_fc_pfc,
-#endif
        ixgbe_fc_default
 };
 
@@ -2558,26 +2861,27 @@ enum ixgbe_bus_type {
 
 /* PCI bus speeds */
 enum ixgbe_bus_speed {
-       ixgbe_bus_speed_unknown = 0,
-       ixgbe_bus_speed_33      = 33,
-       ixgbe_bus_speed_66      = 66,
-       ixgbe_bus_speed_100     = 100,
-       ixgbe_bus_speed_120     = 120,
-       ixgbe_bus_speed_133     = 133,
-       ixgbe_bus_speed_2500    = 2500,
-       ixgbe_bus_speed_5000    = 5000,
+       ixgbe_bus_speed_unknown = 0,
+       ixgbe_bus_speed_33      = 33,
+       ixgbe_bus_speed_66      = 66,
+       ixgbe_bus_speed_100     = 100,
+       ixgbe_bus_speed_120     = 120,
+       ixgbe_bus_speed_133     = 133,
+       ixgbe_bus_speed_2500    = 2500,
+       ixgbe_bus_speed_5000    = 5000,
+       ixgbe_bus_speed_8000    = 8000,
        ixgbe_bus_speed_reserved
 };
 
 /* PCI bus widths */
 enum ixgbe_bus_width {
-       ixgbe_bus_width_unknown = 0,
-       ixgbe_bus_width_pcie_x1 = 1,
-       ixgbe_bus_width_pcie_x2 = 2,
-       ixgbe_bus_width_pcie_x4 = 4,
-       ixgbe_bus_width_pcie_x8 = 8,
-       ixgbe_bus_width_32      = 32,
-       ixgbe_bus_width_64      = 64,
+       ixgbe_bus_width_unknown = 0,
+       ixgbe_bus_width_pcie_x1 = 1,
+       ixgbe_bus_width_pcie_x2 = 2,
+       ixgbe_bus_width_pcie_x4 = 4,
+       ixgbe_bus_width_pcie_x8 = 8,
+       ixgbe_bus_width_32      = 32,
+       ixgbe_bus_width_64      = 64,
        ixgbe_bus_width_reserved
 };
 
@@ -2586,7 +2890,6 @@ struct ixgbe_addr_filter_info {
        u32 rar_used_count;
        u32 mta_in_use;
        u32 overflow_promisc;
-       bool uc_set_promisc;
        bool user_set_promisc;
 };
 
@@ -2602,8 +2905,8 @@ struct ixgbe_bus_info {
 
 /* Flow control parameters */
 struct ixgbe_fc_info {
-       u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
-       u32 low_water; /* Flow Control Low-water */
+       u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+       u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
        u16 pause_time; /* Flow Control Pause timer */
        bool send_xon; /* Flow control send XON */
        bool strict_ieee; /* Strict IEEE mode */
@@ -2664,8 +2967,6 @@ struct ixgbe_hw_stats {
        u64 mptc;
        u64 bptc;
        u64 xec;
-       u64 rqsmr[16];
-       u64 tqsmr[8];
        u64 qprc[16];
        u64 qptc[16];
        u64 qbrc[16];
@@ -2679,6 +2980,7 @@ struct ixgbe_hw_stats {
        u64 fdirmatch;
        u64 fdirmiss;
        u64 fccrc;
+       u64 fclast;
        u64 fcoerpdc;
        u64 fcoeprc;
        u64 fcoeptc;
@@ -2686,6 +2988,8 @@ struct ixgbe_hw_stats {
        u64 fcoedwtc;
        u64 fcoe_noddp;
        u64 fcoe_noddp_ext_buff;
+       u64 ldpcec;
+       u64 pcrc8ec;
        u64 b2ospc;
        u64 b2ogprc;
        u64 o2bgptc;
@@ -2697,7 +3001,7 @@ struct ixgbe_hw;
 
 /* iterator type for walking multicast address lists */
 typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-                                  u32 *vmdq);
+                                 u32 *vmdq);
 
 /* Function pointer table */
 struct ixgbe_eeprom_operations {
@@ -2720,17 +3024,19 @@ struct ixgbe_mac_operations {
        u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
        s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+       s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
        s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+       s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
        s32 (*stop_adapter)(struct ixgbe_hw *);
        s32 (*get_bus_info)(struct ixgbe_hw *);
        void (*set_lan_id)(struct ixgbe_hw *);
        s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
        s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
        s32 (*setup_sfp)(struct ixgbe_hw *);
-       s32 (*disable_rx_buff)(struct ixgbe_hw *);
-       s32 (*enable_rx_buff)(struct ixgbe_hw *);
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+       s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+       s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
        s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
        void (*release_swfw_sync)(struct ixgbe_hw *, u16);
 
@@ -2741,10 +3047,10 @@ struct ixgbe_mac_operations {
        s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
        s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
-                                    bool *);
+                                    bool *);
 
-       /* Packet Buffer Manipulation */
-       void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
+       /* Packet Buffer manipulation */
+       void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
 
        /* LED */
        s32 (*led_on)(struct ixgbe_hw *, u32);
@@ -2754,24 +3060,33 @@ struct ixgbe_mac_operations {
 
        /* RAR, Multicast, VLAN */
        s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+       s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
        s32 (*clear_rar)(struct ixgbe_hw *, u32);
+       s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
        s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+       s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
        s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
-       s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+       s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+                                  ixgbe_mc_addr_itr);
+       s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+                                  ixgbe_mc_addr_itr, bool clear);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
        s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+       s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
        s32 (*init_uta_tables)(struct ixgbe_hw *);
        void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
        void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
        /* Flow Control */
-       s32 (*fc_enable)(struct ixgbe_hw *, s32);
+       s32 (*fc_enable)(struct ixgbe_hw *);
 
        /* Manageability interface */
        s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+       s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+       s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2783,82 +3098,86 @@ struct ixgbe_phy_operations {
        s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
        s32 (*setup_link)(struct ixgbe_hw *);
        s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
-                               bool);
+                               bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
        s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
        s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
        s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+       void (*i2c_bus_clear)(struct ixgbe_hw *);
        s32 (*check_overtemp)(struct ixgbe_hw *);
 };
 
 struct ixgbe_eeprom_info {
-       struct ixgbe_eeprom_operations  ops;
-       enum ixgbe_eeprom_type          type;
-       u32                             semaphore_delay;
-       u16                             word_size;
-       u16                             address_bits;
-       u16                             word_page_size;
+       struct ixgbe_eeprom_operations ops;
+       enum ixgbe_eeprom_type type;
+       u32 semaphore_delay;
+       u16 word_size;
+       u16 address_bits;
+       u16 word_page_size;
 };
 
 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED      0x01
 struct ixgbe_mac_info {
-       struct ixgbe_mac_operations     ops;
-       enum ixgbe_mac_type             type;
-       u8                              addr[ETH_ALEN];
-       u8                              perm_addr[ETH_ALEN];
-       u8                              san_addr[ETH_ALEN];
+       struct ixgbe_mac_operations ops;
+       enum ixgbe_mac_type type;
+       u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
        /* prefix for World Wide Node Name (WWNN) */
-       u16                             wwnn_prefix;
+       u16 wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
-       u16                             wwpn_prefix;
+       u16 wwpn_prefix;
 #define IXGBE_MAX_MTA                  128
-       u32                             mta_shadow[IXGBE_MAX_MTA];
-       s32                             mc_filter_type;
-       u32                             mcft_size;
-       u32                             vft_size;
-       u32                             num_rar_entries;
-       u32                             rar_highwater;
-       u32                             rx_pb_size;
-       u32                             max_tx_queues;
-       u32                             max_rx_queues;
-       u32                             max_msix_vectors;
-       u32                             orig_autoc;
-       u32                             orig_autoc2;
-       bool                            orig_link_settings_stored;
-       bool                            autotry_restart;
-       u8                              flags;
+       u32 mta_shadow[IXGBE_MAX_MTA];
+       s32 mc_filter_type;
+       u32 mcft_size;
+       u32 vft_size;
+       u32 num_rar_entries;
+       u32 rar_highwater;
+       u32 rx_pb_size;
+       u32 max_tx_queues;
+       u32 max_rx_queues;
+       u32 orig_autoc;
+       u8  san_mac_rar_index;
+       u32 orig_autoc2;
+       u16 max_msix_vectors;
+       bool arc_subsystem_valid;
+       bool orig_link_settings_stored;
+       bool autotry_restart;
+       u8 flags;
+       struct ixgbe_thermal_sensor_data  thermal_sensor_data;
 };
 
 struct ixgbe_phy_info {
-       struct ixgbe_phy_operations     ops;
-       struct mdio_if_info             mdio;
-       enum ixgbe_phy_type             type;
-       u32                             id;
-       enum ixgbe_sfp_type             sfp_type;
-       bool                            sfp_setup_needed;
-       u32                             revision;
-       enum ixgbe_media_type           media_type;
-       bool                            reset_disable;
-       ixgbe_autoneg_advertised        autoneg_advertised;
-       enum ixgbe_smart_speed          smart_speed;
-       bool                            smart_speed_active;
-       bool                            multispeed_fiber;
-       bool                            reset_if_overtemp;
+       struct ixgbe_phy_operations ops;
+       enum ixgbe_phy_type type;
+       u32 addr;
+       u32 id;
+       enum ixgbe_sfp_type sfp_type;
+       bool sfp_setup_needed;
+       u32 revision;
+       enum ixgbe_media_type media_type;
+       bool reset_disable;
+       ixgbe_autoneg_advertised autoneg_advertised;
+       enum ixgbe_smart_speed smart_speed;
+       bool smart_speed_active;
+       bool multispeed_fiber;
+       bool reset_if_overtemp;
 };
 
 #include "ixgbe_mbx.h"
 
 struct ixgbe_mbx_operations {
-       s32 (*init_params)(struct ixgbe_hw *hw);
-       s32 (*read)(struct ixgbe_hw *, u32 *, u16,  u16);
-       s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
-       s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16);
-       s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
-       s32 (*check_for_msg)(struct ixgbe_hw *, u16);
-       s32 (*check_for_ack)(struct ixgbe_hw *, u16);
-       s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+       void (*init_params)(struct ixgbe_hw *hw);
+       s32  (*read)(struct ixgbe_hw *, u32 *, u16,  u16);
+       s32  (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+       s32  (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16);
+       s32  (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+       s32  (*check_for_msg)(struct ixgbe_hw *, u16);
+       s32  (*check_for_ack)(struct ixgbe_hw *, u16);
+       s32  (*check_for_rst)(struct ixgbe_hw *, u16);
 };
 
 struct ixgbe_mbx_stats {
@@ -2874,75 +3193,72 @@ struct ixgbe_mbx_info {
        struct ixgbe_mbx_operations ops;
        struct ixgbe_mbx_stats stats;
        u32 timeout;
-       u32 usec_delay;
+       u32 udelay;
        u32 v2p_mailbox;
        u16 size;
 };
 
 struct ixgbe_hw {
-       u8 __iomem                      *hw_addr;
-       void                            *back;
-       struct ixgbe_mac_info           mac;
-       struct ixgbe_addr_filter_info   addr_ctrl;
-       struct ixgbe_fc_info            fc;
-       struct ixgbe_phy_info           phy;
-       struct ixgbe_eeprom_info        eeprom;
-       struct ixgbe_bus_info           bus;
-       struct ixgbe_mbx_info           mbx;
-       u16                             device_id;
-       u16                             vendor_id;
-       u16                             subsystem_device_id;
-       u16                             subsystem_vendor_id;
-       u8                              revision_id;
-       bool                            adapter_stopped;
-       bool                            force_full_reset;
-       bool                            allow_unsupported_sfp;
+       u8 __iomem *hw_addr;
+       void *back;
+       struct ixgbe_mac_info mac;
+       struct ixgbe_addr_filter_info addr_ctrl;
+       struct ixgbe_fc_info fc;
+       struct ixgbe_phy_info phy;
+       struct ixgbe_eeprom_info eeprom;
+       struct ixgbe_bus_info bus;
+       struct ixgbe_mbx_info mbx;
+       u16 device_id;
+       u16 vendor_id;
+       u16 subsystem_device_id;
+       u16 subsystem_vendor_id;
+       u8 revision_id;
+       bool adapter_stopped;
+       bool force_full_reset;
+       bool allow_unsupported_sfp;
 };
 
-struct ixgbe_info {
-       enum ixgbe_mac_type             mac;
-       s32                             (*get_invariants)(struct ixgbe_hw *);
-       struct ixgbe_mac_operations     *mac_ops;
-       struct ixgbe_eeprom_operations  *eeprom_ops;
-       struct ixgbe_phy_operations     *phy_ops;
-       struct ixgbe_mbx_operations     *mbx_ops;
-};
+#define ixgbe_call_func(hw, func, params, error) \
+               (func != NULL) ? func params : error
 
 
 /* Error Codes */
-#define IXGBE_ERR_EEPROM                        -1
-#define IXGBE_ERR_EEPROM_CHECKSUM               -2
-#define IXGBE_ERR_PHY                           -3
-#define IXGBE_ERR_CONFIG                        -4
-#define IXGBE_ERR_PARAM                         -5
-#define IXGBE_ERR_MAC_TYPE                      -6
-#define IXGBE_ERR_UNKNOWN_PHY                   -7
-#define IXGBE_ERR_LINK_SETUP                    -8
-#define IXGBE_ERR_ADAPTER_STOPPED               -9
-#define IXGBE_ERR_INVALID_MAC_ADDR              -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED          -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING       -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS         -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE          -14
-#define IXGBE_ERR_RESET_FAILED                  -15
-#define IXGBE_ERR_SWFW_SYNC                     -16
-#define IXGBE_ERR_PHY_ADDR_INVALID              -17
-#define IXGBE_ERR_I2C                           -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
-#define IXGBE_ERR_SFP_NOT_PRESENT               -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR               -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
-#define IXGBE_ERR_EEPROM_VERSION                -24
-#define IXGBE_ERR_NO_SPACE                      -25
-#define IXGBE_ERR_OVERTEMP                      -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED              -28
-#define IXGBE_ERR_FLOW_CONTROL                  -29
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
-#define IXGBE_ERR_PBA_SECTION                   -31
-#define IXGBE_ERR_INVALID_ARGUMENT              -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
-#define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
+#define IXGBE_ERR_EEPROM                       -1
+#define IXGBE_ERR_EEPROM_CHECKSUM              -2
+#define IXGBE_ERR_PHY                          -3
+#define IXGBE_ERR_CONFIG                       -4
+#define IXGBE_ERR_PARAM                                -5
+#define IXGBE_ERR_MAC_TYPE                     -6
+#define IXGBE_ERR_UNKNOWN_PHY                  -7
+#define IXGBE_ERR_LINK_SETUP                   -8
+#define IXGBE_ERR_ADAPTER_STOPPED              -9
+#define IXGBE_ERR_INVALID_MAC_ADDR             -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED         -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING      -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS                -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE         -14
+#define IXGBE_ERR_RESET_FAILED                 -15
+#define IXGBE_ERR_SWFW_SYNC                    -16
+#define IXGBE_ERR_PHY_ADDR_INVALID             -17
+#define IXGBE_ERR_I2C                          -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED            -19
+#define IXGBE_ERR_SFP_NOT_PRESENT              -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT      -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR              -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED           -23
+#define IXGBE_ERR_EEPROM_VERSION               -24
+#define IXGBE_ERR_NO_SPACE                     -25
+#define IXGBE_ERR_OVERTEMP                     -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED            -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED             -28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE       -30
+#define IXGBE_ERR_PBA_SECTION                  -31
+#define IXGBE_ERR_INVALID_ARGUMENT             -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND       -33
+#define IXGBE_ERR_OUT_OF_MEM                   -34
+
+#define IXGBE_NOT_IMPLEMENTED                  0x7FFFFFFF
+
+#define UNREFERENCED_XPARAMETER
 
 #endif /* _IXGBE_TYPE_H_ */
index 1bbd6ea6e49c90eda2a4b4e5989602d6cedfe801..329289c789da17024878a49551bdac32a3dc158e 100644 (file)
 
 *******************************************************************************/
 
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "ixgbe.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE   384
-
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
 
-static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
-{
-       return ixgbe_media_type_copper;
-}
-
-static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+/**
+ *  ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for X540.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       s32 ret_val;
+
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+
+       /* EEPROM */
+       eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+       eeprom->ops.read = &ixgbe_read_eerd_X540;
+       eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
+       eeprom->ops.write = &ixgbe_write_eewr_X540;
+       eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
+       eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
+       eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
+       eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+
+       /* PHY */
+       phy->ops.init = &ixgbe_init_phy_ops_generic;
+       phy->ops.reset = NULL;
+
+       /* MAC */
+       mac->ops.reset_hw = &ixgbe_reset_hw_X540;
+       mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+       mac->ops.get_supported_physical_layer =
+                                   &ixgbe_get_supported_physical_layer_X540;
+       mac->ops.read_analog_reg8 = NULL;
+       mac->ops.write_analog_reg8 = NULL;
+       mac->ops.start_hw = &ixgbe_start_hw_X540;
+       mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+       mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+       mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+       mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+       mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+       mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
+       mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+       mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+       mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+       mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+       mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+       mac->rar_highwater = 1;
+       mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+       mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+       mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+       mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+       mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+       /* Link */
+       mac->ops.get_link_capabilities =
+                               &ixgbe_get_copper_link_capabilities_generic;
+       mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+       mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
+       mac->mcft_size          = 128;
+       mac->vft_size           = 128;
+       mac->num_rar_entries    = 128;
+       mac->rx_pb_size         = 384;
+       mac->max_tx_queues      = 128;
+       mac->max_rx_queues      = 128;
+       mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_generic(hw);
+
+       /*
+        * FWSM register
+        * ARC supported; valid only if manageability features are
+        * enabled.
+        */
+       mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+                                  IXGBE_FWSM_MODE_MASK) ? true : false;
+
+       hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
 
-       /* Call PHY identify routine to get the phy type */
-       ixgbe_identify_phy_generic(hw);
+       /* LEDs */
+       mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+       mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
 
-       mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
-       mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
-       mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
-       mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
-       mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: true when autoneg or autotry is enabled
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+                                    ixgbe_link_speed *speed,
+                                    bool *autoneg)
+{
+       ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
 
        return 0;
 }
 
 /**
- *  ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires
+ *  ixgbe_get_media_type_X540 - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+       return ixgbe_media_type_copper;
+}
+
+/**
+ *  ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
  *  @autoneg_wait_to_complete: true when waiting for completion is needed
  **/
-static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+                             ixgbe_link_speed speed, bool autoneg,
+                             bool autoneg_wait_to_complete)
 {
        return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-                                           autoneg_wait_to_complete);
+                                           autoneg_wait_to_complete);
 }
 
 /**
@@ -88,10 +182,9 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
  *  @hw: pointer to hardware structure
  *
  *  Resets the hardware by resetting the transmit and receive units, masks
- *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- *  reset.
+ *  and clears all interrupts, and perform a reset.
  **/
-static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
 {
        s32 status;
        u32 ctrl, i;
@@ -145,7 +238,7 @@ mac_reset_top:
         * clear the multicast table.  Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
-       hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
+       hw->mac.num_rar_entries = 128;
        hw->mac.ops.init_rx_addrs(hw);
 
        /* Store the permanent SAN mac address */
@@ -154,7 +247,10 @@ mac_reset_top:
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* Save the SAN MAC RAR index */
+               hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
 
                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
@@ -162,7 +258,7 @@ mac_reset_top:
 
        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-                                  &hw->mac.wwpn_prefix);
+                                  &hw->mac.wwpn_prefix);
 
 reset_hw_out:
        return status;
@@ -176,7 +272,7 @@ reset_hw_out:
  *  and the generation start_hw function.
  *  Then performs revision-specific operations, if any.
  **/
-static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
 
@@ -185,7 +281,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
                goto out;
 
        ret_val = ixgbe_start_hw_gen2(hw);
-       hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+
 out:
        return ret_val;
 }
@@ -196,20 +292,18 @@ out:
  *
  *  Determines physical layer capabilities of the current configuration.
  **/
-static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
 {
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u16 ext_ability = 0;
 
-       hw->phy.ops.identify(hw);
-
-       hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                            &ext_ability);
-       if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+       IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+       if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
                physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-       if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+       if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
                physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-       if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+       if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
                physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
 
        return physical_layer;
@@ -222,7 +316,7 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
  *  ixgbe_hw struct in order to set up EEPROM access.
  **/
-static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 {
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
@@ -234,12 +328,12 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 
                eec = IXGBE_READ_REG(hw, IXGBE_EEC);
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-                                   IXGBE_EEC_SIZE_SHIFT);
+                                   IXGBE_EEC_SIZE_SHIFT);
                eeprom->word_size = 1 << (eeprom_size +
-                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
-                      eeprom->type, eeprom->word_size);
+                         eeprom->type, eeprom->word_size);
        }
 
        return 0;
@@ -253,7 +347,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
  *
  *  Reads a 16 bit word from the EEPROM using the EERD register.
  **/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
        s32 status = 0;
 
@@ -268,7 +362,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 }
 
 /**
- *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
  *  @hw: pointer to hardware structure
  *  @offset: offset of  word in the EEPROM to read
  *  @words: number of words
@@ -276,8 +370,8 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
  *
  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
  **/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
-                                      u16 offset, u16 words, u16 *data)
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+                               u16 offset, u16 words, u16 *data)
 {
        s32 status = 0;
 
@@ -300,11 +394,12 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
  *
  *  Write a 16 bit word to the EEPROM using the EEWR register.
  **/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
        s32 status = 0;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           0)
                status = ixgbe_write_eewr_generic(hw, offset, data);
        else
                status = IXGBE_ERR_SWFW_SYNC;
@@ -322,8 +417,8 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
  *
  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
  **/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
-                                       u16 offset, u16 words, u16 *data)
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+                                u16 offset, u16 words, u16 *data)
 {
        s32 status = 0;
 
@@ -346,7 +441,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
  *
  *  @hw: pointer to hardware structure
  **/
-static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
 {
        u16 i;
        u16 j;
@@ -388,7 +483,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
                    pointer >= hw->eeprom.word_size)
                        continue;
 
-               if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+               if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
+                   0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
@@ -399,7 +495,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
                        continue;
 
                for (j = pointer+1; j <= pointer+length; j++) {
-                       if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+                       if (ixgbe_read_eerd_generic(hw, j, &word) !=
+                           0) {
                                hw_dbg(hw, "EEPROM read failed\n");
                                break;
                        }
@@ -420,8 +517,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
  *  Performs checksum calculation and validates the EEPROM checksum.  If the
  *  caller does not need checksum_val, the value can be NULL.
  **/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
-                                              u16 *checksum_val)
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+                                       u16 *checksum_val)
 {
        s32 status;
        u16 checksum;
@@ -439,13 +536,14 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
                goto out;
        }
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           0) {
                checksum = hw->eeprom.ops.calc_checksum(hw);
 
                /*
                 * Do not use hw->eeprom.ops.read because we do not want to take
                 * the synchronization semaphores twice here.
-                */
+               */
                ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
                                        &read_checksum);
 
@@ -476,7 +574,7 @@ out:
  * checksum and updates the EEPROM and instructs the hardware to update
  * the flash.
  **/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
 {
        s32 status;
        u16 checksum;
@@ -491,13 +589,14 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
        if (status != 0)
                hw_dbg(hw, "EEPROM read failed\n");
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           0) {
                checksum = hw->eeprom.ops.calc_checksum(hw);
 
                /*
                 * Do not use hw->eeprom.ops.write because we do not want to
                 * take the synchronization semaphores twice here.
-                */
+               */
                status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
                                                  checksum);
 
@@ -513,11 +612,11 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
- * @hw: pointer to hardware structure
+ *  ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ *  @hw: pointer to hardware structure
  *
- * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
- * EEPROM from shadow RAM to the flash device.
+ *  Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ *  EEPROM from shadow RAM to the flash device.
  **/
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
 {
@@ -558,11 +657,11 @@ out:
 }
 
 /**
- * ixgbe_poll_flash_update_done_X540 - Poll flash update status
- * @hw: pointer to hardware structure
+ *  ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ *  @hw: pointer to hardware structure
  *
- * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
- * flash update is done.
+ *  Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ *  flash update is done.
  **/
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
 {
@@ -582,14 +681,14 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to acquire
+ *  ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
  *
- * Acquires the SWFW semaphore thought the SW_FW_SYNC register for
- * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ *  Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ *  the specified function (CSR, PHY0, PHY1, NVM, Flash)
  **/
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
 {
        u32 swfw_sync;
        u32 swmask = mask;
@@ -597,67 +696,80 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
        u32 hwmask = 0;
        u32 timeout = 200;
        u32 i;
+       s32 ret_val = 0;
 
        if (swmask == IXGBE_GSSR_EEP_SM)
                hwmask = IXGBE_GSSR_FLASH_SM;
 
+       /* SW only mask doesn't have FW bit pair */
+       if (swmask == IXGBE_GSSR_SW_MNG_SM)
+               fwmask = 0;
+
        for (i = 0; i < timeout; i++) {
                /*
                 * SW NVM semaphore bit is used for access to all
                 * SW_FW_SYNC bits (not just NVM)
                 */
-               if (ixgbe_get_swfw_sync_semaphore(hw))
-                       return IXGBE_ERR_SWFW_SYNC;
+               if (ixgbe_get_swfw_sync_semaphore(hw)) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
+               }
 
                swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                if (!(swfw_sync & (fwmask | swmask | hwmask))) {
                        swfw_sync |= swmask;
                        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
                        ixgbe_release_swfw_sync_semaphore(hw);
-                       break;
+                       msleep(5);
+                       goto out;
                } else {
                        /*
-                        * Firmware currently using resource (fwmask),
-                        * hardware currently using resource (hwmask),
-                        * or other software thread currently using
-                        * resource (swmask)
+                        * Firmware currently using resource (fwmask), hardware
+                        * currently using resource (hwmask), or other software
+                        * thread currently using resource (swmask)
                         */
                        ixgbe_release_swfw_sync_semaphore(hw);
-                       usleep_range(5000, 10000);
+                       msleep(5);
                }
        }
 
-       /*
-        * If the resource is not released by the FW/HW the SW can assume that
-        * the FW/HW malfunctions. In that case the SW should sets the
-        * SW bit(s) of the requested resource(s) while ignoring the
-        * corresponding FW/HW bits in the SW_FW_SYNC register.
-        */
-       if (i >= timeout) {
-               swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-               if (swfw_sync & (fwmask | hwmask)) {
-                       if (ixgbe_get_swfw_sync_semaphore(hw))
-                               return IXGBE_ERR_SWFW_SYNC;
+       /* Failed to get SW only semaphore */
+       if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+               ret_val = IXGBE_ERR_SWFW_SYNC;
+               goto out;
+       }
 
-                       swfw_sync |= swmask;
-                       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
-                       ixgbe_release_swfw_sync_semaphore(hw);
+       /* If the resource is not released by the FW/HW the SW can assume that
+        * the FW/HW malfunctions. In that case the SW should sets the SW bit(s)
+        * of the requested resource(s) while ignoring the corresponding FW/HW
+        * bits in the SW_FW_SYNC register.
+        */
+       swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       if (swfw_sync & (fwmask | hwmask)) {
+               if (ixgbe_get_swfw_sync_semaphore(hw)) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
                }
+
+               swfw_sync |= swmask;
+               IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+               ixgbe_release_swfw_sync_semaphore(hw);
+               msleep(5);
        }
 
-       usleep_range(5000, 10000);
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
- * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to release
+ *  ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
  *
- * Releases the SWFW semaphore through the SW_FW_SYNC register
- * for the specified function (CSR, PHY0, PHY1, EVM, Flash)
+ *  Releases the SWFW semaphore through the SW_FW_SYNC register
+ *  for the specified function (CSR, PHY0, PHY1, NVM, Flash)
  **/
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
 {
        u32 swfw_sync;
        u32 swmask = mask;
@@ -669,14 +781,14 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
 
        ixgbe_release_swfw_sync_semaphore(hw);
-       usleep_range(5000, 10000);
+       msleep(5);
 }
 
 /**
- * ixgbe_get_nvm_semaphore - Get hardware semaphore
- * @hw: pointer to hardware structure
+ *  ixgbe_get_nvm_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
  *
- * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ *  Sets the hardware semaphores so SW/FW can gain control of shared resources
  **/
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
 {
@@ -700,7 +812,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
        }
 
        /* Now get the semaphore between SW/FW through the REGSMP bit */
-       if (status) {
+       if (status == 0) {
                for (i = 0; i < timeout; i++) {
                        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                        if (!(swsm & IXGBE_SWFW_REGSMP))
@@ -708,23 +820,34 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
 
                        udelay(50);
                }
+
+               /*
+                * Release semaphores and return error if SW NVM semaphore
+                * was not granted because we don't have access to the EEPROM
+                */
+               if (i >= timeout) {
+                       hw_dbg(hw, "REGSMP Software NVM semaphore not "
+                                "granted.\n");
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       status = IXGBE_ERR_EEPROM;
+               }
        } else {
                hw_dbg(hw, "Software semaphore SMBI between device drivers "
-                          "not granted.\n");
+                        "not granted.\n");
        }
 
        return status;
 }
 
 /**
- * ixgbe_release_nvm_semaphore - Release hardware semaphore
- * @hw: pointer to hardware structure
+ *  ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
  *
- * This function clears hardware semaphore bits.
+ *  This function clears hardware semaphore bits.
  **/
 static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
 {
-        u32 swsm;
+       u32 swsm;
 
        /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
 
@@ -747,7 +870,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
  * Devices that implement the version 2 interface:
  *   X540
  **/
-static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
@@ -783,7 +906,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
  * Devices that implement the version 2 interface:
  *   X540
  **/
-static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
@@ -803,87 +926,4 @@ static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
 
        return 0;
 }
-static struct ixgbe_mac_operations mac_ops_X540 = {
-       .init_hw                = &ixgbe_init_hw_generic,
-       .reset_hw               = &ixgbe_reset_hw_X540,
-       .start_hw               = &ixgbe_start_hw_X540,
-       .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
-       .get_media_type         = &ixgbe_get_media_type_X540,
-       .get_supported_physical_layer =
-                                  &ixgbe_get_supported_physical_layer_X540,
-       .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
-       .get_mac_addr           = &ixgbe_get_mac_addr_generic,
-       .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
-       .get_device_caps        = &ixgbe_get_device_caps_generic,
-       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
-       .stop_adapter           = &ixgbe_stop_adapter_generic,
-       .get_bus_info           = &ixgbe_get_bus_info_generic,
-       .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
-       .read_analog_reg8       = NULL,
-       .write_analog_reg8      = NULL,
-       .setup_link             = &ixgbe_setup_mac_link_X540,
-       .set_rxpba              = &ixgbe_set_rxpba_generic,
-       .check_link             = &ixgbe_check_mac_link_generic,
-       .get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
-       .led_on                 = &ixgbe_led_on_generic,
-       .led_off                = &ixgbe_led_off_generic,
-       .blink_led_start        = &ixgbe_blink_led_start_X540,
-       .blink_led_stop         = &ixgbe_blink_led_stop_X540,
-       .set_rar                = &ixgbe_set_rar_generic,
-       .clear_rar              = &ixgbe_clear_rar_generic,
-       .set_vmdq               = &ixgbe_set_vmdq_generic,
-       .clear_vmdq             = &ixgbe_clear_vmdq_generic,
-       .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
-       .enable_mc              = &ixgbe_enable_mc_generic,
-       .disable_mc             = &ixgbe_disable_mc_generic,
-       .clear_vfta             = &ixgbe_clear_vfta_generic,
-       .set_vfta               = &ixgbe_set_vfta_generic,
-       .fc_enable              = &ixgbe_fc_enable_generic,
-       .set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
-       .init_uta_tables        = &ixgbe_init_uta_tables_generic,
-       .setup_sfp              = NULL,
-       .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
-       .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
-       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
-       .release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
-       .disable_rx_buff        = &ixgbe_disable_rx_buff_generic,
-       .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
-};
-
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
-       .init_params            = &ixgbe_init_eeprom_params_X540,
-       .read                   = &ixgbe_read_eerd_X540,
-       .read_buffer            = &ixgbe_read_eerd_buffer_X540,
-       .write                  = &ixgbe_write_eewr_X540,
-       .write_buffer           = &ixgbe_write_eewr_buffer_X540,
-       .calc_checksum          = &ixgbe_calc_eeprom_checksum_X540,
-       .validate_checksum      = &ixgbe_validate_eeprom_checksum_X540,
-       .update_checksum        = &ixgbe_update_eeprom_checksum_X540,
-};
-
-static struct ixgbe_phy_operations phy_ops_X540 = {
-       .identify               = &ixgbe_identify_phy_generic,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
-       .init                   = NULL,
-       .reset                  = NULL,
-       .read_reg               = &ixgbe_read_phy_reg_generic,
-       .write_reg              = &ixgbe_write_phy_reg_generic,
-       .setup_link             = &ixgbe_setup_phy_link_generic,
-       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
-       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
-       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
-       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
-       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
-       .check_overtemp         = &ixgbe_tn_check_overtemp,
-       .get_firmware_version   = &ixgbe_get_phy_firmware_version_generic,
-};
-
-struct ixgbe_info ixgbe_X540_info = {
-       .mac                    = ixgbe_mac_X540,
-       .get_invariants         = &ixgbe_get_invariants_X540,
-       .mac_ops                = &mac_ops_X540,
-       .eeprom_ops             = &eeprom_ops_X540,
-       .phy_ops                = &phy_ops_X540,
-       .mbx_ops                = &mbx_ops_generic,
-};
+
diff --git a/drivers/net/ixgbe/ixgbe_x540.h b/drivers/net/ixgbe/ixgbe_x540.h
new file mode 100644 (file)
index 0000000..77e8952
--- /dev/null
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+                                    ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                             bool autoneg, bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+                               u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+                                u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ixgbe/kcompat.c b/drivers/net/ixgbe/kcompat.c
new file mode 100644 (file)
index 0000000..b366648
--- /dev/null
@@ -0,0 +1,1200 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+       int i=0;
+
+       while (isdigit(**s))
+               i = i*10 + *((*s)++) - '0';
+       return i;
+}
+
+#define _kc_ZEROPAD    1               /* pad with zero */
+#define _kc_SIGN       2               /* unsigned/signed long */
+#define _kc_PLUS       4               /* show plus */
+#define _kc_SPACE      8               /* space if plus */
+#define _kc_LEFT       16              /* left justified */
+#define _kc_SPECIAL    32              /* 0x */
+#define _kc_LARGE      64              /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+       char c,sign,tmp[66];
+       const char *digits;
+       const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+       const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+       int i;
+
+       digits = (type & _kc_LARGE) ? large_digits : small_digits;
+       if (type & _kc_LEFT)
+               type &= ~_kc_ZEROPAD;
+       if (base < 2 || base > 36)
+               return 0;
+       c = (type & _kc_ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & _kc_SIGN) {
+               if (num < 0) {
+                       sign = '-';
+                       num = -num;
+                       size--;
+               } else if (type & _kc_PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & _kc_SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & _kc_SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++]='0';
+       else while (num != 0)
+               tmp[i++] = digits[do_div(num,base)];
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+               while(size-->0) {
+                       if (buf <= end)
+                               *buf = ' ';
+                       ++buf;
+               }
+       }
+       if (sign) {
+               if (buf <= end)
+                       *buf = sign;
+               ++buf;
+       }
+       if (type & _kc_SPECIAL) {
+               if (base==8) {
+                       if (buf <= end)
+                               *buf = '0';
+                       ++buf;
+               } else if (base==16) {
+                       if (buf <= end)
+                               *buf = '0';
+                       ++buf;
+                       if (buf <= end)
+                               *buf = digits[33];
+                       ++buf;
+               }
+       }
+       if (!(type & _kc_LEFT)) {
+               while (size-- > 0) {
+                       if (buf <= end)
+                               *buf = c;
+                       ++buf;
+               }
+       }
+       while (i < precision--) {
+               if (buf <= end)
+                       *buf = '0';
+               ++buf;
+       }
+       while (i-- > 0) {
+               if (buf <= end)
+                       *buf = tmp[i];
+               ++buf;
+       }
+       while (size-- > 0) {
+               if (buf <= end)
+                       *buf = ' ';
+               ++buf;
+       }
+       return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+       int len;
+       unsigned long long num;
+       int i, base;
+       char *str, *end, c;
+       const char *s;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /* min. # of digits for integers; max
+                                  number of chars for from string */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+                               /* 'z' support added 23/7/1999 S.H.    */
+                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+       str = buf;
+       end = buf + size - 1;
+
+       if (end < buf - 1) {
+               end = ((void *) -1);
+               size = end - buf + 1;
+       }
+
+       for (; *fmt ; ++fmt) {
+               if (*fmt != '%') {
+                       if (str <= end)
+                               *str = *fmt;
+                       ++str;
+                       continue;
+               }
+
+               /* process flags */
+               flags = 0;
+               repeat:
+                       ++fmt;          /* this also skips first '%' */
+                       switch (*fmt) {
+                               case '-': flags |= _kc_LEFT; goto repeat;
+                               case '+': flags |= _kc_PLUS; goto repeat;
+                               case ' ': flags |= _kc_SPACE; goto repeat;
+                               case '#': flags |= _kc_SPECIAL; goto repeat;
+                               case '0': flags |= _kc_ZEROPAD; goto repeat;
+                       }
+
+               /* get field width */
+               field_width = -1;
+               if (isdigit(*fmt))
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= _kc_LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;  
+                       if (isdigit(*fmt))
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+                       case 'c':
+                               if (!(flags & _kc_LEFT)) {
+                                       while (--field_width > 0) {
+                                               if (str <= end)
+                                                       *str = ' ';
+                                               ++str;
+                                       }
+                               }
+                               c = (unsigned char) va_arg(args, int);
+                               if (str <= end)
+                                       *str = c;
+                               ++str;
+                               while (--field_width > 0) {
+                                       if (str <= end)
+                                               *str = ' ';
+                                       ++str;
+                               }
+                               continue;
+
+                       case 's':
+                               s = va_arg(args, char *);
+                               if (!s)
+                                       s = "<NULL>";
+
+                               len = strnlen(s, precision);
+
+                               if (!(flags & _kc_LEFT)) {
+                                       while (len < field_width--) {
+                                               if (str <= end)
+                                                       *str = ' ';
+                                               ++str;
+                                       }
+                               }
+                               for (i = 0; i < len; ++i) {
+                                       if (str <= end)
+                                               *str = *s;
+                                       ++str; ++s;
+                               }
+                               while (len < field_width--) {
+                                       if (str <= end)
+                                               *str = ' ';
+                                       ++str;
+                               }
+                               continue;
+
+                       case 'p':
+                               if (field_width == -1) {
+                                       field_width = 2*sizeof(void *);
+                                       flags |= _kc_ZEROPAD;
+                               }
+                               str = number(str, end,
+                                               (unsigned long) va_arg(args, void *),
+                                               16, field_width, precision, flags);
+                               continue;
+
+
+                       case 'n':
+                               /* FIXME:
+                               * What does C99 say about the overflow case here? */
+                               if (qualifier == 'l') {
+                                       long * ip = va_arg(args, long *);
+                                       *ip = (str - buf);
+                               } else if (qualifier == 'Z') {
+                                       size_t * ip = va_arg(args, size_t *);
+                                       *ip = (str - buf);
+                               } else {
+                                       int * ip = va_arg(args, int *);
+                                       *ip = (str - buf);
+                               }
+                               continue;
+
+                       case '%':
+                               if (str <= end)
+                                       *str = '%';
+                               ++str;
+                               continue;
+
+                               /* integer number formats - set up the flags and "break" */
+                       case 'o':
+                               base = 8;
+                               break;
+
+                       case 'X':
+                               flags |= _kc_LARGE;
+                       case 'x':
+                               base = 16;
+                               break;
+
+                       case 'd':
+                       case 'i':
+                               flags |= _kc_SIGN;
+                       case 'u':
+                               break;
+
+                       default:
+                               if (str <= end)
+                                       *str = '%';
+                               ++str;
+                               if (*fmt) {
+                                       if (str <= end)
+                                               *str = *fmt;
+                                       ++str;
+                               } else {
+                                       --fmt;
+                               }
+                               continue;
+               }
+               if (qualifier == 'L')
+                       num = va_arg(args, long long);
+               else if (qualifier == 'l') {
+                       num = va_arg(args, unsigned long);
+                       if (flags & _kc_SIGN)
+                               num = (signed long) num;
+               } else if (qualifier == 'Z') {
+                       num = va_arg(args, size_t);
+               } else if (qualifier == 'h') {
+                       num = (unsigned short) va_arg(args, int);
+                       if (flags & _kc_SIGN)
+                               num = (signed short) num;
+               } else {
+                       num = va_arg(args, unsigned int);
+                       if (flags & _kc_SIGN)
+                               num = (signed int) num;
+               }
+               str = number(str, end, num, base,
+                               field_width, precision, flags);
+       }
+       if (str <= end)
+               *str = '\0';
+       else if (size > 0)
+               /* don't write out a null byte if the buf size is zero */
+               *end = '\0';
+       /* the trailing null byte doesn't count towards the total
+       * ++str;
+       */
+       return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i = _kc_vsnprintf(buf,size,fmt,args);
+       va_end(args);
+       return i;
+}
+#endif /* < 2.4.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+               PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return pci_map_single(dev, (void *)page_address(page) + offset, size,
+                             direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+                   int direction)
+{
+       return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+       if (!pci_dma_supported(dev, mask))
+               return -EIO;
+       dev->dma_mask = mask;
+       return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+                       if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+                       if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+                       release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+               else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+                       release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+       }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+       struct net_device *dev;
+       int alloc_size;
+
+       alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+       dev = kzalloc(alloc_size, GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       if (sizeof_priv)
+               dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+       dev->name[0] = '\0';
+       ether_setup(dev);
+
+       return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+       const char zaddr[6] = { 0, };
+
+       return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+       return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+       return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+                            int off, int size)
+{
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+       frag->page = page;
+       frag->page_offset = off;
+       frag->size = size;
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                            unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + ffs(tmp);
+}
+
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+       size_t ret = strlen(src);
+
+       if (size) {
+               size_t len = (ret >= size) ? size - 1 : ret;
+               memcpy(dest, src, len);
+               dest[len] = '\0';
+       }
+       return ret;
+}
+
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i = vsnprintf(buf, size, fmt, args);
+       va_end(args);
+       return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+       size_t len;
+       char *buf;
+
+       if (!s)
+               return NULL;
+
+       len = strlen(s) + 1;
+       buf = kmalloc(len, gfp);
+       if (buf)
+               memcpy(buf, s, len);
+       return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* Backport of kzalloc() for kernels < 2.6.14: allocate `size` bytes
 * with kmalloc() and zero them before returning.  Returns NULL on
 * allocation failure. */
void *_kc_kzalloc(size_t size, int flags)
{
	void *mem = kmalloc(size, flags);

	if (mem != NULL)
		memset(mem, 0, size);
	return mem;
}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+       int ntail;
+        
+        /* If the skbuff is non linear tailroom is always zero.. */
+        if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+               memset(skb->data+skb->len, 0, pad);
+               return 0;
+        }
+        
+       ntail = skb->data_len + pad - (skb->end - skb->tail);
+       if (likely(skb_cloned(skb) || ntail > 0)) {
+               if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC));
+                       goto free_skb;
+       }
+
+#ifdef MAX_SKB_FRAGS
+       if (skb_is_nonlinear(skb) &&
+           !__pskb_pull_tail(skb, skb->data_len))
+               goto free_skb;
+
+#endif
+       memset(skb->data + skb->len, 0, pad);
+        return 0;
+
+free_skb:
+       kfree_skb(skb);
+       return -ENOMEM;
+} 
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
/* Backport of pci_save_state() that stores the device's config space
 * in a driver-private buffer (adapter->config_space) instead of
 * struct pci_dev, for kernels/RHEL releases lacking the generic
 * implementation.  Returns 0 on success or -ENOMEM. */
int _kc_pci_save_state(struct pci_dev *pdev)
{
	struct adapter_struct *adapter = pci_get_drvdata(pdev);
	int size = PCI_CONFIG_SPACE_LEN, i;
	u16 pcie_cap_offset, pcie_link_status;

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
	/* no ->dev for 2.4 kernels */
	WARN_ON(pdev->dev.driver_data == NULL);
#endif
	/* PCIe devices expose the extended config space; save all of it
	 * when the Express capability's link status is readable. */
	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pcie_cap_offset) {
		if (!pci_read_config_word(pdev,
					  pcie_cap_offset + PCIE_LINK_STATUS,
					  &pcie_link_status))
		size = PCIE_CONFIG_SPACE_LEN;
	}
	pci_config_space_ich8lan();
	/* NOTE(review): the unbraced if below guards only the kmalloc()
	 * line.  With HAVE_PCI_ERS an already-saved buffer is reused;
	 * without it a duplicate save is merely warned about. */
#ifdef HAVE_PCI_ERS
	if (adapter->config_space == NULL)
#else
	WARN_ON(adapter->config_space != NULL);
#endif
		adapter->config_space = kmalloc(size, GFP_KERNEL);
	if (!adapter->config_space) {
		printk(KERN_ERR "Out of memory in pci_save_state\n");
		return -ENOMEM;
	}
	/* snapshot the config space one dword at a time */
	for (i = 0; i < (size / 4); i++)
		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
	return 0;
}
+
/* Counterpart of _kc_pci_save_state(): writes the previously saved
 * config space back to the device.  Without HAVE_PCI_ERS the saved
 * buffer is single-use and is freed after the restore. */
void _kc_pci_restore_state(struct pci_dev *pdev)
{
	struct adapter_struct *adapter = pci_get_drvdata(pdev);
	int size = PCI_CONFIG_SPACE_LEN, i;
	u16 pcie_cap_offset;
	u16 pcie_link_status;

	if (adapter->config_space != NULL) {
		/* mirror the save path: restore the extended (4KB)
		 * space when the device is PCI Express */
		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (pcie_cap_offset &&
		    !pci_read_config_word(pdev,
					  pcie_cap_offset + PCIE_LINK_STATUS,
					  &pcie_link_status))
			size = PCIE_CONFIG_SPACE_LEN;

		pci_config_space_ich8lan();
		/* body of the for loop below (single statement) */
		for (i = 0; i < (size / 4); i++)
		pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
#ifndef HAVE_PCI_ERS
		kfree(adapter->config_space);
		adapter->config_space = NULL;
#endif
	}
}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+
+       if (adapter->config_space != NULL)
+               kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+       if (netdev->reg_state == NETREG_UNINITIALIZED) {
+               kfree((char *)netdev - netdev->padded);
+       } else {
+               BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+               netdev->reg_state = NETREG_RELEASED;
+               class_device_put(&netdev->class_dev);
+       }
+#else
+       kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
/* Backport of kmemdup() for kernels < 2.6.19: duplicate `len` bytes
 * from src into a freshly allocated buffer.  Returns NULL on
 * allocation failure; the caller owns (and must kfree()) the result. */
void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
{
	void *p;

	/* kmalloc, not kzalloc: the memcpy() below overwrites all
	 * `len` bytes, so pre-zeroing the buffer is wasted work. */
	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
+#endif /* <= 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
/* Backport of hex_dump_to_buffer() (taken from lib/hexdump.c) for
 * kernels < 2.6.22: format one row of at most `rowsize` bytes from
 * buf into linebuf as hex grouped by `groupsize` bytes and,
 * optionally, a trailing ASCII column.  linebuf is always
 * NUL-terminated.
 * NOTE(review): assumes groupsize is 1, 2, 4 or 8 — a groupsize of 0
 * would divide by zero below; confirm all callers comply. */
static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
			int groupsize, unsigned char *linebuf,
			size_t linebuflen, bool ascii)
{
	const u8 *ptr = buf;
	u8 ch;
	int j, lx = 0;
	int ascii_column;

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;

	if (!len)
		goto nil;
	if (len > rowsize)		/* limit to one line at a time */
		len = rowsize;
	if ((len % groupsize) != 0)	/* no mixed size output */
		groupsize = 1;

	/* each case prints len/groupsize fixed-width hex groups and
	 * computes where the ASCII column would start */
	switch (groupsize) {
	case 8: {
		const u64 *ptr8 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
				"%s%16.16llx", j ? " " : "",
				(unsigned long long)*(ptr8 + j));
		ascii_column = 17 * ngroups + 2;
		break;
	}

	case 4: {
		const u32 *ptr4 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
				"%s%8.8x", j ? " " : "", *(ptr4 + j));
		ascii_column = 9 * ngroups + 2;
		break;
	}

	case 2: {
		const u16 *ptr2 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
				"%s%4.4x", j ? " " : "", *(ptr2 + j));
		ascii_column = 5 * ngroups + 2;
		break;
	}

	default:
		/* byte-at-a-time: "xx xx xx ..." */
		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
			ch = ptr[j];
			linebuf[lx++] = hex_asc(ch >> 4);
			linebuf[lx++] = hex_asc(ch & 0x0f);
			linebuf[lx++] = ' ';
		}
		if (j)
			lx--;	/* drop the trailing space */

		ascii_column = 3 * rowsize + 2;
		break;
	}
	if (!ascii)
		goto nil;

	/* pad with spaces up to the ASCII column, then render printable
	 * bytes literally and everything else as '.' */
	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
		linebuf[lx++] = ' ';
	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
				: '.';
nil:
	linebuf[lx++] = '\0';
}
+
/* Backport of print_hex_dump() for kernels < 2.6.22: printk()s the
 * contents of buf one formatted row at a time, prefixing each line
 * with prefix_str plus an address, an offset, or nothing, depending
 * on prefix_type. */
void _kc_print_hex_dump(const char *level,
			const char *prefix_str, int prefix_type,
			int rowsize, int groupsize,
			const void *buf, size_t len, bool ascii)
{
	const u8 *ptr = buf;
	int i, linelen, remaining = len;
	unsigned char linebuf[200];

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;

	for (i = 0; i < len; i += rowsize) {
		/* last row may be shorter than rowsize */
		linelen = min(remaining, rowsize);
		remaining -= rowsize;
		_kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
				linebuf, sizeof(linebuf), ascii);

		switch (prefix_type) {
		case DUMP_PREFIX_ADDRESS:
			printk("%s%s%*p: %s\n", level, prefix_str,
				(int)(2 * sizeof(void *)), ptr + i, linebuf);
			break;
		case DUMP_PREFIX_OFFSET:
			printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
			break;
		default:
			printk("%s%s%s\n", level, prefix_str, linebuf);
			break;
		}
	}
}
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
/* DCB netlink backports for kernels < 2.6.23: the dcbnl interface
 * does not exist there, so these are no-op stubs that report
 * success. */
int ixgbe_dcb_netlink_register(void)
{
	return 0;
}

int ixgbe_dcb_netlink_unregister(void)
{
	return 0;
}

/* no DCB configuration to copy on these kernels */
int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
{
	return 0;
}
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
/* Map a napi_struct back to the dummy net_device embedded in its
 * q_vector; used to emulate the 2.6.24+ NAPI API on older kernels. */
struct net_device *napi_to_poll_dev(struct napi_struct *napi)
{
	struct adapter_q_vector *q_vector = container_of(napi,
							struct adapter_q_vector,
							napi);
	return &q_vector->poll_dev;
}

/* Old-style (pre-2.6.24) ->poll() adapter: translate the legacy
 * (netdev, budget) polling contract into a call to the new-style
 * napi->poll(), then account the work done against both budgets.
 * Returns 1 when more work remains, 0 when polling is complete. */
int __kc_adapter_clean(struct net_device *netdev, int *budget)
{
	int work_done;
	int work_to_do = min(*budget, netdev->quota);
	/* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
	struct napi_struct *napi = netdev->priv;
	work_done = napi->poll(napi, work_to_do);
	*budget -= work_done;
	netdev->quota -= work_done;
	return (work_done >= work_to_do) ? 1 : 0;
}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
/* Backport of pci_disable_link_state() for kernels < 2.6.26: clears
 * the requested ASPM link-state bits (L0s/L1) in the PCIe Link
 * Control register of the device's upstream bridge. */
void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
{
	struct pci_dev *parent = pdev->bus->self;
	u16 link_state;
	int pos;

	/* no upstream bridge (e.g. device on the root bus): no ASPM
	 * link to configure */
	if (!parent)
		return;

	pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
	if (pos) {
		pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
		link_state &= ~state;
		pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
	}
}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
/* Backports of netif_tx_{stop,wake,start}_all_queues() for kernels
 * < 2.6.27: act on the default queue first, then on every subqueue
 * when the device is multiqueue. */
void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_stop_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
}
/* wake (restart) transmission on all queues */
void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_wake_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_wake_subqueue(netdev, i);
}
/* mark all queues as started (initial enable) */
void _kc_netif_tx_start_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_start_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
}
+#endif /* HAVE_TX_MQ */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+       va_list args;
+
+       printk(KERN_WARNING "------------[ cut here ]------------\n");
+       printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+
+       dump_stack();
+}
+#endif /* __WARN_printf */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
/* Backport of pci_prepare_to_sleep() for kernels < 2.6.28: choose
 * the target low-power state for system suspend, arm wake-up for it,
 * and enter the state.  Wake-up is disarmed again if the power-state
 * transition fails; the pci_set_power_state() result is returned. */
int
_kc_pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_choose_state(dev, PMSG_SUSPEND);

	pci_enable_wake(dev, target_state, true);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+       int err;
+
+       err = pci_enable_wake(dev, PCI_D3cold, enable);
+       if (err)
+               goto out;
+
+       err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+       return err;
+}
+
/* Backport of skb_add_rx_frag() for kernels < 2.6.28: attach a page
 * fragment as frag slot i of the skb and account its size in the
 * skb's len, data_len and truesize. */
void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			 int off, int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
+#endif /* < 2.6.28 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
/* Backport of pci_num_vf() for kernels < 2.6.34 (and RHEL < 6.0):
 * count this physical function's virtual functions by walking every
 * Ethernet-class PCI device and matching its physfn pointer.
 * Returns 0 when the kernel lacks CONFIG_PCI_IOV. */
int _kc_pci_num_vf(struct pci_dev *dev)
{
	int num_vf = 0;
#ifdef CONFIG_PCI_IOV
	struct pci_dev *vfdev;

	/* loop through all ethernet devices starting at PF dev */
	vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
	while (vfdev) {
		if (vfdev->is_virtfn && vfdev->physfn == dev)
			num_vf++;

		/* pci_get_class() drops the ref on vfdev and returns
		 * the next matching device */
		vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
	}

#endif
	return num_vf;
}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+       unsigned int real_num = dev->real_num_tx_queues;
+       struct Qdisc *qdisc;
+       int i;
+
+       if (unlikely(txq > dev->num_tx_queues))
+               ;
+       else if (txq > real_num)
+               dev->real_num_tx_queues = txq;
+       else if ( txq < real_num) {
+               dev->real_num_tx_queues = txq;
+               for (i = txq; i < dev->num_tx_queues; i++) {
+                       qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+                       if (qdisc) {
+                               spin_lock_bh(qdisc_lock(qdisc));        
+                               qdisc_reset(qdisc);
+                               spin_unlock_bh(qdisc_lock(qdisc));
+                       }
+               }
+       }
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* Feature bits that the generic ethtool flag ops are allowed to
 * mirror into netdev->features on kernels < 2.6.36. */
static const u32 _kc_flags_dup_features =
	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);

/* Report the supported subset of netdev->features as ethtool flags. */
u32 _kc_ethtool_op_get_flags(struct net_device *dev)
{
	return dev->features & _kc_flags_dup_features;
}

/* Replace the duplicated feature flags with `data`; bits outside the
 * caller's `supported` mask are rejected with -EINVAL. */
int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
{
	if (data & ~supported)
		return -EINVAL;

	dev->features = ((dev->features & ~_kc_flags_dup_features) |
			 (data & _kc_flags_dup_features));
	return 0;
}
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#include <net/ip.h>
/* random seed for the TX hash, initialized lazily on first use */
static u32 _kc_simple_tx_hashrnd;
static u32 _kc_simple_tx_hashrnd_initialized;

/* Backport of skb_tx_hash() for kernels < 2.6.38: pick a TX queue
 * index in [0, num_tx_queues) for skb.  A recorded RX queue is
 * reused (folded into range); otherwise a Jenkins hash of the socket
 * hash (or protocol/rxhash) is scaled into the queue range.
 * NOTE(review): the lazy hashrnd initialization below is not
 * synchronized — concurrent first callers could race; confirm this
 * is only reached from serialized TX paths. */
u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
		      u16 num_tx_queues)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		/* fold an out-of-range RX queue into the TX range */
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
		get_random_bytes(&_kc_simple_tx_hashrnd, 4);
		_kc_simple_tx_hashrnd_initialized = 1;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
#ifdef NETIF_F_RXHASH
		hash = (__force u16) skb->protocol ^ skb->rxhash;
#else
		hash = skb->protocol;
#endif

	hash = jhash_1word(hash, _kc_simple_tx_hashrnd);

	/* scale the 32-bit hash into [0, num_tx_queues) without a
	 * modulo */
	return (u16) (((u64) hash * num_tx_queues) >> 32);
}
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* < 2.6.38 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
/* Backports of the netdev traffic-class accessors for kernels
 * < 2.6.39 (and RHEL <= 6.0): the TC count lives in the adapter
 * instead of struct net_device. */
u8 _kc_netdev_get_num_tc(struct net_device *dev)
{
	struct adapter_struct *kc_adapter = netdev_priv(dev);
	/* traffic classes are only meaningful when DCB is enabled */
	if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return kc_adapter->tc;
	else
		return 0;
}

/* Record the number of traffic classes; rejects counts beyond the
 * hardware maximum with -EINVAL. */
int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	struct adapter_struct *kc_adapter = netdev_priv(dev);

	if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return -EINVAL;

	kc_adapter->tc = num_tc;

	return 0;
}

/* Map an 802.1p user priority to its traffic class via the adapter's
 * DCB configuration. */
u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
{
	struct adapter_struct *kc_adapter = netdev_priv(dev);

	return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up);
}
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
diff --git a/drivers/net/ixgbe/kcompat.h b/drivers/net/ixgbe/kcompat.h
new file mode 100644 (file)
index 0000000..8e54a2d
--- /dev/null
@@ -0,0 +1,3155 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct ixgbe_adapter
+#define adapter_q_vector ixgbe_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#else
+#endif /* NAPI */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+       u16 vector; /* kernel uses to write allocated vector */
+       u16 entry;  /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+               vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO (1 << 15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE (1 << 27)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1              0x17            /* Reserved...          */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+       .vendor = (vend), .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
+#define dca_get_tag(b) 0
+#define dca_add_requester(a) -1
+#define dca_remove_requester(b) do { } while(0)
+#define DCA_PROVIDER_ADD     0x0001
+#define DCA_PROVIDER_REMOVE  0x0002
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a)         (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifndef NETIF_F_HW_VLAN_TX
+struct _kc_vlan_ethhdr {
+       unsigned char   h_dest[ETH_ALEN];
+       unsigned char   h_source[ETH_ALEN];
+       __be16          h_vlan_proto;
+       __be16          h_vlan_TCI;
+       __be16          h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+       __be16          h_vlan_TCI;
+       __be16          h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+       u32 cmd;
+       char driver[32];
+       char version[32];
+       char fw_version[32];
+       char bus_info[32];
+       char reserved1[32];
+       char reserved2[16];
+       u32 n_stats;
+       u32 testinfo_len;
+       u32 eedump_len;
+       u32 regdump_len;
+};
+
+struct ethtool_stats {
+       u32 cmd;
+       u32 n_stats;
+       u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
/* string-set identifiers understood by ETHTOOL_GSTRINGS */
enum ethtool_stringset {
	ETH_SS_TEST             = 0,
	ETH_SS_STATS,
};
struct ethtool_gstrings {
	u32 cmd;            /* ETHTOOL_GSTRINGS */
	u32 string_set;     /* string set id e.g. ETH_SS_TEST, etc. */
	u32 len;            /* number of strings in the string set */
	u8 data[0];
};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+       ETH_TEST_FL_OFFLINE     = (1 << 0),
+       ETH_TEST_FL_FAILED      = (1 << 1),
+};
+struct ethtool_test {
+       u32 cmd;
+       u32 flags;
+       u32 reserved;
+       u32 len;
+       u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+       u32 cmd;
+       u32 magic;
+       u32 offset;
+       u32 len;
+       u8 data[0];
+};
+
+struct ethtool_value {
+       u32 cmd;
+       u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX      6
+struct ethtool_wolinfo {
+       u32 cmd;
+       u32 supported;
+       u32 wolopts;
+       u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS          0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+       u32 cmd;
+       u32 version; /* driver-specific, indicates different chips/revs */
+       u32 len; /* bytes */
+       u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL                0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL                0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST       0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK          0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM                0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM                0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE      0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+       u32     cmd;    /* ETHTOOL_{G,S}COALESCE */
+
+       /* How many usecs to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_max_coalesced_frames
+        * is used.
+        */
+       u32     rx_coalesce_usecs;
+
+       /* How many packets to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause RX interrupts to never be
+        * generated.
+        */
+       u32     rx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     rx_coalesce_usecs_irq;
+       u32     rx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_max_coalesced_frames
+        * is used.
+        */
+       u32     tx_coalesce_usecs;
+
+       /* How many packets to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause TX interrupts to never be
+        * generated.
+        */
+       u32     tx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     tx_coalesce_usecs_irq;
+       u32     tx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay in-memory statistics
+        * block updates.  Some drivers do not have an in-memory
+        * statistic block, and in such cases this value is ignored.
+        * This value must not be zero.
+        */
+       u32     stats_block_coalesce_usecs;
+
+       /* Adaptive RX/TX coalescing is an algorithm implemented by
+        * some drivers to improve latency under low packet rates and
+        * improve throughput under high packet rates.  Some drivers
+        * only implement one of RX or TX adaptive coalescing.  Anything
+        * not implemented by the driver causes these values to be
+        * silently ignored.
+        */
+       u32     use_adaptive_rx_coalesce;
+       u32     use_adaptive_tx_coalesce;
+
+       /* When the packet rate (measured in packets per second)
+        * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+        * used.
+        */
+       u32     pkt_rate_low;
+       u32     rx_coalesce_usecs_low;
+       u32     rx_max_coalesced_frames_low;
+       u32     tx_coalesce_usecs_low;
+       u32     tx_max_coalesced_frames_low;
+
+       /* When the packet rate is below pkt_rate_high but above
+        * pkt_rate_low (both measured in packets per second) the
+        * normal {rx,tx}_* coalescing parameters are used.
+        */
+
+       /* When the packet rate is (measured in packets per second)
+        * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+        * used.
+        */
+       u32     pkt_rate_high;
+       u32     rx_coalesce_usecs_high;
+       u32     rx_max_coalesced_frames_high;
+       u32     tx_coalesce_usecs_high;
+       u32     tx_max_coalesced_frames_high;
+
+       /* How often to do adaptive coalescing packet rate sampling,
+        * measured in seconds.  Must not be zero.
+        */
+       u32     rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE      0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM     0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+/* Compat copy of struct ethtool_ringparam for kernels lacking
+ * ETHTOOL_GRINGPARAM; describes RX/TX descriptor ring sizing.
+ */
+struct _kc_ethtool_ringparam {
+       u32     cmd;    /* ETHTOOL_{G,S}RINGPARAM */
+
+       /* Read only attributes.  These indicate the maximum number
+        * of pending RX/TX ring entries the driver will allow the
+        * user to set.
+        */
+       u32     rx_max_pending;
+       u32     rx_mini_max_pending;
+       u32     rx_jumbo_max_pending;
+       u32     tx_max_pending;
+
+       /* Values changeable by the user.  The valid values are
+        * in the range 1 to the "*_max_pending" counterpart above.
+        * mini/jumbo rings are 0 on hardware without them.
+        */
+       u32     rx_pending;
+       u32     rx_mini_pending;
+       u32     rx_jumbo_pending;
+       u32     tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM     0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM    0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+/* Compat copy of struct ethtool_pauseparam for kernels lacking
+ * ETHTOOL_GPAUSEPARAM; configures link flow control.
+ */
+struct _kc_ethtool_pauseparam {
+       u32     cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
+
+       /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+        * being true) the user may set 'autoneg' here non-zero to have the
+        * pause parameters be auto-negotiated too.  In such a case, the
+        * {rx,tx}_pause values below determine what capabilities are
+        * advertised.
+        *
+        * If 'autoneg' is zero or the link is not being auto-negotiated,
+        * then {rx,tx}_pause force the driver to use/not-use pause
+        * flow control.
+        */
+       u32     autoneg;        /* boolean: negotiate pause params too */
+       u32     rx_pause;       /* boolean: accept pause frames */
+       u32     tx_pause;       /* boolean: send pause frames */
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM    0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM                0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM                0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM                0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM                0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG            0x00000018 /* Get scatter-gather enable
+                                           * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG            0x00000019 /* Set scatter-gather enable
+                                           * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST           0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS       0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID                0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS         0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO           0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO           0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN    32
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#ifndef SLE_VERSION_CODE
+#ifdef CONFIG_SUSE_KERNEL
+/* SLES11 GA is 2.6.27 based */
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#else
+#define SLE_VERSION_CODE 0
+#endif
+#else /* CONFIG_SUSE_KERNEL */
+#define SLE_VERSION_CODE 0
+#endif /* CONFIG_SUSE_KERNEL */
+#endif /* SLE_VERSION_CODE */
+
+#ifdef __KLOCWORK__
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+#endif /* __KLOCWORK__ */
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)              \
+       do {                                            \
+               INIT_LIST_HEAD(&(_tq)->list);           \
+               (_tq)->sync = 0;                        \
+               (_tq)->routine = _routine;              \
+               (_tq)->data = _data;                    \
+       } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR            0x00        /* Basic mode control register */
+#define MII_BMSR            0x01        /* Basic mode status register  */
+#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
+#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
+#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
+#define MII_LPA             0x05        /* Link partner ability reg    */
+#define MII_EXPANSION       0x06        /* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
+#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+                              PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+                              ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+       #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+       type _x = (x); \
+       type _y = (y); \
+       _x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+       type _x = (x); \
+       type _y = (y); \
+       _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+       for (pos = (head)->next, n = pos->next; pos != (head); \
+               pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+extern int snprintf(char * buf, size_t size, const char *fmt, ...);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 -> 2.4.6 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+       NETIF_MSG_DRV           = 0x0001,
+       NETIF_MSG_PROBE         = 0x0002,
+       NETIF_MSG_LINK          = 0x0004,
+       NETIF_MSG_TIMER         = 0x0008,
+       NETIF_MSG_IFDOWN        = 0x0010,
+       NETIF_MSG_IFUP          = 0x0020,
+       NETIF_MSG_RX_ERR        = 0x0040,
+       NETIF_MSG_TX_ERR        = 0x0080,
+       NETIF_MSG_TX_QUEUED     = 0x0100,
+       NETIF_MSG_INTR          = 0x0200,
+       NETIF_MSG_TX_DONE       = 0x0400,
+       NETIF_MSG_RX_STATUS     = 0x0800,
+       NETIF_MSG_PKTDATA       = 0x1000,
+       NETIF_MSG_HW            = 0x2000,
+       NETIF_MSG_WOL           = 0x4000,
+};
+
+#define netif_msg_drv(p)       ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p)     ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p)      ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p)     ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p)    ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p)      ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p)    ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p)    ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p)      ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p)   ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p)   ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+       #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax()    rep_nop()
+#endif
+
+/* 802.1Q-tagged Ethernet header layout, for pre-2.4.13 kernels
+ * that do not ship it in <linux/if_vlan.h>.
+ */
+struct vlan_ethhdr {
+       unsigned char h_dest[ETH_ALEN];         /* destination MAC */
+       unsigned char h_source[ETH_ALEN];       /* source MAC */
+       unsigned short h_vlan_proto;            /* ethertype, ETH_P_8021Q */
+       unsigned short h_vlan_TCI;              /* priority + CFI + VLAN id */
+       unsigned short h_vlan_encapsulated_proto; /* inner packet ethertype */
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+       #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW   0x2000
+#define NETIF_MSG_WOL  0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p)                ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p)       ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x)    ((x)->slot_name)
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IXGBE_NO_LRO
+/* Don't enable LRO for these legacy kernels */
+#define IXGBE_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+/* Claim the RX_SCHED bit so the NAPI poll routine cannot be scheduled;
+ * sleep one tick per attempt until any in-flight poll releases it.
+ * NOTE(review): assigns current->state directly -- the pre-
+ * set_current_state() idiom used by these old kernels.
+ */
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+               /* No hurry -- sleep a jiffy and retry */
+               current->state = TASK_INTERRUPTIBLE;
+               schedule_timeout(1);
+       }
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+/* Release the RX_SCHED bit taken by netif_poll_disable(), allowing
+ * the poll routine to be scheduled again.
+ */
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+       clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+/* Stop the TX queue while holding xmit_lock, so the stop is
+ * serialized against a concurrently running hard_start_xmit.
+ */
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+       spin_lock_bh(&dev->xmit_lock);
+       netif_stop_queue(dev);
+       spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.70 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+/* Translate a module-parameter debug level into a msg_enable bitmap:
+ * negative or too-large values select the driver default, 0 disables
+ * all output, and 1..31 enable the low N message-class bits.
+ */
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+       /* use default when out of range for a u32 bitmap */
+       if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+               return default_msg_enable_bits;
+       if (debug_value == 0) /* no output */
+               return 0;
+       /* set low N bits; 1U avoids signed-overflow UB at debug_value == 31 */
+       return (1U << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modifed from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+       DMA_BIDIRECTIONAL = 0,
+       DMA_TO_DEVICE = 1,
+       DMA_FROM_DEVICE = 2,
+       DMA_NONE = 3,
+};
+
+/* Fake struct device: just wraps a pci_dev so the generic-device DMA
+ * API below can be expressed in terms of the PCI API.
+ */
+struct device {
+       struct pci_dev pdev;
+};
+
+/* The casts below are valid only because struct device's sole member
+ * is the embedded pci_dev, so both pointers share an address.
+ */
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+       return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+       return (struct device *) pdev;
+}
+
+#define pdev_printk(lvl, pdev, fmt, args...)   \
+       printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...)            \
+       pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...)            \
+       pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...)            \
+       pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+       pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+       pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+       pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+       pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+       pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+       pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+/* scatter-gather map/unmap in terms of the PCI DMA API */
+#define dma_map_sg(dev, sg, nents, dir) \
+       pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+       pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+       pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+       pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+       pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+       struct hlist_node *first;
+};
+
+struct hlist_node {
+       struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+       struct hlist_node *next = n->next;
+       struct hlist_node **pprev = n->pprev;
+       *pprev = next;
+       if (next)
+       next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+       __hlist_del(n);
+       n->next = NULL;
+       n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+       struct hlist_node *first = h->first;
+       n->next = first;
+       if (first)
+               first->pprev = &n->next;
+       h->first = n;
+       n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+       return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+       h->next = NULL;
+       h->pprev = NULL;
+}
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each_entry(tpos, pos, head, member)                    \
+       for (pos = (head)->first;                                        \
+            pos && ({ prefetch(pos->next); 1;}) &&                      \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
+       for (pos = (head)->first;                                        \
+            pos && ({ n = pos->next; 1; }) &&                           \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = n)
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+       return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+/* Pre-2.5.28 synchronize_irq() took no argument; wrap it so callers
+ * may pass an IRQ number (which is simply ignored here).
+ */
+static inline void _kc_synchronize_irq(void)
+{
+       synchronize_irq();
+}
+#undef synchronize_irq
+#define synchronize_irq(X) _kc_synchronize_irq()
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.17 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+                                       unsigned long size,
+                                       unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+       /* a '%' still in the name means register_netdevice() has not yet
+        * expanded the format template, i.e. the device is unregistered
+        */
+       return strchr(dev->name, '%') ? "(unregistered net_device)"
+                                     : dev->name;
+}
+#define netdev_name(netdev)    _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#endif /* 2.6.0 => 2.5.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+       pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu                dma_sync_single
+#define dma_sync_single_for_device     dma_sync_single
+#define dma_sync_single_range_for_cpu          dma_sync_single_range
+#define dma_sync_single_range_for_device       dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+       /* on these old kernels a zero bus address signals a failed mapping */
+       return !dma_addr;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+/* Clear all @nbits bits of bitmap @dst (taken from 2.6 bitmap.h).
+ * Single-word bitmaps are cleared with a store, larger ones via memset.
+ */
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+        if (nbits <= BITS_PER_LONG)
+                *dst = 0UL;
+        else {
+                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+                memset(dst, 0, len);
+        }
+}
+#define random_ether_addr _kc_random_ether_addr
+static inline void _kc_random_ether_addr(u8 *addr)
+{
+        get_random_bytes(addr, ETH_ALEN);
+        addr[0] &= 0xfe; /* clear multicast */
+        addr[0] |= 0x02; /* set local assignment */
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+/* Return the MII ioctl payload embedded in an ifreq; the data shares
+ * storage with the ifr_ifru union, hence the cast.
+ */
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+       return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define msleep(x)      do { set_current_state(TASK_UNINTERRUPTIBLE); \
+                               schedule_timeout((x * HZ)/1000 + 2); \
+                       } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC    1000L
+/* Convert jiffies to milliseconds, picking at compile time the exact
+ * integer form for the configured HZ.
+ */
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       /* HZ divides 1000 evenly: multiply by the whole ms-per-tick */
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       /* 1000 divides HZ evenly: divide, rounding up */
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+       /* general case; may overflow for very large j */
+       return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+/* Convert milliseconds to jiffies, rounding up, and clamping to
+ * MAX_JIFFY_OFFSET (infinite timeout) when @m would overflow.
+ */
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       /* HZ divides 1000 evenly: divide, rounding up */
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       /* 1000 divides HZ evenly: multiply by ticks-per-ms */
+       return m * (HZ / MSEC_PER_SEC);
+#else
+       /* general case, rounding up */
+       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+       unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+       while (timeout && !signal_pending(current)) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               timeout = schedule_timeout(timeout);
+       }
+       return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY               (1 << 0)
+#define WAKE_UCAST             (1 << 1)
+#define WAKE_MCAST             (1 << 2)
+#define WAKE_BCAST             (1 << 3)
+#define WAKE_ARP               (1 << 4)
+#define WAKE_MAGIC             (1 << 5)
+#define WAKE_MAGICSECURE       (1 << 6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+                                           int offset, int len, void *buffer)
+{
+       int hlen = skb_headlen(skb);
+
+       if (hlen - offset >= len)
+               return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
+               return NULL;
+
+       return buffer;
+#else
+       return NULL;
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+}
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.10: module array parameters and NUMA fallbacks. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)          \
+       static struct kparam_array __param_arr_##name                    \
+       = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+           sizeof(array[0]), array };                                   \
+       module_param_call(name, param_array_set, param_array_get,        \
+                         &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online is broken for all < 2.6.10 kernels.  This is needed to support
+ * Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.11: PCI power states, prefetch/NET_IP_ALIGN
+ * fallbacks, and usecs<->jiffies conversion helpers.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0      0
+#define PCI_D1      1
+#define PCI_D2      2
+#define PCI_D3hot   3
+#define PCI_D3cold  4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC        1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+/* Convert jiffies to microseconds; branch selected at compile time on HZ. */
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+       return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+/* Convert microseconds to jiffies, clamped at MAX_JIFFY_OFFSET. */
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return m * (HZ / KC_USEC_PER_SEC);
+#else
+       return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.12: MII register constants and ethernet-address
+ * predicate helpers.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
+#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+/* True when all six bytes of the MAC address are zero. */
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+       return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+/* True when the group (multicast) bit of the first address byte is set. */
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+       return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.13: kstrdup backport (defined in kcompat.c). */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+extern char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.14: kzalloc, extended MII status registers,
+ * gfp_t fallback, and dma_sync range shims on x86_64.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS        0x0f        /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN           0x0100  /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
+
+#define ADVERTISED_Pause       (1 << 13)
+#define ADVERTISED_Asym_Pause  (1 << 14)
+
+/* gfp_t only exists upstream from 2.6.14 (and in RHEL 4.4..4.x). */
+#if (!(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+/* NOTE(review): these shims drop the offset argument; the sync covers the
+ * buffer from the handle rather than the requested sub-range.
+ */
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)       \
+       dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)    \
+       dma_sync_single_for_device(dev, dma_handle, size, dir)
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.15: setup_timer, device wakeup stubs, and a
+ * compare_ether_addr backport.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+       (_timer)->function = _function; \
+       (_timer)->data = _data; \
+       init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)     do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+/* Compare two 6-byte ethernet addresses as three 16-bit words.
+ * Returns non-zero when they differ (same contract as compare_ether_addr).
+ * NOTE(review): assumes both addresses are 16-bit aligned — TODO confirm
+ * for all call sites on alignment-strict architectures.
+ */
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+       const u16 *a = (const u16 *) addr1;
+       const u16 *b = (const u16 *) addr2;
+
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.16: map the mutex API onto semaphores. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x)        DECLARE_MUTEX(x)
+/* NOTE(review): down_interruptible() can return -EINTR without taking the
+ * semaphore; this mapping discards that return value, so a signal-interrupted
+ * "mutex_lock" may proceed without the lock held.
+ */
+#define mutex_lock(x)  down_interruptible(x)
+#define mutex_unlock(x)        up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.17: NUMA and skb padding constants. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.18: IRQ flag names, skb_is_gso/skb_pad(to),
+ * and the PCI unmap bookkeeping macros.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef FIELD_SIZEOF
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+/* Non-zero when the skb carries GSO segmentation state. */
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+/* Pad the skb with zeroes up to len bytes; no-op if already long enough. */
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+       if(likely(size >= len))
+               return 0;
+       return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+       dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+       u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+       ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+       (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+       ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+       (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.19: two-argument irq handlers, PCI config-space
+ * save/restore, AER stubs, kmemdup, and a bool fallback.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+        ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+          RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+         (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \
+       (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+/* Register a modern two-argument handler with the old three-argument
+ * request_irq.  NOTE(review): the cast relies on the handler ignoring the
+ * extra pt_regs argument at call time — a long-standing kcompat idiom,
+ * not portable C; confirm it holds on any newly supported architecture.
+ */
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+       irq_handler_t new_handler = (irq_handler_t) handler;
+       return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+/* AER did not exist yet; report success and stub the teardown calls. */
+static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+       return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/string.h>
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+       INIT_LIST_HEAD(&(_work)->entry); \
+       (_work)->pending = 0; \
+       (_work)->func = (void (*)(void *))_func; \
+       (_work)->data = _work; \
+       init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev)        \
+       PCI_VENDOR_ID_##ven, (dev),  \
+       PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+       return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.21: vlan group accessors and selected-region
+ * PCI requests mapped onto whole-device requests.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev)             \
+       do {                                            \
+               if (vg) vg->vlan_devices[id] = dev;     \
+       } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+       pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+        pci_request_regions(pdev, name)
+/* NOTE(review): trailing semicolon in the expansion below — safe only when
+ * the macro is used as a bare statement, not in an expression context.
+ */
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.22: skb header accessors for the old union-based
+ * sk_buff layout, plus print_hex_dump backport.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+       do { \
+               skb->tail = skb->data; \
+       } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+                               memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+                               memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+/* NOTE(review): maps a per-work cancel onto a global flush — broader in
+ * scope than the real cancel_work_sync(x).
+ */
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+/* UDP header lives at the transport header offset. */
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+       return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+       DUMP_PREFIX_NONE,
+       DUMP_PREFIX_ADDRESS,
+       DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x)     "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
+                              int prefix_type, int rowsize, int groupsize,
+                              const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+               _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+/* Placeholder section for > 2.6.22 (intentionally empty). */
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.23: subqueue stubs and PTR_ALIGN. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a)         ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP        CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.24: emulate the NAPI API on top of per-netdev
+ * polling.  The macro bodies are order-sensitive; kept byte-identical.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+       /* used to look up the real NAPI polling routine */
+       int (*poll)(struct napi_struct *, int);
+       struct net_device *dev;
+       int weight;
+};
+#endif
+
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
+/* Wire the napi struct to a backing poll net_device (allocated elsewhere
+ * via napi_to_poll_dev) and route its ->poll through __kc_adapter_clean.
+ */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+       do { \
+               struct napi_struct *__napi = (_napi); \
+               struct net_device *poll_dev = napi_to_poll_dev(__napi); \
+               poll_dev->poll = &(__kc_adapter_clean); \
+               poll_dev->priv = (_napi); \
+               poll_dev->weight = (_weight); \
+               set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
+               set_bit(__LINK_STATE_START, &poll_dev->state);\
+               dev_hold(poll_dev); \
+               __napi->poll = &(_poll); \
+               __napi->weight = (_weight); \
+               __napi->dev = (_netdev); \
+       } while (0)
+#define netif_napi_del(_napi) \
+       do { \
+               struct net_device *poll_dev = napi_to_poll_dev(_napi); \
+               WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
+               dev_put(poll_dev); \
+               memset(poll_dev, 0, sizeof(struct net_device));\
+       } while (0)
+#define napi_schedule_prep(_napi) \
+       (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
+#define napi_schedule(_napi) \
+       do { \
+               if (napi_schedule_prep(_napi)) \
+                       __netif_rx_schedule(napi_to_poll_dev(_napi)); \
+       } while (0)
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+#ifndef NETIF_F_GRO
+#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
+#else
+/* With GRO present, flush held GRO packets before completing. */
+#define napi_complete(_napi) \
+       do { \
+               napi_gro_flush(_napi); \
+               netif_rx_complete(napi_to_poll_dev(_napi)); \
+       } while (0)
+#endif /* NETIF_F_GRO */
+#else /* NAPI */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+       do { \
+               struct napi_struct *__napi = _napi; \
+               _netdev->poll = &(_poll); \
+               _netdev->weight = (_weight); \
+               __napi->poll = &(_poll); \
+               __napi->weight = (_weight); \
+               __napi->dev = (_netdev); \
+       } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n)        (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+/* Non-zero when the skb's GSO type includes TCPv6 segmentation. */
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT      ""
+#endif
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+/* Pick the pm_qos header: renamed pm_qos_params.h -> pm_qos.h in 3.2. */
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.25: PM QoS mapped onto the old latency API (or
+ * stubbed), plus procfs/sysfs feature selection for ixgbe statistics.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE   INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+               set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+               remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+               modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE   -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+/* NOTE(review): this expansion references a local variable named "adapter"
+ * at the call site — it only compiles where such a variable is in scope.
+ */
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+       if (value != PM_QOS_DEFAULT_VALUE) { \
+               printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+                       pci_name(adapter->pdev)); \
+       } \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IXGBE_PROCFS
+#define IXGBE_PROCFS
+#endif /* IXGBE_PROCFS */
+#endif /* >= 2.6.0 */
+
+
+#else /* < 2.6.25 */
+
+#ifndef IXGBE_SYSFS
+#define IXGBE_SYSFS
+#endif /* IXGBE_SYSFS */
+
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.26: clamp_t, a DCB-aware netif_set_gso_max_size
+ * substitute, and an ASPM link-state stub.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({                \
+       type __val = (val);                     \
+       type __min = (min);                     \
+       type __max = (max);                     \
+       __val = __val < __min ? __min : __val;  \
+       __val > __max ? __max : __val; })
+#endif /* clamp_t */
+#ifdef NETIF_F_TSO
+#ifdef NETIF_F_TSO6
+/* NOTE(review): instead of limiting GSO size, this toggles TSO/TSO6 off when
+ * DCB is enabled.  It references a local variable named "adapter" at the
+ * call site and ignores the size argument.
+ */
+#define netif_set_gso_max_size(_netdev, size) \
+       do { \
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
+                       _netdev->features &= ~NETIF_F_TSO; \
+                       _netdev->features &= ~NETIF_F_TSO6; \
+               } else { \
+                       _netdev->features |= NETIF_F_TSO; \
+                       _netdev->features |= NETIF_F_TSO6; \
+               } \
+       } while (0)
+#else /* NETIF_F_TSO6 */
+#define netif_set_gso_max_size(_netdev, size) \
+       do { \
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                       _netdev->features &= ~NETIF_F_TSO; \
+               else \
+                       _netdev->features |= NETIF_F_TSO; \
+       } while (0)
+#endif /* NETIF_F_TSO6 */
+#else
+#define netif_set_gso_max_size(_netdev, size) do {} while (0)
+#endif /* NETIF_F_TSO */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+                                            __u32 speed)
+{
+       ep->speed = (__u16)speed;
+       /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+       /* no speed_hi before 2.6.27, and probably no need for it yet */
+       return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+       do { \
+               u16 pmc = 0; \
+               int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+               if (pm) { \
+                       pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+                               &pmc); \
+               } \
+               (dev)->power.can_wakeup = !!(pmc >> 11); \
+               (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+       } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_stop_subqueue((_ndev), (_qi)); \
+       else \
+               netif_stop_queue((_ndev)); \
+       } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_start_subqueue((_ndev), (_qi)); \
+       else \
+               netif_start_queue((_ndev)); \
+       } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+extern void __kc_warn_slowpath(const char *file, const int line,
+               const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({                                          \
+       int __ret_warn_on = !!(condition);                              \
+       if (unlikely(__ret_warn_on))                                    \
+               __WARN_printf(format);                                  \
+       unlikely(__ret_warn_on);                                        \
+})
+#endif /* WARN */
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+/* Compat for kernels < 2.6.28: pci_ioremap_bar, D3 wake helpers, and skb
+ * queue/frag backports.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar)     ioremap(pci_resource_start(pdev, bar), \
+                                               pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+/* Initialize an sk_buff_head to an empty circular list. */
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+       list->prev = list->next = (struct sk_buff *)list;
+       list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
+#endif
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.29. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+       do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+/* The _exclusive variant did not exist; fall back to the shared request. */
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+               pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+/* Assume ASPM is enabled when the kernel cannot tell us otherwise. */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled()   (1)
+#endif /* pcie_aspm_enabled */
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.30: no rx-queue recording, no FCoE,
+ * and SR-IOV only if CONFIG_PCI_IOV exists. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+       printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+#else
+#define HAVE_ASPM_QUIRKS
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.31: ethertype constants and the
+ * old-style unicast address list accessors. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP  0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+       for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#else
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.32. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU       (1 << 26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+/* Runtime PM does not exist before 2.6.32: stub every call to a no-op. */
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev)       do {} while (0)
+#endif
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev)            do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev)       do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev)         do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t)    do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev)  do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev)                do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev)     do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev)     do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev)   do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target work arounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.33. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+/* The per-cpu annotation is purely a sparse marker; safe to define empty. */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.34 (skipped where RHEL6 already
+ * carries the feature). */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+/* Emulate the 2.6.34 mc/uc address-list accessors on top of the old
+ * linked-list fields. */
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+       for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+       list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+       pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+/* Return the interface name, or a placeholder until register_netdev()
+ * has completed. */
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+       if (dev->reg_state != NETREG_REGISTERED)
+               return "(unregistered net_device)";
+       return dev->name;
+}
+#define netdev_name(netdev)    _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+/* netdev_printk() and the per-level netdev_*() wrappers appeared in 2.6.34;
+ * emulate them here.  The implementation is chosen by kernel age: raw
+ * printk() for pre-2.6.0 (no struct device), dev_printk() on the pci_dev's
+ * device for pre-2.6.21, and dev_printk() on netdev->dev.parent otherwise. */
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...)          \
+do {                                                           \
+       struct adapter_struct *kc_adapter = netdev_priv(netdev);\
+       struct pci_dev *pdev = kc_adapter->pdev;                \
+       printk("%s %s: " format, level, pci_name(pdev),         \
+              ##args);                                         \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...)          \
+do {                                                           \
+       struct adapter_struct *kc_adapter = netdev_priv(netdev);\
+       struct pci_dev *pdev = kc_adapter->pdev;                \
+       struct device *dev = pci_dev_to_dev(pdev);              \
+       dev_printk(level, dev, "%s: " format,                   \
+                  netdev_name(netdev), ##args);                \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...)          \
+       dev_printk(level, (netdev)->dev.parent,                 \
+                  "%s: " format,                               \
+                  netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...)                     \
+       netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...)                     \
+       netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...)                      \
+       netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...)                       \
+       netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...)                      \
+       netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...)                    \
+       netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...)                      \
+       netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...)                     \
+       netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...)                     \
+do {                                                           \
+       dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,     \
+                       netdev_name(__dev), ##args);            \
+} while (0)
+#else /* DEBUG */
+/* No debug configured: keep the arguments type-checked but emit nothing. */
+#define netdev_dbg(__dev, format, args...)                     \
+({                                                             \
+       if (0)                                                  \
+               netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+       0;                                                      \
+})
+#endif /* DEBUG */
+
+/* msg-level-gated logging: print only when the driver's msg_enable mask
+ * has the given message type set.  The per-level netif_*() wrappers go
+ * through netif_level(), which is (re)defined later in this header. */
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...)     \
+do {                                                           \
+       if (netif_msg_##type(priv))                             \
+               netdev_printk(level, (dev), fmt, ##args);       \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...)             \
+       netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...)             \
+       netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...)              \
+       netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...)               \
+       netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...)              \
+       netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...)            \
+       netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...)              \
+       netif_level(info, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+       for ((bit) = find_first_bit((addr), (size)); \
+               (bit) < (size); \
+               (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+/* dma_unmap_* bookkeeping was renamed from the pci_unmap_* family. */
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+#else /* < 2.6.34 */
+/* 2.6.34+ kernels provide these natively. */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.35. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#define netif_set_real_num_tx_queues(_netdev, _count) \
+       do { \
+               (_netdev)->egress_subqueue_count = _count; \
+       } while (0)
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+/* Single-queue kernels: there is nothing to resize. */
+#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
+#endif /* HAVE_TX_MQ */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#else /* < 2.6.35 */
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.36. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+extern u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+/* On arches with efficient unaligned access there is no reason to offset
+ * the IP header. */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+/* Allocate an skb with headroom reserved for NET_SKB_PAD + NET_IP_ALIGN
+ * and the owning device recorded, mirroring netdev_alloc_skb_ip_align(). */
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+                                                           unsigned int length)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+       if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+               skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+               skb->dev = dev;
+       }
+       return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+/* Dispatch a netif_*() call to the matching netdev_<level>() macro,
+ * gated on the driver's msg_enable mask. */
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...)      \
+do {                                                           \
+       if (netif_msg_##type(priv))                             \
+               netdev_##level(dev, fmt, ##args);               \
+} while (0)
+
+/* usleep_range() approximation: sleep for the minimum, rounded up to ms. */
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#else /* < 2.6.36 */
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.37. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+/* No rx-queue bookkeeping to update on old kernels; always succeed. */
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+                                                   unsigned int rxq)
+{
+       return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+       __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID     VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN (1 << 7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN (1 << 8)
+#endif /* ETH_FLAG_RXVLAN */
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+       WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+/* vzalloc()/vzalloc_node() backports: vmalloc then zero the buffer. */
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+       void *addr = vmalloc_node(size, node);
+       if (addr)
+               memset(addr, 0, size);
+       return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+       void *addr = vmalloc(size);
+       if (addr)
+               memset(addr, 0, size);
+       return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#ifndef vlan_get_protocol
+/* Return the L3 protocol: skb->protocol unless this is an in-band 802.1Q
+ * frame, in which case read the encapsulated ethertype from the VLAN
+ * header (0 when the header is not in the linear data). */
+static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
+{
+       if (vlan_tx_tag_present(skb) ||
+           skb->protocol != cpu_to_be16(ETH_P_8021Q))
+               return skb->protocol;
+
+       if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+               return 0;
+
+       return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
+#endif
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.38. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+/* csum_start is stored relative to skb->head; convert to a data offset. */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+        return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#ifdef CONFIG_DCB
+/* IEEE 802.1Qaz DCBX constants that predate dcbnl support. */
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST              0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED       0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE           0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE          0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC            0x10
+#endif
+#endif /* CONFIG_DCB */
+extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
+#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
+#else /* < 2.6.38 */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 2.6.39 (RHEL 6.1+ already carries the
+ * traffic-class API). */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
+               for (skb = (queue)->prev, tmp = skb->prev;                      \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+/* netdev traffic-class accessors implemented in kcompat.c; the setters that
+ * have no old-kernel state to update are no-ops. */
+extern u8 _kc_netdev_get_num_tc(struct net_device *dev);
+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
+extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc);
+#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc))
+#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0)
+#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0)
+extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
+#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
+#else /* RHEL6.1 or greater */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif /* HAVE_MQPRIO */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#ifndef IEEE_8021QAZ_TSA_STRICT
+#define IEEE_8021QAZ_TSA_STRICT                0
+#endif
+#ifndef IEEE_8021QAZ_TSA_ETS
+#define IEEE_8021QAZ_TSA_ETS           2
+#endif
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#endif /* CONFIG_DCB */
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because of a Fedora 15 kernel update where they
+ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
+ * like set_phys_id for ethtool.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+/* Local copy of the extended ethtool_rx_flow_spec layout (flow_type |
+ * FLOW_EXT selects the h_ext/m_ext fields) for kernels whose UAPI
+ * headers predate it. */
+#define        FLOW_EXT        0x80000000
+union _kc_ethtool_flow_union {
+       struct ethtool_tcpip4_spec              tcp_ip4_spec;
+       struct ethtool_usrip4_spec              usr_ip4_spec;
+       __u8                                    hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+       __be16  vlan_etype;
+       __be16  vlan_tci;
+       __be32  data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+       __u32           flow_type;
+       union _kc_ethtool_flow_union h_u;
+       struct _kc_ethtool_flow_ext h_ext;
+       union _kc_ethtool_flow_union m_u;
+       struct _kc_ethtool_flow_ext m_ext;
+       __u64           ring_cookie;
+       __u32           location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+/* PCIe Latency Tolerance Reporting register fields. */
+#ifndef PCI_LTR_VALUE_MASK
+#define  PCI_LTR_VALUE_MASK    0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define  PCI_LTR_SCALE_MASK    0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define  PCI_LTR_SCALE_SHIFT   10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+/* PTP hardware clock support: enabled only when explicitly requested via
+ * IXGBE_PTP, and then only on 3.0+ kernels with CONFIG_PTP_1588_CLOCK;
+ * a request without kernel support is a hard build error. */
+#undef CONFIG_IXGBE_PTP
+#ifdef IXGBE_PTP
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && (defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
+#define CONFIG_IXGBE_PTP
+#else
+#error Cannot enable PTP Hardware Clock due to insufficient kernel support
+#endif
+#endif /* IXGBE_PTP */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* Backports for kernels older than 3.1. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+/* IEEE dcbnl app API: fall back to CEE dcb_setapp(); delapp is a no-op. */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 3.2: the skb_frag_t accessor API. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag)    _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+       return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+       frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag)    _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+       return frag->page;
+}
+#endif /* skb_frag_page */
+
+/* Kernel-virtual address of the fragment's data. */
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+       return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+/* DMA-map a fragment; offset is relative to the fragment's own data. */
+#ifndef skb_frag_dma_map
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+               _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+                                             const skb_frag_t *frag,
+                                             size_t offset, size_t size,
+                                             enum dma_data_direction dir)
+{
+       return dma_map_page(dev, skb_frag_page(frag),
+                           frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+       put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN  -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#endif /* < 3.2.0 */
+
+/* RHEL 6.2 only: tc_to_txq lives in the netdev_extended() area, so override
+ * the driver's accessor to read it from there. */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+
+/*****************************************************************************/
+/* Backports for kernels older than 3.3. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+//typedef u32 netdev_features_t;
+#else /* ! < 3.3.0 */
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+/* 3.3 removed the ntuple ethtool ioctl; make sure it stays undefined. */
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 3.4: missing feature flags compile to 0
+ * so feature-mask arithmetic still works. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS  0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL  0
+#endif /* NETIF_F_RXALL */
+
+#define NUMTCS_RETURNS_U8
+
+
+#endif /* < 3.4.0 */
+
+/*****************************************************************************/
+/* Backports for kernels older than 3.5: no tx timestamping hook. */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
+#else
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ixgbe/kcompat_ethtool.c b/drivers/net/ixgbe/kcompat_ethtool.c
new file mode 100644 (file)
index 0000000..f32d0dc
--- /dev/null
@@ -0,0 +1,1172 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs.  We fall back to calling do_ioctl()
+ * for drivers which haven't been converted to ethtool_ops yet.
+ *
+ * It's GPL, stupid.
+ *
+ * Modification by sfeldma@pobox.com to work as backward compat
+ * solution for pre-ethtool_ops kernels.
+ *     - copied struct ethtool_ops from ethtool.h
+ *     - defined SET_ETHTOOL_OPS
+ *     - put in some #ifndef NETIF_F_xxx wrappers
+ *     - changes refs to dev->ethtool_ops to ethtool_ops
+ *     - changed dev_ethtool to ethtool_ioctl
+ *      - remove EXPORT_SYMBOL()s
+ *      - added _kc_ prefix in built-in ethtool_op_xxx ops.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+#include "kcompat.h"
+
+#undef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full      (1 << 12)
+#undef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full     (1 << 12)
+#undef SPEED_10000
+#define SPEED_10000            10000
+
+#undef ethtool_ops
+#define ethtool_ops _kc_ethtool_ops
+
+/*
+ * kcompat copy of struct ethtool_ops for pre-ethtool_ops kernels (see the
+ * header note above).  A single file-global ops pointer stands in for
+ * dev->ethtool_ops; it is installed by the SET_ETHTOOL_OPS() replacement
+ * below, so only one driver instance per module is supported.
+ */
+struct _kc_ethtool_ops {
+       int  (*get_settings)(struct net_device *, struct ethtool_cmd *);
+       int  (*set_settings)(struct net_device *, struct ethtool_cmd *);
+       void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+       int  (*get_regs_len)(struct net_device *);
+       void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+       void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+       int  (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+       u32  (*get_msglevel)(struct net_device *);
+       void (*set_msglevel)(struct net_device *, u32);
+       int  (*nway_reset)(struct net_device *);
+       u32  (*get_link)(struct net_device *);
+       int  (*get_eeprom_len)(struct net_device *);
+       int  (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+       int  (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+       int  (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+       int  (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+       void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+       int  (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+       void (*get_pauseparam)(struct net_device *,
+                              struct ethtool_pauseparam*);
+       int  (*set_pauseparam)(struct net_device *,
+                              struct ethtool_pauseparam*);
+       u32  (*get_rx_csum)(struct net_device *);
+       int  (*set_rx_csum)(struct net_device *, u32);
+       u32  (*get_tx_csum)(struct net_device *);
+       int  (*set_tx_csum)(struct net_device *, u32);
+       u32  (*get_sg)(struct net_device *);
+       int  (*set_sg)(struct net_device *, u32);
+       u32  (*get_tso)(struct net_device *);
+       int  (*set_tso)(struct net_device *, u32);
+       int  (*self_test_count)(struct net_device *);
+       void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+       void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+       int  (*phys_id)(struct net_device *, u32);
+       int  (*get_stats_count)(struct net_device *);
+       void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
+                                 u64 *);
+} *ethtool_ops = NULL;
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
+
+/*
+ * Some useful ethtool_ops methods that are device independent. If we find that
+ * all drivers want to do the same thing here, we can turn these into dev_()
+ * function calls.
+ */
+
+#undef ethtool_op_get_link
+#define ethtool_op_get_link _kc_ethtool_op_get_link
+/* Device-independent link getter: 1 if the carrier is up, else 0. */
+u32 _kc_ethtool_op_get_link(struct net_device *dev)
+{
+       return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+#undef ethtool_op_get_tx_csum
+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
+/* Report TX checksum offload from the NETIF_F_IP_CSUM feature bit
+ * (always 0 on kernels without that flag). */
+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
+{
+#ifdef NETIF_F_IP_CSUM
+       return (dev->features & NETIF_F_IP_CSUM) != 0;
+#else
+       return 0;
+#endif
+}
+
+#undef ethtool_op_set_tx_csum
+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
+/* Toggle TX checksum offload feature bits.  Note the if/else spans the
+ * #ifdef NETIF_F_IPV6_CSUM boundary: with IPv6 csum support both bits are
+ * set/cleared together, otherwise only NETIF_F_IP_CSUM is touched. */
+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_IP_CSUM
+       if (data)
+#ifdef NETIF_F_IPV6_CSUM
+               dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+       else
+               dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+               dev->features |= NETIF_F_IP_CSUM;
+       else
+               dev->features &= ~NETIF_F_IP_CSUM;
+#endif
+#endif
+
+       return 0;
+}
+
+#undef ethtool_op_get_sg
+#define ethtool_op_get_sg _kc_ethtool_op_get_sg
+/* Report scatter-gather capability from the NETIF_F_SG feature bit. */
+u32 _kc_ethtool_op_get_sg(struct net_device *dev)
+{
+#ifdef NETIF_F_SG
+       return (dev->features & NETIF_F_SG) != 0;
+#else
+       return 0;
+#endif
+}
+
+#undef ethtool_op_set_sg
+#define ethtool_op_set_sg _kc_ethtool_op_set_sg
+/* Toggle the NETIF_F_SG feature bit; silently a no-op if the kernel has
+ * no scatter-gather support. */
+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_SG
+       if (data)
+               dev->features |= NETIF_F_SG;
+       else
+               dev->features &= ~NETIF_F_SG;
+#endif
+
+       return 0;
+}
+
+#undef ethtool_op_get_tso
+#define ethtool_op_get_tso _kc_ethtool_op_get_tso
+/* Report TCP segmentation offload from the NETIF_F_TSO feature bit. */
+u32 _kc_ethtool_op_get_tso(struct net_device *dev)
+{
+#ifdef NETIF_F_TSO
+       return (dev->features & NETIF_F_TSO) != 0;
+#else
+       return 0;
+#endif
+}
+
+#undef ethtool_op_set_tso
+#define ethtool_op_set_tso _kc_ethtool_op_set_tso
+/* Toggle the NETIF_F_TSO feature bit; no-op without TSO support. */
+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_TSO
+       if (data)
+               dev->features |= NETIF_F_TSO;
+       else
+               dev->features &= ~NETIF_F_TSO;
+#endif
+
+       return 0;
+}
+
+/* Handlers for each ethtool command */
+
+/* ETHTOOL_GSET: fill struct ethtool_cmd from the driver and copy it out. */
+static int ethtool_get_settings(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_cmd cmd = { ETHTOOL_GSET };
+       int err;
+
+       if (!ethtool_ops->get_settings)
+               return -EOPNOTSUPP;
+
+       err = ethtool_ops->get_settings(dev, &cmd);
+       if (err < 0)
+               return err;
+
+       if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SSET: copy struct ethtool_cmd in and hand it to the driver. */
+static int ethtool_set_settings(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_cmd cmd;
+
+       if (!ethtool_ops->set_settings)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+               return -EFAULT;
+
+       return ethtool_ops->set_settings(dev, &cmd);
+}
+
+/* ETHTOOL_GDRVINFO: driver strings plus derived lengths (test count,
+ * stats count, regdump and EEPROM sizes) from the optional ops. */
+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_drvinfo info;
+       struct ethtool_ops *ops = ethtool_ops;
+
+       if (!ops->get_drvinfo)
+               return -EOPNOTSUPP;
+
+       memset(&info, 0, sizeof(info));
+       info.cmd = ETHTOOL_GDRVINFO;
+       ops->get_drvinfo(dev, &info);
+
+       if (ops->self_test_count)
+               info.testinfo_len = ops->self_test_count(dev);
+       if (ops->get_stats_count)
+               info.n_stats = ops->get_stats_count(dev);
+       if (ops->get_regs_len)
+               info.regdump_len = ops->get_regs_len(dev);
+       if (ops->get_eeprom_len)
+               info.eedump_len = ops->get_eeprom_len(dev);
+
+       if (copy_to_user(useraddr, &info, sizeof(info)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_GREGS: dump device registers into a kernel buffer and copy the
+ * header plus register data to user space.  The user-supplied length is
+ * clamped to the driver-reported regdump size. */
+static int ethtool_get_regs(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_regs regs;
+       struct ethtool_ops *ops = ethtool_ops;
+       void *regbuf;
+       int reglen, ret;
+
+       if (!ops->get_regs || !ops->get_regs_len)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&regs, useraddr, sizeof(regs)))
+               return -EFAULT;
+
+       reglen = ops->get_regs_len(dev);
+       if (regs.len > reglen)
+               regs.len = reglen;
+
+       regbuf = kmalloc(reglen, GFP_USER);
+       if (!regbuf)
+               return -ENOMEM;
+
+       ops->get_regs(dev, &regs, regbuf);
+
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &regs, sizeof(regs)))
+               goto out;
+       useraddr += offsetof(struct ethtool_regs, data);
+       if (copy_to_user(useraddr, regbuf, reglen))
+               goto out;
+       ret = 0;
+
+out:
+       kfree(regbuf);
+       return ret;
+}
+
+/* ETHTOOL_GWOL: query Wake-on-LAN settings and copy them out. */
+static int ethtool_get_wol(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+       if (!ethtool_ops->get_wol)
+               return -EOPNOTSUPP;
+
+       ethtool_ops->get_wol(dev, &wol);
+
+       if (copy_to_user(useraddr, &wol, sizeof(wol)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SWOL: copy Wake-on-LAN settings in and apply via the driver. */
+static int ethtool_set_wol(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_wolinfo wol;
+
+       if (!ethtool_ops->set_wol)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&wol, useraddr, sizeof(wol)))
+               return -EFAULT;
+
+       return ethtool_ops->set_wol(dev, &wol);
+}
+
+/* ETHTOOL_GMSGLVL: read the driver message level and copy it out. */
+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+
+       if (!ethtool_ops->get_msglevel)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_msglevel(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SMSGLVL: copy the new message level in and set it (void op,
+ * so this always reports success once the copy succeeds). */
+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata;
+
+       if (!ethtool_ops->set_msglevel)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       ethtool_ops->set_msglevel(dev, edata.data);
+       return 0;
+}
+
+/* ETHTOOL_NWAY_RST: restart autonegotiation via the driver. */
+static int ethtool_nway_reset(struct net_device *dev)
+{
+       if (!ethtool_ops->nway_reset)
+               return -EOPNOTSUPP;
+
+       return ethtool_ops->nway_reset(dev);
+}
+
+/* ETHTOOL_GLINK: report link state and copy it out. */
+static int ethtool_get_link(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GLINK };
+
+       if (!ethtool_ops->get_link)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_link(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_GEEPROM: read a bounded [offset, offset+len) window of the
+ * device EEPROM.  The offset+len checks reject zero-length and wrapping
+ * requests and anything beyond the driver-reported EEPROM size. */
+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_eeprom eeprom;
+       struct ethtool_ops *ops = ethtool_ops;
+       u8 *data;
+       int ret;
+
+       if (!ops->get_eeprom || !ops->get_eeprom_len)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+               return -EFAULT;
+
+       /* Check for wrap and zero */
+       if (eeprom.offset + eeprom.len <= eeprom.offset)
+               return -EINVAL;
+
+       /* Check for exceeding total eeprom len */
+       if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+               return -EINVAL;
+
+       data = kmalloc(eeprom.len, GFP_USER);
+       if (!data)
+               return -ENOMEM;
+
+       ret = -EFAULT;
+       /* NOTE(review): the user buffer is copied in even for a read;
+        * presumably just to fault-check the buffer — confirm upstream. */
+       if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+               goto out;
+
+       ret = ops->get_eeprom(dev, &eeprom, data);
+       if (ret)
+               goto out;
+
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+               goto out;
+       if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+               goto out;
+       ret = 0;
+
+out:
+       kfree(data);
+       return ret;
+}
+
+/* ETHTOOL_SEEPROM: write a bounded window of the device EEPROM from a
+ * user buffer, with the same wrap/zero/size validation as the read path;
+ * on success the (possibly driver-modified) data is copied back. */
+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_eeprom eeprom;
+       struct ethtool_ops *ops = ethtool_ops;
+       u8 *data;
+       int ret;
+
+       if (!ops->set_eeprom || !ops->get_eeprom_len)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+               return -EFAULT;
+
+       /* Check for wrap and zero */
+       if (eeprom.offset + eeprom.len <= eeprom.offset)
+               return -EINVAL;
+
+       /* Check for exceeding total eeprom len */
+       if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+               return -EINVAL;
+
+       data = kmalloc(eeprom.len, GFP_USER);
+       if (!data)
+               return -ENOMEM;
+
+       ret = -EFAULT;
+       if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+               goto out;
+
+       ret = ops->set_eeprom(dev, &eeprom, data);
+       if (ret)
+               goto out;
+
+       if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+               ret = -EFAULT;
+
+out:
+       kfree(data);
+       return ret;
+}
+
+/* ETHTOOL_GCOALESCE: query interrupt-coalescing parameters and copy out. */
+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+       if (!ethtool_ops->get_coalesce)
+               return -EOPNOTSUPP;
+
+       ethtool_ops->get_coalesce(dev, &coalesce);
+
+       if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SCOALESCE: copy coalescing parameters in and apply them via the
+ * driver.  The capability check must test set_coalesce — the op actually
+ * invoked below — not get_coalesce; checking the getter would let a driver
+ * that implements only get_coalesce be called through a NULL set pointer. */
+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_coalesce coalesce;
+
+       if (!ethtool_ops->set_coalesce)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+               return -EFAULT;
+
+       return ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+/* ETHTOOL_GRINGPARAM: query RX/TX ring sizes and copy them out. */
+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+       if (!ethtool_ops->get_ringparam)
+               return -EOPNOTSUPP;
+
+       ethtool_ops->get_ringparam(dev, &ringparam);
+
+       if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SRINGPARAM: copy ring sizes in and apply them via the driver.
+ * Check set_ringparam — the op actually invoked — not get_ringparam, to
+ * avoid a NULL call for drivers that implement only the getter. */
+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_ringparam ringparam;
+
+       if (!ethtool_ops->set_ringparam)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+               return -EFAULT;
+
+       return ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+/* ETHTOOL_GPAUSEPARAM: query flow-control settings and copy them out. */
+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+       if (!ethtool_ops->get_pauseparam)
+               return -EOPNOTSUPP;
+
+       ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+       if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SPAUSEPARAM: copy flow-control settings in and apply them.
+ * Check set_pauseparam — the op actually invoked — not get_pauseparam,
+ * to avoid a NULL call for drivers that implement only the getter. */
+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_pauseparam pauseparam;
+
+       if (!ethtool_ops->set_pauseparam)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+               return -EFAULT;
+
+       return ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+/* ETHTOOL_GRXCSUM: query RX checksum offload state and copy it out. */
+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+       if (!ethtool_ops->get_rx_csum)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_rx_csum(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SRXCSUM: copy the RX checksum flag in and set it.  The op's
+ * return value is discarded here (unlike the TX path below). */
+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata;
+
+       if (!ethtool_ops->set_rx_csum)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       ethtool_ops->set_rx_csum(dev, edata.data);
+       return 0;
+}
+
+/* ETHTOOL_GTXCSUM: query TX checksum offload state and copy it out. */
+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+       if (!ethtool_ops->get_tx_csum)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_tx_csum(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_STXCSUM: copy the TX checksum flag in and apply it. */
+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata;
+
+       if (!ethtool_ops->set_tx_csum)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       return ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+/* ETHTOOL_GSG: query scatter-gather state and copy it out. */
+static int ethtool_get_sg(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GSG };
+
+       if (!ethtool_ops->get_sg)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_sg(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_SSG: copy the scatter-gather flag in and apply it. */
+static int ethtool_set_sg(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata;
+
+       if (!ethtool_ops->set_sg)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       return ethtool_ops->set_sg(dev, edata.data);
+}
+
+/* ETHTOOL_GTSO: query TCP segmentation offload state and copy it out. */
+static int ethtool_get_tso(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata = { ETHTOOL_GTSO };
+
+       if (!ethtool_ops->get_tso)
+               return -EOPNOTSUPP;
+
+       edata.data = ethtool_ops->get_tso(dev);
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+/* ETHTOOL_STSO: copy the TSO flag in and apply it. */
+static int ethtool_set_tso(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_value edata;
+
+       if (!ethtool_ops->set_tso)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       return ethtool_ops->set_tso(dev, edata.data);
+}
+
+/* ETHTOOL_TEST: run the driver self-test and copy the header plus one u64
+ * result per test back to user space.  test.len comes from the driver's
+ * self_test_count(), overriding whatever the user passed in. */
+static int ethtool_self_test(struct net_device *dev, char *useraddr)
+{
+       struct ethtool_test test;
+       struct ethtool_ops *ops = ethtool_ops;
+       u64 *data;
+       int ret;
+
+       if (!ops->self_test || !ops->self_test_count)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&test, useraddr, sizeof(test)))
+               return -EFAULT;
+
+       test.len = ops->self_test_count(dev);
+       data = kmalloc(test.len * sizeof(u64), GFP_USER);
+       if (!data)
+               return -ENOMEM;
+
+       ops->self_test(dev, &test, data);
+
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &test, sizeof(test)))
+               goto out;
+       useraddr += sizeof(test);
+       if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+               goto out;
+       ret = 0;
+
+out:
+       kfree(data);
+       return ret;
+}
+
+/* ETHTOOL_GSTRINGS: return the test or statistics name table.  The length
+ * is taken from the matching count op for the requested string set; each
+ * name occupies a fixed ETH_GSTRING_LEN slot. */
+static int ethtool_get_strings(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_gstrings gstrings;
+       struct ethtool_ops *ops = ethtool_ops;
+       u8 *data;
+       int ret;
+
+       if (!ops->get_strings)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+               return -EFAULT;
+
+       switch (gstrings.string_set) {
+       case ETH_SS_TEST:
+               if (!ops->self_test_count)
+                       return -EOPNOTSUPP;
+               gstrings.len = ops->self_test_count(dev);
+               break;
+       case ETH_SS_STATS:
+               if (!ops->get_stats_count)
+                       return -EOPNOTSUPP;
+               gstrings.len = ops->get_stats_count(dev);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       if (!data)
+               return -ENOMEM;
+
+       ops->get_strings(dev, gstrings.string_set, data);
+
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+               goto out;
+       useraddr += sizeof(gstrings);
+       if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+               goto out;
+       ret = 0;
+
+out:
+       kfree(data);
+       return ret;
+}
+
+/* ETHTOOL_PHYS_ID: blink the port LED for id.data seconds via the driver. */
+static int ethtool_phys_id(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_value id;
+
+       if (!ethtool_ops->phys_id)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&id, useraddr, sizeof(id)))
+               return -EFAULT;
+
+       return ethtool_ops->phys_id(dev, id.data);
+}
+
+/* ETHTOOL_GSTATS: fetch the driver statistics array (one u64 per counter,
+ * count from get_stats_count()) and copy header plus data out. */
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+       struct ethtool_stats stats;
+       struct ethtool_ops *ops = ethtool_ops;
+       u64 *data;
+       int ret;
+
+       if (!ops->get_ethtool_stats || !ops->get_stats_count)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&stats, useraddr, sizeof(stats)))
+               return -EFAULT;
+
+       stats.n_stats = ops->get_stats_count(dev);
+       data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+       if (!data)
+               return -ENOMEM;
+
+       ops->get_ethtool_stats(dev, &stats, data);
+
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+               goto out;
+       useraddr += sizeof(stats);
+       if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+               goto out;
+       ret = 0;
+
+out:
+       kfree(data);
+       return ret;
+}
+
+/* The main entry point in this file.  Called from net/core/dev.c */
+
+#define ETHTOOL_OPS_COMPAT
+/* Compat SIOCETHTOOL dispatcher: validates CAP_NET_ADMIN and device
+ * presence, reads the command word from user space, then dispatches to
+ * the per-command handlers above.
+ * NOTE(review): __dev_get_by_name() is called with the old single-argument
+ * signature (no struct net) — intentional for the pre-namespace kernels
+ * this compat file targets; confirm against the minimum supported kernel. */
+int ethtool_ioctl(struct ifreq *ifr)
+{
+       struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+       void *useraddr = (void *) ifr->ifr_data;
+       u32 ethcmd;
+
+       /*
+        * XXX: This can be pushed down into the ethtool_* handlers that
+        * need it.  Keep existing behavior for the moment.
+        */
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!dev || !netif_device_present(dev))
+               return -ENODEV;
+
+       if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+               return -EFAULT;
+
+       switch (ethcmd) {
+       case ETHTOOL_GSET:
+               return ethtool_get_settings(dev, useraddr);
+       case ETHTOOL_SSET:
+               return ethtool_set_settings(dev, useraddr);
+       case ETHTOOL_GDRVINFO:
+               return ethtool_get_drvinfo(dev, useraddr);
+       case ETHTOOL_GREGS:
+               return ethtool_get_regs(dev, useraddr);
+       case ETHTOOL_GWOL:
+               return ethtool_get_wol(dev, useraddr);
+       case ETHTOOL_SWOL:
+               return ethtool_set_wol(dev, useraddr);
+       case ETHTOOL_GMSGLVL:
+               return ethtool_get_msglevel(dev, useraddr);
+       case ETHTOOL_SMSGLVL:
+               return ethtool_set_msglevel(dev, useraddr);
+       case ETHTOOL_NWAY_RST:
+               return ethtool_nway_reset(dev);
+       case ETHTOOL_GLINK:
+               return ethtool_get_link(dev, useraddr);
+       case ETHTOOL_GEEPROM:
+               return ethtool_get_eeprom(dev, useraddr);
+       case ETHTOOL_SEEPROM:
+               return ethtool_set_eeprom(dev, useraddr);
+       case ETHTOOL_GCOALESCE:
+               return ethtool_get_coalesce(dev, useraddr);
+       case ETHTOOL_SCOALESCE:
+               return ethtool_set_coalesce(dev, useraddr);
+       case ETHTOOL_GRINGPARAM:
+               return ethtool_get_ringparam(dev, useraddr);
+       case ETHTOOL_SRINGPARAM:
+               return ethtool_set_ringparam(dev, useraddr);
+       case ETHTOOL_GPAUSEPARAM:
+               return ethtool_get_pauseparam(dev, useraddr);
+       case ETHTOOL_SPAUSEPARAM:
+               return ethtool_set_pauseparam(dev, useraddr);
+       case ETHTOOL_GRXCSUM:
+               return ethtool_get_rx_csum(dev, useraddr);
+       case ETHTOOL_SRXCSUM:
+               return ethtool_set_rx_csum(dev, useraddr);
+       case ETHTOOL_GTXCSUM:
+               return ethtool_get_tx_csum(dev, useraddr);
+       case ETHTOOL_STXCSUM:
+               return ethtool_set_tx_csum(dev, useraddr);
+       case ETHTOOL_GSG:
+               return ethtool_get_sg(dev, useraddr);
+       case ETHTOOL_SSG:
+               return ethtool_set_sg(dev, useraddr);
+       case ETHTOOL_GTSO:
+               return ethtool_get_tso(dev, useraddr);
+       case ETHTOOL_STSO:
+               return ethtool_set_tso(dev, useraddr);
+       case ETHTOOL_TEST:
+               return ethtool_self_test(dev, useraddr);
+       case ETHTOOL_GSTRINGS:
+               return ethtool_get_strings(dev, useraddr);
+       case ETHTOOL_PHYS_ID:
+               return ethtool_phys_id(dev, useraddr);
+       case ETHTOOL_GSTATS:
+               return ethtool_get_stats(dev, useraddr);
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* not reached: every case above (including default) returns */
+       return -EOPNOTSUPP;
+}
+
+#define mii_if_info _kc_mii_if_info
+/* kcompat copy of struct mii_if_info: PHY addressing, cached link state
+ * flags, and the driver-supplied MDIO register accessors used by the
+ * _kc_mii_* helpers below. */
+struct _kc_mii_if_info {
+       int phy_id;
+       int advertising;
+       int phy_id_mask;
+       int reg_num_mask;
+
+       unsigned int full_duplex : 1;   /* is full duplex? */
+       unsigned int force_media : 1;   /* is autoneg. disabled? */
+
+       struct net_device *dev;
+       int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+       void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+/* Redirect the generic MII helpers to the _kc_ implementations defined in
+ * this file, so drivers built against this compat layer use them instead
+ * of the (possibly absent) kernel versions. */
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+                                struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+                                struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+                                 struct mii_ioctl_data *mii_data, int cmd,
+                                 unsigned int *duplex_changed);
+#endif /* > 2.4.6 */
+
+
+/* Extension records pairing a pci_dev / net_device with compat-layer
+ * private state (driver data, cached carrier). */
+struct _kc_pci_dev_ext {
+       struct pci_dev *dev;
+       void *pci_drvdata;
+       struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+       struct net_device *dev;
+       unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
+/* Fill struct ethtool_cmd from the PHY's MII registers: advertised modes
+ * from MII_ADVERTISE, then speed/duplex either from the autoneg result
+ * (ADVERTISE & LPA) or directly from BMCR when autoneg is off.
+ * Always reports success; 10/100 twisted-pair MII only. */
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+       struct net_device *dev = mii->dev;
+       u32 advert, bmcr, lpa, nego;
+
+       ecmd->supported =
+           (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+            SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+            SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+       /* only supports twisted-pair */
+       ecmd->port = PORT_MII;
+
+       /* only supports internal transceiver */
+       ecmd->transceiver = XCVR_INTERNAL;
+
+       /* this isn't fully supported at higher layers */
+       ecmd->phy_address = mii->phy_id;
+
+       ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+       advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+       if (advert & ADVERTISE_10HALF)
+               ecmd->advertising |= ADVERTISED_10baseT_Half;
+       if (advert & ADVERTISE_10FULL)
+               ecmd->advertising |= ADVERTISED_10baseT_Full;
+       if (advert & ADVERTISE_100HALF)
+               ecmd->advertising |= ADVERTISED_100baseT_Half;
+       if (advert & ADVERTISE_100FULL)
+               ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+       bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+       lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+       if (bmcr & BMCR_ANENABLE) {
+               ecmd->advertising |= ADVERTISED_Autoneg;
+               ecmd->autoneg = AUTONEG_ENABLE;
+
+               nego = mii_nway_result(advert & lpa);
+               if (nego == LPA_100FULL || nego == LPA_100HALF)
+                       ecmd->speed = SPEED_100;
+               else
+                       ecmd->speed = SPEED_10;
+               if (nego == LPA_100FULL || nego == LPA_10FULL) {
+                       ecmd->duplex = DUPLEX_FULL;
+                       mii->full_duplex = 1;
+               } else {
+                       ecmd->duplex = DUPLEX_HALF;
+                       mii->full_duplex = 0;
+               }
+       } else {
+               ecmd->autoneg = AUTONEG_DISABLE;
+
+               ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+               ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+       }
+
+       /* ignore maxtxpkt, maxrxpkt for now */
+
+       return 0;
+}
+
+/* Apply struct ethtool_cmd to the PHY.  With autoneg enabled, rewrite
+ * MII_ADVERTISE to advertise exactly the requested modes and restart
+ * negotiation; otherwise force speed/duplex directly in MII_BMCR.
+ * Fix: the advertisement tests must mask ecmd->advertising against each
+ * ADVERTISED_* flag — bare "if (ADVERTISED_10baseT_Half)" is a nonzero
+ * constant, so every mode was unconditionally advertised regardless of
+ * what the user requested (mirrors upstream mii_ethtool_sset()). */
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+       struct net_device *dev = mii->dev;
+
+       if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+               return -EINVAL;
+       if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+               return -EINVAL;
+       if (ecmd->port != PORT_MII)
+               return -EINVAL;
+       if (ecmd->transceiver != XCVR_INTERNAL)
+               return -EINVAL;
+       if (ecmd->phy_address != mii->phy_id)
+               return -EINVAL;
+       if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+
+       /* ignore supported, maxtxpkt, maxrxpkt */
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
+               u32 bmcr, advert, tmp;
+
+               if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+                                         ADVERTISED_10baseT_Full |
+                                         ADVERTISED_100baseT_Half |
+                                         ADVERTISED_100baseT_Full)) == 0)
+                       return -EINVAL;
+
+               /* advertise only what has been requested */
+               advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+               tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+               if (ecmd->advertising & ADVERTISED_10baseT_Half)
+                       tmp |= ADVERTISE_10HALF;
+               if (ecmd->advertising & ADVERTISED_10baseT_Full)
+                       tmp |= ADVERTISE_10FULL;
+               if (ecmd->advertising & ADVERTISED_100baseT_Half)
+                       tmp |= ADVERTISE_100HALF;
+               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+                       tmp |= ADVERTISE_100FULL;
+               if (advert != tmp) {
+                       mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+                       mii->advertising = tmp;
+               }
+
+               /* turn on autonegotiation, and force a renegotiate */
+               bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+               mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+               mii->force_media = 0;
+       } else {
+               u32 bmcr, tmp;
+
+               /* turn off auto negotiation, set speed and duplexity */
+               bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+               tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+               if (ecmd->speed == SPEED_100)
+                       tmp |= BMCR_SPEED100;
+               if (ecmd->duplex == DUPLEX_FULL) {
+                       tmp |= BMCR_FULLDPLX;
+                       mii->full_duplex = 1;
+               } else
+                       mii->full_duplex = 0;
+               if (bmcr != tmp)
+                       mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+               mii->force_media = 1;
+       }
+       return 0;
+}
+
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+       /* first, a dummy read, needed to latch some MII phys */
+       mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+       if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+               return 1;
+       return 0;
+}
+
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+       int bmcr;
+       int r = -EINVAL;
+
+       /* if autoneg is off, it's an error */
+       bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+       if (bmcr & BMCR_ANENABLE) {
+               bmcr |= BMCR_ANRESTART;
+               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+               r = 0;
+       }
+
+       return r;
+}
+
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+       int cur_link = mii_link_ok(mii);
+       int prev_link = netif_carrier_ok(mii->dev);
+
+       if (cur_link && !prev_link)
+               netif_carrier_on(mii->dev);
+       else if (prev_link && !cur_link)
+               netif_carrier_off(mii->dev);
+}
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+                          struct mii_ioctl_data *mii_data, int cmd,
+                          unsigned int *duplex_chg_out)
+{
+       int rc = 0;
+       unsigned int duplex_changed = 0;
+
+       if (duplex_chg_out)
+               *duplex_chg_out = 0;
+
+       mii_data->phy_id &= mii_if->phy_id_mask;
+       mii_data->reg_num &= mii_if->reg_num_mask;
+
+       switch(cmd) {
+       case SIOCDEVPRIVATE:    /* binary compat, remove in 2.5 */
+       case SIOCGMIIPHY:
+               mii_data->phy_id = mii_if->phy_id;
+               /* fall through */
+
+       case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+       case SIOCGMIIREG:
+               mii_data->val_out =
+                       mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+                                         mii_data->reg_num);
+               break;
+
+       case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+       case SIOCSMIIREG: {
+               u16 val = mii_data->val_in;
+
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+
+               if (mii_data->phy_id == mii_if->phy_id) {
+                       switch(mii_data->reg_num) {
+                       case MII_BMCR: {
+                               unsigned int new_duplex = 0;
+                               if (val & (BMCR_RESET|BMCR_ANENABLE))
+                                       mii_if->force_media = 0;
+                               else
+                                       mii_if->force_media = 1;
+                               if (mii_if->force_media &&
+                                   (val & BMCR_FULLDPLX))
+                                       new_duplex = 1;
+                               if (mii_if->full_duplex != new_duplex) {
+                                       duplex_changed = 1;
+                                       mii_if->full_duplex = new_duplex;
+                               }
+                               break;
+                       }
+                       case MII_ADVERTISE:
+                               mii_if->advertising = val;
+                               break;
+                       default:
+                               /* do nothing */
+                               break;
+                       }
+               }
+
+               mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+                                  mii_data->reg_num, val);
+               break;
+       }
+
+       default:
+               rc = -EOPNOTSUPP;
+               break;
+       }
+
+       if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+               *duplex_chg_out = 1;
+
+       return rc;
+}
+#endif /* > 2.4.6 */
+