[net] ixgbe-3.11.33 driver update
author    Maxim Uvarov <maxim.uvarov@oracle.com>
          Tue, 16 Oct 2012 15:36:30 +0000 (08:36 -0700)
committer Maxim Uvarov <maxim.uvarov@oracle.com>
          Tue, 16 Oct 2012 16:17:25 +0000 (09:17 -0700)
Changes in this release:
- Various bug fixes
- Fix for VLAN on recent (since 3.6.38) kernels
- Performance improvements
- Limited Jumbo frames w/ SR-IOV
- PTP support
- Support for 8 TCs w/ 15 or fewer VFs
- Limit Jumbo frames to 9.5k to avoid Tx hang (see the sketch after the file list)
- Support for RHEL 6.3
- SR-IOV & DCB coexist on 82599
- Support for new adapters
Signed-off-by: Maxim Uvarov <maxim.uvarov@oracle.com>
20 files changed:
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_api.c
drivers/net/ixgbe/ixgbe_api.h
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_common.h
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_lib.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_mbx.h
drivers/net/ixgbe/ixgbe_param.c
drivers/net/ixgbe/ixgbe_procfs.c
drivers/net/ixgbe/ixgbe_ptp.c
drivers/net/ixgbe/ixgbe_sriov.c
drivers/net/ixgbe/ixgbe_sriov.h
drivers/net/ixgbe/ixgbe_sysfs.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbe/ixgbe_x540.c
drivers/net/ixgbe/kcompat.h
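
A note on the 9.5k jumbo cap before the per-file diffs: IXGBE_MAX_JUMBO_FRAME_SIZE drops from 16128 to 9728 in ixgbe.h below. A minimal sketch of how an MTU request is checked against such a cap (ixgbe_change_mtu_sketch is a hypothetical illustration, not code from this commit):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728	/* the new 9.5k cap */

/* Hypothetical sketch: reject an MTU whose on-wire frame exceeds the cap. */
static int ixgbe_change_mtu_sketch(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if (new_mtu < 68 || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}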

index d5c26019927d299374f8215330d7e3e8d644d661..da20a287d5f488aef3b46340e6ae432d92c36d2d 100644 (file)
@@ -34,9 +34,6 @@
 
 #include <linux/pci.h>
 #include <linux/netdevice.h>
-#ifdef HAVE_IRQ_AFFINITY_HINT
-#include <linux/cpumask.h>
-#endif /* HAVE_IRQ_AFFINITY_HINT */
 #include <linux/vmalloc.h>
 
 #ifdef SIOCETHTOOL
 #include <linux/sctp.h>
 #endif
 
+#ifdef HAVE_INCLUDE_LINUX_MDIO_H
+#include <linux/mdio.h>
+#endif
+
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 #define IXGBE_FCOE
 #include "ixgbe_fcoe.h"
        printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
                __func__ , ## args)))
 
+#ifdef CONFIG_IXGBE_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif
 
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD              512
 
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_256       256  /* Used for skb receive header */
-#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-#define IXGBE_RXBUFFER_1536    1536
 #define IXGBE_RXBUFFER_2K      2048
 #define IXGBE_RXBUFFER_3K      3072
 #define IXGBE_RXBUFFER_4K      4096
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#define IXGBE_RXBUFFER_1536    1536
 #define IXGBE_RXBUFFER_7K      7168
 #define IXGBE_RXBUFFER_8K      8192
 #define IXGBE_RXBUFFER_15K     15360
@@ -197,6 +203,7 @@ struct vf_data_storage {
        u16 tx_rate;
        u16 vlan_count;
        u8 spoofchk_enabled;
+       unsigned int vf_api;
 };
 
 struct vf_macvlans {
@@ -411,16 +418,29 @@ struct ixgbe_ring_feature {
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
  */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
-}
+#if MAX_SKB_FRAGS < 8
+       return ALIGN(IXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024);
 #else
-#define ixgbe_rx_pg_order(_ring) 0
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+                                           IXGBE_RXBUFFER_3K;
+#endif
+       return IXGBE_RXBUFFER_2K;
 #endif
+}
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? 1 : 0;
+#endif
+       return 0;
+}
 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
 
 #endif
 struct ixgbe_ring_container {
@@ -503,7 +523,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 #define IXGBE_TX_CTXTDESC(R, i)        \
        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
-#define IXGBE_MAX_JUMBO_FRAME_SIZE     16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE     9728
 #ifdef IXGBE_FCOE
 /* use 3K as the baby jumbo frame size for FCoE */
 #define IXGBE_FCOE_JUMBO_FRAME_SIZE    3072
@@ -532,7 +552,6 @@ struct ixgbe_therm_proc_data {
 };
 
 #endif /* IXGBE_PROCFS */
-
 /*
  * Only for array allocations in our adapter struct.  On 82598, there will be
  * unused entries in the array, but that's not a big deal.  Also, in 82599,
@@ -618,7 +637,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT       (u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 9)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 10)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED      (u32)(1 << 11)
+#define IXGBE_FLAG2_PTP_ENABLED                 (u32)(1 << 11)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 12)
 
        /* Tx fast path data */
@@ -661,7 +680,7 @@ struct ixgbe_adapter {
        u8 dcb_set_bitmap;
        u8 dcbx_cap;
 #ifndef HAVE_MQPRIO
-       u8 tc;
+       u8 dcb_tc;
 #endif
        enum ixgbe_fc_mode last_lfc_mode;
 
@@ -729,6 +748,17 @@ struct ixgbe_adapter {
        u32 led_reg;
 #endif
 
+#ifdef CONFIG_IXGBE_PTP
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_caps;
+       unsigned long last_overflow_check;
+       spinlock_t tmreg_lock;
+       struct cyclecounter hw_cc;
+       struct timecounter hw_tc;
+       int rx_hwtstamp_filter;
+       u32 base_incval;
+#endif /* CONFIG_IXGBE_PTP */
+
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
        unsigned int num_vfs;
        struct vf_data_storage *vfinfo;
@@ -751,6 +781,8 @@ struct ixgbe_adapter {
        struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
 #endif /* IXGBE_PROCFS */
 #endif /* IXGBE_SYSFS */
+
+       u8 default_up;
 };
 
 struct ixgbe_fdir_filter {
@@ -921,6 +953,21 @@ extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
 extern void ixgbe_vlan_mode(struct net_device *, u32);
 #endif
 
+#ifdef CONFIG_IXGBE_PTP
+extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                                 struct sk_buff *skb);
+extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                                 union ixgbe_adv_rx_desc *rx_desc,
+                                 struct sk_buff *skb);
+extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
+                                   struct ifreq *ifr, int cmd);
+extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+#endif /* CONFIG_IXGBE_PTP */
 
 extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif /* _IXGBE_H_ */
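
The ixgbe_ptp_* entry points declared above follow the usual PTP hardware clock (PHC) lifecycle. A hedged sketch of the expected call order; the function names come from the declarations above, but the probe/reset/service-task wiring shown here is illustrative, not lifted from ixgbe_main.c:

#ifdef CONFIG_IXGBE_PTP
/* Illustrative call order only; the real call sites live in ixgbe_main.c. */
static void ixgbe_ptp_lifecycle_sketch(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_init(adapter);		/* probe: register the PHC */
	ixgbe_ptp_start_cyclecounter(adapter);	/* reset/link change: rearm SYSTIME */
	ixgbe_ptp_overflow_check(adapter);	/* service task: catch timecounter wrap */
	ixgbe_ptp_stop(adapter);		/* remove: unregister the PHC */
}
#endif /* CONFIG_IXGBE_PTP */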
index 678aac465534b6a4b8670e13ee4663a93fd13ad5..3076c58b05a01a01c65ace5a547ef4809562b5e2 100644 (file)
@@ -124,9 +124,8 @@ init_phy_ops_out:
 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
-       u32 reg_anlp1 = 0;
-       u32 i = 0;
        u16 list_offset, data_offset, data_value;
+       bool got_lock = false;
 
        if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
                ixgbe_init_mac_link_ops_82599(hw);
@@ -158,28 +157,39 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                /* Delay obtaining semaphore again to allow FW access */
                msleep(hw->eeprom.semaphore_delay);
 
-               /* Now restart DSP by setting Restart_AN and clearing LMS */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
-                               IXGBE_AUTOC_AN_RESTART));
-
-               /* Wait for AN to leave state 0 */
-               for (i = 0; i < 10; i++) {
-                       msleep(4);
-                       reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-                       if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
-                               break;
+               /* Need SW/FW semaphore around AUTOC writes if LESM on,
+                * likewise reset_pipeline requires lock as it also writes
+                * AUTOC.
+                */
+               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != 0) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto setup_sfp_out;
+                       }
+
+                       got_lock = true;
                }
-               if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+
+               /* Restart DSP and set SFI mode */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+               ret_val = ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock) {
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+                       got_lock = false;
+               }
+
+               if (ret_val) {
                        hw_dbg(hw, "sfp module setup not complete\n");
                        ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
                        goto setup_sfp_out;
                }
 
-               /* Restart DSP by setting Restart_AN and return to SFI mode */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
-                               IXGBE_AUTOC_AN_RESTART));
        }
 
 setup_sfp_out:
@@ -443,14 +453,29 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        u32 links_reg;
        u32 i;
        s32 status = 0;
+       bool got_lock = false;
+
+       /*  reset_pipeline requires us to hold this lock as it writes to
+        *  AUTOC.
+        */
+       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                      IXGBE_GSSR_MAC_CSR_SM);
+               if (status != 0)
+                       goto out;
+
+               got_lock = true;
+       }
 
        /* Restart link */
-       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+       ixgbe_reset_pipeline_82599(hw);
+
+       if (got_lock)
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 
        /* Only poll for autoneg to complete if specified to do so */
        if (autoneg_wait_to_complete) {
+               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
                if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
                     IXGBE_AUTOC_LMS_KX4_KX_KR ||
                    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -474,6 +499,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        /* Add delay to filter out noises during initial link setup */
        msleep(50);
 
+out:
        return status;
 }
 
@@ -820,10 +846,11 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        u32 links_reg;
        u32 i;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+       bool got_lock = false;
 
        /* Check to see if speed passed in is supported. */
        status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-       if (status != 0)
+       if (status)
                goto out;
 
        speed &= link_capabilities;
@@ -844,12 +871,13 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
                /* Set KX4/KX/KR support according to speed requested */
                autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-               if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+               if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
                        if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
                                autoc |= IXGBE_AUTOC_KX4_SUPP;
                        if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
                            (hw->phy.smart_speed_active == false))
                                autoc |= IXGBE_AUTOC_KR_SUPP;
+               }
                if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                        autoc |= IXGBE_AUTOC_KX_SUPP;
        } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
@@ -875,9 +903,30 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        }
 
        if (autoc != start_autoc) {
+               /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+                * likewise reset_pipeline requires us to hold this lock as
+                * it also writes to AUTOC.
+                */
+               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (status != 0) {
+                               status = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+
+                       got_lock = true;
+               }
+
                /* Restart link */
-               autoc |= IXGBE_AUTOC_AN_RESTART;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+               ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock) {
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+                       got_lock = false;
+               }
 
                /* Only poll for autoneg to complete if specified to do so */
                if (autoneg_wait_to_complete) {
@@ -1032,9 +1081,30 @@ mac_reset_top:
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = true;
        } else {
-               if (autoc != hw->mac.orig_autoc)
-                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-                                       IXGBE_AUTOC_AN_RESTART));
+               if (autoc != hw->mac.orig_autoc) {
+                       /* Need SW/FW semaphore around AUTOC writes if LESM is
+                        * on, likewise reset_pipeline requires us to hold
+                        * this lock as it also writes to AUTOC.
+                        */
+                       bool got_lock = false;
+                       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                               status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                               if (status != 0) {
+                                       status = IXGBE_ERR_SWFW_SYNC;
+                                       goto reset_hw_out;
+                               }
+
+                               got_lock = true;
+                       }
+
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+                       ixgbe_reset_pipeline_82599(hw);
+
+                       if (got_lock)
+                               hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+               }
 
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1137,7 +1207,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
                                   IXGBE_FDIRCTRL_INIT_DONE)
                        break;
-               udelay(10);
+               msleep(1);
        }
        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
                hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
@@ -2037,7 +2107,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
  *  if the FW version is not supported.
  **/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM_VERSION;
        u16 fw_offset, fw_ptp_cfg_offset;
@@ -2178,4 +2248,46 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
        return ret_val;
 }
 
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ *  @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+       s32 i, autoc_reg, ret_val;
+       s32 anlp1_reg = 0;
+
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+       /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+       /* Wait for AN to leave state 0 */
+       for (i = 0; i < 10; i++) {
+               msleep(4);
+               anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+               if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+                       break;
+       }
+
+       if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+               hw_dbg(hw, "auto negotiation not completed\n");
+               ret_val = IXGBE_ERR_RESET_FAILED;
+               goto reset_pipeline_out;
+       }
+
+       ret_val = 0;
+
+reset_pipeline_out:
+       /* Write AUTOC register with original LMS field and Restart_AN */
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return ret_val;
+}
+
+
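
The acquire-semaphore / write-AUTOC / reset-pipeline / release sequence recurs at every AUTOC write site this commit touches (setup_sfp, start_mac_link, setup_mac_link, reset_hw, plus setup_fc and the LED blink paths in ixgbe_common.c). Distilled into one hypothetical helper for clarity; the commit itself keeps the pattern inline:

/* Hypothetical condensation of the locking pattern repeated above. */
static s32 ixgbe_write_autoc_locked_sketch(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = false;
	s32 status = 0;

	/* LESM firmware also owns AUTOC, so serialize via the SW/FW semaphore */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (status != 0)
			return IXGBE_ERR_SWFW_SYNC;
		got_lock = true;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	status = ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return status;
}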
 
index 4e3804ef966a7973a84260d7c9f60374dcd46f19..290fe5d20a25e8bd51256d2ece84d82924f2fb2f 100644 (file)
@@ -78,48 +78,44 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
 
-       if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
-               switch (hw->device_id) {
-               case IXGBE_DEV_ID_82598:
-               case IXGBE_DEV_ID_82598_BX:
-               case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-               case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-               case IXGBE_DEV_ID_82598AT:
-               case IXGBE_DEV_ID_82598AT2:
-               case IXGBE_DEV_ID_82598EB_CX4:
-               case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-               case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-               case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-               case IXGBE_DEV_ID_82598EB_XF_LR:
-               case IXGBE_DEV_ID_82598EB_SFP_LOM:
-                       hw->mac.type = ixgbe_mac_82598EB;
-                       break;
-               case IXGBE_DEV_ID_82599_KX4:
-               case IXGBE_DEV_ID_82599_KX4_MEZZ:
-               case IXGBE_DEV_ID_82599_XAUI_LOM:
-               case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-               case IXGBE_DEV_ID_82599_KR:
-               case IXGBE_DEV_ID_82599_SFP:
-               case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
-               case IXGBE_DEV_ID_82599_SFP_FCOE:
-               case IXGBE_DEV_ID_82599_SFP_EM:
-               case IXGBE_DEV_ID_82599_SFP_SF2:
-               case IXGBE_DEV_ID_82599_SFP_SF_QP:
-               case IXGBE_DEV_ID_82599EN_SFP:
-               case IXGBE_DEV_ID_82599_CX4:
-               case IXGBE_DEV_ID_82599_LS:
-               case IXGBE_DEV_ID_82599_T3_LOM:
-                       hw->mac.type = ixgbe_mac_82599EB;
-                       break;
-               case IXGBE_DEV_ID_X540T:
-                       hw->mac.type = ixgbe_mac_X540;
-                       break;
-               default:
-                       ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-                       break;
-               }
-       } else {
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+       case IXGBE_DEV_ID_82598_BX:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AT:
+       case IXGBE_DEV_ID_82598AT2:
+       case IXGBE_DEV_ID_82598EB_CX4:
+       case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+       case IXGBE_DEV_ID_82598EB_SFP_LOM:
+               hw->mac.type = ixgbe_mac_82598EB;
+               break;
+       case IXGBE_DEV_ID_82599_KX4:
+       case IXGBE_DEV_ID_82599_KX4_MEZZ:
+       case IXGBE_DEV_ID_82599_XAUI_LOM:
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+       case IXGBE_DEV_ID_82599_KR:
+       case IXGBE_DEV_ID_82599_SFP:
+       case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+       case IXGBE_DEV_ID_82599_SFP_FCOE:
+       case IXGBE_DEV_ID_82599_SFP_EM:
+       case IXGBE_DEV_ID_82599_SFP_SF2:
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
+       case IXGBE_DEV_ID_82599EN_SFP:
+       case IXGBE_DEV_ID_82599_CX4:
+       case IXGBE_DEV_ID_82599_LS:
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               hw->mac.type = ixgbe_mac_82599EB;
+               break;
+       case IXGBE_DEV_ID_X540T:
+               hw->mac.type = ixgbe_mac_X540;
+               break;
+       default:
                ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+               break;
        }
 
        hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
index a6ab30d2912da0565ffcac76497446d0fe2d70e4..bdf34fb8228169b4f00d83167de5fcc3bb8c51ed 100644 (file)
@@ -151,6 +151,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                                          union ixgbe_atr_input *mask);
 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
                                     union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
                        u8 *data);
 s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
index 8646bbd49f217b1d87b9aea159402b90790e0673..f3298a990958cc0e20b16739477a8d4cf9863876 100644 (file)
@@ -162,6 +162,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
        s32 ret_val = 0;
        u32 reg = 0, reg_bp = 0;
        u16 reg_cu = 0;
+       bool got_lock = false;
 
        /*
         * Validate the requested mode.  Strict IEEE mode does not allow
@@ -283,7 +284,28 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         */
        if (hw->phy.media_type == ixgbe_media_type_backplane) {
                reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+                * LESM is on, likewise reset_pipeline requires the lock as
+                * it also writes AUTOC.
+                */
+               if ((hw->mac.type == ixgbe_mac_82599EB) &&
+                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != 0) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+                       got_lock = true;
+               }
+
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+               if (hw->mac.type == ixgbe_mac_82599EB)
+                       ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock)
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
                    (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
                hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
@@ -2808,6 +2830,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        bool link_up = 0;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       s32 ret_val = 0;
 
        /*
         * Link must be up to auto-blink the LEDs;
@@ -2816,10 +2839,29 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
        if (!link_up) {
+               /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+                * LESM is on.
+                */
+               bool got_lock = false;
+               if ((hw->mac.type == ixgbe_mac_82599EB) &&
+                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != 0) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+                       got_lock = true;
+               }
+
                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
                autoc_reg |= IXGBE_AUTOC_FLU;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
                IXGBE_WRITE_FLUSH(hw);
+
+               if (got_lock)
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
                msleep(10);
        }
 
@@ -2828,7 +2870,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
@@ -2840,18 +2883,42 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 {
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       s32 ret_val = 0;
+       bool got_lock = false;
+
+       /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+        * LESM is on.
+        */
+       if ((hw->mac.type == ixgbe_mac_82599EB) &&
+           ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                               IXGBE_GSSR_MAC_CSR_SM);
+               if (ret_val != 0) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
+               }
+               got_lock = true;
+       }
+
 
        autoc_reg &= ~IXGBE_AUTOC_FLU;
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               ixgbe_reset_pipeline_82599(hw);
+
+       if (got_lock)
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg &= ~IXGBE_LED_BLINK(index);
        led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
@@ -4080,3 +4147,4 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
        return status;
 }
 
+
index 9bd6f534bfc2cc5079a69fbb1752c1334c6828be..e9c2a9a9146807cf0ce49e13296d135f1e213e15 100644 (file)
@@ -125,6 +125,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                 u8 build, u8 ver);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR  0xF8
 #define IXGBE_EMC_INTERNAL_DATA                0x00
 #define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
index 124ade95486457e4e5535075386a1ccd1ac96654..aaea34e5a53a25b295faf92e5870fcb7ecfe3de1 100644 (file)
@@ -175,100 +175,60 @@ int ixgbe_get_settings(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       ixgbe_link_speed supported_link;
        u32 link_speed = 0;
+       bool autoneg;
        bool link_up;
 
-       ecmd->supported = SUPPORTED_10000baseT_Full;
-       ecmd->autoneg = AUTONEG_ENABLE;
-       ecmd->transceiver = XCVR_EXTERNAL;
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-           (hw->phy.multispeed_fiber)) {
-               ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                   SUPPORTED_Autoneg);
-               switch (hw->mac.type) {
-               case ixgbe_mac_X540:
-                       ecmd->supported |= SUPPORTED_100baseT_Full;
-                       break;
-               default:
-                       break;
-               }
-
-               ecmd->advertising = ADVERTISED_Autoneg;
-               if (hw->phy.autoneg_advertised) {
-                       if (hw->phy.autoneg_advertised &
-                           IXGBE_LINK_SPEED_100_FULL)
-                               ecmd->advertising |= ADVERTISED_100baseT_Full;
-                       if (hw->phy.autoneg_advertised &
-                           IXGBE_LINK_SPEED_10GB_FULL)
-                               ecmd->advertising |= ADVERTISED_10000baseT_Full;
-                       if (hw->phy.autoneg_advertised &
-                           IXGBE_LINK_SPEED_1GB_FULL)
-                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               } else {
-                       /*
-                        * Default advertised modes in case
-                        * phy.autoneg_advertised isn't set.
-                        */
-                       ecmd->advertising |= (ADVERTISED_10000baseT_Full |
-                                             ADVERTISED_1000baseT_Full);
-                       if (hw->mac.type == ixgbe_mac_X540)
-                               ecmd->advertising |= ADVERTISED_100baseT_Full;
-               }
-
-               if (hw->phy.media_type == ixgbe_media_type_copper) {
-                       ecmd->supported |= SUPPORTED_TP;
-                       ecmd->advertising |= ADVERTISED_TP;
-                       ecmd->port = PORT_TP;
-               } else {
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_FIBRE;
-               }
-       } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               /* Set as FIBRE until SERDES defined in kernel */
-               if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
-                       ecmd->supported = (SUPPORTED_1000baseT_Full |
-                                          SUPPORTED_FIBRE);
-                       ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_FIBRE);
-                       ecmd->port = PORT_FIBRE;
-                       ecmd->autoneg = AUTONEG_DISABLE;
-               } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
-                         || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
-                       ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                           SUPPORTED_Autoneg |
-                                           SUPPORTED_FIBRE);
-                       ecmd->advertising = (ADVERTISED_10000baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_Autoneg |
-                                            ADVERTISED_FIBRE);
-                       ecmd->port = PORT_FIBRE;
-               } else {
-                       ecmd->supported |= (SUPPORTED_1000baseT_Full |
-                                           SUPPORTED_FIBRE);
-                       ecmd->advertising = (ADVERTISED_10000baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_FIBRE);
-                       ecmd->port = PORT_FIBRE;
-               }
+       hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
+
+       /* set the supported link speeds */
+       if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+               ecmd->supported |= SUPPORTED_10000baseT_Full;
+       if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+               ecmd->supported |= SUPPORTED_1000baseT_Full;
+       if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+               ecmd->supported |= SUPPORTED_100baseT_Full;
+
+       /* set the advertised speeds */
+       if (hw->phy.autoneg_advertised) {
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
        } else {
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising = (ADVERTISED_10000baseT_Full |
-                                    ADVERTISED_FIBRE);
-               ecmd->port = PORT_FIBRE;
+               /* default modes in case phy.autoneg_advertised isn't set */
+               if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+       }
+
+       if (autoneg) {
+               ecmd->supported |= SUPPORTED_Autoneg;
+               ecmd->advertising |= ADVERTISED_Autoneg;
+               ecmd->autoneg = AUTONEG_ENABLE;
+       } else
                ecmd->autoneg = AUTONEG_DISABLE;
-       }
 
-#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
-       /* Get PHY type */
+       ecmd->transceiver = XCVR_EXTERNAL;
+
+       /* Determine the remaining settings based on the PHY type. */
        switch (adapter->hw.phy.type) {
        case ixgbe_phy_tn:
        case ixgbe_phy_aq:
        case ixgbe_phy_cu_unknown:
-               /* Copper 10G-BASET */
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
                ecmd->port = PORT_TP;
                break;
        case ixgbe_phy_qt:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_FIBRE;
                break;
        case ixgbe_phy_nl:
@@ -279,52 +239,61 @@ int ixgbe_get_settings(struct net_device *netdev,
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
                switch (adapter->hw.phy.sfp_type) {
-               /* SFP+ devices, further checking needed */
+                       /* SFP+ devices, further checking needed */
                case ixgbe_sfp_type_da_cu:
                case ixgbe_sfp_type_da_cu_core0:
                case ixgbe_sfp_type_da_cu_core1:
+                       ecmd->supported |= SUPPORTED_FIBRE;
+                       ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_DA;
                        break;
                case ixgbe_sfp_type_sr:
                case ixgbe_sfp_type_lr:
                case ixgbe_sfp_type_srlr_core0:
                case ixgbe_sfp_type_srlr_core1:
+                       ecmd->supported |= SUPPORTED_FIBRE;
+                       ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_FIBRE;
                        break;
                case ixgbe_sfp_type_not_present:
+                       ecmd->supported |= SUPPORTED_FIBRE;
+                       ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_NONE;
                        break;
                case ixgbe_sfp_type_1g_cu_core0:
                case ixgbe_sfp_type_1g_cu_core1:
+                       ecmd->supported |= SUPPORTED_TP;
+                       ecmd->advertising |= ADVERTISED_TP;
                        ecmd->port = PORT_TP;
-                       ecmd->supported = SUPPORTED_TP;
-                       ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                               ADVERTISED_TP);
                        break;
                case ixgbe_sfp_type_1g_sx_core0:
                case ixgbe_sfp_type_1g_sx_core1:
+                       ecmd->supported |= SUPPORTED_FIBRE;
+                       ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_FIBRE;
-                       ecmd->supported = SUPPORTED_FIBRE;
-                       ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                               ADVERTISED_FIBRE);
                        break;
                case ixgbe_sfp_type_unknown:
                default:
+                       ecmd->supported |= SUPPORTED_FIBRE;
+                       ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_OTHER;
                        break;
                }
                break;
        case ixgbe_phy_xaui:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_NONE;
                break;
        case ixgbe_phy_unknown:
        case ixgbe_phy_generic:
        case ixgbe_phy_sfp_unsupported:
        default:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_OTHER;
                break;
        }
-#endif
 
        if (!in_interrupt()) {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -822,7 +791,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
        if (!eeprom_buff)
                return -ENOMEM;
 
-       ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
+       ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
                                           eeprom_buff);
 
        /* Device's eeprom is always little-endian, word addressable */
@@ -866,7 +835,7 @@ static int ixgbe_set_eeprom(struct net_device *netdev,
                 * need read/modify/write of first changed EEPROM word
                 * only the second byte of the word is being modified
                 */
-               ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
+               ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
                if (ret_val)
                        goto err;
 
@@ -877,7 +846,7 @@ static int ixgbe_set_eeprom(struct net_device *netdev,
                 * need read/modify/write of last changed EEPROM word
                 * only the first byte of the word is being modified
                 */
-               ret_val = ixgbe_read_eeprom(hw, last_word,
+               ret_val = hw->eeprom.ops.read(hw, last_word,
                                          &eeprom_buff[last_word - first_word]);
                if (ret_val)
                        goto err;
@@ -892,13 +861,13 @@ static int ixgbe_set_eeprom(struct net_device *netdev,
        for (i = 0; i < last_word - first_word + 1; i++)
                cpu_to_le16s(&eeprom_buff[i]);
 
-       ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
+       ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
                                            last_word - first_word + 1,
                                            eeprom_buff);
 
        /* Update the checksum */
        if (ret_val == 0)
-               ixgbe_update_eeprom_checksum(hw);
+               hw->eeprom.ops.update_checksum(hw);
 
 err:
        kfree(eeprom_buff);
@@ -1224,8 +1193,7 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
        bool link_up;
        u32 link_speed = 0;
        *data = 0;
-
-       hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+       hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
        if (link_up)
                return *data;
        else
@@ -1436,7 +1404,9 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 
 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
 {
-       if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (hw->eeprom.ops.validate_checksum(hw, NULL))
                *data = 1;
        else
                *data = 0;
@@ -1697,21 +1667,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB) {
                u8 atlas;
 
-               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
-               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
 
-               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
-               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
 
-               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
-               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
 
-               ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
-               ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
        }
 
        return 0;
@@ -2123,9 +2093,9 @@ static int ixgbe_phys_id(struct net_device *netdev, u32 data)
                data = 300;
 
        for (i = 0; i < (data * 1000); i += 400) {
-               ixgbe_led_on(hw, IXGBE_LED_ON);
+               hw->mac.ops.led_on(hw, IXGBE_LED_ON);
                msleep_interruptible(200);
-               ixgbe_led_off(hw, IXGBE_LED_ON);
+               hw->mac.ops.led_off(hw, IXGBE_LED_ON);
                msleep_interruptible(200);
        }
 
@@ -3020,6 +2990,48 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
        return ret;
 }
 
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+static int ixgbe_get_ts_info(struct net_device *dev,
+                            struct ethtool_ts_info *info)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       switch (adapter->hw.mac.type) {
+#ifdef CONFIG_IXGBE_PTP
+       case ixgbe_mac_X540:
+       case ixgbe_mac_82599EB:
+               info->so_timestamping =
+                       SOF_TIMESTAMPING_TX_SOFTWARE |
+                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                       SOF_TIMESTAMPING_SOFTWARE |
+                       SOF_TIMESTAMPING_TX_HARDWARE |
+                       SOF_TIMESTAMPING_RX_HARDWARE |
+                       SOF_TIMESTAMPING_RAW_HARDWARE;
+
+               if (adapter->ptp_clock)
+                       info->phc_index = ptp_clock_index(adapter->ptp_clock);
+               else
+                       info->phc_index = -1;
+
+               info->tx_types =
+                       (1 << HWTSTAMP_TX_OFF) |
+                       (1 << HWTSTAMP_TX_ON);
+
+               info->rx_filters =
+                       (1 << HWTSTAMP_FILTER_NONE) |
+                       (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                       (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                       (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+               break;
+#endif /* CONFIG_IXGBE_PTP */
+       default:
+               return ethtool_op_get_ts_info(dev, info);
+               break;
+       }
+       return 0;
+}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
 #endif /* ETHTOOL_GRXRINGS */
 static struct ethtool_ops ixgbe_ethtool_ops = {
        .get_settings           = ixgbe_get_settings,
@@ -3084,6 +3096,9 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
        .set_rx_ntuple          = ixgbe_set_rx_ntuple,
 #endif
 #endif
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+       .get_ts_info            = ixgbe_get_ts_info,
+#endif
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
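
With .get_ts_info hooked up above, userspace can discover the PHC index and timestamping capabilities through the standard ethtool ioctl. A minimal user-space sketch (interface name and error handling are illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Sketch: read the timestamping info that ixgbe_get_ts_info reports. */
static int query_ts_info(const char *ifname)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return -1;
	}
	printf("%s: phc_index=%d tx_types=0x%x rx_filters=0x%x\n",
	       ifname, info.phc_index, info.tx_types, info.rx_filters);
	close(fd);
	return 0;
}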
index 21ec1ffdd43017f160e0be585b5efc03abe0f01d..eee40ec70477ca7cfcbd8fe1cfa35844ec56e6a3 100644 (file)
@@ -819,6 +819,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
                return -EINVAL;
 
        e_info(drv, "Enabling FCoE offload features.\n");
+
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
 
index 36a858abd669eebe02f92c6de820c3908390ad4a..7be7a739052a236bc9f65cde49255eeca913804d 100644 (file)
@@ -347,10 +347,10 @@ static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter)
        int i;
        u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
        u16 vmdq_m = 0;
-       u8 tcs = netdev_get_num_tc(adapter->netdev);
 #ifdef IXGBE_FCOE
        u16 fcoe_i = 0;
 #endif
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
 
        /* verify we have DCB enabled before proceeding */
        if (tcs <= 1)
@@ -717,12 +717,14 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
         * more CPUs.
         */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               struct net_device *dev = adapter->netdev;
                u16 fcoe_i;
 
                f = &adapter->ring_feature[RING_F_FCOE];
 
                /* merge FCoE queues with RSS queues */
                fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+               fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
 
                /* limit indices to rss_i if MSI-X is disabled */
                if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -888,6 +890,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif
        q_vector->numa_node = node;
 
+       /* initialize CPU for DCA */
+       q_vector->cpu = -1;
+
 #ifndef IXGBE_NO_LRO
        /* initialize LRO */
        __skb_queue_head_init(&q_vector->lrolist.active);
index 278398e2f3bd9a1fd1f4dfcd4ea33734ffcc7b25..809d1f950cdac6eb9ad60b43e65c4ce52687a0c2 100644 (file)
@@ -38,9 +38,6 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
-#ifdef HAVE_SCTP
-#include <linux/sctp.h>
-#endif
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #ifdef NETIF_F_TSO
@@ -56,6 +53,7 @@
 #include "ixgbe.h"
 
 
+
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
 
@@ -70,11 +68,10 @@ static const char ixgbe_driver_string[] =
 
 #define VMDQ_TAG
 
-#define MAJ 3
-#define MIN 10
-#define BUILD 17
-#define DRV_VERSION    __stringify(MAJ) "." __stringify(MIN) "." \
-                       __stringify(BUILD) DRIVERIOV DRV_HW_PERF FPGA VMDQ_TAG
+#define BYPASS_TAG
+
+#define DRV_VERSION    __stringify(3.11.33) DRIVERIOV DRV_HW_PERF FPGA \
+                       VMDQ_TAG BYPASS_TAG
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2012 Intel Corporation.";
@@ -474,6 +471,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
+#ifdef CONFIG_IXGBE_PTP
+               if (unlikely(tx_buffer->tx_flags &
+                            IXGBE_TX_FLAGS_TSTAMP))
+                       ixgbe_ptp_tx_hwtstamp(q_vector,
+                                             tx_buffer->skb);
+
+
+#endif
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
 
@@ -898,7 +903,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        bi->dma = dma;
-       bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+       bi->page_offset = 0;
 
        return true;
 }
@@ -1557,29 +1562,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 }
 
 #endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT || NETIF_F_GSO */
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
-                             union ixgbe_adv_rx_desc *rx_desc,
-                             struct sk_buff *skb)
-{
-       __le32 rsc_enabled;
-       u32 rsc_cnt;
-
-       if (!ring_is_rsc_enabled(rx_ring))
-               return;
-
-       rsc_enabled = rx_desc->wb.lower.lo_dword.data &
-                     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
-       /* If this is an RSC frame rsc_cnt should be non-zero */
-       if (!rsc_enabled)
-               return;
-
-       rsc_cnt = le32_to_cpu(rsc_enabled);
-       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
-       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
 #ifdef NETIF_F_GSO
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
                                   struct sk_buff *skb)
@@ -1651,6 +1633,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
 #endif /* NETIF_F_RXHASH */
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
+#ifdef CONFIG_IXGBE_PTP
+       ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+
+#endif
        ixgbe_rx_vlan(rx_ring, rx_desc, skb);
 
        skb_record_rx_queue(skb, ring_queue_index(rx_ring));
@@ -1683,7 +1669,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
  * ixgbe_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
  * @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
+ * @skb: Current socket buffer containing buffer in progress
  *
  * This function updates next to clean.  If the buffer is an EOP buffer
  * this function exits returning false, otherwise it will place the
@@ -1705,79 +1691,62 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
-       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-               return false;
+       /* update RSC append count if present */
+       if (ring_is_rsc_enabled(rx_ring)) {
+               __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+                                    cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
 
-       /* append_cnt indicates packet is RSC, if so fetch nextp */
-       if (IXGBE_CB(skb)->append_cnt) {
-               ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
-               ntc &= IXGBE_RXDADV_NEXTP_MASK;
-               ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+               if (unlikely(rsc_enabled)) {
+                       u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+                       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+                       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+
+                       /* update ntc based on RSC value */
+                       ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+                       ntc &= IXGBE_RXDADV_NEXTP_MASK;
+                       ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+               }
        }
 
+       /* if we are the last buffer then there is nothing else to do */
+       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+               return false;
+
        /* place skb in next buffer to be received */
 #ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        next_skb = rx_ring->rx_buffer_info[ntc].skb;
-       rx_ring->rx_stats.non_eop_descs++;
 
        ixgbe_add_active_tail(skb, next_skb);
        IXGBE_CB(next_skb)->head = skb;
 #else
        rx_ring->rx_buffer_info[ntc].skb = skb;
-       rx_ring->rx_stats.non_eop_descs++;
 #endif
+       rx_ring->rx_stats.non_eop_descs++;
 
        return true;
 }
 
 #ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
 /**
- * ixgbe_cleanup_headers - Correct corrupted or empty headers
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
  * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being fixed
- *
- * Check for corrupted packet headers caused by senders on the local L2
- * embedded NIC switch not setting up their Tx Descriptors right.  These
- * should be very rare.
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
+ * @skb: pointer to current skb being adjusted
  *
- * Returns true if an error was encountered and skb was freed.
- **/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
-                                 union ixgbe_adv_rx_desc *rx_desc,
-                                 struct sk_buff *skb)
+ * This function is an ixgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+                           struct sk_buff *skb)
 {
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;
 
-       /* if the page was released unmap it, else just sync our portion */
-       if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               IXGBE_CB(skb)->page_released = false;
-       } else {
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             IXGBE_CB(skb)->dma,
-                                             frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
-                                             DMA_FROM_DEVICE);
-       }
-       IXGBE_CB(skb)->dma = 0;
-
-       /* verify that the packet does not have any known errors */
-       if (unlikely(ixgbe_test_staterr(rx_desc,
-                                       IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
-               dev_kfree_skb_any(skb);
-               return true;
-       }
-
        /*
         * it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the lomem pool per
@@ -1789,9 +1758,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
         * we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
-       pull_len = skb_frag_size(frag);
-       if (pull_len > IXGBE_RX_HDR_SIZE)
-               pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+       pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -1801,18 +1768,69 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
        frag->page_offset += pull_len;
        skb->data_len -= pull_len;
        skb->tail += pull_len;
+}
 
-       /*
-        * if we sucked the frag empty then we should free it,
-        * if there are other frags here something is screwed up in hardware
-        */
-       if (skb_frag_size(frag) == 0) {
-               BUG_ON(skb_shinfo(skb)->nr_frags != 1);
-               skb_shinfo(skb)->nr_frags = 0;
-               __skb_frag_unref(frag);
-               skb->truesize -= ixgbe_rx_bufsz(rx_ring);
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+                               struct sk_buff *skb)
+{
+       /* if the page was released unmap it, else just sync our portion */
+       if (unlikely(IXGBE_CB(skb)->page_released)) {
+               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               IXGBE_CB(skb)->page_released = false;
+       } else {
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                         IXGBE_CB(skb)->dma,
+                                         skb_shinfo(skb)->frags[0].page_offset,
+                                         ixgbe_rx_bufsz(rx_ring),
+                                         DMA_FROM_DEVICE);
+       }
+       IXGBE_CB(skb)->dma = 0;
+}
+
+/**
+ * ixgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx descriptors correctly.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition, if the skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+                                 union ixgbe_adv_rx_desc *rx_desc,
+                                 struct sk_buff *skb)
+{
+       /* verify that the packet does not have any known errors */
+       if (unlikely(ixgbe_test_staterr(rx_desc,
+                                       IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+               dev_kfree_skb_any(skb);
+               return true;
        }
 
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               ixgbe_pull_tail(rx_ring, skb);
+
 #ifdef IXGBE_FCOE
        /* do not attempt to pad FCoE Frames as this will disrupt DDP */
        if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
@@ -1831,34 +1849,18 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
        return false;
 }
 
-/**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-       struct page *page = rx_buffer->page;
-
-       /* if we are only owner of page and it is local we can reuse it */
-       return likely(page_count(page) == 1) &&
-              likely(page_to_nid(page) == numa_node_id());
-}
-
 /**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *old_buff)
 {
        struct ixgbe_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;
-       u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
        new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1869,17 +1871,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
-
-       /* flip page offset to other buffer and store to new_buff */
-       new_buff->page_offset = old_buff->page_offset ^ bufsz;
+       new_buff->page_offset = old_buff->page_offset;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset, bufsz,
+                                        new_buff->page_offset,
+                                        ixgbe_rx_bufsz(rx_ring),
                                         DMA_FROM_DEVICE);
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(new_buff->page);
 }
 
 /**
@@ -1889,20 +1887,152 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy, if the data in the buffer
+ * is smaller than the skb header size, or by attaching the page as a
+ * frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
  **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             struct sk_buff *skb, int size)
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
-       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                          rx_buffer->page, rx_buffer->page_offset,
-                          size);
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += ixgbe_rx_bufsz(rx_ring);
+       struct page *page = rx_buffer->page;
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+                                  ixgbe_rx_bufsz(rx_ring);
+#endif
+
+       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+               /* we can reuse buffer as-is, just make sure it is local */
+               if (likely(page_to_nid(page) == numa_node_id()))
+                       return true;
+
+               /* this page cannot be reused so discard it */
+               put_page(page);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(page_to_nid(page) != numa_node_id()))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+
+       return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+                                            union ixgbe_adv_rx_desc *rx_desc)
+{
+       struct ixgbe_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) +
+                                 rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
+                                               IXGBE_RX_HDR_SIZE);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       return NULL;
+               }
+
+               /*
+                * we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+
+               /*
+                * Delay unmapping of the first packet. It carries the
+                * header information, HW may still access the header
+                * after the writeback.  Only unmap it when EOP is
+                * reached
+                */
+               if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+                       goto dma_sync;
+
+               IXGBE_CB(skb)->dma = rx_buffer->dma;
+       } else {
+               if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+                       ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+               /* we are reusing so sync this buffer for CPU use */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+
+       /* pull page into skb */
+       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+               /* the page has been released from the ring */
+               IXGBE_CB(skb)->page_released = true;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                              ixgbe_rx_pg_size(rx_ring),
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->skb = NULL;
+       rx_buffer->dma = 0;
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 /**
@@ -1929,11 +2059,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
        do {
-               struct ixgbe_rx_buffer *rx_buffer;
                union ixgbe_adv_rx_desc *rx_desc;
                struct sk_buff *skb;
-               struct page *page;
-               u16 ntc;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1941,9 +2068,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        cleaned_count = 0;
                }
 
-               ntc = rx_ring->next_to_clean;
-               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-               rx_buffer = &rx_ring->rx_buffer_info[ntc];
+               rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
                if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;
@@ -1955,75 +2080,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                rmb();
 
-               page = rx_buffer->page;
-               prefetchw(page);
-
-               skb = rx_buffer->skb;
-
-               if (likely(!skb)) {
-                       void *page_addr = page_address(page) +
-                                         rx_buffer->page_offset;
+               /* retrieve a buffer from the ring */
+               skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-                       /* prefetch first cache line of first page */
-                       prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-                       prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
-                       /* allocate a skb to store the frags */
-                       skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
-                                                       IXGBE_RX_HDR_SIZE);
-                       if (unlikely(!skb)) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               break;
-                       }
-
-                       /*
-                        * we will be copying header into skb->data in
-                        * pskb_may_pull so it is in our interest to prefetch
-                        * it now to avoid a possible cache miss
-                        */
-                       prefetchw(skb->data);
-
-                       /*
-                        * Delay unmapping of the first packet. It carries the
-                        * header information, HW may still access the header
-                        * after the writeback.  Only unmap it when EOP is
-                        * reached
-                        */
-                       IXGBE_CB(skb)->dma = rx_buffer->dma;
-               } else {
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_buffer->dma,
-                                                     rx_buffer->page_offset,
-                                                     ixgbe_rx_bufsz(rx_ring),
-                                                     DMA_FROM_DEVICE);
-               }
-
-               /* pull page into skb */
-               ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-                                 le16_to_cpu(rx_desc->wb.upper.length));
-
-               if (ixgbe_can_reuse_page(rx_buffer)) {
-                       /* hand second half of page back to the ring */
-                       ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-               } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-                       /* the page has been released from the ring */
-                       IXGBE_CB(skb)->page_released = true;
-               } else {
-                       /* we are not reusing the buffer so unmap it */
-                       dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                                      ixgbe_rx_pg_size(rx_ring),
-                                      DMA_FROM_DEVICE);
-               }
-
-               /* clear contents of buffer_info */
-               rx_buffer->skb = NULL;
-               rx_buffer->dma = 0;
-               rx_buffer->page = NULL;
-
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+               /* exit if we failed to retrieve a buffer */
+               if (!skb)
+                       break;
 
                cleaned_count++;
 
@@ -2168,8 +2230,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                rx_buffer->skb = NULL;
                rx_buffer->dma = 0;
 
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
-
                cleaned_count++;
 
                if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
@@ -2697,6 +2757,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
        default:
                break;
        }
+
+#ifdef CONFIG_IXGBE_PTP
+       if (adapter->hw.mac.type == ixgbe_mac_X540)
+               mask |= IXGBE_EIMS_TIMESYNC;
+#endif
+
        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
            !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
                mask |= IXGBE_EIMS_FLOW_DIR;
@@ -2770,6 +2836,11 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
 
+#ifdef CONFIG_IXGBE_PTP
+       if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
+               ixgbe_ptp_check_pps_event(adapter, eicr);
+#endif
+
        /* re-enable the original interrupt state, no lsc, no queues */
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable(adapter, false, false);
@@ -2968,6 +3039,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        }
 
        ixgbe_check_fan_failure(adapter, eicr);
+#ifdef CONFIG_IXGBE_PTP
+       if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
+               ixgbe_ptp_check_pps_event(adapter, eicr);
+#endif
 
        /* would disable interrupts here but EIAM disabled it */
        napi_schedule(&q_vector->napi);
@@ -3345,12 +3420,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 #ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
                  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
-       srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
        srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
 #endif
 
        /* configure descriptor type */
@@ -3479,9 +3550,9 @@ void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * than 65536
         */
 #ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-#if (MAX_SKB_FRAGS >= 16) && (PAGE_SIZE <= 8192)
+#if (MAX_SKB_FRAGS >= 16)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS >= 8) && (PAGE_SIZE <= 16384)
+#elif (MAX_SKB_FRAGS >= 8)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
 #elif (MAX_SKB_FRAGS >= 4)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
@@ -3583,6 +3654,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
 
        ixgbe_configure_srrctl(adapter, ring);
+       /* In ESX, RSCCTL configuration is done on demand */
        ixgbe_configure_rscctl(adapter, ring);
 
        switch (hw->mac.type) {
@@ -3751,6 +3823,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
 #endif /* IXGBE_FCOE */
+
+       /* adjust max frame to be at least the size of a standard frame */
+       if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+               max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3758,10 +3835,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }
+
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
                /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
                max_frame += VLAN_HLEN;
 
-#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
            (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) {
                rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -3793,6 +3871,7 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_ring = adapter->rx_ring[i];
+               /* RSC enablement is done on demand in ESX. */
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
                else
@@ -3885,7 +3964,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 
        /* enable all receives */
        rxctrl |= IXGBE_RXCTRL_RXEN;
-       ixgbe_enable_rx_dma(hw, rxctrl);
+       hw->mac.ops.enable_rx_dma(hw, rxctrl);
 }
 
 #ifdef NETIF_F_HW_VLAN_TX
@@ -5110,11 +5189,12 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
        else
                ixgbe_configure_msi_and_legacy(adapter);
 
-       /* enable the optics */
-       if ((hw->phy.multispeed_fiber) ||
-           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-            (hw->mac.type == ixgbe_mac_82599EB)))
-               ixgbe_enable_tx_laser(hw);
+       /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
+       if (hw->mac.ops.enable_tx_laser &&
+           ((hw->phy.multispeed_fiber) ||
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+             (hw->mac.type == ixgbe_mac_82599EB))))
+               hw->mac.ops.enable_tx_laser(hw);
 
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
@@ -5228,7 +5308,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        }
 
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
        /* do not flush user set addresses */
        memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
        ixgbe_flush_sw_mac_table(adapter);
@@ -5243,31 +5322,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        /* update SAN MAC vmdq pool selection */
        if (hw->mac.san_mac_rar_index)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
-}
-
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set.  To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-       struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-       u16 i;
 
-       for (i = 0; i < rx_ring->count; i += 2) {
-               rx_buffer[0].page_offset = 0;
-               rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-               rx_buffer = &rx_buffer[2];
-       }
+#ifdef CONFIG_IXGBE_PTP
+       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+               ixgbe_ptp_reset(adapter);
+#endif
 }
 
-#endif
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -5333,10 +5394,6 @@ void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-       ixgbe_init_rx_page_offset(rx_ring);
-
-#endif
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 }
@@ -5483,11 +5540,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        if (!pci_channel_offline(adapter->pdev))
 #endif
                ixgbe_reset(adapter);
-       /* power down the optics */
-       if ((hw->phy.multispeed_fiber) ||
-           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-            (hw->mac.type == ixgbe_mac_82599EB)))
-               ixgbe_disable_tx_laser(hw);
+       /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+       if (hw->mac.ops.disable_tx_laser &&
+           ((hw->phy.multispeed_fiber) ||
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+             (hw->mac.type == ixgbe_mac_82599EB))))
+               hw->mac.ops.disable_tx_laser(hw);
 
        ixgbe_clean_all_tx_rings(adapter);
        ixgbe_clean_all_rx_rings(adapter);
@@ -5510,9 +5568,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
+       int err;
+#ifdef CONFIG_DCB
        struct ixgbe_dcb_tc_config *tc;
        int j, bwg_pct;
-       int err;
+#endif /* CONFIG_DCB */
 
        /* PCI config space info */
 
@@ -5589,7 +5649,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        /* n-tuple support exists, always init our spinlock */
        spin_lock_init(&adapter->fdir_perfect_lock);
 
-
+#ifdef CONFIG_DCB
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
        case ixgbe_mac_82599EB:
@@ -5635,11 +5695,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->dcb_cfg.pfc_mode_enable = false;
        adapter->dcb_cfg.round_robin_enable = false;
        adapter->dcb_set_bitmap = 0x00;
-#ifdef CONFIG_DCB
        adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-#endif /* CONFIG_DCB */
-#ifdef CONFIG_DCB
-       /* XXX does this need to be initialized even w/o DCB? */
        memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
               sizeof(adapter->temp_dcb_cfg));
 
@@ -5791,10 +5847,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        if (!rx_ring->desc)
                goto err;
 
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-       ixgbe_init_rx_page_offset(rx_ring);
-
-#endif
        return 0;
 err:
        vfree(rx_ring->rx_buffer_info);
@@ -5932,14 +5984,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
 
        /*
-        * For 82599EB we cannot allow PF to change MTU greater than 1500
-        * in SR-IOV mode as it may cause buffer overruns in guest VFs that
-        * don't allocate and chain buffers correctly.
+        * For 82599EB we cannot allow legacy VFs to enable their receive
+        * paths when MTU greater than 1500 is configured.  So display a
+        * warning that legacy VFs will be disabled.
         */
        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
            (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
            (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
-                       return -EINVAL;
+               e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 
@@ -6002,6 +6054,10 @@ static int ixgbe_open(struct net_device *netdev)
        if (err)
                goto err_set_queues;
 
+#ifdef CONFIG_IXGBE_PTP
+       ixgbe_ptp_init(adapter);
+#endif /* CONFIG_IXGBE_PTP*/
+
        ixgbe_up_complete(adapter);
 
        return 0;
@@ -6033,6 +6089,10 @@ static int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+#ifdef CONFIG_IXGBE_PTP
+       ixgbe_ptp_stop(adapter);
+#endif
+
        ixgbe_down(adapter);
        ixgbe_free_irq(adapter);
 
@@ -6134,13 +6194,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_set_rx_mode(netdev);
 
                /*
-                * Laser could be off for multispeed fiber so make sure it
-                * is on for WoL.
+                * enable the optics for both multi-speed fiber and
+                * 82599 SFP+ fiber so that WoL can function.
                 */
-               if (hw->phy.multispeed_fiber ||
+               if (hw->mac.ops.enable_tx_laser &&
+                   (hw->phy.multispeed_fiber ||
                    (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
-                    hw->mac.type == ixgbe_mac_82599EB))
-                       ixgbe_enable_tx_laser(hw);
+                    hw->mac.type == ixgbe_mac_82599EB)))
+                       hw->mac.ops.enable_tx_laser(hw);
 
                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & IXGBE_WUFC_MC) {
@@ -6632,6 +6693,25 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
        adapter->link_speed = link_speed;
 }
 
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+       u8 up = 0;
+#ifdef HAVE_DCBNL_IEEE
+       struct net_device *netdev = adapter->netdev;
+       struct dcb_app app = {
+                             .selector = DCB_APP_IDTYPE_ETHTYPE,
+                             .protocol = 0,
+                            };
+       up = dcb_getapp(netdev, &app);
+#endif
+
+#ifdef IXGBE_FCOE
+       adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#else
+       adapter->default_up = up;
+#endif
+}
+
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -6672,6 +6752,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
                break;
        }
 
+#ifdef CONFIG_IXGBE_PTP
+       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+               ixgbe_ptp_start_cyclecounter(adapter);
+
+#endif
        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
               "10 Gbps" :
@@ -6690,6 +6775,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 #endif /* IFLA_VF_MAX */
        netif_tx_wake_all_queues(netdev);
 
+       /* update the default user priority for VFs */
+       ixgbe_update_default_up(adapter);
+
        /* ping all the active vfs to let them know link has changed */
        ixgbe_ping_all_vfs(adapter);
 }
@@ -6715,6 +6803,11 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
        if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
                adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
+#ifdef CONFIG_IXGBE_PTP
+       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+               ixgbe_ptp_start_cyclecounter(adapter);
+
+#endif
        e_info(drv, "NIC Link is Down\n");
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
@@ -6756,8 +6849,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 {
        u32 ssvpc;
 
-       /* Do not perform spoof check for 82598 */
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       /* Do not perform spoof check for 82598 or if in non-IOV mode */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+           adapter->num_vfs == 0)
                return;
 
        ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -7020,6 +7114,9 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_fdir_reinit_subtask(adapter);
 #endif
        ixgbe_check_hang_subtask(adapter);
+#ifdef CONFIG_IXGBE_PTP
+       ixgbe_ptp_overflow_check(adapter);
+#endif
 
        ixgbe_service_event_complete(adapter);
 }
@@ -7179,6 +7276,11 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
        /* set HW vlan bit if vlan is present */
        if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+#ifdef CONFIG_IXGBE_PTP
+       if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+
+#endif
        /* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
        if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
@@ -7358,6 +7460,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        /* notify HW of packet */
        writel(i, tx_ring->tail);
 
+       /*
+        * we need this if more than one processor can write to our tail
+        * at a time; it synchronizes IO on IA64/Altix systems
+        */
+       mmiowb();
+
        return;
 dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -7522,7 +7630,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                struct ixgbe_ring_feature *f;
                if (skb->priority == TC_PRIO_CONTROL)
-                       txq = adapter->tc - 1;
+                       txq = adapter->dcb_tc - 1;
                else
                        txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
                               >> 13;
@@ -7605,6 +7713,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
        }
 
+#ifdef CONFIG_IXGBE_PTP
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+       }
+
+#endif
 #ifdef CONFIG_PCI_IOV
        /*
         * Use the l2switch_enable flag - would be false if the DMA
@@ -7700,17 +7815,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
        unsigned int r_idx = skb->queue_mapping;
 #endif
 
-       if (skb->len <= 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
        /*
         * The minimum packet size for olinfo paylen is 17 so pad the skb
         * in order to meet this minimum size requirement.
         */
-       if (skb->len < 17) {
-               if (skb_padto(skb, 17))
+       if (unlikely(skb->len < 17)) {
+               if (skb_pad(skb, 17 - skb->len))
                        return NETDEV_TX_OK;
                skb->len = 17;
        }
@@ -7801,20 +7911,73 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
 
 #endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */
 
-/**
- * ixgbe_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- **/
+static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad,
+                          u16 addr)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u16 value;
+       int rc;
+
+       if (prtad != hw->phy.addr)
+               return -EINVAL;
+       rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+       if (!rc)
+               rc = value;
+       return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+                           u16 addr, u16 value)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (prtad != hw->phy.addr)
+               return -EINVAL;
+       return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+                          int cmd)
+{
+       struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data;
+       int prtad, devad, ret;
+
+       prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+       devad = (mii->phy_id & MDIO_PHY_ID_DEVAD);
+
+       if (cmd == SIOCGMIIREG) {
+               ret = ixgbe_mdio_read(netdev, prtad, devad, mii->reg_num);
+               if (ret < 0)
+                       return ret;
+               mii->val_out = ret;
+               return 0;
+       } else {
+               return ixgbe_mdio_write(netdev, prtad, devad, mii->reg_num,
+                                       mii->val_in);
+       }
+}
+
 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+#ifdef CONFIG_IXGBE_PTP
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#endif
        switch (cmd) {
+#ifdef CONFIG_IXGBE_PTP
+       case SIOCSHWTSTAMP:
+               return ixgbe_ptp_hwtstamp_ioctl(adapter, ifr, cmd);
+#endif
 #ifdef ETHTOOL_OPS_COMPAT
        case SIOCETHTOOL:
                return ethtool_ioctl(ifr);
 #endif
 
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               return ixgbe_mii_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
@@ -8203,6 +8366,8 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                        if (hw->bus.func != 0)
                                break;
                case IXGBE_SUBDEV_ID_82599_SFP:
+               case IXGBE_SUBDEV_ID_82599_RNDC:
+               case IXGBE_SUBDEV_ID_82599_ECNA_DP:
                        is_wol_supported = 1;
                        break;
                }
@@ -8246,7 +8411,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        struct ixgbe_adapter *adapter = NULL;
        struct ixgbe_hw *hw = NULL;
        static int cards_found;
-       int i, err, pci_using_dac;
+       int err, pci_using_dac;
        u16 offset;
        u16 eeprom_verh, eeprom_verl, eeprom_cfg_blkh, eeprom_cfg_blkl;
        u32 etrack_id;
@@ -8523,7 +8688,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 #ifdef IXGBE_FCOE
 #ifdef NETIF_F_FSO
        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-               ixgbe_get_device_caps(hw, &device_caps);
+               hw->mac.ops.get_device_caps(hw, &device_caps);
                if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) {
                        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
                        adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
@@ -8600,27 +8765,32 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
+       /* WoL is not supported on all devices */
        adapter->wol = 0;
+       hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
        if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
                adapter->wol = IXGBE_WUFC_MAG;
 
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+#ifdef CONFIG_IXGBE_PTP
+       ixgbe_ptp_init(adapter);
 
+#endif
        /*
         * Save off EEPROM version number and Option Rom version which
         * together make a unique identifier for the eeprom
         */
-       ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
-       ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+       hw->eeprom.ops.read(hw, 0x2e, &eeprom_verh);
+       hw->eeprom.ops.read(hw, 0x2d, &eeprom_verl);
        etrack_id = (eeprom_verh << 16) | eeprom_verl;
 
-       ixgbe_read_eeprom(hw, 0x17, &offset);
+       hw->eeprom.ops.read(hw, 0x17, &offset);
 
        /* Make sure offset to SCSI block is valid */
        if (!(offset == 0x0) && !(offset == 0xffff)) {
-               ixgbe_read_eeprom(hw, offset + 0x84, &eeprom_cfg_blkh);
-               ixgbe_read_eeprom(hw, offset + 0x83, &eeprom_cfg_blkl);
+               hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh);
+               hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl);
 
                /* Only display Option Rom if it exists */
                if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
@@ -8662,11 +8832,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        adapter->netdev_registered = true;
 
-       /* power down the optics */
-       if ((hw->phy.multispeed_fiber) ||
-           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-            (hw->mac.type == ixgbe_mac_82599EB)))
-               ixgbe_disable_tx_laser(hw);
+       /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+       if (hw->mac.ops.disable_tx_laser &&
+           ((hw->phy.multispeed_fiber) ||
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+             (hw->mac.type == ixgbe_mac_82599EB))))
+               hw->mac.ops.disable_tx_laser(hw);
 
        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
@@ -8695,18 +8866,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        /* print all messages at the end so that we use our eth%d name */
        /* print bus type/speed/width info */
-       e_dev_info("(PCI Express:%s:%s) ",
-                  (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+       e_dev_info("(PCI Express:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
+                  (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
+                  hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
                   hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
                   "Unknown"),
                   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
                   hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
                   hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
-                  "Unknown"));
-
-       /* print the MAC address */
-       for (i = 0; i < 6; i++)
-               pr_cont("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+                  "Unknown"), netdev->dev_addr[0], netdev->dev_addr[1],
+                  netdev->dev_addr[2], netdev->dev_addr[3],
+                  netdev->dev_addr[4], netdev->dev_addr[5]);
 
        /* First try to read PBA as a string */
        err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH);
@@ -8763,13 +8933,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 no_info_string:
 #ifdef CONFIG_PCI_IOV
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               int i;
+
                for (i = 0; i < adapter->num_vfs; i++)
                        ixgbe_vf_configuration(pdev, (i | 0x10000000));
        }
 #endif
 
        /* firmware requires blank driver version */
-       ixgbe_set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);
+       if (hw->mac.ops.set_fw_drv_ver)
+               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);
 
 #if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
        /* add san mac addr to netdev */
@@ -8977,11 +9150,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                }
 
                /* Find the pci device of the offending VF */
-               vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
                while (vfdev) {
                        if (vfdev->devfn == (req_id & 0xFF))
                                break;
-                       vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+                       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
                                               device_id, vfdev);
                }
                /*
index 124f00de7f472de7cbfe16240619b897acb34cdf..56d697f2c7e6892e3a9d040aed77b6114e780435 100644 (file)
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK  (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+       ixgbe_mbox_api_10,      /* API version 1.0, linux/freebsd VF driver */
+       ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
+       ixgbe_mbox_api_11,      /* API version 1.1, linux/freebsd VF driver */
+       /* This value should always be last */
+       ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET         0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
 #define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
 #define IXGBE_VF_SET_MACVLAN   0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES    0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES     1       /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES     2       /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN    3       /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE     4       /* Default queue offset */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN      4
 
 #define IXGBE_PF_CONTROL_MSG           0x0100 /* PF control message */
 
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_ENABLE_MACADDR                0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR       0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS          0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC     0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU               0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU               0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN      0x0101 /* enable transparent vlan */
 
 #define IXGBE_VF_MBX_INIT_TIMEOUT      2000 /* number of retries on mailbox */
 #define IXGBE_VF_MBX_INIT_DELAY                500  /* microseconds between retries */
index ae8d7a4552dfcd218e39d397c066918099b67811..7b6f725709e79188196de16994ce77a15e15670c 100644 (file)
@@ -1060,7 +1060,7 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
                                "hardware.  Disabling RSC.\n");
                        netdev->features &= ~NETIF_F_LRO;
                }
-#endif
+#endif /* IXGBE_NO_LRO */
        }
        { /*
           * allow_unsupported_sfp - Enable/Disable support for unsupported
index 339a2d4da36e7ceb727a587af44c5eaa2c266b33..c5609211cc9d62a381877350527669735e377202 100644 (file)
@@ -467,7 +467,7 @@ static int ixgbe_maclla1(char *page, char **start, off_t off,
        if (hw == NULL)
                return snprintf(page, count, "error: no hw data\n");
 
-       rc = ixgbe_read_eeprom_buffer(hw, first_word, word_count,
+       rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count,
                                           eeprom_buff);
        if (rc != 0)
                return snprintf(page, count, "error: reading buffer\n");
index f84e19c40d1198f9f719b08eedbfce2892e4cc92..522052db631fbf912db9279930b162696c356205 100644 (file)
 *******************************************************************************/
 
 #include "ixgbe.h"
+#ifdef CONFIG_IXGBE_PTP
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/*
+ * The 82599 and the X540 do not have true 64-bit nanosecond scale
+ * counter registers. Instead, SYSTIME is defined by a fixed point
+ * system which allows the user to define the scale counter increment
+ * value at every level change of the oscillator driving the SYSTIME
+ * value. For both devices the TIMINCA:IV field defines this
+ * increment. On the X540 device, 31 bits are provided. However, the
+ * 82599 provides only 24 bits. The time unit is determined by the
+ * clock frequency of the oscillator in combination with the TIMINCA
+ * register. When these devices link at 10Gb the oscillator has a
+ * period of 6.4ns. In order to convert the scale counter into
+ * nanoseconds the cyclecounter and timecounter structures are
+ * used. The SYSTIME registers need to be converted to ns values by use
+ * of only a right shift (division by power of 2). The following math
+ * determines the largest incvalue that will fit into the available
+ * bits in the TIMINCA register.
+ *
+ * PeriodWidth: Number of bits to store the clock period
+ * MaxWidth: The maximum width value of the TIMINCA register
+ * Period: The clock period for the oscillator
+ * round(): discard the fractional portion of the calculation
+ *
+ * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
+ *
+ * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
+ * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
+ *
+ * The period also changes based on the link speed:
+ * At 10Gb link or no link, the period remains the same.
+ * At 1Gb link, the period is multiplied by 10. (64ns)
+ * At 100Mb link, the period is multiplied by 100. (640ns)
+ *
+ * The calculated value allows us to right shift the SYSTIME register
+ * value in order to quickly convert it into a nanosecond clock,
+ * while allowing for the maximum possible adjustment value.
+ *
+ * These diagrams are only for the 10Gb link period
+ *
+ *           SYSTIMEH            SYSTIMEL
+ *       +--------------+  +--------------+
+ * X540  |      32      |  | 1 | 3 |  28  |
+ *       +--------------+  +--------------+
+ *        \________ 36 bits ______/  fract
+ *
+ *       +--------------+  +--------------+
+ * 82599 |      32      |  | 8 | 3 |  21  |
+ *       +--------------+  +--------------+
+ *        \________ 43 bits ______/  fract
+ *
+ * The 36-bit X540 SYSTIME overflows every
+ *   2^36 * 10^-9 s ~= 68.7 seconds (about 1.14 minutes)
+ *
+ * The 43-bit 82599 SYSTIME overflows every
+ *   2^43 * 10^-9 s ~= 8796 seconds (about 2.4 hours)
+ */
+#define IXGBE_INCVAL_10GB 0x66666666
+#define IXGBE_INCVAL_1GB  0x40000000
+#define IXGBE_INCVAL_100  0x50000000
+
+#define IXGBE_INCVAL_SHIFT_10GB  28
+#define IXGBE_INCVAL_SHIFT_1GB   24
+#define IXGBE_INCVAL_SHIFT_100   21
+
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+#define IXGBE_MAX_TIMEADJ_VALUE  0x7FFFFFFFFFFFFFFFULL
+
+#define IXGBE_OVERFLOW_PERIOD    (HZ * 30)
+
+#ifndef NSECS_PER_SEC
+#define NSECS_PER_SEC 1000000000ULL
+#endif
+
+static struct sock_filter ptp_filter[] = {
+       PTP_FILTER
+};
+
+/**
+ * ixgbe_ptp_setup_sdp
+ * @hw: the hardware private structure
+ *
+ * this function enables or disables the clock out feature on SDP0 for
+ * the X540 device. It will create a 1-second periodic output that can
+ * be used as the PPS (via an interrupt).
+ *
+ * It calculates when the systime will be on an exact second, and then
+ * aligns the start of the PPS signal to that value. The shift is
+ * necessary because the shift value changes with the link speed.
+ */
+static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int shift = adapter->hw_cc.shift;
+       u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
+       u64 ns = 0, clock_edge = 0;
+
+       if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) &&
+           (hw->mac.type == ixgbe_mac_X540)) {
+
+               /* disable the pin first */
+               IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+               IXGBE_WRITE_FLUSH(hw);
+
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+               /*
+                * enable the SDP0 pin as output, and connected to the
+                * native function for Timesync (ClockOut)
+                */
+               esdp |= (IXGBE_ESDP_SDP0_DIR |
+                        IXGBE_ESDP_SDP0_NATIVE);
+
+               /*
+                * enable the Clock Out feature on SDP0, and allow
+                * interrupts to occur when the pin changes
+                */
+               tsauxc = (IXGBE_TSAUXC_EN_CLK |
+                         IXGBE_TSAUXC_SYNCLK |
+                         IXGBE_TSAUXC_SDP0_INT);
+
+               /* clock period (or pulse length) */
+               clktiml = (u32)(NSECS_PER_SEC << shift);
+               clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+
+               /*
+                * Account for the cyclecounter wrap-around value by
+                * using the converted ns value of the current time to
+                * check for when the next aligned second would occur.
+                */
+               clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+               clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+               ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge);
+
+               div_u64_rem(ns, NSECS_PER_SEC, &rem);
+               clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);
+
+               /* specify the initial clock start time */
+               trgttiml = (u32)clock_edge;
+               trgttimh = (u32)(clock_edge >> 32);
+
+               IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
+               IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
+               IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
+               IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
+
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+               IXGBE_WRITE_FLUSH(hw);
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+       }
+}
+
+/**
+ * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static cycle_t ixgbe_ptp_read(const struct cyclecounter *hw_cc)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(hw_cc, struct ixgbe_adapter, hw_cc);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64 stamp = 0;
+
+       stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+       stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+
+       return stamp;
+}
+
+/**
+ * ixgbe_ptp_adjfreq
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ */
+static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(ptp, struct ixgbe_adapter, ptp_caps);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64 freq;
+       u32 diff, incval;
+       int neg_adj = 0;
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+
+       smp_mb();
+       incval = ACCESS_ONCE(adapter->base_incval);
+
+       freq = incval;
+       freq *= ppb;
+       diff = div_u64(freq, 1000000000ULL);
+
+       incval = neg_adj ? (incval - diff) : (incval + diff);
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X540:
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+               break;
+       case ixgbe_mac_82599EB:
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+                               (1 << IXGBE_INCPER_SHIFT_82599) |
+                               incval);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(ptp, struct ixgbe_adapter, ptp_caps);
+       unsigned long flags;
+       u64 now;
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+       now = timecounter_read(&adapter->hw_tc);
+       now += delta;
+
+       /* reset the timecounter */
+       timecounter_init(&adapter->hw_tc,
+                        &adapter->hw_cc,
+                        now);
+
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       ixgbe_ptp_setup_sdp(adapter);
+
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(ptp, struct ixgbe_adapter, ptp_caps);
+       u64 ns;
+       u32 remainder;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       ns = timecounter_read(&adapter->hw_tc);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+       ts->tv_nsec = remainder;
+
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
+                            const struct timespec *ts)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(ptp, struct ixgbe_adapter, ptp_caps);
+       u64 ns;
+       unsigned long flags;
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       /* reset the timecounter */
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       ixgbe_ptp_setup_sdp(adapter);
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ * enable (or disable) ancillary features of the phc subsystem.
+ * our driver only supports the PPS feature on the X540
+ */
+static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
+                           struct ptp_clock_request *rq, int on)
+{
+       struct ixgbe_adapter *adapter =
+               container_of(ptp, struct ixgbe_adapter, ptp_caps);
+
+       /**
+        * When PPS is enabled, unmask the interrupt for the ClockOut
+        * feature, so that the interrupt handler can send the PPS
+        * event when the clock SDP triggers. Clear mask when PPS is
+        * disabled
+        */
+       if (rq->type == PTP_CLK_REQ_PPS) {
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_X540:
+                       if (on)
+                               adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
+                       else
+                               adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
+
+                       ixgbe_ptp_setup_sdp(adapter);
+                       return 0;
+               default:
+                       break;
+               }
+       }
+
+       return -ENOTSUPP;
+}
+
+/**
+ * ixgbe_ptp_check_pps_event
+ * @adapter: the private adapter structure
+ * @eicr: the interrupt cause register value
+ *
+ * This function is called from the interrupt handler to check for and
+ * deliver a pending pps event.
+ */
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ptp_clock_event event;
+
+       event.type = PTP_CLOCK_PPS;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X540:
+               ptp_clock_event(adapter->ptp_clock, &event);
+               break;
+       default:
+               break;
+       }
+}
+
+
+/**
+ * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @adapter: pointer to the adapter structure
+ *
+ * this function is called periodically to make sure the timecounter
+ * is read often enough that a wrap of the SYSTIME registers is never
+ * missed. It needs to run approximately twice a minute when no other
+ * PTP activity is occurring.
+ */
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
+{
+       unsigned long elapsed_jiffies = jiffies - adapter->last_overflow_check;
+       struct timespec ts;
+
+       if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
+           (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
+               ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+               adapter->last_overflow_check = jiffies;
+       }
+}
+
+/**
+ * ixgbe_ptp_match - determine if this skb matches a ptp packet
+ * @skb: pointer to the skb
+ * @rx_filter: the rx_filter value from the hwtstamp_config
+ *
+ * Determine whether the skb should have been timestamped, assuming the
+ * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
+ * should have a timestamp waiting in the registers, and 0 otherwise.
+ *
+ * For V1 packets the message type has to be checked to determine whether
+ * they match. However, we can't directly access the data because it might be
+ * fragmented in the SKB, in paged memory. In order to work around this, we
+ * use skb_copy_bits which will properly copy the data whether it is in the
+ * paged memory fragments or not. We have to copy the IP header as well as the
+ * message type.
+ */
+static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+{
+       struct iphdr iph;
+       u8 msgtype;
+       unsigned int type, offset;
+
+       if (rx_filter == HWTSTAMP_FILTER_NONE)
+               return 0;
+
+       type = sk_run_filter(skb, ptp_filter);
+
+       if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
+               return type & PTP_CLASS_V2;
+
+       /* For the remaining cases actually check message type */
+       switch (type) {
+       case PTP_CLASS_V1_IPV4:
+               skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
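+               /* iph.ihl counts 32-bit words, so the byte length is ihl << 2 */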
+               offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
+               break;
+       case PTP_CLASS_V1_IPV6:
+               offset = OFF_PTP6 + OFF_PTP_CONTROL;
+               break;
+       default:
+               /* other cases invalid or handled above */
+               return 0;
+       }
+
+       /* Make sure our buffer is long enough */
+       if (skb->len < offset)
+               return 0;
+
+       skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+
+       switch (rx_filter) {
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
+       default:
+               return 0;
+       }
+}
+
+/**
+ * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                          struct sk_buff *skb)
+{
+       struct ixgbe_adapter *adapter;
+       struct ixgbe_hw *hw;
+       struct skb_shared_hwtstamps shhwtstamps;
+       u64 regval = 0, ns;
+       u32 tsynctxctl;
+       unsigned long flags;
+
+       /* we cannot process timestamps on a ring without a q_vector */
+       if (!q_vector || !q_vector->adapter)
+               return;
+
+       adapter = q_vector->adapter;
+       hw = &adapter->hw;
+
+       tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+       regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+       regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
+
+       /*
+        * if TX timestamp is not valid, exit after clearing the
+        * timestamp registers
+        */
+       if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
+               return;
+
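+       /* tmreg_lock protects the timecounter state used by cyc2time */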
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       ns = timecounter_cyc2time(&adapter->hw_tc, regval);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+       shhwtstamps.hwtstamp = ns_to_ktime(ns);
+       skb_tstamp_tx(skb, &shhwtstamps);
+}
+
+/**
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_desc: the rx descriptor
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                          union ixgbe_adv_rx_desc *rx_desc,
+                          struct sk_buff *skb)
+{
+       struct ixgbe_adapter *adapter;
+       struct ixgbe_hw *hw;
+       struct skb_shared_hwtstamps *shhwtstamps;
+       u64 regval = 0, ns;
+       u32 tsyncrxctl;
+       unsigned long flags;
+
+       /* we cannot process timestamps on a ring without a q_vector */
+       if (!q_vector || !q_vector->adapter)
+               return;
+
+       adapter = q_vector->adapter;
+       hw = &adapter->hw;
+
+       tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+
+       /* Check if we have a valid timestamp and make sure the skb should
+        * have been timestamped */
+       if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
+                  !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+               return;
+
+       /*
+        * Always read the registers, in order to clear a possible fault
+        * because of stagnant RX timestamp values for a packet that never
+        * reached the queue.
+        */
+       regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+       regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
+
+       /*
+        * If the timestamp bit is set in the packet's descriptor, we know the
+        * timestamp belongs to this packet. No other packet can be
+        * timestamped until the registers for timestamping have been read.
+        * Therefore only one packet with this bit can be in the queue at a
+        * time, and the rx timestamp values that were in the registers belong
+        * to this packet.
+        *
+        * If nothing went wrong, then it should have a skb_shared_tx that we
+        * can turn into a skb_shared_hwtstamps.
+        */
+       if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+               return;
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       ns = timecounter_cyc2time(&adapter->hw_tc, regval);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       shhwtstamps = skb_hwtstamps(skb);
+       shhwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ * @cmd: particular ioctl requested
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode. This more accurately tells the user what the hardware is going
+ * to do anyway.
+ */
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
+                            struct ifreq *ifr, int cmd)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct hwtstamp_config config;
+       u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
+       u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
+       u32 tsync_rx_mtrl = 0;
+       bool is_l4 = false;
+       bool is_l2 = false;
+       u32 regval;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               tsync_tx_ctl = 0;
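+               /* fall through - TX_OFF and TX_ON both leave the switch */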
+       case HWTSTAMP_TX_ON:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               tsync_rx_ctl = 0;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_ALL:
+       default:
+               /*
+                * register RXMTRL must be set in order to do V1 packets,
+                * therefore it is not possible to time stamp both V1 Sync and
+                * Delay_Req messages and hardware does not support
+                * timestamping all packets => return error
+                */
+               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               return -ERANGE;
+       }
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (tsync_rx_ctl | tsync_tx_ctl)
+                       return -ERANGE;
+               return 0;
+       }
+
+       /* Store filter value for later use */
+       adapter->rx_hwtstamp_filter = config.rx_filter;
+
+       /* define ethertype filter for timestamped packets */
+       if (is_l2)
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
+                               (IXGBE_ETQF_FILTER_EN | /* enable filter */
+                                IXGBE_ETQF_1588 | /* enable timestamping */
+                                ETH_P_1588));     /* 1588 eth protocol type */
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
+
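+/* IEEE 1588 event messages (Sync, Delay_Req) use UDP port 319 */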
+#define PTP_PORT 319
+       /* L4 Queue Filter[3]: filter by destination port and protocol */
+       if (is_l4) {
+               u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
+                           | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
+                           | IXGBE_FTQF_QUEUE_ENABLE);
+
+               ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
+                         & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
+                         & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
+                        << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
+
+               IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
+                               (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
+                                IXGBE_IMIR_SIZE_BP_82599));
+
+               /* enable port check */
+               IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
+                               (htons(PTP_PORT) |
+                                htons(PTP_PORT) << 16));
+
+               IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
+
+               tsync_rx_mtrl |= PTP_PORT << 16;
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
+       }
+
+       /* enable/disable TX */
+       regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+       regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
+       regval |= tsync_tx_ctl;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
+
+       /* enable/disable RX */
+       regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+       regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
+       regval |= tsync_rx_ctl;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
+
+       /* define which PTP packets are time stamped */
+       IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* clear TX/RX time stamp registers, just to be sure */
+       regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
+       regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
+}
+
+/**
+ * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
+ * @adapter: pointer to the adapter structure
+ *
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
+ */
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 incval = 0;
+       u32 shift = 0;
+       unsigned long flags;
+
+       /**
+        * Scale the NIC cycle counter by a large factor so that
+        * relatively small corrections to the frequency can be added
+        * or subtracted. The drawbacks of a large factor include
+        * (a) the clock register overflows more quickly, (b) the cycle
+        * counter structure must be able to convert the systime value
+        * to nanoseconds using only a multiplier and a right-shift,
+        * and (c) the value must fit within the timinca register space
+        * => math based on internal DMA clock rate and available bits
+        *
+        * Note that when there is no link, internal DMA clock is same as when
+        * link speed is 10Gb. Set the registers correctly even when link is
+        * down to preserve the clock setting
+        */
+       switch (adapter->link_speed) {
+       case IXGBE_LINK_SPEED_100_FULL:
+               incval = IXGBE_INCVAL_100;
+               shift = IXGBE_INCVAL_SHIFT_100;
+               break;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               incval = IXGBE_INCVAL_1GB;
+               shift = IXGBE_INCVAL_SHIFT_1GB;
+               break;
+       case IXGBE_LINK_SPEED_10GB_FULL:
+       default:
+               incval = IXGBE_INCVAL_10GB;
+               shift = IXGBE_INCVAL_SHIFT_10GB;
+               break;
+       }
+
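+       /*
+        * for reference: at 10Gb (and with no link) the internal DMA clock
+        * ticks roughly every 6.4 ns, so incval / 2^shift must work out to
+        * the tick period in nanoseconds
+        */
+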
+       /**
+        * Modify the calculated values to fit within the correct
+        * number of bits specified by the hardware. The 82599 doesn't
+        * have the same space as the X540, so bitshift the calculated
+        * values to fit.
+        */
+       switch (hw->mac.type) {
+       case ixgbe_mac_X540:
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+               break;
+       case ixgbe_mac_82599EB:
+               incval >>= IXGBE_INCVAL_SHIFT_82599;
+               shift -= IXGBE_INCVAL_SHIFT_82599;
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+                               (1 << IXGBE_INCPER_SHIFT_82599) |
+                               incval);
+               break;
+       default:
+               /* other devices aren't supported */
+               return;
+       }
+
+       /* update the base incval used to calculate frequency adjustment */
+       ACCESS_ONCE(adapter->base_incval) = incval;
+       smp_mb();
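+       /* the barrier pairs with the reader in ixgbe_ptp_adjfreq */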
+
+       /* need lock to prevent incorrect read while modifying cyclecounter */
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+       memset(&adapter->hw_cc, 0, sizeof(adapter->hw_cc));
+       adapter->hw_cc.read = ixgbe_ptp_read;
+       adapter->hw_cc.mask = CLOCKSOURCE_MASK(64);
+       adapter->hw_cc.shift = shift;
+       adapter->hw_cc.mult = 1;
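+       /*
+        * mult stays 1 because TIMINCA already pre-scales SYSTIME such
+        * that ns = cycles >> shift; frequency tuning is done by
+        * rewriting TIMINCA in ixgbe_ptp_adjfreq rather than here
+        */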
+
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       unsigned long flags;
+
+       /* set SYSTIME registers to 0 just in case */
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+       IXGBE_WRITE_FLUSH(hw);
+
+       ixgbe_ptp_start_cyclecounter(adapter);
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+       /* reset the ns time counter */
+       timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
+                        ktime_to_ns(ktime_get_real()));
+
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       /*
+        * now that the shift has been calculated and the systime
+        * registers reset, enable pps signal
+        */
+       ixgbe_ptp_setup_sdp(adapter);
+}
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_X540:
+               snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 250000000;
+               adapter->ptp_caps.n_alarm = 0;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.n_per_out = 0;
+               adapter->ptp_caps.pps = 1;
+               adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
+               adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
+               adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
+               adapter->ptp_caps.settime = ixgbe_ptp_settime;
+               adapter->ptp_caps.enable = ixgbe_ptp_enable;
+               break;
+       case ixgbe_mac_82599EB:
+               snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 250000000;
+               adapter->ptp_caps.n_alarm = 0;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.n_per_out = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
+               adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
+               adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
+               adapter->ptp_caps.settime = ixgbe_ptp_settime;
+               adapter->ptp_caps.enable = ixgbe_ptp_enable;
+               break;
+       default:
+               adapter->ptp_clock = NULL;
+               return;
+       }
+
+       /* initialize the ptp filter */
+       if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
+               e_dev_warn("ptp_filter_init failed\n");
+
+       spin_lock_init(&adapter->tmreg_lock);
+
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
+       if (IS_ERR(adapter->ptp_clock)) {
+               adapter->ptp_clock = NULL;
+               e_dev_err("ptp_clock_register failed\n");
+       } else {
+               e_dev_info("registered PHC device on %s\n", netdev->name);
+       }
+
+       ixgbe_ptp_reset(adapter);
+
+       /* set the flag that PTP has been enabled */
+       adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+}
+
+/**
+ * ixgbe_ptp_stop - disable ptp device and stop the overflow check
+ * @adapter: pointer to adapter struct
+ *
+ * this function stops the ptp support, and cancels the delayed work.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+       /* stop the overflow check task */
+       adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
+                            IXGBE_FLAG2_PTP_PPS_ENABLED);
+
+       ixgbe_ptp_setup_sdp(adapter);
+
+       if (adapter->ptp_clock) {
+               ptp_clock_unregister(adapter->ptp_clock);
+               adapter->ptp_clock = NULL;
+               e_dev_info("removed PHC on %s\n",
+                          adapter->netdev->name);
+       }
+}
+#endif /* CONFIG_IXGBE_PTP */
index e04295e91f9c1a699778f10d10953445ff23a46d..cf08778763dae12da9822945dd517bdc18d87459 100644 (file)
 #include "ixgbe_sriov.h"
 
 #ifdef CONFIG_PCI_IOV
-#if defined(IFLA_VF_MAX) && defined(__VMKLNX__)
-#define pci_enable_sriov(dev,vfs) \
-       (vmklnx_enable_vfs((dev), (vfs), NULL, NULL) != (vfs) ? -ENOTSUPP : 0)
-
-#define pci_disable_sriov(dev) \
-       vmklnx_disable_vfs((dev), adapter->num_vfs, NULL, NULL)
-
-static VMK_ReturnStatus ixgbe_passthru_ops(struct net_device *netdev,
-                                               vmk_NetPTOP op,
-                                               void *pargs)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       VMK_ReturnStatus ret;
-       
-       switch (op) {
-       case VMK_NETPTOP_VF_SET_MAC:
-       {
-               vmk_NetPTOPVFSetMacArgs *args = pargs;
-
-               if (is_zero_ether_addr(args->mac)) {
-                       /* Remove the VF mac address */
-                       ixgbe_del_mac_filter(adapter,
-                               adapter->vfinfo[args->vf].vf_mac_addresses,
-                               args->vf);
-                       memset(adapter->vfinfo[args->vf].vf_mac_addresses,
-                               0, ETH_ALEN);
-                       adapter->vfinfo[args->vf].pf_set_mac = false;
-                       ret = VMK_OK;
-               } else {
-                       if (ixgbe_ndo_set_vf_mac(netdev, 
-                                                args->vf, args->mac) < 0)
-                               ret = VMK_FAILURE;
-                       else
-                               ret = VMK_OK;
-               }
-               break;
-       }
-       case VMK_NETPTOP_VF_SET_DEFAULT_VLAN:
-       {
-               vmk_NetPTOPVFSetDefaultVlanArgs *args = pargs;
-               
-               if (args->enable) {
-                       adapter->vfinfo[args->vf].pf_set_vlan = true;
-                       ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, args->vid,
-                               args->prio) ? VMK_FAILURE : VMK_OK;
-               } else {
-                       adapter->vfinfo[args->vf].pf_set_vlan = false;
-                       ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, 0, 0) ?
-                               VMK_FAILURE : VMK_OK;
-               }
-               break;
-       }
-       default:
-               e_err(probe, "Unhandled OP %d\n", op);
-               ret = VMK_FAILURE;
-               break;
-       }
-       return ret;
-}
-#endif
 
 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 {
@@ -185,7 +125,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                                  IXGBE_FLAG_SRIOV_REPLICATION_ENABLE;
 
                /* limit traffic classes based on VFs enabled */
-               if (adapter->num_vfs < 32) {
+               if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
+                   (adapter->num_vfs < 16)) {
+                       adapter->dcb_cfg.num_tcs.pg_tcs =
+                                               IXGBE_DCB_MAX_TRAFFIC_CLASS;
+                       adapter->dcb_cfg.num_tcs.pfc_tcs =
+                                               IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               } else if (adapter->num_vfs < 32) {
                        adapter->dcb_cfg.num_tcs.pg_tcs = 4;
                        adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
                } else {
@@ -200,20 +146,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                /* disable RSC when in SR-IOV mode */
                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                                     IXGBE_FLAG2_RSC_ENABLED);
-#ifdef IXGBE_FCOE
-               /*
-                * When SR-IOV is enabled 82599 cannot support jumbo frames
-                * so we must disable FCoE because we cannot support FCoE MTU.
-                */
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
-                                           IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-#if defined(IFLA_VF_MAX) && defined(__VMKLNX__)
-               /* Register control callback */
-               e_info(probe, "Registered Passthru Ops\n");
-               VMK_REGISTER_PT_OPS(adapter->netdev, ixgbe_passthru_ops);
-#endif
+
                /* enable spoof checking for all VFs */
                for (i = 0; i < adapter->num_vfs; i++)
                        adapter->vfinfo[i].spoofchk_enabled = true;
@@ -246,7 +179,7 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
        }
 
        /* loop through all the VFs to see if we own any that are assigned */
-       vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, NULL);
+       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
        while (vfdev) {
                /* if we don't own it we don't care */
                if (vfdev->is_virtfn && vfdev->physfn == pdev) {
@@ -255,7 +188,7 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
                                return true;
                }
 
-               vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, vfdev);
+               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
        }
 
 #endif
@@ -276,11 +209,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
        /* set num VFs to 0 to prevent access to vfinfo */
        adapter->num_vfs = 0;
 
-       if (adapter->vfinfo) {  
+       if (adapter->vfinfo) {
                kfree(adapter->vfinfo);
                adapter->vfinfo = NULL;
        }
+
        if (adapter->mv_list) {
                kfree(adapter->mv_list);
                adapter->mv_list = NULL;
@@ -325,9 +258,12 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
-int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
-                           int entries, u16 *hash_list, u32 vf)
+static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+                                  u32 *msgbuf, u32 vf)
 {
+       int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+                      >> IXGBE_VT_MSGINFO_SHIFT;
+       u16 *hash_list = (u16 *)&msgbuf[1];
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
@@ -396,38 +332,98 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 
 int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
+
        /* VLAN 0 is a special case, don't allow it to be removed */
        if (!vid && !add)
                return 0;
 
-       return ixgbe_set_vfta(&adapter->hw, vid, vf, (bool)add);
+       return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
 }
 
-void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int new_mtu = msgbuf[1];
+       int max_frame = msgbuf[1];
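+       /* msgbuf[1] carries the VF's full max frame size, not its MTU */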
        u32 max_frs;
-       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-       /* Only X540 supports jumbo frames in IOV mode */
-       if (adapter->hw.mac.type != ixgbe_mac_X540)
-               return;
+       /*
+        * For 82599EB we have to keep all PFs and VFs operating with
+        * the same max_frame value in order to avoid sending an oversize
+        * frame to a VF.  In order to guarantee this is handled correctly
+        * for all cases we have several special exceptions to take into
+        * account before we can enable the VF for receive
+        */
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               struct net_device *dev = adapter->netdev;
+               int pf_max_frame = dev->mtu + ETH_HLEN;
+               u32 reg_offset, vf_shift, vfre;
+               s32 err = 0;
+
+#ifdef IXGBE_FCOE
+               if (dev->features & NETIF_F_FCOE_MTU)
+                       pf_max_frame = max_t(int, pf_max_frame,
+                                            IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* IXGBE_FCOE */
+               switch (adapter->vfinfo[vf].vf_api) {
+               case ixgbe_mbox_api_11:
+                       /*
+                        * Version 1.1 supports jumbo frames on VFs if PF has
+                        * jumbo frames enabled which means legacy VFs are
+                        * disabled
+                        */
+                       if (pf_max_frame > ETH_FRAME_LEN)
+                               break;
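+                       /* fall through - legacy limits apply otherwise */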
+               default:
+                       /*
+                        * If the PF or VF are running w/ jumbo frames enabled
+                        * we need to shut down the VF Rx path as we cannot
+                        * support jumbo frames on legacy VFs
+                        */
+                       if ((pf_max_frame > ETH_FRAME_LEN) ||
+                           (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+                               err = -EINVAL;
+                       break;
+               }
+
+               /* determine VF receive enable location */
+               vf_shift = vf % 32;
+               reg_offset = vf / 32;
+
+               /* enable or disable receive depending on error */
+               vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+               if (err)
+                       vfre &= ~(1 << vf_shift);
+               else
+                       vfre |= 1 << vf_shift;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+               if (err) {
+                       e_err(drv, "VF max_frame %d out of range\n", max_frame);
+                       return err;
+               }
+       }
 
        /* MTU < 68 is an error and causes problems on some kernels */
-       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
-               e_err(drv, "VF mtu %d out of range\n", new_mtu);
-               return;
+       if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+               e_err(drv, "VF max_frame %d out of range\n", max_frame);
+               return -EINVAL;
        }
 
-       max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
-                  IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
-       if (max_frs < new_mtu) {
-               max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+       /* pull current max frame size from hardware */
+       max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       max_frs &= IXGBE_MHADD_MFS_MASK;
+       max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+       if (max_frs < max_frame) {
+               max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }
 
-       e_info(drv, "VF requests change max MTU to %d\n", new_mtu);
+       e_info(hw, "VF requests change max frame size to %d\n", max_frame);
+
+       return 0;
 }
 
 void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -441,34 +437,47 @@ void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 }
 
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+                           u16 vid, u16 qos, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
 
-       if (vid)
-               IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
-                               (vid | IXGBE_VMVIR_VLANA_DEFAULT));
-       else
-               IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
+
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 }
 
-inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+       u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+       /* add PF assigned VLAN or VLAN 0 */
+       ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 
        /* reset offloads to defaults */
-       if (adapter->vfinfo[vf].pf_vlan) {
-               ixgbe_set_vf_vlan(adapter, true,
-                                 adapter->vfinfo[vf].pf_vlan, vf);
-               ixgbe_set_vmvir(adapter,
-                               (adapter->vfinfo[vf].pf_vlan |
-                                (adapter->vfinfo[vf].pf_qos <<
-                                 VLAN_PRIO_SHIFT)), vf);
-               ixgbe_set_vmolr(hw, vf, false);
+       ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+       /* set outgoing tags for VFs */
+       if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+               ixgbe_clear_vmvir(adapter, vf);
        } else {
-               ixgbe_set_vf_vlan(adapter, true, 0, vf);
-               ixgbe_set_vmvir(adapter, 0, vf);
-               ixgbe_set_vmolr(hw, vf, true);
+               if (vfinfo->pf_qos || !num_tcs)
+                       ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+                                       vfinfo->pf_qos, vf);
+               else
+                       ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+                                       adapter->default_up, vf);
+
+               if (vfinfo->spoofchk_enabled)
+                       hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
        }
 
        /* reset multicast table array for vf */
@@ -478,6 +487,9 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        ixgbe_set_rx_mode(adapter->netdev);
 
        ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+
+       /* reset VF api back to unknown */
+       adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
 }
 
 int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -488,12 +500,70 @@ int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
        retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
        if (retval >= 0)
                memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-       else 
+       else
                memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
 
        return retval;
 }
 
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+                                 u32 *msgbuf, u32 vf)
+{
+       int api = msgbuf[1];
+
+       switch (api) {
+       case ixgbe_mbox_api_10:
+       case ixgbe_mbox_api_11:
+               adapter->vfinfo[vf].vf_api = api;
+               return 0;
+       default:
+               break;
+       }
+
+       e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+       return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+                              u32 *msgbuf, u32 vf)
+{
+       struct net_device *dev = adapter->netdev;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       unsigned int default_tc = 0;
+       u8 num_tcs = netdev_get_num_tc(dev);
+
+       /* verify the PF is supporting the correct APIs */
+       switch (adapter->vfinfo[vf].vf_api) {
+       case ixgbe_mbox_api_20:
+       case ixgbe_mbox_api_11:
+               break;
+       default:
+               return -1;
+       }
+
+       /* only allow 1 Tx queue for bandwidth limiting */
+       msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+       msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+       /* if TCs > 1 determine which TC belongs to default user priority */
+       if (num_tcs > 1)
+               default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+       /* notify VF of need for VLAN tag stripping, and correct queue */
+       if (num_tcs)
+               msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+       else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+               msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+       else
+               msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+       /* notify VF of default queue */
+       msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+       return 0;
+}
+
 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
 {
@@ -576,27 +646,58 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 }
 #endif /* CONFIG_PCI_IOV */
 
-inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 reg;
+       unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+       u32 reg, msgbuf[4];
        u32 reg_offset, vf_shift;
+       u8 *addr = (u8 *)(&msgbuf[1]);
         /* q_per_pool assumes that DCB is not enabled, hence in 64 pool mode */
         u32 q_per_pool = 2;
         int i;
 
+       e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+       /* reset the filters for the device */
+       ixgbe_vf_reset_event(adapter, vf);
+
+       /* set vf mac address */
+       ixgbe_set_vf_mac(adapter, vf, vf_mac);
+
        vf_shift = vf % 32;
        reg_offset = vf / 32;
 
-       /* enable transmit and receive for vf */
+       /* enable transmit for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-       reg |= (reg | (1 << vf_shift));
+       reg |= 1 << vf_shift;
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
+       /* enable receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-       reg |= (reg | (1 << vf_shift));
+       reg |= 1 << vf_shift;
+       /*
+        * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+        * For more info take a look at ixgbe_set_vf_lpe
+        */
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               struct net_device *dev = adapter->netdev;
+               int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef IXGBE_FCOE
+               if (dev->features & NETIF_F_FCOE_MTU)
+                       pf_max_frame = max_t(int, pf_max_frame,
+                                            IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* IXGBE_FCOE */
+               if (pf_max_frame > ETH_FRAME_LEN)
+                       reg &= ~(1 << vf_shift);
+       }
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
+       /* enable VF mailbox for further messages */
+       adapter->vfinfo[vf].clear_to_send = true;
+
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
@@ -610,7 +711,110 @@ inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
                IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
        }
 
-       ixgbe_vf_reset_event(adapter, vf);
+       /* reply to reset with ack and vf mac address */
+       msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+       memcpy(addr, vf_mac, ETH_ALEN);
+
+       /*
+        * Piggyback the multicast filter type so VF can compute the
+        * correct vectors
+        */
+       msgbuf[3] = hw->mac.mc_filter_type;
+       ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+       return 0;
+}
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+                                u32 *msgbuf, u32 vf)
+{
+       u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+       if (!is_valid_ether_addr(new_mac)) {
+               e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+               return -1;
+       }
+
+       if (adapter->vfinfo[vf].pf_set_mac &&
+           memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+                  ETH_ALEN)) {
+               e_warn(drv,
+                      "VF %d attempted to override administratively set MAC address\n"
+                      "Reload the VF driver to resume operations\n",
+                      vf);
+               return -1;
+       }
+
+       return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+                                u32 *msgbuf, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+       int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+       int err;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+       if (adapter->vfinfo[vf].pf_vlan || tcs) {
+               e_warn(drv,
+                      "VF %d attempted to override administratively set VLAN configuration\n"
+                      "Reload the VF driver to resume operations\n",
+                      vf);
+               return -1;
+       }
+
+       if (add)
+               adapter->vfinfo[vf].vlan_count++;
+       else if (adapter->vfinfo[vf].vlan_count)
+               adapter->vfinfo[vf].vlan_count--;
+
+       err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+       if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+       return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+                                   u32 *msgbuf, u32 vf)
+{
+       u8 *new_mac = ((u8 *)(&msgbuf[1]));
+       int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+                   IXGBE_VT_MSGINFO_SHIFT;
+       int err;
+
+       if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+               e_warn(drv,
+                      "VF %d requested MACVLAN filter but is administratively denied\n",
+                      vf);
+               return -1;
+       }
+
+       /* A non-zero index indicates the VF is setting a filter */
+       if (index) {
+               if (!is_valid_ether_addr(new_mac)) {
+                       e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+                       return -1;
+               }
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+               /*
+                * If the VF is allowed to set MAC filters then turn off
+                * anti-spoofing to avoid false positives.
+                */
+               if (adapter->vfinfo[vf].spoofchk_enabled)
+                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+#endif
+       }
+
+       err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+       if (err == -ENOSPC)
+               e_warn(drv,
+                      "VF %d has requested a MACVLAN filter but there is no space for it\n",
+                      vf);
+
+       return err < 0;
 }
 
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -619,10 +823,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        u32 msgbuf[mbx_size];
        struct ixgbe_hw *hw = &adapter->hw;
        s32 retval;
-       int entries;
-       u16 *hash_list;
-       int add, vid, index;
-       u8 *new_mac;
 
        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
@@ -633,30 +833,14 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
                return retval;
 
+       if (msgbuf[0] == IXGBE_VF_RESET)
+               return ixgbe_vf_reset_msg(adapter, vf);
+
        /*
         * until the vf completes a virtual function reset it should not be
         * allowed to start any configuration.
         */
 
-       if (msgbuf[0] == IXGBE_VF_RESET) {
-               unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
-               new_mac = (u8 *)(&msgbuf[1]);
-               adapter->vfinfo[vf].clear_to_send = false;
-               ixgbe_vf_reset_msg(adapter, vf);
-               adapter->vfinfo[vf].clear_to_send = true;
-               ixgbe_set_vf_mac(adapter, vf, vf_mac);
-
-               /* reply to reset with ack and vf mac address */
-               msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-               memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
-               /* Piggyback the multicast filter type so VF can compute the
-                * correct vectors */
-               msgbuf[3] = hw->mac.mc_filter_type;
-               retval = ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
-               return retval;
-       }
-
        if (!adapter->vfinfo[vf].clear_to_send) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
                ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -665,78 +849,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
        switch ((msgbuf[0] & 0xFFFF)) {
        case IXGBE_VF_SET_MAC_ADDR:
-               new_mac = ((u8 *)(&msgbuf[1]));
-               if (is_valid_ether_addr(new_mac) &&
-                   !adapter->vfinfo[vf].pf_set_mac) {
-                       e_info(probe, "Set MAC msg received from VF %d\n", vf);
-                       if (ixgbe_set_vf_mac(adapter, vf, new_mac) >= 0)
-                               retval = 0 ;
-                       else
-                               retval = -1;
-               } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
-                                 new_mac, ETH_ALEN)) {
-                       e_warn(drv, "VF %d attempted to override "
-                              "administratively set MAC address\nReload "
-                              "the VF driver to resume operations\n", vf);
-                       retval = -1;
-               }
+               retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_MULTICAST:
-               entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-                                       >> IXGBE_VT_MSGINFO_SHIFT;
-               hash_list = (u16 *)&msgbuf[1];
-               retval = ixgbe_set_vf_multicasts(adapter, entries,
-                                                hash_list, vf);
-               break;
-       case IXGBE_VF_SET_LPE:
-               e_info(probe, "Set LPE msg received from vf %d\n", vf);
-               ixgbe_set_vf_lpe(adapter, msgbuf);
+               retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_VLAN:
-               add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-                                       >> IXGBE_VT_MSGINFO_SHIFT;
-               vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
-               if (adapter->vfinfo[vf].pf_vlan) {
-                       e_warn(drv, "VF %d attempted to override "
-                              "administratively set VLAN configuration\n"
-                              "Reload the VF driver to resume operations\n",
-                              vf);
-                       retval = -1;
-               } else {
-                       if (add)
-                               adapter->vfinfo[vf].vlan_count++;
-                       else if (adapter->vfinfo[vf].vlan_count)
-                               adapter->vfinfo[vf].vlan_count--;
-                       retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-                       if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
-                               hw->mac.ops.set_vlan_anti_spoofing(hw,
-                                                               true, vf);
-               }
+               retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+               break;
+       case IXGBE_VF_SET_LPE:
+               retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_MACVLAN:
-               index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
-                       IXGBE_VT_MSGINFO_SHIFT;
-               if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
-                       e_warn(drv, "VF %d requested MACVLAN filter but is "
-                                   "administratively denied\n", vf);
-                       retval = -1;
-                       break;
-               }
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
-               /*
-                * If the VF is allowed to set MAC filters then turn off
-                * anti-spoofing to avoid false positives.  An index
-                * greater than 0 will indicate the VF is setting a
-                * macvlan MAC filter.
-                */
-               if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
-                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
-#endif
-               retval = ixgbe_set_vf_macvlan(adapter, vf, index,
-                                             (unsigned char *)(&msgbuf[1]));
-               if (retval == -ENOSPC)
-                       e_warn(drv, "VF %d has requested a MACVLAN filter "
-                                   "but there is no space for it\n", vf);
+               retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+               break;
+       case IXGBE_VF_API_NEGOTIATE:
+               retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+               break;
+       case IXGBE_VF_GET_QUEUES:
+               retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
                break;
        default:
                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -752,7 +883,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-       ixgbe_write_mbx(hw, msgbuf, 1, vf);
+       ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
 
        return retval;
 }
@@ -820,7 +951,6 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
                return -EINVAL;
-       adapter->vfinfo[vf].pf_set_mac = true;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n");
        retval = ixgbe_set_vf_mac(adapter, vf, mac);
@@ -845,7 +975,7 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter,
        err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
        if (err)
                goto out;
-       ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+       ixgbe_set_vmvir(adapter, vlan, qos, vf);
        ixgbe_set_vmolr(hw, vf, false);
        if (adapter->vfinfo[vf].spoofchk_enabled)
                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -870,7 +1000,7 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
 
        err = ixgbe_set_vf_vlan(adapter, false,
                                adapter->vfinfo[vf].pf_vlan, vf);
-       ixgbe_set_vmvir(adapter, 0, vf);
+       ixgbe_clear_vmvir(adapter, vf);
        ixgbe_set_vmolr(hw, vf, true);
        hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
        if (adapter->vfinfo[vf].vlan_count)
index 8dc6a1145ea7d3d4a70c2155afd4a1ec1aa01f0d..0e038f396e84f66cec3585751b4449bc39e1f214 100644 (file)
 #ifndef _IXGBE_SRIOV_H_
 #define _IXGBE_SRIOV_H_
 
-int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
-                           int entries, u16 *hash_list, u32 vf);
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
 int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
 void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
-void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
-void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                     int vf, unsigned char *mac_addr);
index 8e3c18e3eaa56f0956e817a3c60c31cc694b7174..25d7cd27d2fc77379175f41e4ed53078161c332f 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kobject.h>
 #include <linux/device.h>
 #include <linux/netdevice.h>
+#include <linux/time.h>
 
 /*
  * This file provides a sysfs interface to export information from the
@@ -519,7 +520,7 @@ static ssize_t ixgbe_maclla1(struct kobject *kobj,
        if (hw == NULL)
                return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
 
-       rc = ixgbe_read_eeprom_buffer(hw, first_word, word_count,
+       rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count,
                                      eeprom_buff);
        if (rc != 0)
                return snprintf(buf, PAGE_SIZE, "error: reading buffer\n");
@@ -989,29 +990,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
                goto err;
 
        /* Don't create thermal subkobjs if no data present */
-       if (ixgbe_thermal_present(adapter->info_kobj) != true)
-               goto exit;
-
-       for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
-
-               char buf[16];
-
-               /*
-                * Likewise only create individual kobjs that have
-                * meaningful data.
-                */
-               if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
-                       continue;
-
-               /* directory named after sensor offset */
-               snprintf(buf, sizeof(buf), "sensor_%d", i);
-               adapter->therm_kobj[i] =
-                       kobject_create_and_add(buf, adapter->info_kobj);
-               if (adapter->therm_kobj[i] == NULL)
-                       goto err;
-               if (sysfs_create_group(adapter->therm_kobj[i],
-                                      &therm_attr_group))
-                       goto err;
+       if (ixgbe_thermal_present(adapter->info_kobj) == true) {
+               for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+
+                       char buf[16];
+
+                       /*
+                        * Likewise only create individual kobjs that have
+                        * meaningful data.
+                        */
+                       if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+                               continue;
+
+                       /* directory named after sensor offset */
+                       snprintf(buf, sizeof(buf), "sensor_%d", i);
+                       adapter->therm_kobj[i] =
+                               kobject_create_and_add(buf, adapter->info_kobj);
+                       if (adapter->therm_kobj[i] == NULL)
+                               goto err;
+                       if (sysfs_create_group(adapter->therm_kobj[i],
+                                              &therm_attr_group))
+                               goto err;
+               }
        }
 
        goto exit;
index 6e8b2fa33d4c8bbe6351a424dc2b537be7cfa127..ec91d1c9adca91cd4116a39f3ab7ca242f7402d4 100644 (file)
@@ -31,9 +31,6 @@
 #include "ixgbe_osdep.h"
 
 
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID                  0x8086
-
 /* Device IDs */
 #define IXGBE_DEV_ID_82598                     0x10B6
 #define IXGBE_DEV_ID_82598_BX                  0x1508
@@ -55,7 +52,9 @@
 #define IXGBE_DEV_ID_82599_CX4                 0x10F9
 #define IXGBE_DEV_ID_82599_SFP                 0x10FB
 #define IXGBE_SUBDEV_ID_82599_SFP              0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC             0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR           0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP          0x0470
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE      0x152A
 #define IXGBE_DEV_ID_82599_SFP_FCOE            0x1529
 #define IXGBE_DEV_ID_82599_SFP_EM              0x1507
@@ -1014,6 +1013,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RSCCTL_MAXDESC_4 0x04
 #define IXGBE_RSCCTL_MAXDESC_8 0x08
 #define IXGBE_RSCCTL_MAXDESC_16        0x0C
+#define IXGBE_RSCCTL_TS_DIS    0x02
 
 /* RSCDBU Bit Masks */
 #define IXGBE_RSCDBU_RSCSMALDIS_MASK   0x0000007F
@@ -1026,7 +1026,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RDRXCTL_DMAIDONE         0x00000008 /* DMA init cycle done */
 #define IXGBE_RDRXCTL_AGGDIS           0x00010000 /* Aggregation disable */
 #define IXGBE_RDRXCTL_RSCFRSTSIZE      0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS                0x00800000 /* Disabl RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCLLIDIS                0x00800000 /* Disable RSC compl on LLI */
 #define IXGBE_RDRXCTL_RSCACKC          0x02000000 /* must set 1 when RSC ena */
 #define IXGBE_RDRXCTL_FCOE_WRFIX       0x04000000 /* must set 1 when RSC ena */
 
@@ -1587,10 +1587,15 @@ enum {
 #define IXGBE_ESDP_SDP4                0x00000010 /* SDP4 Data Value */
 #define IXGBE_ESDP_SDP5                0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6                0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP7                0x00000080 /* SDP7 Data Value */
 #define IXGBE_ESDP_SDP0_DIR    0x00000100 /* SDP0 IO direction */
 #define IXGBE_ESDP_SDP1_DIR    0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR    0x00000400 /* SDP2 IO direction */
+#define IXGBE_ESDP_SDP3_DIR    0x00000800 /* SDP3 IO direction */
 #define IXGBE_ESDP_SDP4_DIR    0x00001000 /* SDP4 IO direction */
 #define IXGBE_ESDP_SDP5_DIR    0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR    0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR    0x00008000 /* SDP7 IO direction */
 #define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
 #define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
 
@@ -3214,6 +3219,7 @@ struct ixgbe_hw {
        u16 subsystem_vendor_id;
        u8 revision_id;
        bool adapter_stopped;
+       int api_version;
        bool force_full_reset;
        bool allow_unsupported_sfp;
 };
index 329289c789da17024878a49551bdac32a3dc158e..87784f06e7a0139cc339bcd8855c23aac9af8b41 100644 (file)
@@ -106,6 +106,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
        mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
        mac->ops.check_link = &ixgbe_check_mac_link_generic;
 
+
        mac->mcft_size          = 128;
        mac->vft_size           = 128;
        mac->num_rar_entries    = 128;
@@ -927,3 +928,4 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
        return 0;
 }
 
+
index 8e54a2daca9400eb77b1ee2899a5aa625ca56522..3c07680f23d31c1d40fb951662e82a875d406e55 100644 (file)
@@ -1124,6 +1124,8 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
        pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
 #define dev_warn(dev, fmt, args...)            \
        pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...)            \
+       pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
 
 /* NOTE: dangerous! we ignore the 'gfp' argument */
 #define dma_alloc_coherent(dev,sz,dma,gfp) \
@@ -1646,6 +1648,8 @@ extern void *_kc_kzalloc(size_t size, int flags);
 #define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
 #define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
 
+#define SUPPORTED_Pause                (1 << 13)
+#define SUPPORTED_Asym_Pause   (1 << 14)
 #define ADVERTISED_Pause       (1 << 13)
 #define ADVERTISED_Asym_Pause  (1 << 14)
 
@@ -1661,10 +1665,10 @@ typedef unsigned gfp_t;
 
 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
 #ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)       \
-       dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)    \
-       dma_sync_single_for_device(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)       \
+       dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)    \
+       dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
 #endif
 #endif
 #endif /* < 2.6.14 */
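
The old fallbacks silently dropped the offset argument, so a request to sync bytes [off, off + sz) of a mapping only synced [0, sz) and could leave the tail of the range stale. The replacements sync from the start of the mapping through off + sz: the head is synced needlessly, but the requested range is always covered. For example (rx_dma, hdr_len and frag_len are illustrative names):

	dma_sync_single_range_for_cpu(dev, rx_dma, hdr_len, frag_len,
				      DMA_FROM_DEVICE);
	/* now expands to a sync of bytes 0 .. hdr_len + frag_len: */
	dma_sync_single_for_cpu(dev, rx_dma, hdr_len + frag_len,
				DMA_FROM_DEVICE);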
@@ -1723,6 +1727,11 @@ static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...)            \
+       dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
 #ifndef first_online_node
 #define first_online_node 0
 #endif
@@ -1906,6 +1915,10 @@ do { \
        PCI_ANY_ID, PCI_ANY_ID, 0, 0
 #endif
 
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
 #ifndef round_jiffies
 #define round_jiffies(x) x
 #endif
@@ -2138,6 +2151,10 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
 #ifndef KERN_CONT
 #define KERN_CONT      ""
 #endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+       printk(KERN_ERR fmt, ##arg)
+#endif
 #else /* < 2.6.24 */
 #define HAVE_ETHTOOL_GET_SSET_COUNT
 #define HAVE_NETDEV_NAPI_LIST
@@ -2375,10 +2392,6 @@ static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
 }
 #define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
 #endif
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
 #endif /* < 2.6.28 */
 
 /*****************************************************************************/
@@ -2437,7 +2450,16 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
 #define netdev_for_each_uc_addr(uclist, dev) \
        for (uclist = dev->uc_list; uclist; uclist = uclist->next)
 #endif
-#else
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#else /* < 2.6.31 */
 #ifndef HAVE_NETDEV_STORAGE_ADDRESS
 #define HAVE_NETDEV_STORAGE_ADDRESS
 #endif
@@ -2447,6 +2469,9 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
 #ifndef HAVE_TRANS_START_IN_QUEUE
 #define HAVE_TRANS_START_IN_QUEUE
 #endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
 #endif /* < 2.6.31 */
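
PORT_OTHER, MDIO_PHY_ID_PRTAD and MDIO_PHY_ID_DEVAD back-fill names that kernels >= 2.6.31 provide via linux/mdio.h (hence the HAVE_INCLUDE_LINUX_MDIO_H flag above). The two masks carve the clause-45 port and device addresses out of a composed PHY id; a sketch mirroring the upstream helper (not part of this patch):

	/* 0x8000 (MDIO_PHY_ID_C45) marks the id as clause 45 */
	static inline u16 kc_mdio_phy_id_c45(int prtad, int devad)
	{
		return 0x8000 | (prtad << 5) | devad;
	}

	/* and to decompose:
	 *   prtad = (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
	 *   devad =  phy_id & MDIO_PHY_ID_DEVAD;
	 */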
 
 /*****************************************************************************/
@@ -2531,13 +2556,37 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
 #ifndef __percpu
 #define __percpu
 #endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)       dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)         __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME)         ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)        (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME)           ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL)  (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
 #else /* < 2.6.33 */
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
 #define HAVE_NETDEV_OPS_FCOE_GETWWN
 #endif
 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
 #endif /* < 2.6.33 */
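
The RHEL 6.3 block forces the dma_unmap_addr/len macros to always store real state on x86_64: those kernels can route DMA through an IOMMU or swiotlb even when CONFIG_NEED_DMA_MAP_STATE is unset, which is presumably why the stock no-op versions are unsafe there. Usage stays the standard kernel pattern (kc_tx_buffer, tx_buf, mapping and size are illustrative names):

	struct kc_tx_buffer {
		DEFINE_DMA_UNMAP_ADDR(dma);   /* expands to: dma_addr_t dma; */
		DEFINE_DMA_UNMAP_LEN(len);    /* expands to: __u32 len;      */
	};

	/* record at map time ... */
	dma_unmap_addr_set(tx_buf, dma, mapping);
	dma_unmap_len_set(tx_buf, len, size);
	/* ... read back at unmap time */
	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);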
 
 /*****************************************************************************/
@@ -2598,8 +2647,7 @@ static inline const char *_kc_netdev_name(const struct net_device *dev)
 do {                                                           \
        struct adapter_struct *kc_adapter = netdev_priv(netdev);\
        struct pci_dev *pdev = kc_adapter->pdev;                \
-       printk("%s %s: " format, level, pci_name(pdev),         \
-              ##args);                                         \
+       printk(level "%s: " format, pci_name(pdev), ##args);    \
 } while(0)
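
The old fallback pushed the KERN_* marker through "%s", which left a stray space between the level prefix and the device name; concatenating level into the format string restores the normal printk idiom. With KERN_INFO defined as "<6>" on these kernels:

	netdev_printk(KERN_INFO, netdev, "link is up\n");
	/* before: printk("%s %s: ...", KERN_INFO, pci_name(pdev))
	 *         -> "<6> 0000:01:00.0: link is up"               */
	/* after:  printk(KERN_INFO "%s: ...", pci_name(pdev))
	 *         -> "<6>0000:01:00.0: link is up"                */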
 #elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
 #define netdev_printk(level, netdev, format, args...)          \
@@ -3030,6 +3078,8 @@ struct _kc_ethtool_rx_flow_spec {
 
 /*****************************************************************************/
 
+/*****************************************************************************/
+
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
 #ifndef __netdev_alloc_skb_ip_align
@@ -3120,7 +3170,6 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag)
 #undef ixgbe_get_netdev_tc_txq
 #define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
 #endif
-
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
 //typedef u32 netdev_features_t;
@@ -3143,6 +3192,11 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag)
 #define NUMTCS_RETURNS_U8
 
 
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
+                               int, int, unsigned int);
+#endif
 #endif /* < 3.4.0 */
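
skb_add_rx_frag moved out of the < 2.6.28 block because 3.4 changed its signature: callers now pass the fragment's true buffer footprint so skb->truesize accounting stays honest, and the _kc_ fallback mirrors that six-argument form:

	void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			     int off, int size, unsigned int truesize);

	/* e.g. attach len bytes of a shared RX page; bufsz (illustrative)
	 * is the full buffer footprint, not just the bytes used: */
	skb_add_rx_frag(skb, 0, page, off, len, bufsz);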
 
 /*****************************************************************************/
@@ -3152,4 +3206,65 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag)
 #define HAVE_FDB_OPS
 #define HAVE_ETHTOOL_GET_TS_INFO
 #endif /* < 3.5.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+       u32 supported = 0;
+
+       if (eee_cap & MDIO_EEE_100TX)
+               supported |= SUPPORTED_100baseT_Full;
+       if (eee_cap & MDIO_EEE_1000T)
+               supported |= SUPPORTED_1000baseT_Full;
+       if (eee_cap & MDIO_EEE_10GT)
+               supported |= SUPPORTED_10000baseT_Full;
+       if (eee_cap & MDIO_EEE_1000KX)
+               supported |= SUPPORTED_1000baseKX_Full;
+       if (eee_cap & MDIO_EEE_10GKX4)
+               supported |= SUPPORTED_10000baseKX4_Full;
+       if (eee_cap & MDIO_EEE_10GKR)
+               supported |= SUPPORTED_10000baseKR_Full;
+
+       return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+       u32 adv = 0;
+
+       if (eee_adv & MDIO_EEE_100TX)
+               adv |= ADVERTISED_100baseT_Full;
+       if (eee_adv & MDIO_EEE_1000T)
+               adv |= ADVERTISED_1000baseT_Full;
+       if (eee_adv & MDIO_EEE_10GT)
+               adv |= ADVERTISED_10000baseT_Full;
+       if (eee_adv & MDIO_EEE_1000KX)
+               adv |= ADVERTISED_1000baseKX_Full;
+       if (eee_adv & MDIO_EEE_10GKX4)
+               adv |= ADVERTISED_10000baseKX4_Full;
+       if (eee_adv & MDIO_EEE_10GKR)
+               adv |= ADVERTISED_10000baseKR_Full;
+
+       return adv;
+}
+#endif /* ETHTOOL_GEEE */
+#endif /* < 3.7.0 */
 #endif /* _KCOMPAT_H_ */
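
Taken together, the two EEE helpers translate the clause-45 register bitmaps into the ethtool bit vocabulary, so an ethtool get_eee implementation reduces to three conversions. Illustratively (eee_cap, eee_adv and eee_lp stand for MMD registers 3.20, 7.60 and 7.61, read from the PHY elsewhere; the wrapper name is hypothetical):

	/* fill an ethtool_eee from previously read MMD register values */
	static void kc_fill_eee(struct ethtool_eee *edata,
				u16 eee_cap, u16 eee_adv, u16 eee_lp)
	{
		edata->supported     = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		edata->advertised    = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
	}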