www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
[net] e1000e-2.1.4 driver update
authorMaxim Uvarov <maxim.uvarov@oracle.com>
Mon, 15 Oct 2012 10:27:21 +0000 (03:27 -0700)
committerMaxim Uvarov <maxim.uvarov@oracle.com>
Mon, 15 Oct 2012 10:40:14 +0000 (03:40 -0700)
Update the e1000e driver to version 2.1.4 from the out-of-tree sf.net (SourceForge) sources.
Signed-off-by: Maxim Uvarov <maxim.uvarov@oracle.com>
16 files changed:
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/e1000.h
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/ich8lan.h
drivers/net/e1000e/kcompat.c
drivers/net/e1000e/kcompat.h
drivers/net/e1000e/mac.c
drivers/net/e1000e/mac.h
drivers/net/e1000e/netdev.c
drivers/net/e1000e/nvm.h
drivers/net/e1000e/param.c
drivers/net/e1000e/phy.c
drivers/net/e1000e/phy.h
drivers/net/e1000e/regs.h

index ef65bb246548866089f664d9a469a3b266a3b025..20be799d1de4f27cabb0c38bcdb22431bc715c58 100644 (file)
@@ -670,7 +670,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
  **/
 static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (active)
                data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -694,7 +694,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (!active) {
                data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
@@ -1015,7 +1015,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-       u32 ctrl, ctrl_ext;
+       u32 ctrl, ctrl_ext, eecd;
        s32 ret_val;
 
        /*
@@ -1088,6 +1088,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
         */
 
        switch (hw->mac.type) {
+       case e1000_82571:
+       case e1000_82572:
+               /*
+                * REQ and GNT bits need to be cleared when using AUTO_RD
+                * to access the EEPROM.
+                */
+               eecd = er32(EECD);
+               eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+               ew32(EECD, eecd);
+               break;
        case e1000_82573:
        case e1000_82574:
        case e1000_82583:
@@ -1577,9 +1587,6 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
-       /* SYNCH bit and IV bit are sticky */
-       udelay(10);
-       rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
@@ -1606,8 +1613,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
                         * auto-negotiation in the TXCW register and disable
                         * forced link in the Device Control register in an
                         * attempt to auto-negotiate with our link partner.
+                        * If the partner code word is null, stop forcing
+                        * and restart auto negotiation.
                         */
-                       if (rxcw & E1000_RXCW_C) {
+                       if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
                                /* Enable autoneg, and unforce link up */
                                ew32(TXCW, mac->txcw);
                                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1788,7 +1797,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
                 * incoming packets directed to this port are dropped.
                 * Eventually the LAA will be in RAR[0] and RAR[14].
                 */
-               e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+               hw->mac.ops.rar_set(hw, hw->mac.addr,
+                                   hw->mac.rar_entry_count - 1);
 }
 
 /**
index 96d0b583c097a255780521c029e612aee11007c2..43d65455ec5337442a994e854caaf22ec1cb5410 100644 (file)
 
 #define E1000_KABGTXD_BGSQLBIAS                        0x00050000
 
+/* Low Power IDLE Control */
+#define E1000_LPIC_1000ENABLE          0x00010000
+#define E1000_LPIC_100ENABLE           0x00020000
+
 /* PBA constants */
 #define E1000_PBA_6K           0x0006  /* 6KB */
 #define E1000_PBA_8K           0x0008  /* 8KB */
 #define NVM_VERSION                    0x0005
 #define NVM_SERDES_AMPLITUDE           0x0006  /* SERDES output amplitude */
 #define NVM_PHY_CLASS_WORD             0x0007
+#define NVM_ETRACK_WORD                        0x0042
+#define NVM_COMB_VER_OFF               0x0083
+#define NVM_COMB_VER_PTR               0x003d
 
 #define NVM_INIT_CONTROL1_REG          0x000A
 #define NVM_INIT_CONTROL2_REG          0x000F
index 377f2ea4b28d51acfab58ae59e5dd6e94808926e..f8452db4653b8e531c44808cb4de8eda9e8c1776 100644 (file)
@@ -251,6 +251,7 @@ struct e1000_adapter {
         */
        struct e1000_ring *tx_ring      /* One per active queue */
         ____cacheline_aligned_in_smp;
+       u32 tx_fifo_limit;
 
 #ifdef CONFIG_E1000E_NAPI
        struct napi_struct napi;
@@ -326,7 +327,7 @@ struct e1000_adapter {
        struct e1000_hw hw;
 
 #ifdef HAVE_NDO_GET_STATS64
-       spinlock_t stats64_lock;
+       spinlock_t stats64_lock;        /* protects statistics counters */
 #endif
        struct e1000_hw_stats stats;
        struct e1000_phy_info phy_info;
@@ -491,6 +492,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
 
 extern unsigned int copybreak;
 
@@ -541,7 +543,6 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
 extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
                                               u8 *mc_addr_list,
                                               u32 mc_addr_count);
-extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
 extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
 extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
 extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
@@ -602,6 +603,11 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
        return 0;
 }
 
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
        if (hw->phy.ops.write_reg)
@@ -610,6 +616,11 @@ static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
        return 0;
 }
 
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
 {
        if (hw->phy.ops.get_cable_length)
index 74b95ce2eb9c6d88ad419fc0c021a2f011481bb1..37efda97f6acfc3fd99064d4cc747bb5216c4a60 100644 (file)
@@ -504,7 +504,6 @@ static int e1000e_set_flags(struct net_device *netdev, u32 data)
 #endif
 
        rc = ethtool_op_set_flags(netdev, data, supported);
-
        if (rc)
                return rc;
 
@@ -2122,7 +2121,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
 
        if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
            ((ec->rx_coalesce_usecs > 4) &&
@@ -2131,7 +2129,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
                return -EINVAL;
 
        if (ec->rx_coalesce_usecs == 4) {
-               adapter->itr = adapter->itr_setting = 4;
+               adapter->itr_setting = 4;
+               adapter->itr = adapter->itr_setting;
        } else if (ec->rx_coalesce_usecs <= 3) {
                adapter->itr = 20000;
                adapter->itr_setting = ec->rx_coalesce_usecs;
@@ -2141,9 +2140,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
        }
 
        if (adapter->itr_setting != 0)
-               ew32(ITR, 1000000000 / (adapter->itr * 256));
+               e1000e_write_itr(adapter, adapter->itr);
        else
-               ew32(ITR, 0);
+               e1000e_write_itr(adapter, 0);
 
        return 0;
 }
index 16d9fb3379aecea4b2f983558e4c1757b444fabe..59c4bf4c2edc2f554b18d2e66506e61974226f07 100644 (file)
@@ -178,24 +178,46 @@ union ich8_hws_flash_regacc {
  **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-       u16 phy_reg;
-       u32 phy_id;
+       u16 phy_reg = 0;
+       u32 phy_id = 0;
+       s32 ret_val;
+       u16 retry_count;
+
+       for (retry_count = 0; retry_count < 2; retry_count++) {
+               ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF))
+                       continue;
+               phy_id = (u32)(phy_reg << 16);
 
-       hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
-       phy_id = (u32)(phy_reg << 16);
-       hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
-       phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF)) {
+                       phy_id = 0;
+                       continue;
+               }
+               phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               break;
+       }
 
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        return true;
-       } else {
-               if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-                       hw->phy.id = phy_id;
+       } else if (phy_id) {
+               hw->phy.id = phy_id;
+               hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                return true;
        }
 
-       return false;
+       /*
+        * In case the PHY needs to be in mdio slow mode,
+        * set slow mode and try to get the PHY id again.
+        */
+       hw->phy.ops.release(hw);
+       ret_val = e1000_set_mdio_slow_mode_hv(hw);
+       if (!ret_val)
+               ret_val = e1000e_get_phy_id(hw);
+       hw->phy.ops.acquire(hw);
+
+       return !ret_val;
 }
 
 /**
@@ -248,11 +270,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                if (e1000_phy_is_accessible_pchlan(hw)) {
                        if (hw->mac.type == e1000_pch_lpt) {
                                /* Unforce SMBus mode in PHY */
-                               hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
-                                                           &phy_reg);
+                               e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
                                phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
-                               hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,
-                                                            phy_reg);
+                               e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 
                                /* Unforce SMBus mode in MAC */
                                mac_reg = er32(CTRL_EXT);
@@ -683,56 +703,84 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
  *  e1000_set_eee_pchlan - Enable/disable EEE support
  *  @hw: pointer to the HW structure
  *
- *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
- *  the LPI Control register will remain set only if/when link is up.
+ *  Enable/disable EEE based on setting in dev_spec structure, the duplex and
+ *  MDI/MDI-X mode of the link and the EEE capabilities of the link partner.
+ *  The LPI Control register bits will remain set only if/when link is up.
  **/
 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-       s32 ret_val = 0;
-       u16 phy_reg;
+       s32 ret_val;
+       u16 lpi_ctrl;
 
        if ((hw->phy.type != e1000_phy_82579) &&
            (hw->phy.type != e1000_phy_i217))
                return 0;
 
-       ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+       /* get phy info to see if the phy is in MDI-x mode */
+       ret_val = hw->phy.ops.get_info(hw);
        if (ret_val)
                return ret_val;
 
-       if (dev_spec->eee_disable)
-               phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
-       else
-               phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
-
-       ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+       ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;
 
-       if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
+       ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
+       if (ret_val)
+               goto release;
+
+       /* Clear bits that enable EEE in various speeds */
+       lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+       /* Enable EEE if not disabled by user and link is not in MDI-X mode */
+       if (!dev_spec->eee_disable && !hw->phy.is_mdix) {
+               u16 data;
+
                /* Save off link partner's EEE ability */
-               ret_val = hw->phy.ops.acquire(hw);
-               if (ret_val)
-                       return ret_val;
-               ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                                      I217_EEE_LP_ABILITY);
+               switch (hw->phy.type) {
+               case e1000_phy_82579:
+                       data = I82579_EEE_LP_ABILITY;
+                       break;
+               case e1000_phy_i217:
+                       data = I217_EEE_LP_ABILITY;
+                       break;
+               default:
+                       ret_val = -E1000_ERR_PHY;
+                       goto release;
+               }
+               ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, data);
                if (ret_val)
                        goto release;
-               hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
-                                           &dev_spec->eee_lp_ability);
+               e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
 
                /*
-                * EEE is not supported in 100Half, so ignore partner's EEE
-                * in 100 ability if full-duplex is not advertised.
+                * Enable EEE only for speeds in which the link partner is
+                * EEE capable.
                 */
-               hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &phy_reg);
-               if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
-                       dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
-release:
-               hw->phy.ops.release(hw);
+               if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+                       lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+               if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+                       e1e_rphy_locked(hw, PHY_LP_ABILITY, &data);
+                       if (data & NWAY_LPAR_100TX_FD_CAPS)
+                               lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+                       else
+                               /*
+                                * EEE is not supported in 100Half, so ignore
+                                * partner's EEE in 100 ability if full-duplex
+                                * is not advertised.
+                                */
+                               dev_spec->eee_lp_ability &=
+                                   ~I82579_EEE_100_SUPPORTED;
+               }
        }
 
-       return 0;
+       ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+       hw->phy.ops.release(hw);
+
+       return ret_val;
 }
 
 /**
@@ -1213,7 +1261,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
                        phy_data |= (freq & (1 << 0)) <<
                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
                        phy_data |= (freq & (1 << 1)) <<
-                           HV_SMB_ADDR_FREQ_HIGH_SHIFT;
+                           (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
                } else {
                        e_dbg("Unsupported SMB frequency in PHY\n");
                }
@@ -1333,8 +1381,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                reg_addr &= PHY_REG_MASK;
                reg_addr |= phy_page;
 
-               ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
-                                                   reg_data);
+               ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
                if (ret_val)
                        goto release;
        }
@@ -1371,8 +1418,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
        /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
        if (link) {
                if (hw->phy.type == e1000_phy_82578) {
-                       ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
-                                                             &status_reg);
+                       ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
+                                                 &status_reg);
                        if (ret_val)
                                goto release;
 
@@ -1386,8 +1433,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                }
 
                if (hw->phy.type == e1000_phy_82577) {
-                       ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
-                                                             &status_reg);
+                       ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
                        if (ret_val)
                                goto release;
 
@@ -1402,15 +1448,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                }
 
                /* Link stall fix for link up */
-               ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-                                                      0x0100);
+               ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
                if (ret_val)
                        goto release;
 
        } else {
                /* Link stall fix for link down */
-               ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-                                                      0x4100);
+               ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
                if (ret_val)
                        goto release;
        }
@@ -1509,7 +1553,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
 
        mac_reg = er32(PHY_CTRL);
 
-       ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+       ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
        if (ret_val)
                goto release;
 
@@ -1536,7 +1580,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
            !hw->phy.ops.check_reset_block(hw))
                oem_reg |= HV_OEM_BITS_RESTART_AN;
 
-       ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+       ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
 
 release:
        hw->phy.ops.release(hw);
@@ -1631,11 +1675,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;
-       ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+       ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
        if (ret_val)
                goto release;
-       ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
-                                              phy_data & 0x00FF);
+       ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
 release:
        hw->phy.ops.release(hw);
 
@@ -1694,7 +1737,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
        u32 mac_reg;
        u16 i;
 
-       if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pch_lpt))
+       if (hw->mac.type < e1000_pch2lan)
                return 0;
 
        /* disable Rx path while enabling/disabling workaround */
@@ -1867,20 +1910,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;
-       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                              I82579_MSE_THRESHOLD);
+       ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
        if (ret_val)
                goto release;
        /* set MSE higher to enable link to stay up when noise is high */
-       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
+       ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
        if (ret_val)
                goto release;
-       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                              I82579_MSE_LINK_DOWN);
+       ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
        if (ret_val)
                goto release;
        /* drop link after 5 times MSE threshold was reached */
-       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
+       ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
 release:
        hw->phy.ops.release(hw);
 
@@ -1953,7 +1994,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
 {
        u32 extcnf_ctrl;
 
-       if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pch_lpt))
+       if (hw->mac.type < e1000_pch2lan)
                return;
 
        extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -2055,12 +2096,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;
-               ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                                      I82579_LPI_UPDATE_TIMER);
+               ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+                                         I82579_LPI_UPDATE_TIMER);
                if (!ret_val)
-                       ret_val = hw->phy.ops.write_reg_locked(hw,
-                                                              I82579_EMI_DATA,
-                                                              0x1387);
+                       ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
                hw->phy.ops.release(hw);
        }
 
@@ -3885,22 +3924,21 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                if (!dev_spec->eee_disable) {
                        u16 eee_advert;
 
-                       ret_val = hw->phy.ops.write_reg_locked(hw,
-                                                              I82579_EMI_ADDR,
-                                                              I217_EEE_ADVERTISEMENT);
+                       ret_val = e1e_wphy_locked(hw,
+                                                 I82579_EMI_ADDR,
+                                                 I217_EEE_ADVERTISEMENT);
                        if (ret_val)
                                goto release;
-                       hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
-                                                   &eee_advert);
+                       e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
 
                        /*
                         * Disable LPLU if both link partners support 100BaseT
                         * EEE and 100Full is advertised on both ends of the
                         * link.
                         */
-                       if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+                       if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
                            (dev_spec->eee_lp_ability &
-                            I217_EEE_100_SUPPORTED) &&
+                            I82579_EEE_100_SUPPORTED) &&
                            (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
                                              E1000_PHY_CTRL_NOND0A_LPLU);
@@ -3917,33 +3955,31 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
 
                        /* Enable proxy to reset only on power good. */
-                       hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
-                                                   &phy_reg);
+                       e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
-                       hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
-                                                    phy_reg);
+                       e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
 
                        /*
                         * Set bit enable LPI (EEE) to reset only on
                         * power good.
                         */
-                       hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
-                       phy_reg |= I217_SxCTRL_MASK;
-                       hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
+                       e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+                       phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
+                       e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
 
                        /* Disable the SMB release on LCD reset. */
-                       hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
-                       phy_reg &= ~I217_MEMPWR;
-                       hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+                       e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+                       phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
+                       e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
                }
 
                /*
                 * Enable MTA to reset for Intel Rapid Start Technology
                 * Support
                 */
-               hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
-               phy_reg |= I217_CGFREG_MASK;
-               hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+               e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+               phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
+               e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 
 release:
                hw->phy.ops.release(hw);
@@ -4012,23 +4048,21 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                         * Restore clear on SMB if no manageability engine
                         * is present
                         */
-                       ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
-                                                             &phy_reg);
+                       ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
                        if (ret_val)
                                goto release;
-                       phy_reg |= I217_MEMPWR_MASK;
-                       hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+                       phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
+                       e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
                        /* Disable Proxy */
-                       hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
+                       e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
                }
                /* Enable reset on MTA */
-               ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
-                                                     &phy_reg);
+               ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
                if (ret_val)
                        goto release;
-               phy_reg &= ~I217_CGFREG_MASK;
-               hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+               phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
+               e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
                if (ret_val)
                        e_dbg("Error %d in resume workarounds\n", ret_val);
index 37f88fa69c0937c98f22e2c7aef9f27889234a84..4da6e5e8c9682b7a9eb5f1d55ceecb1f14582c52 100644 (file)
 #define E1000_FWSM_WLOCK_MAC_SHIFT     7
 
 /* Shared Receive Address Registers */
-#define E1000_SHRAL(_i)                (0x05438 + ((_i) * 8))
-#define E1000_SHRAH(_i)                (0x0543C + ((_i) * 8))
-#define E1000_SHRAH_AV         0x80000000      /* Addr Valid bit */
-#define E1000_SHRAH_MAV                0x40000000      /* Multicast Addr Valid bit */
-
 #define E1000_SHRAL_PCH_LPT(_i)                (0x05408 + ((_i) * 8))
 #define E1000_SHRAH_PCH_LPT(_i)                (0x0540C + ((_i) * 8))
+#define E1000_SHRAH_AV         0x80000000      /* Addr Valid bit */
+#define E1000_SHRAH_MAV                0x40000000      /* Multicast Addr Valid bit */
 
 #define E1000_H2ME             0x05B50 /* Host to ME */
 #define E1000_H2ME_LSECREQ     0x00000001      /* Linksec Request */
 
 #define E1000_ICH8_LAN_INIT_TIMEOUT    1500
 
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED      0x00000004
+
 #define E1000_FEXTNVM_SW_CONFIG                1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M  (1 << 27)       /* Bit redefined for ICH8M */
 
 
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL                                PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE             0x2000
+#define I82579_LPI_CTRL_1000_ENABLE            0x4000
 #define I82579_LPI_CTRL_ENABLE_MASK            0x6000
 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT   0x80
 
 #define I82579_LPI_UPDATE_TIMER        0x4805  /* in 40ns units + 40 ns base value */
 #define I82579_MSE_THRESHOLD   0x084F  /* Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN   0x2411  /* MSE count before dropping link */
+#define I82579_EEE_LP_ABILITY          0x040F  /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED       (1 << 1)        /* 100BaseTx EEE supported */
+#define I82579_EEE_1000_SUPPORTED      (1 << 2)        /* 1000BaseTx EEE supported */
 #define I217_EEE_ADVERTISEMENT 0x8001  /* IEEE MMD Register 7.60 */
 #define I217_EEE_LP_ABILITY    0x8002  /* IEEE MMD Register 7.61 */
-#define I217_EEE_100_SUPPORTED (1 << 1)        /* 100BaseTx EEE supported */
 
 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                        PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE   0x0080
 #define I217_SxCTRL                    PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK               0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET   0x1000
+#define I217_SxCTRL_ENABLE_SERDES      0x0020
 #define I217_CGFREG                    PHY_REG(772, 29)
-#define I217_CGFREG_MASK               0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET   0x0002
 #define I217_MEMPWR                    PHY_REG(772, 26)
-#define I217_MEMPWR_MASK               0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE        0x0010
 
 /*
  * Additional interrupts need to be handled for ICH family:
index b88f5303979e720c28115f416bde7e860802d0a6..5707dbae0a352831ec587f81bb60f580e2179543 100644 (file)
@@ -1035,16 +1035,30 @@ int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
 out:
        return err;
 }
+#endif /* < 2.6.28 */
 
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-                        int off, int size)
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev *dev)
 {
-       skb_fill_page_desc(skb, i, page, off, size);
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += size;
+       int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+       struct pci_dev *vfdev;
+
+       /* loop through all ethernet devices starting at PF dev */
+       vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+       while (vfdev) {
+               if (vfdev->is_virtfn && vfdev->physfn == dev)
+                       num_vf++;
+
+               vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+       }
+
+#endif
+       return num_vf;
 }
-#endif /* < 2.6.28 */
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
 #endif /* < 2.6.35 */
@@ -1077,5 +1091,10 @@ int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
 /******************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
 #endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#endif /* < 3.4.0 */
index 50a600770894c1d8782c6ba67c883ed8975d1c20..eae2b2fcfa687d90cd9009954bd0512e65957982 100644 (file)
@@ -317,6 +317,10 @@ struct _kc_vlan_hdr {
 #define __GFP_COLD 0
 #endif
 
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
 /*****************************************************************************/
 /* Installations with ethtool version without eeprom, adapter id, or statistics
  * support */
@@ -1141,6 +1145,8 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
        pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
 #define dev_warn(dev, fmt, args...)            \
        pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...)            \
+       pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
 
 /* NOTE: dangerous! we ignore the 'gfp' argument */
 #define dma_alloc_coherent(dev,sz,dma,gfp) \
@@ -1158,6 +1164,11 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
 #define dma_unmap_single(dev,a,b,c) \
        pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
 
+#define dma_map_sg(dev, sg, nents, dir) \
+       pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+       pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
 #define dma_sync_single(dev,a,b,c) \
        pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
 
@@ -1333,6 +1344,17 @@ extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
 
 #endif /* 2.6.0 => 2.5.28 */
 
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+       pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
 /*****************************************************************************/
 /* 2.6.4 => 2.6.0 */
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
@@ -1552,6 +1574,7 @@ static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
 extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
 #undef node_online_map
 #define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
 #endif /* < 2.6.10 */
 
 /*****************************************************************************/
@@ -1671,10 +1694,10 @@ typedef unsigned gfp_t;
 
 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
 #ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)       \
-       dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)    \
-       dma_sync_single_for_device(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)       \
+       dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)    \
+       dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
 #endif
 #endif
 #endif /* < 2.6.14 */
@@ -1734,6 +1757,11 @@ static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...)            \
+       dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
 #ifndef first_online_node
 #define first_online_node 0
 #endif
@@ -1826,6 +1854,9 @@ static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
 #ifndef DIV_ROUND_UP
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
 #if (!((RHEL_RELEASE_CODE && \
         ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
@@ -2144,6 +2175,10 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
 #ifndef KERN_CONT
 #define KERN_CONT      ""
 #endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+       printk(KERN_ERR fmt, ##arg)
+#endif
 #else /* < 2.6.24 */
 #define HAVE_ETHTOOL_GET_SSET_COUNT
 #define HAVE_NETDEV_NAPI_LIST
@@ -2345,10 +2380,6 @@ static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
 
 #define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
 #endif
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
 #endif /* < 2.6.28 */
 
 /*****************************************************************************/
@@ -2409,7 +2440,10 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
 #define netdev_for_each_uc_addr(uclist, dev) \
        for (uclist = dev->uc_list; uclist; uclist = uclist->next)
 #endif
-#else
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#else /* < 2.6.31 */
 #ifndef HAVE_NETDEV_STORAGE_ADDRESS
 #define HAVE_NETDEV_STORAGE_ADDRESS
 #endif
@@ -2503,17 +2537,30 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
 #ifndef __percpu
 #define __percpu
 #endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
 #else /* < 2.6.33 */
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
 #define HAVE_NETDEV_OPS_FCOE_GETWWN
 #endif
 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
 #endif /* < 2.6.33 */
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
 #ifndef ETH_FLAG_NTUPLE
 #define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
 #endif
@@ -2564,8 +2611,7 @@ static inline const char *_kc_netdev_name(const struct net_device *dev)
 do {                                                           \
        struct adapter_struct *kc_adapter = netdev_priv(netdev);\
        struct pci_dev *pdev = kc_adapter->pdev;                \
-       printk("%s %s: " format, level, pci_name(pdev),         \
-              ##args);                                         \
+       printk(level "%s: " format, pci_name(pdev), ##args);    \
 } while(0)
 #elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
 #define netdev_printk(level, netdev, format, args...)          \
@@ -2686,15 +2732,17 @@ do {                                                            \
 #ifdef HAVE_TX_MQ
 #include <net/sch_generic.h>
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
 void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
 #define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
 #else /* CONFIG_NETDEVICES_MULTI_QUEUE */
 #define netif_set_real_num_tx_queues(_netdev, _count) \
        do { \
                (_netdev)->egress_subqueue_count = _count; \
        } while (0)
 #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
-#else
+#else /* HAVE_TX_MQ */
 #define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
 #endif /* HAVE_TX_MQ */
 #ifndef ETH_FLAG_RXHASH
@@ -2768,6 +2816,16 @@ do {                                                             \
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+                                                   unsigned int rxq)
+{
+       return 0;
+}
+
+#define netif_set_real_num_rx_queues(dev, rxq) \
+       __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
 #ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
 #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
 #endif
@@ -2950,6 +3008,18 @@ struct _kc_ethtool_rx_flow_spec {
 #define HAVE_ETHTOOL_SET_PHYS_ID
 #endif /* < 2.6.40 */
 
+/*****************************************************************************/
+
+/*****************************************************************************/
+#undef CONFIG_E1000E_PTP
+#ifdef E1000E_PTP
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && (defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
+#define CONFIG_E1000E_PTP
+#else
+#error Cannot enable PTP Hardware Clock due to insufficient kernel support
+#endif
+#endif /* E1000E_PTP */
+
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
 #ifndef __netdev_alloc_skb_ip_align
@@ -3022,6 +3092,13 @@ static inline void __kc_skb_frag_unref(skb_frag_t * frag)
        put_page(skb_frag_page(frag));
 }
 #endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN  -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
 #else /* < 3.2.0 */
 #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
 #define HAVE_PCI_DEV_FLAGS_ASSIGNED
@@ -3076,7 +3153,10 @@ static inline void _kc_kunmap_atomic(void *addr)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
+#define eth_random_addr(addr) random_ether_addr(addr)
 #else
 #define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
 #endif /* < 3.5.0 */
 #endif /* _KCOMPAT_H_ */
index e3d931fd7b9277cff27fa899c14defff90203f50..b02aad6ffb8e3823f51bf7ef98831f63af349bd3 100644 (file)
@@ -31,6 +31,7 @@
 static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
 static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
 static void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+static void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
 
 /**
  *  e1000_init_mac_ops_generic - Initialize MAC function pointers
@@ -52,7 +53,7 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw)
        mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header;
        mac->ops.mng_enable_host_if = e1000_mng_enable_host_if;
        /* VLAN, MC, etc. */
-       mac->ops.rar_set = e1000e_rar_set;
+       mac->ops.rar_set = e1000e_rar_set_generic;
        mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
 }
 
@@ -184,12 +185,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
        /* Setup the receive address */
        e_dbg("Programming MAC Address into RAR[0]\n");
 
-       e1000e_rar_set(hw, hw->mac.addr, 0);
+       hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
        /* Zero out the other (rar_entry_count - 1) receive addresses */
        e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
        for (i = 1; i < rar_count; i++)
-               e1000e_rar_set(hw, mac_addr, i);
+               hw->mac.ops.rar_set(hw, mac_addr, i);
 }
 
 /**
@@ -256,13 +257,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
         * same as the normal permanent MAC address stored by the HW into the
         * RAR. Do this by mapping this address into RAR0.
         */
-       e1000e_rar_set(hw, alt_mac_addr, 0);
+       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
 
        return 0;
 }
 
 /**
- *  e1000e_rar_set - Set receive address register
+ *  e1000e_rar_set_generic - Set receive address register
  *  @hw: pointer to the HW structure
  *  @addr: pointer to the receive address
  *  @index: receive address array register
@@ -270,7 +271,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
  *  Sets the receive address array register at index to the address passed
  *  in by addr.
  **/
-void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+static void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        u32 rar_low, rar_high;
 
index 8740ea376fce08ced7f8021b42c2a7805ac770a2..82a9c6a78f906f37a6aa185dc90480477fa71ef3 100644 (file)
@@ -65,7 +65,6 @@ void e1000_clear_vfta_generic(struct e1000_hw *hw);
 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
 void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
 void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
 s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
 void e1000e_reset_adaptive(struct e1000_hw *hw);
 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
index 8b334221c3f51a60e4aa0fe5a5654df57b5bbe7b..68cc0f7711c01ef26e8e1d789e3f0a936a53d6a4 100644 (file)
@@ -61,7 +61,7 @@
 #define DRV_EXTRAVERSION ""
 #endif
 
-#define DRV_VERSION "2.0.0.1" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -71,8 +71,6 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-static int e1000_set_features(struct net_device *netdev,
-                             netdev_features_t features);
 
 static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
 {
@@ -188,7 +186,8 @@ static struct e1000_info e1000_82574_info = {
            | FLAG_HAS_SMART_POWER_DOWN
            | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD,
        .flags2 = FLAG2_CHECK_PHY_HANG
-           | FLAG2_DISABLE_ASPM_L0S | FLAG2_NO_DISABLE_RX | FLAG2_DMA_BURST,
+           | FLAG2_DISABLE_ASPM_L0S
+           | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX | FLAG2_DMA_BURST,
        .pba = 32,
        .max_hw_frame_size = DEFAULT_JUMBO,
        .init_ops = e1000_init_function_pointers_82571,
@@ -782,12 +781,19 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
        napi_gro_receive(&adapter->napi, skb);
 #endif /* HAVE_VLAN_RX_REGISTER */
 #else /* CONFIG_E1000E_NAPI */
+#ifdef HAVE_VLAN_RX_REGISTER
 #ifdef NETIF_F_HW_VLAN_TX
        if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
                ret = vlan_hwaccel_rx(skb, adapter->vlgrp, le16_to_cpu(vlan));
        else
 #endif
                ret = netif_rx(skb);
+#else /* HAVE_VLAN_RX_REGISTER */
+       if (status & E1000_RXD_STAT_VP)
+               __vlan_hwaccel_put_tag(skb, tag);
+
+       ret = netif_rx(skb);
+#endif /* HAVE_VLAN_RX_REGISTER */
        if (unlikely(ret == NET_RX_DROP))
                adapter->rx_dropped_backlog++;
 #endif /* CONFIG_E1000E_NAPI */
@@ -805,7 +811,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -824,8 +830,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -836,19 +842,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -1086,7 +1080,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = 256 - 16 /* for skb_reserve */ ;
+       unsigned int bufsz = 256 - 16;  /* for skb_reserve */
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
@@ -1288,8 +1282,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring)
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
 #ifdef NETIF_F_RXHASH
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1702,8 +1695,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
 #ifdef NETIF_F_RXHASH
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1885,9 +1877,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
 #ifdef NETIF_F_RXHASH
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -2953,6 +2944,30 @@ set_itr_now:
        }
 }
 
+/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+       if (adapter->msix_entries) {
+               int vector;
+
+               for (vector = 0; vector < adapter->num_vectors; vector++)
+                       writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+       } else {
+               ew32(ITR, new_itr);
+       }
+}
+
 /**
  * e1000_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -3683,7 +3698,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
        /* irq moderation */
        ew32(RADV, adapter->rx_abs_int_delay);
        if ((adapter->itr_setting != 0) && (adapter->itr != 0))
-               ew32(ITR, 1000000000 / (adapter->itr * 256));
+               e1000e_write_itr(adapter, adapter->itr);
 
        ctrl_ext = er32(CTRL_EXT);
 #ifdef CONFIG_E1000E_NAPI
@@ -3710,22 +3725,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
 #ifdef HAVE_NDO_SET_FEATURES
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
 #else
-       if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
+       if (adapter->flags & FLAG_RX_CSUM_ENABLED)
 #endif
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -3852,9 +3858,9 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
                        if (!rar_entries)
                                break;
 #ifdef NETDEV_HW_ADDR_T_UNICAST
-                       e1000e_rar_set(hw, ha->addr, rar_entries--);
+                       hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
 #else
-                       e1000e_rar_set(hw, ha->da_addr, rar_entries--);
+                       hw->mac.ops.rar_set(hw, ha->da_addr, rar_entries--);
 #endif
                        count++;
                }
@@ -4097,7 +4103,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
                        /*
                         * if short on Rx space, Rx wins and must trump Tx
-                        * adjustment or use Early Receive if available
+                        * adjustment
                         */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
@@ -4167,6 +4173,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
                break;
        }
 
+       /*
+        * Alignment of Tx data is on an arbitrary byte boundary with the
+        * maximum size per Tx descriptor limited only to the transmit
+        * allocation of the packet buffer minus 96 bytes with an upper
+        * limit of 24KB due to receive synchronization limitations.
+        */
+       adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                      24 << 10);
+
        /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
         * fit in receive buffer.
@@ -4177,14 +4192,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
                                dev_info(pci_dev_to_dev(adapter->pdev),
                                         "Interrupt Throttle Rate turned off\n");
                                adapter->flags2 |= FLAG2_DISABLE_AIM;
-                               ew32(ITR, 0);
+                               e1000e_write_itr(adapter, 0);
                        }
                } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
                        dev_info(pci_dev_to_dev(adapter->pdev),
                                 "Interrupt Throttle Rate turned on\n");
                        adapter->flags2 &= ~FLAG2_DISABLE_AIM;
                        adapter->itr = 20000;
-                       ew32(ITR, 1000000000 / (adapter->itr * 256));
+                       e1000e_write_itr(adapter, adapter->itr);
                }
        }
 
@@ -4423,6 +4438,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
        e_dbg("icr is %08X\n", icr);
        if (icr & E1000_ICR_RXSEQ) {
                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+               /*
+                * Force memory writes to complete before acknowledging the
+                * interrupt is handled.
+                */
                wmb();
        }
 
@@ -4464,6 +4483,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
                goto msi_test_failed;
        }
 
+       /*
+        * Force memory writes to complete before enabling and firing an
+        * interrupt.
+        */
        wmb();
 
        e1000_irq_enable(adapter);
@@ -4471,11 +4494,11 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
        /* fire an unusual interrupt on the test handler */
        ew32(ICS, E1000_ICS_RXSEQ);
        e1e_flush();
-       msleep(50);
+       msleep(100);
 
        e1000_irq_disable(adapter);
 
-       rmb();
+       rmb();                  /* read flags after interrupt has been fired */
 
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4747,6 +4770,7 @@ static int e1000_close(struct net_device *netdev)
 static int e1000_set_mac(struct net_device *netdev, void *p)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
 
        if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
@@ -4755,7 +4779,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
 
-       e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+       hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 
        if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
                /* activate the work around */
@@ -4769,9 +4793,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
                 * are dropped. Eventually the LAA will be in RAR[0] and
                 * RAR[14]
                 */
-               e1000e_rar_set(&adapter->hw,
-                              adapter->hw.mac.addr,
-                              adapter->hw.mac.rar_entry_count - 1);
+               hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
+                                   adapter->hw.mac.rar_entry_count - 1);
        }
 
        return 0;
@@ -5080,13 +5103,15 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
        u32 ctrl = er32(CTRL);
 
        /* Link status message must follow this format for user tools */
-       printk(KERN_INFO
-              "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
-              adapter->netdev->name, adapter->link_speed,
-              adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
-              (ctrl & E1000_CTRL_TFCE)
-              && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : (ctrl & E1000_CTRL_RFCE)
-              ? "Rx" : (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+/* *INDENT-OFF* */
+       pr_info("e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+               adapter->netdev->name,
+               adapter->link_speed,
+               adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+               (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+               (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+               (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+/* *INDENT-ON* */
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -5317,8 +5342,8 @@ static void e1000_watchdog_task(struct work_struct *work)
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        /* Link status message must follow this format */
-                       printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
-                              adapter->netdev->name);
+                       pr_info("e1000e: %s NIC Link is Down\n",
+                               adapter->netdev->name);
                        netif_carrier_off(netdev);
                        if (!test_bit(__E1000_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
@@ -5379,7 +5404,7 @@ link_up:
                           adapter->gorc - adapter->gotc) / 10000;
                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
 
-               ew32(ITR, 1000000000 / (itr * 256));
+               e1000e_write_itr(adapter, itr);
        }
 
        /* Cause software interrupt to ensure Rx ring is cleaned */
@@ -5399,7 +5424,7 @@ link_up:
         * reset from the other port. Set the appropriate LAA in RAR[0]
         */
        if (e1000e_get_laa_state_82571(hw))
-               e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+               hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
 
        if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
                e1000e_check_82574_phy_workaround(adapter);
@@ -5425,7 +5450,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        struct e1000_buffer *buffer_info;
        unsigned int i;
        u32 cmd_length = 0;
-       u16 ipcse = 0, tucse, mss;
+       u16 ipcse = 0, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
 
        if (!skb_is_gso(skb))
@@ -5439,6 +5464,11 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        }
 
        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+#if defined(DEBUG) || defined(DEV)
+       /* C-Spec section on TCP Segmentation Context Descriptor states
+        * the header may be up to 240 bytes in length */
+       BUG_ON(hdr_len > 240);
+#endif
        mss = skb_shinfo(skb)->gso_size;
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
@@ -5461,7 +5491,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
        tucss = skb_transport_offset(skb);
        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-       tucse = 0;
 
        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -5475,7 +5504,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
        context_desc->upper_setup.tcp_fields.tucss = tucss;
        context_desc->upper_setup.tcp_fields.tucso = tucso;
-       context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+       context_desc->upper_setup.tcp_fields.tucse = 0;
        context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
@@ -5557,12 +5586,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-#define E1000_MAX_PER_TXD      8192
-#define E1000_MAX_TXD_PWR      12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
                        unsigned int first, unsigned int max_per_txd,
-                       unsigned int nr_frags, unsigned int mss)
+                       unsigned int nr_frags)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
@@ -5743,13 +5769,12 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
        u16 length, offset;
 
 #ifdef NETIF_F_HW_VLAN_TX
-       if (vlan_tx_tag_present(skb)) {
-               if (!
-                   ((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-                    && (adapter->hw.mng_cookie.
-                        status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
-                       return 0;
-       }
+       if (vlan_tx_tag_present(skb) &&
+           !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+             (adapter->hw.mng_cookie.status &
+              E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+               return 0;
+
 #endif
        if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
                return 0;
@@ -5803,20 +5828,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+       BUG_ON(size > tx_ring->count);
+
        if (e1000_desc_unused(tx_ring) >= size)
                return 0;
        return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int first;
-       unsigned int max_per_txd = E1000_MAX_PER_TXD;
-       unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
@@ -5836,18 +5860,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 #ifdef NETIF_F_TSO
        mss = skb_shinfo(skb)->gso_size;
-       /*
-        * The controller does a simple calculation to
-        * make sure there is enough room in the FIFO before
-        * initiating the DMA for each buffer.  The calc is:
-        * 4 = ceil(buffer len/mss).  To make sure we don't
-        * overrun the FIFO, adjust the max buffer len if mss
-        * drops.
-        */
        if (mss) {
                u8 hdr_len;
-               max_per_txd = min(mss << 2, max_per_txd);
-               max_txd_pwr = fls(max_per_txd) - 1;
 
                /*
                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5883,13 +5897,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count++;
 #endif
 
-       count += TXD_USE_COUNT(len, max_txd_pwr);
+       count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++)
-               count +=
-                   TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                 max_txd_pwr);
+               count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                     adapter->tx_fifo_limit);
 
        if (adapter->hw.mac.tx_pkt_filtering)
                e1000_transfer_dhcp_info(adapter, skb);
@@ -5934,14 +5947,20 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 #endif /* IFF_SUPP_NOFCS */
 
        /* if count is 0 then mapping error has occurred */
-       count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+       count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+                            nr_frags);
        if (count) {
+               skb_tx_timestamp(skb);
+
 #ifdef CONFIG_BQL
                netdev_sent_queue(netdev, skb->len);
 #endif
                e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
+               e1000_maybe_stop_tx(tx_ring,
+                                   (MAX_SKB_FRAGS *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+                                                 adapter->tx_fifo_limit) + 2));
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
@@ -6066,30 +6085,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-#ifdef HAVE_NDO_SET_FEATURES
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       /* Disable receive hashing */
-                       netdev_features_t features;
-
-                       features = netdev->features & (~NETIF_F_RXHASH);
-                       if (e1000_set_features(netdev, features)) {
-                               e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                               return -EINVAL;
-                       }
-                       e_info("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disabling Receive Hashing.\n");
-               }
-#endif /* HAVE_NDO_SET_FEATURES */
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6107,14 +6106,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
-       /* 82573 Errata 17 */
-       if (((adapter->hw.mac.type == e1000_82573) ||
-            (adapter->hw.mac.type == e1000_82574)) &&
-           (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
-               adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
-               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
-       }
-
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -6977,17 +6968,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -7232,7 +7212,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        }
 
        if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
-               e_info("PHY reset is blocked due to SOL/IDER session.\n");
+               dev_info(pci_dev_to_dev(pdev),
+                        "PHY reset is blocked due to SOL/IDER session.\n");
 
        /* Set initial default active device features */
        netdev->features = (NETIF_F_SG |
@@ -7245,7 +7226,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                            NETIF_F_TSO6 |
 #endif
 #endif
-#if defined(NETIF_F_RXHASH) && defined(HAVE_NDO_SET_FEATURES)
+#if defined(NETIF_F_RXHASH)
                            NETIF_F_RXHASH |
 #endif
 #ifdef NETIF_F_RXCSUM
@@ -7312,7 +7293,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
                        break;
                if (i == 2) {
-                       e_err("The NVM Checksum Is Not Valid\n");
+                       dev_err(pci_dev_to_dev(pdev),
+                               "The NVM Checksum Is Not Valid\n");
                        err = -EIO;
                        goto err_eeprom;
                }
@@ -7322,7 +7304,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* copy the MAC address */
        if (e1000e_read_mac_addr(&adapter->hw))
-               e_err("NVM Read Error while reading MAC address\n");
+               dev_err(pci_dev_to_dev(pdev),
+                       "NVM Read Error while reading MAC address\n");
 
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
 #ifdef ETHTOOL_GPERMADDR
@@ -7332,10 +7315,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 #else
        if (!is_valid_ether_addr(netdev->dev_addr)) {
 #endif
-               e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
-                     netdev->dev_addr[0], netdev->dev_addr[1],
-                     netdev->dev_addr[2], netdev->dev_addr[3],
-                     netdev->dev_addr[4], netdev->dev_addr[5]);
+               dev_err(pci_dev_to_dev(pdev),
+                       "Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+                       netdev->dev_addr[0], netdev->dev_addr[1],
+                       netdev->dev_addr[2], netdev->dev_addr[3],
+                       netdev->dev_addr[4], netdev->dev_addr[5]);
                err = -EIO;
                goto err_eeprom;
        }
@@ -7365,8 +7349,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
        /* ring size defaults */
-       adapter->rx_ring->count = 256;
-       adapter->tx_ring->count = 256;
+       adapter->rx_ring->count = E1000_DEFAULT_RXD;
+       adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
        /*
         * Initial Wake on LAN setting - If APM wake is enabled in
index 65a858a9ea2f701dfbce7635e93c6a06758e2bec..292c2d08bf006db8207b407b96a8861cdb5123c0 100644 (file)
@@ -44,4 +44,5 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
 void e1000e_release_nvm(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE       0xDB00
+
 #endif
index 2711cf9bd6b64bc39ab38efca67168d0b65cf7a8..7d263d76084f05abdeaca3f67117bc5c33d893d2 100644 (file)
@@ -123,7 +123,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
 /*
  * Interrupt Throttle Rate (interrupts/sec)
  *
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
  */
 E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
 #define DEFAULT_ITR 3
@@ -230,16 +230,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
        case enable_option:
                switch (*value) {
                case OPTION_ENABLED:
-                       e_info("%s Enabled\n", opt->name);
+                       dev_info(pci_dev_to_dev(adapter->pdev), "%s Enabled\n",
+                                opt->name);
                        return 0;
                case OPTION_DISABLED:
-                       e_info("%s Disabled\n", opt->name);
+                       dev_info(pci_dev_to_dev(adapter->pdev), "%s Disabled\n",
+                                opt->name);
                        return 0;
                }
                break;
        case range_option:
                if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
-                       e_info("%s set to %i\n", opt->name, *value);
+                       dev_info(pci_dev_to_dev(adapter->pdev),
+                                "%s set to %i\n", opt->name, *value);
                        return 0;
                }
                break;
@@ -251,7 +254,9 @@ static int __devinit e1000_validate_option(unsigned int *value,
                                ent = &opt->arg.l.p[i];
                                if (*value == ent->i) {
                                        if (ent->str[0] != '\0')
-                                               e_info("%s\n", ent->str);
+                                               dev_info(pci_dev_to_dev
+                                                        (adapter->pdev),
+                                                        "%s\n", ent->str);
                                        return 0;
                                }
                        }
@@ -261,8 +266,9 @@ static int __devinit e1000_validate_option(unsigned int *value,
                BUG();
        }
 
-       e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
-              opt->err);
+       dev_info(pci_dev_to_dev(adapter->pdev),
+                "Invalid %s value specified (%i) %s\n", opt->name, *value,
+                opt->err);
        *value = opt->def;
        return -1;
 }
@@ -282,8 +288,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
        int bd = adapter->bd_number;
 
        if (bd >= E1000_MAX_NIC) {
-               e_notice("Warning: no configuration for board #%i\n", bd);
-               e_notice("Using defaults for all values\n");
+               dev_notice(pci_dev_to_dev(adapter->pdev),
+                          "Warning: no configuration for board #%i\n", bd);
+               dev_notice(pci_dev_to_dev(adapter->pdev),
+                          "Using defaults for all values\n");
        }
 
        {                       /* Transmit Interrupt Delay */
@@ -385,53 +393,65 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_InterruptThrottleRate > bd) {
                        adapter->itr = InterruptThrottleRate[bd];
-                       switch (adapter->itr) {
-                       case 0:
-                               e_info("%s turned off\n", opt.name);
-                               break;
-                       case 1:
-                               e_info("%s set to dynamic mode\n", opt.name);
-                               adapter->itr_setting = adapter->itr;
-                               adapter->itr = 20000;
-                               break;
-                       case 3:
-                               e_info("%s set to dynamic conservative mode\n",
-                                      opt.name);
-                               adapter->itr_setting = adapter->itr;
-                               adapter->itr = 20000;
-                               break;
-                       case 4:
-                               e_info("%s set to simplified (2000-8000 ints) mode\n",
-                                    opt.name);
-                               adapter->itr_setting = 4;
-                               break;
-                       default:
-                               /*
-                                * Save the setting, because the dynamic bits
-                                * change itr.
-                                */
-                               if (e1000_validate_option(&adapter->itr, &opt,
-                                                         adapter) &&
-                                   (adapter->itr == 3)) {
-                                       /*
-                                        * In case of invalid user value,
-                                        * default to conservative mode.
-                                        */
-                                       adapter->itr_setting = adapter->itr;
-                                       adapter->itr = 20000;
-                               } else {
-                                       /*
-                                        * Clear the lower two bits because
-                                        * they are used as control.
-                                        */
-                                       adapter->itr_setting =
-                                           adapter->itr & ~3;
-                               }
-                               break;
-                       }
+
+                       /*
+                        * Make sure a message is printed for non-special
+                        * values.  And in case of an invalid option, display
+                        * warning, use default and go through itr/itr_setting
+                        * adjustment logic below
+                        */
+                       if ((adapter->itr > 4) &&
+                           e1000_validate_option(&adapter->itr, &opt, adapter))
+                               adapter->itr = opt.def;
                } else {
-                       adapter->itr_setting = opt.def;
+                       /*
+                        * If no option specified, use default value and go
+                        * through the logic below to adjust itr/itr_setting
+                        */
+                       adapter->itr = opt.def;
+
+                       /*
+                        * Make sure a message is printed for non-special
+                        * default values
+                        */
+                       if (adapter->itr > 4)
+                               dev_info(pci_dev_to_dev(adapter->pdev),
+                                        "%s set to default %d\n", opt.name,
+                                        adapter->itr);
+               }
+
+               adapter->itr_setting = adapter->itr;
+               switch (adapter->itr) {
+               case 0:
+                       dev_info(pci_dev_to_dev(adapter->pdev),
+                                "%s turned off\n", opt.name);
+                       break;
+               case 1:
+                       dev_info(pci_dev_to_dev(adapter->pdev),
+                                "%s set to dynamic mode\n", opt.name);
+                       adapter->itr = 20000;
+                       break;
+               case 3:
+                       dev_info(pci_dev_to_dev(adapter->pdev),
+                                "%s set to dynamic conservative mode\n",
+                                opt.name);
                        adapter->itr = 20000;
+                       break;
+               case 4:
+                       dev_info(pci_dev_to_dev(adapter->pdev),
+                                "%s set to simplified (2000-8000 ints) mode\n",
+                                opt.name);
+                       break;
+               default:
+                       /*
+                        * Save the setting, because the dynamic bits
+                        * change itr.
+                        *
+                        * Clear the lower two bits because
+                        * they are used as control.
+                        */
+                       adapter->itr_setting &= ~3;
+                       break;
                }
        }
        {                       /* Interrupt Mode */
@@ -490,8 +510,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
                if (num_SmartPowerDownEnable > bd) {
                        unsigned int spd = SmartPowerDownEnable[bd];
                        e1000_validate_option(&spd, &opt, adapter);
-                       if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
-                           && spd)
+                       if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
                                adapter->flags |= FLAG_SMART_POWER_DOWN;
                }
        }
@@ -576,9 +595,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
                 * default value to an online node, which is not
                 * necessarily zero, and the constant initializer
                 * above can't take first_online_node */
-               if (node == 0)
+               if (node == 0) {
                        /* must set opt.def for validate */
-                       opt.def = node = first_online_node;
+                       node = first_online_node;
+                       opt.def = node;
+               }
 
                if (num_Node > bd) {
                        node = Node[bd];
index 24e2749a577dba7273782416f696ac34881935f3..15e835fda65496da7e4870349c42d4f788201c8e 100644 (file)
@@ -660,6 +660,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
+       /* Set MDI/MDIX mode */
+       ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               return ret_val;
+       phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+       if (ret_val)
+               return ret_val;
+
        return e1000_set_master_slave_mode(hw);
 }
 
index 5cfb24a9c2f7347602945f29717b716bb5d67803..522ca9c62f036332f76a013e03ecc31d78b72387 100644 (file)
@@ -154,8 +154,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define I82577_PHY_STATUS2_SPEED_100MBPS       0x0100
 
 /* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX             0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX                0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX           0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX         0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK         0x0600
 
 /* I82577 PHY Diagnostics Status */
 #define I82577_DSTATUS_CABLE_LENGTH            0x03FC
@@ -234,8 +235,14 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define E1000_KMRNCTRLSTA_IBIST_DISABLE        0x0200  /* Kumeran IBIST Disable */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK  0x1000  /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG    0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002
+#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002  /* enable K1 */
+#define E1000_KMRNCTRLSTA_UNBLOCK_RX   0x0004  /* unblock Kumeran Rx in K0/K1 */
+#define E1000_KMRNCTRLSTA_PLL_STOP_EN  0x0008  /* enable PLL stop in K1 mode */
+
 #define E1000_KMRNCTRLSTA_HD_CTRL      0x10    /* Kumeran HD Control */
+#define E1000_KMRNCTRLSTA_K0_CTRL      0x1E    /* Kumeran K0s Control */
+#define E1000_KMRNCTRLSTA_K0_GBE_EN    0x1000  /* ena K0s mode for 1G link */
+#define E1000_KMRNCTRLSTA_K0_100_EN    0x2000  /* ena K0s mode for 10/100 lnk */
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL        0x10
 #define IFE_PHY_SPECIAL_CONTROL                0x11    /* 100BaseTx PHY Special Control */
index 8c334b6188555ed752d07da2e52a0ce08bd60699..b9de42355d998917a4a0c63e769f4f6c72e7ab56 100644 (file)
 #define E1000_FCAL     0x00028 /* Flow Control Address Low - RW */
 #define E1000_FCAH     0x0002C /* Flow Control Address High -RW */
 #define E1000_FEXT     0x0002C /* Future Extended - RW */
-#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM  0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM2 0x00030 /* Future Extended NVM 2 - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_CONNSW   0x00034 /* Copper/Fiber switch control - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
@@ -55,6 +57,7 @@
 #define E1000_IVAR     0x000E4 /* Interrupt Vector Allocation Register - RW */
 #define E1000_SVCR     0x000F0
 #define E1000_SVT      0x000F4
+#define E1000_LPIC  0x000FC    /* Low Power IDLE control */
 #define E1000_RCTL     0x00100 /* Rx Control - RW */
 #define E1000_FCTTV    0x00170 /* Flow Control Transmit Timer Value - RW */
 #define E1000_TXCW     0x00178 /* Tx Configuration Word - RW */