e1000e: Update driver from sf.net sources
Signed-off-by: Maxim Uvarov <maxim.uvarov@oracle.com>
**/
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
{
- u16 data = er32(POEMB);
+ u32 data = er32(POEMB);
if (active)
data |= E1000_PHY_CTRL_D0A_LPLU;
**/
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
{
- u16 data = er32(POEMB);
+ u32 data = er32(POEMB);
if (!active) {
data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext;
+ u32 ctrl, ctrl_ext, eecd;
s32 ret_val;
/*
*/
switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /*
+ * REQ and GNT bits need to be cleared when using AUTO_RD
+ * to access the EEPROM.
+ */
+ eecd = er32(EECD);
+ eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+ ew32(EECD, eecd);
+ break;
case e1000_82573:
case e1000_82574:
case e1000_82583:
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
- /* SYNCH bit and IV bit are sticky */
- udelay(10);
- rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto-negotiation.
*/
- if (rxcw & E1000_RXCW_C) {
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
* incoming packets directed to this port are dropped.
* Eventually the LAA will be in RAR[0] and RAR[14].
*/
- e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+ hw->mac.ops.rar_set(hw, hw->mac.addr,
+ hw->mac.rar_entry_count - 1);
}
/**
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+/* Low Power IDLE Control */
+#define E1000_LPIC_1000ENABLE 0x00010000
+#define E1000_LPIC_100ENABLE 0x00020000
+
/* PBA constants */
#define E1000_PBA_6K 0x0006 /* 6KB */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define NVM_VERSION 0x0005
#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
#define NVM_PHY_CLASS_WORD 0x0007
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
#define NVM_INIT_CONTROL1_REG 0x000A
#define NVM_INIT_CONTROL2_REG 0x000F
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
+ u32 tx_fifo_limit;
#ifdef CONFIG_E1000E_NAPI
struct napi_struct napi;
struct e1000_hw hw;
#ifdef HAVE_NDO_GET_STATS64
- spinlock_t stats64_lock;
+ spinlock_t stats64_lock; /* protects statistics counters */
#endif
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count);
-extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
return 0;
}
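+/* like e1e_rphy() but assumes the PHY lock (hw->phy.ops.acquire) is held */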
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
{
if (hw->phy.ops.write_reg)
return 0;
}
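+/* locked variant of e1e_wphy(); the PHY lock must already be held */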
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
{
if (hw->phy.ops.get_cable_length)
#endif
rc = ethtool_op_set_flags(netdev, data, supported);
-
if (rc)
return rc;
struct ethtool_coalesce *ec)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 4) &&
return -EINVAL;
if (ec->rx_coalesce_usecs == 4) {
- adapter->itr = adapter->itr_setting = 4;
+ adapter->itr_setting = 4;
+ adapter->itr = adapter->itr_setting;
} else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
}
if (adapter->itr_setting != 0)
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
return 0;
}
**/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
- u16 phy_reg;
- u32 phy_id;
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val;
+ u16 retry_count;
+
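+ /*
+ * Read the PHY ID registers up to twice; a read error or an MDIO
+ * value of 0xFFFF means the PHY did not respond on this attempt.
+ */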
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
- hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
- phy_id = (u32)(phy_reg << 16);
- hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
- phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
if (hw->phy.id) {
if (hw->phy.id == phy_id)
return true;
- } else {
- if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
- hw->phy.id = phy_id;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
return true;
}
- return false;
+ /*
+ * In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+
+ return !ret_val;
}
/**
if (e1000_phy_is_accessible_pchlan(hw)) {
if (hw->mac.type == e1000_pch_lpt) {
/* Unforce SMBus mode in PHY */
- hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
- &phy_reg);
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,
- phy_reg);
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
* e1000_set_eee_pchlan - Enable/disable EEE support
* @hw: pointer to the HW structure
*
- * Enable/disable EEE based on setting in dev_spec structure. The bits in
- * the LPI Control register will remain set only if/when link is up.
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex and
+ * MDI/MDI-X mode of the link and the EEE capabilities of the link partner.
+ * The LPI Control register bits will remain set only if/when link is up.
**/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- s32 ret_val = 0;
- u16 phy_reg;
+ s32 ret_val;
+ u16 lpi_ctrl;
if ((hw->phy.type != e1000_phy_82579) &&
(hw->phy.type != e1000_phy_i217))
return 0;
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ /* get phy info to see if the phy is in MDI-X mode */
+ ret_val = hw->phy.ops.get_info(hw);
if (ret_val)
return ret_val;
- if (dev_spec->eee_disable)
- phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
- else
- phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
-
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ if (ret_val)
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user and link is not in MDI-X mode */
+ if (!dev_spec->eee_disable && !hw->phy.is_mdix) {
+ u16 data;
+
/* Save off link partner's EEE ability */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
- I217_EEE_LP_ABILITY);
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ data = I82579_EEE_LP_ABILITY;
+ break;
+ case e1000_phy_i217:
+ data = I217_EEE_LP_ABILITY;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto release;
+ }
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, data);
if (ret_val)
goto release;
- hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
- &dev_spec->eee_lp_ability);
+ e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
/*
- * EEE is not supported in 100Half, so ignore partner's EEE
- * in 100 ability if full-duplex is not advertised.
+ * Enable EEE only for speeds in which the link partner is
+ * EEE capable.
*/
- hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &phy_reg);
- if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
- dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
-release:
- hw->phy.ops.release(hw);
+ if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ e1e_rphy_locked(hw, PHY_LP_ABILITY, &data);
+ if (data & NWAY_LPAR_100TX_FD_CAPS)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /*
+ * EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
}
- return 0;
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
}
/**
phy_data |= (freq & (1 << 0)) <<
HV_SMB_ADDR_FREQ_LOW_SHIFT;
phy_data |= (freq & (1 << 1)) <<
- HV_SMB_ADDR_FREQ_HIGH_SHIFT;
+ (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
} else {
e_dbg("Unsupported SMB frequency in PHY\n");
}
reg_addr &= PHY_REG_MASK;
reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
- reg_data);
+ ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
if (ret_val)
goto release;
}
/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
if (link) {
if (hw->phy.type == e1000_phy_82578) {
- ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
- &status_reg);
+ ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
+ &status_reg);
if (ret_val)
goto release;
}
if (hw->phy.type == e1000_phy_82577) {
- ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
- &status_reg);
+ ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
if (ret_val)
goto release;
}
/* Link stall fix for link up */
- ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
- 0x0100);
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
if (ret_val)
goto release;
} else {
/* Link stall fix for link down */
- ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
- 0x4100);
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
if (ret_val)
goto release;
}
mac_reg = er32(PHY_CTRL);
- ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+ ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
goto release;
!hw->phy.ops.check_reset_block(hw))
oem_reg |= HV_OEM_BITS_RESTART_AN;
- ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+ ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
release:
hw->phy.ops.release(hw);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+ ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
if (ret_val)
goto release;
- ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
- phy_data & 0x00FF);
+ ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
release:
hw->phy.ops.release(hw);
u32 mac_reg;
u16 i;
- if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pch_lpt))
+ if (hw->mac.type < e1000_pch2lan)
return 0;
/* disable Rx path while enabling/disabling workaround */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
- I82579_MSE_THRESHOLD);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
if (ret_val)
goto release;
/* set MSE higher to enable link to stay up when noise is high */
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
if (ret_val)
goto release;
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
- I82579_MSE_LINK_DOWN);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
if (ret_val)
goto release;
/* drop link after 5 times MSE threshold was reached */
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
release:
hw->phy.ops.release(hw);
{
u32 extcnf_ctrl;
- if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pch_lpt))
+ if (hw->mac.type < e1000_pch2lan)
return;
extcnf_ctrl = er32(EXTCNF_CTRL);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
- I82579_LPI_UPDATE_TIMER);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+ I82579_LPI_UPDATE_TIMER);
if (!ret_val)
- ret_val = hw->phy.ops.write_reg_locked(hw,
- I82579_EMI_DATA,
- 0x1387);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
hw->phy.ops.release(hw);
}
if (!dev_spec->eee_disable) {
u16 eee_advert;
- ret_val = hw->phy.ops.write_reg_locked(hw,
- I82579_EMI_ADDR,
- I217_EEE_ADVERTISEMENT);
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+ I217_EEE_ADVERTISEMENT);
if (ret_val)
goto release;
- hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
- &eee_advert);
+ e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
/*
* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
- if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
(dev_spec->eee_lp_ability &
- I217_EEE_100_SUPPORTED) &&
+ I82579_EEE_100_SUPPORTED) &&
(hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_NOND0A_LPLU);
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
/* Enable proxy to reset only on power good. */
- hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
- &phy_reg);
+ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
- hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
- phy_reg);
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
/*
* Set bit enable LPI (EEE) to reset only on
* power good.
*/
- hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
- phy_reg |= I217_SxCTRL_MASK;
- hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
+ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
+ e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
/* Disable the SMB release on LCD reset. */
- hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
- phy_reg &= ~I217_MEMPWR;
- hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
}
/*
* Enable MTA to reset for Intel Rapid Start Technology
* Support
*/
- hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
- phy_reg |= I217_CGFREG_MASK;
- hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
hw->phy.ops.release(hw);
* Restore clear on SMB if no manageability engine
* is present
*/
- ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
- &phy_reg);
+ ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
if (ret_val)
goto release;
- phy_reg |= I217_MEMPWR_MASK;
- hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
/* Disable Proxy */
- hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
}
/* Enable reset on MTA */
- ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
- &phy_reg);
+ ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
if (ret_val)
goto release;
- phy_reg &= ~I217_CGFREG_MASK;
- hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
if (ret_val)
e_dbg("Error %d in resume workarounds\n", ret_val);
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
/* Shared Receive Address Registers */
-#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
-#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
-#define E1000_SHRAH_AV 0x80000000 /* Addr Valid bit */
-#define E1000_SHRAH_MAV 0x40000000 /* Multicast Addr Valid bit */
-
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+#define E1000_SHRAH_AV 0x80000000 /* Addr Valid bit */
+#define E1000_SHRAH_MAV 0x40000000 /* Multicast Addr Valid bit */
#define E1000_H2ME 0x05B50 /* Host to ME */
#define E1000_H2ME_LSECREQ 0x00000001 /* Linksec Request */
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE supported */
#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
-#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK 0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_SxCTRL_ENABLE_SERDES 0x0020
#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_MASK 0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_MASK 0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
/*
* Additional interrupts need to be handled for ICH family:
out:
return err;
}
+#endif /* < 2.6.28 */
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev *dev)
{
- skb_fill_page_desc(skb, i, page, off, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
+ int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *vfdev;
+
+ /* walk every PCI Ethernet-class device, counting VFs of this PF */
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == dev)
+ num_vf++;
+
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+ }
+
+#endif
+ return num_vf;
}
-#endif /* < 2.6.28 */
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
#endif /* < 2.6.35 */
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#endif /* < 3.4.0 */
#define __GFP_COLD 0
#endif
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
/*****************************************************************************/
/* Installations with ethtool version without eeprom, adapter id, or statistics
* support */
pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
#define dev_warn(dev, fmt, args...) \
pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
/* NOTE: dangerous! we ignore the 'gfp' argument */
#define dma_alloc_coherent(dev,sz,dma,gfp) \
#define dma_unmap_single(dev,a,b,c) \
pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
#define dma_sync_single(dev,a,b,c) \
pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
#undef node_online_map
#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
#endif /* < 2.6.10 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
#ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_device(dev, dma_handle, size, dir)
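+/*
+ * The non-range sync helpers operate from the start of the mapping, so
+ * extend the length by the offset to cover the requested range.
+ */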
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
#endif
#endif
#endif /* < 2.6.14 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
#ifndef first_online_node
#define first_online_node 0
#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
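+/*
+ * Sanity examples for the fallbacks above (hypothetical values):
+ *   DIV_ROUND_UP(10, 4) == 3
+ *   __ALIGN_MASK(10, 3) == 12 (round 10 up to a multiple of 4)
+ */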
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
#if (!((RHEL_RELEASE_CODE && \
((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
#ifndef KERN_CONT
#define KERN_CONT ""
#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
#else /* < 2.6.24 */
#define HAVE_ETHTOOL_GET_SSET_COUNT
#define HAVE_NETDEV_NAPI_LIST
#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
#endif
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
#endif /* < 2.6.28 */
/*****************************************************************************/
#define netdev_for_each_uc_addr(uclist, dev) \
for (uclist = dev->uc_list; uclist; uclist = uclist->next)
#endif
-#else
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#else /* < 2.6.31 */
#ifndef HAVE_NETDEV_STORAGE_ADDRESS
#define HAVE_NETDEV_STORAGE_ADDRESS
#endif
#ifndef __percpu
#define __percpu
#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
#else /* < 2.6.33 */
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
#define HAVE_NETDEV_OPS_FCOE_GETWWN
#endif
#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
#endif /* < 2.6.33 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
#ifndef ETH_FLAG_NTUPLE
#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
#endif
do { \
struct adapter_struct *kc_adapter = netdev_priv(netdev);\
struct pci_dev *pdev = kc_adapter->pdev; \
- printk("%s %s: " format, level, pci_name(pdev), \
- ##args); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
} while(0)
#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
#define netdev_printk(level, netdev, format, args...) \
#ifdef HAVE_TX_MQ
#include <net/sch_generic.h>
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
#define netif_set_real_num_tx_queues(_netdev, _count) \
do { \
(_netdev)->egress_subqueue_count = _count; \
} while (0)
#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
-#else
+#else /* HAVE_TX_MQ */
#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
#endif /* HAVE_TX_MQ */
#ifndef ETH_FLAG_RXHASH
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
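+/* no-op stub: pre-2.6.37 kernels have no per-netdev Rx queue count to set */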
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+ unsigned int rxq)
+{
+ return 0;
+}
+
+#define netif_set_real_num_rx_queues(dev, rxq) \
+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
#endif
#define HAVE_ETHTOOL_SET_PHYS_ID
#endif /* < 2.6.40 */
+/*****************************************************************************/
+
+/*****************************************************************************/
+#undef CONFIG_E1000E_PTP
+#ifdef E1000E_PTP
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && (defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
+#define CONFIG_E1000E_PTP
+#else
+#error Cannot enable PTP Hardware Clock due to insufficient kernel support
+#endif
+#endif /* E1000E_PTP */
+
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
#ifndef __netdev_alloc_skb_ip_align
put_page(skb_frag_page(frag));
}
#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
#else /* < 3.2.0 */
#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
#define HAVE_PCI_DEV_FLAGS_ASSIGNED
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
+#define eth_random_addr(addr) random_ether_addr(addr)
#else
#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
#endif /* < 3.5.0 */
#endif /* _KCOMPAT_H_ */
static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
static void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+static void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
/**
* e1000_init_mac_ops_generic - Initialize MAC function pointers
mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header;
mac->ops.mng_enable_host_if = e1000_mng_enable_host_if;
/* VLAN, MC, etc. */
- mac->ops.rar_set = e1000e_rar_set;
+ mac->ops.rar_set = e1000e_rar_set_generic;
mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
}
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
- e1000e_rar_set(hw, hw->mac.addr, 0);
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
for (i = 1; i < rar_count; i++)
- e1000e_rar_set(hw, mac_addr, i);
+ hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
- e1000e_rar_set(hw, alt_mac_addr, 0);
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
return 0;
}
/**
- * e1000e_rar_set - Set receive address register
+ * e1000e_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+static void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000e_reset_adaptive(struct e1000_hw *hw);
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
#define DRV_EXTRAVERSION ""
#endif
-#define DRV_VERSION "2.0.0.1" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-static int e1000_set_features(struct net_device *netdev,
- netdev_features_t features);
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
{
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
- | FLAG2_DISABLE_ASPM_L0S | FLAG2_NO_DISABLE_RX | FLAG2_DMA_BURST,
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX | FLAG2_DMA_BURST,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.init_ops = e1000_init_function_pointers_82571,
napi_gro_receive(&adapter->napi, skb);
#endif /* HAVE_VLAN_RX_REGISTER */
#else /* CONFIG_E1000E_NAPI */
+#ifdef HAVE_VLAN_RX_REGISTER
#ifdef NETIF_F_HW_VLAN_TX
if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
ret = vlan_hwaccel_rx(skb, adapter->vlgrp, le16_to_cpu(vlan));
else
#endif
ret = netif_rx(skb);
+#else /* HAVE_VLAN_RX_REGISTER */
+ if (status & E1000_RXD_STAT_VP)
+ __vlan_hwaccel_put_tag(skb, le16_to_cpu(vlan));
+
+ ret = netif_rx(skb);
+#endif /* HAVE_VLAN_RX_REGISTER */
if (unlikely(ret == NET_RX_DROP))
adapter->rx_dropped_backlog++;
#endif /* CONFIG_E1000E_NAPI */
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- __le16 csum, struct sk_buff *skb)
+ struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
if (status & E1000_RXD_STAT_IXSM)
return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
return;
/* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
- unsigned int bufsz = 256 - 16 /* for skb_reserve */ ;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
#ifdef NETIF_F_RXHASH
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
#ifdef NETIF_F_RXHASH
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
}
}
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
#ifdef NETIF_F_RXHASH
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
}
}
+/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+ struct e1000_hw *hw = &adapter->hw;
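+ /*
+ * Convert interrupts/sec to an interval in 256ns units (0 disables
+ * interrupt throttling); e.g. itr = 20000 ints/sec gives
+ * 1000000000 / (20000 * 256) ~= 195.
+ */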
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+ if (adapter->msix_entries) {
+ int vector;
+
+ for (vector = 0; vector < adapter->num_vectors; vector++)
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+ } else {
+ ew32(ITR, new_itr);
+ }
+}
+
/**
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0))
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT);
#ifdef CONFIG_E1000E_NAPI
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
#ifdef HAVE_NDO_SET_FEATURES
- if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
#else
- if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
+ if (adapter->flags & FLAG_RX_CSUM_ENABLED)
#endif
rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
+ else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
if (!rar_entries)
break;
#ifdef NETDEV_HW_ADDR_T_UNICAST
- e1000e_rar_set(hw, ha->addr, rar_entries--);
+ hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
#else
- e1000e_rar_set(hw, ha->da_addr, rar_entries--);
+ hw->mac.ops.rar_set(hw, ha->da_addr, rar_entries--);
#endif
count++;
}
/*
* if short on Rx space, Rx wins and must trump Tx
- * adjustment or use Early Receive if available
+ * adjustment
*/
if (pba < min_rx_space)
pba = min_rx_space;
break;
}
+ /*
+ * Alignment of Tx data is on an arbitrary byte boundary with the
+ * maximum size per Tx descriptor limited only to the transmit
+ * allocation of the packet buffer minus 96 bytes with an upper
+ * limit of 24KB due to receive synchronization limitations.
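+ * For example, a 20KB Tx allocation in PBA works out to
+ * min(20 * 1024 - 96, 24 * 1024) = 20384 bytes.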
+ */
+ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+ 24 << 10);
+
/*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
dev_info(pci_dev_to_dev(adapter->pdev),
"Interrupt Throttle Rate turned off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(pci_dev_to_dev(adapter->pdev),
"Interrupt Throttle Rate turned on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
}
}
e_dbg("icr is %08X\n", icr);
if (icr & E1000_ICR_RXSEQ) {
adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ /*
+ * Force memory writes to complete before acknowledging the
+ * interrupt is handled.
+ */
wmb();
}
goto msi_test_failed;
}
+ /*
+ * Force memory writes to complete before enabling and firing an
+ * interrupt.
+ */
wmb();
e1000_irq_enable(adapter);
/* fire an unusual interrupt on the test handler */
ew32(ICS, E1000_ICS_RXSEQ);
e1e_flush();
- msleep(50);
+ msleep(100);
e1000_irq_disable(adapter);
- rmb();
+ rmb(); /* read flags after interrupt has been fired */
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
- e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
/* activate the work around */
* are dropped. Eventually the LAA will be in RAR[0] and
* RAR[14]
*/
- e1000e_rar_set(&adapter->hw,
- adapter->hw.mac.addr,
- adapter->hw.mac.rar_entry_count - 1);
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
+ adapter->hw.mac.rar_entry_count - 1);
}
return 0;
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO
- "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name, adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
- (ctrl & E1000_CTRL_TFCE)
- && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : (ctrl & E1000_CTRL_RFCE)
- ? "Rx" : (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+/* *INDENT-OFF* */
+ pr_info("e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+/* *INDENT-ON* */
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
- adapter->netdev->name);
+ pr_info("e1000e: %s NIC Link is Down\n",
+ adapter->netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- ew32(ITR, 1000000000 / (itr * 256));
+ e1000e_write_itr(adapter, itr);
}
/* Cause software interrupt to ensure Rx ring is cleaned */
* reset from the other port. Set the appropriate LAA in RAR[0]
*/
if (e1000e_get_laa_state_82571(hw))
- e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+ hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
e1000e_check_82574_phy_workaround(adapter);
struct e1000_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
- u16 ipcse = 0, tucse, mss;
+ u16 ipcse = 0, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
if (!skb_is_gso(skb))
}
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+#if defined(DEBUG) || defined(DEV)
+ /*
+ * C-Spec section on TCP Segmentation Context Descriptor states
+ * the header may be up to 240 bytes in length.
+ */
+ BUG_ON(hdr_len > 240);
+#endif
mss = skb_shinfo(skb)->gso_size;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
tucss = skb_transport_offset(skb);
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
- tucse = 0;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
context_desc->upper_setup.tcp_fields.tucss = tucss;
context_desc->upper_setup.tcp_fields.tucso = tucso;
- context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
return 1;
}
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
unsigned int first, unsigned int max_per_txd,
- unsigned int nr_frags, unsigned int mss)
+ unsigned int nr_frags)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
u16 length, offset;
#ifdef NETIF_F_HW_VLAN_TX
- if (vlan_tx_tag_present(skb)) {
- if (!
- ((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
- && (adapter->hw.mng_cookie.
- status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
- return 0;
- }
+ if (vlan_tx_tag_present(skb) &&
+ !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
+
#endif
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
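+ /* a request larger than the entire ring can never be satisfied */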
+ BUG_ON(size > tx_ring->count);
+
if (e1000_desc_unused(tx_ring) >= size)
return 0;
return __e1000_maybe_stop_tx(tx_ring, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int first;
- unsigned int max_per_txd = E1000_MAX_PER_TXD;
- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
}
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->gso_size;
- /*
- * The controller does a simple calculation to
- * make sure there is enough room in the FIFO before
- * initiating the DMA for each buffer. The calc is:
- * 4 = ceil(buffer len/mss). To make sure we don't
- * overrun the FIFO, adjust the max buffer len if mss
- * drops.
- */
if (mss) {
u8 hdr_len;
- max_per_txd = min(mss << 2, max_per_txd);
- max_txd_pwr = fls(max_per_txd) - 1;
/*
* TSO Workaround for 82571/2/3 Controllers -- if skb->data
count++;
#endif
- count += TXD_USE_COUNT(len, max_txd_pwr);
+ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count +=
- TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
- max_txd_pwr);
+ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+ adapter->tx_fifo_limit);
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
#endif /* IFF_SUPP_NOFCS */
/* if count is 0 then mapping error has occurred */
- count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+ nr_frags);
if (count) {
+ skb_tx_timestamp(skb);
+
#ifdef CONFIG_BQL
netdev_sent_queue(netdev, skb->len);
#endif
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
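+ /*
+ * Worst case: each of MAX_SKB_FRAGS frags may need
+ * DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) descriptors, plus two
+ * more (e.g. head data and a context descriptor).
+ */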
- e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
+ e1000_maybe_stop_tx(tx_ring,
+ (MAX_SKB_FRAGS *
+ DIV_ROUND_UP(PAGE_SIZE,
+ adapter->tx_fifo_limit) + 2));
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-#ifdef HAVE_NDO_SET_FEATURES
- /*
- * IP payload checksum (enabled with jumbos/packet-split when
- * Rx checksum is enabled) and generation of RSS hash is
- * mutually exclusive in the hardware.
- */
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (netdev->features & NETIF_F_RXHASH)) {
- /* Disable receive hashing */
- netdev_features_t features;
-
- features = netdev->features & (~NETIF_F_RXHASH);
- if (e1000_set_features(netdev, features)) {
- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
- return -EINVAL;
- }
- e_info("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disabling Receive Hashing.\n");
- }
-#endif /* HAVE_NDO_SET_FEATURES */
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
}
/* Supported frame sizes */
return -EINVAL;
}
- /* 82573 Errata 17 */
- if (((adapter->hw.mac.type == e1000_82573) ||
- (adapter->hw.mac.type == e1000_82574)) &&
- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
- }
-
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
NETIF_F_RXALL)))
return 0;
- /*
- * IP payload checksum (enabled with jumbos/packet-split when Rx
- * checksum is enabled) and generation of RSS hash is mutually
- * exclusive in the hardware.
- */
- if (adapter->rx_ps_pages &&
- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
- return -EINVAL;
- }
-
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
}
if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
+ dev_info(pci_dev_to_dev(pdev),
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
NETIF_F_TSO6 |
#endif
#endif
-#if defined(NETIF_F_RXHASH) && defined(HAVE_NDO_SET_FEATURES)
+#if defined(NETIF_F_RXHASH)
NETIF_F_RXHASH |
#endif
#ifdef NETIF_F_RXCSUM
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
+ dev_err(pci_dev_to_dev(pdev),
+ "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
/* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
+ dev_err(pci_dev_to_dev(pdev),
+ "NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
#ifdef ETHTOOL_GPERMADDR
#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
#endif
- e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_err(pci_dev_to_dev(pdev),
+ "Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ netdev->dev_addr[0], netdev->dev_addr[1],
+ netdev->dev_addr[2], netdev->dev_addr[3],
+ netdev->dev_addr[4], netdev->dev_addr[5]);
err = -EIO;
goto err_eeprom;
}
adapter->hw.phy.autoneg_advertised = 0x2f;
/* ring size defaults */
- adapter->rx_ring->count = 256;
- adapter->tx_ring->count = 256;
+ adapter->rx_ring->count = E1000_DEFAULT_RXD;
+ adapter->tx_ring->count = E1000_DEFAULT_TXD;
/*
* Initial Wake on LAN setting - If APM wake is enabled in
void e1000e_release_nvm(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
+
#endif
/*
* Interrupt Throttle Rate (interrupts/sec)
*
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic,
+ * 3=dynamic conservative, 4=simplified
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- e_info("%s Enabled\n", opt->name);
+ dev_info(pci_dev_to_dev(adapter->pdev), "%s Enabled\n",
+ opt->name);
return 0;
case OPTION_DISABLED:
- e_info("%s Disabled\n", opt->name);
+ dev_info(pci_dev_to_dev(adapter->pdev), "%s Disabled\n",
+ opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- e_info("%s set to %i\n", opt->name, *value);
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s set to %i\n", opt->name, *value);
return 0;
}
break;
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- e_info("%s\n", ent->str);
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s\n", ent->str);
return 0;
}
}
BUG();
}
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
- opt->err);
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "Invalid %s value specified (%i) %s\n", opt->name, *value,
+ opt->err);
*value = opt->def;
return -1;
}
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- e_notice("Warning: no configuration for board #%i\n", bd);
- e_notice("Using defaults for all values\n");
+ dev_notice(pci_dev_to_dev(adapter->pdev),
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(pci_dev_to_dev(adapter->pdev),
+ "Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
- switch (adapter->itr) {
- case 0:
- e_info("%s turned off\n", opt.name);
- break;
- case 1:
- e_info("%s set to dynamic mode\n", opt.name);
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- break;
- case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- break;
- case 4:
- e_info("%s set to simplified (2000-8000 ints) mode\n",
- opt.name);
- adapter->itr_setting = 4;
- break;
- default:
- /*
- * Save the setting, because the dynamic bits
- * change itr.
- */
- if (e1000_validate_option(&adapter->itr, &opt,
- adapter) &&
- (adapter->itr == 3)) {
- /*
- * In case of invalid user value,
- * default to conservative mode.
- */
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- } else {
- /*
- * Clear the lower two bits because
- * they are used as control.
- */
- adapter->itr_setting =
- adapter->itr & ~3;
- }
- break;
- }
+
+ /*
+ * Make sure a message is printed for non-special
+ * values; in case of an invalid option, display a
+ * warning, use the default and go through the
+ * itr/itr_setting adjustment logic below.
+ */
+ if ((adapter->itr > 4) &&
+ e1000_validate_option(&adapter->itr, &opt, adapter))
+ adapter->itr = opt.def;
} else {
- adapter->itr_setting = opt.def;
+ /*
+ * If no option specified, use default value and go
+ * through the logic below to adjust itr/itr_setting
+ */
+ adapter->itr = opt.def;
+
+ /*
+ * Make sure a message is printed for non-special
+ * default values
+ */
+ if (adapter->itr > 4)
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
+ }
+
+ adapter->itr_setting = adapter->itr;
+ switch (adapter->itr) {
+ case 0:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s turned off\n", opt.name);
+ break;
+ case 1:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s set to dynamic mode\n", opt.name);
+ adapter->itr = 20000;
+ break;
+ case 3:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
+ break;
+ case 4:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
+ break;
+ default:
+ /*
+ * Save the setting, because the dynamic bits
+ * change itr.
+ *
+ * Clear the lower two bits because
+ * they are used as control.
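+ * E.g. a user value of 9999 is stored as an
+ * itr_setting of 9996.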
+ */
+ adapter->itr_setting &= ~3;
+ break;
}
}
{ /* Interrupt Mode */
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
- && spd)
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
* default value to an online node, which is not
* necessarily zero, and the constant initializer
* above can't take first_online_node */
- if (node == 0)
+ if (node == 0) {
/* must set opt.def for validate */
- opt.def = node = first_online_node;
+ node = first_online_node;
+ opt.def = node;
+ }
if (num_Node > bd) {
node = Node[bd];
if (ret_val)
return ret_val;
+ /* Set MDI/MDIX mode */
+ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
return e1000_set_master_slave_mode(hw);
}
#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_UNBLOCK_RX 0x0004 /* unblock Kumeran Rx in K0/K1 */
+#define E1000_KMRNCTRLSTA_PLL_STOP_EN 0x0008 /* enable PLL stop in K1 mode */
+
#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+#define E1000_KMRNCTRLSTA_K0_CTRL 0x1E /* Kumeran K0s Control */
+#define E1000_KMRNCTRLSTA_K0_GBE_EN 0x1000 /* ena K0s mode for 1G link */
+#define E1000_KMRNCTRLSTA_K0_100_EN 0x2000 /* ena K0s mode for 10/100 lnk */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FEXT 0x0002C /* Future Extended - RW */
-#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM2 0x00030 /* Future Extended NVM 2 - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
#define E1000_SVCR 0x000F0
#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
#define E1000_RCTL 0x00100 /* Rx Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */