From: Joe Jin
Date: Tue, 28 Aug 2012 05:49:04 +0000 (+0800)
Subject: igb: upgrade to 3.4.8.
X-Git-Tag: v2.6.39-400.9.0~338^2~193
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=851be1f3725cbd25207beabf722ab69b2701dd3f;p=users%2Fjedix%2Flinux-maple.git

igb: upgrade to 3.4.8.

Signed-off-by: Joe Jin
---

diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
index 6565c463185c..f6508bd50dc4 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/igb/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
-# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Intel(R) Gigabit Ethernet Linux driver
+# Copyright(c) 2007-2012 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
 # the file called "COPYING".
 #
 # Contact Information:
-# Linux NICS <linux.nics@intel.com>
 # e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 #
@@ -32,6 +31,6 @@
 
 obj-$(CONFIG_IGB) += igb.o
 
-igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
-	     e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
-
+igb-objs := igb_main.o e1000_82575.o e1000_mac.o e1000_nvm.o e1000_phy.o \
+	e1000_manage.o igb_param.o igb_ethtool.o kcompat.o e1000_api.o \
+	e1000_mbx.o igb_vmdq.o igb_sysfs.o igb_procfs.o
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 08bdc33715ee..6df23ea6246c 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -25,78 +25,116 @@
 *******************************************************************************/
 
-/* e1000_82575
- * e1000_82576
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-#include "e1000_mac.h"
-#include "e1000_82575.h"
-
-static s32 igb_get_invariants_82575(struct e1000_hw *);
-static s32 igb_acquire_phy_82575(struct e1000_hw *);
-static void igb_release_phy_82575(struct e1000_hw *);
-static s32 igb_acquire_nvm_82575(struct e1000_hw *);
-static void igb_release_nvm_82575(struct e1000_hw *);
-static s32 igb_check_for_link_82575(struct e1000_hw *);
-static s32 igb_get_cfg_done_82575(struct e1000_hw *);
-static s32 igb_init_hw_82575(struct e1000_hw *);
-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
-static s32 igb_reset_hw_82575(struct e1000_hw *);
-static s32 igb_reset_hw_82580(struct e1000_hw *);
-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
-static s32 igb_setup_copper_link_82575(struct e1000_hw *);
-static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
-static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
-					      u16 *);
-static s32 igb_get_phy_id_82575(struct e1000_hw *);
-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
-static bool igb_sgmii_active_82575(struct e1000_hw *);
-static s32 igb_reset_init_script_82575(struct e1000_hw *);
-static s32 igb_read_mac_addr_82575(struct e1000_hw *);
-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
-	{ 36, 72, 144, 1, 2, 4, 8, 16,
-	  35, 70, 140 };
+#include "e1000_api.h"
+
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+					u16 *duplex);
+static s32 e1000_init_hw_82575(struct e1000_hw *hw);
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+				    u32 offset, u16 *data);
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+				     u32 offset, u16 data);
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+					 bool active);
+static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+					 bool active);
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+					 bool active);
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+					   u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+						u16 *speed, u16 *duplex);
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+						 u16 offset);
+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+						   u16 offset);
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
+static void e1000_clear_vfta_i350(struct e1000_hw *hw);
+
+static void e1000_i2c_start(struct e1000_hw *hw);
+static void e1000_i2c_stop(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
+static bool e1000_get_i2c_data(u32 *i2cctl);
+
+static const u16 e1000_82580_rxpbs_table[] = {
+	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
 #define E1000_82580_RXPBS_TABLE_SIZE \
 	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
 /**
- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
  * @hw: pointer to the HW structure
  *
  * Called to determine if the I2C pins are being used for I2C or as an
  * external MDIO interface since the two options are mutually exclusive.
  **/
-static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
 {
 	u32 reg = 0;
 	bool ext_mdio = false;
 
+	DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
 	switch (hw->mac.type) {
 	case e1000_82575:
 	case e1000_82576:
-		reg = rd32(E1000_MDIC);
+		reg = E1000_READ_REG(hw, E1000_MDIC);
 		ext_mdio = !!(reg & E1000_MDIC_DEST);
 		break;
 	case e1000_82580:
 	case e1000_i350:
-		reg = rd32(E1000_MDICNFG);
+		reg = E1000_READ_REG(hw, E1000_MDICNFG);
 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
 		break;
 	default:
@@ -105,279 +143,334 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
 	return ext_mdio;
 }
 
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ * e1000_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; - u32 eecd; - s32 ret_val; - u16 size; - u32 ctrl_ext = 0; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; - switch (hw->device_id) { - case E1000_DEV_ID_82575EB_COPPER: - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82575GB_QUAD_COPPER: - mac->type = e1000_82575; - break; - case E1000_DEV_ID_82576: - case E1000_DEV_ID_82576_NS: - case E1000_DEV_ID_82576_NS_SERDES: - case E1000_DEV_ID_82576_FIBER: - case E1000_DEV_ID_82576_SERDES: - case E1000_DEV_ID_82576_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER_ET2: - case E1000_DEV_ID_82576_SERDES_QUAD: - mac->type = e1000_82576; - break; - case E1000_DEV_ID_82580_COPPER: - case E1000_DEV_ID_82580_FIBER: - case E1000_DEV_ID_82580_QUAD_FIBER: - case E1000_DEV_ID_82580_SERDES: - case E1000_DEV_ID_82580_SGMII: - case E1000_DEV_ID_82580_COPPER_DUAL: - case E1000_DEV_ID_DH89XXCC_SGMII: - case E1000_DEV_ID_DH89XXCC_SERDES: - case E1000_DEV_ID_DH89XXCC_BACKPLANE: - case E1000_DEV_ID_DH89XXCC_SFP: - mac->type = e1000_82580; - break; - case E1000_DEV_ID_I350_COPPER: - case E1000_DEV_ID_I350_FIBER: - case E1000_DEV_ID_I350_SERDES: - case E1000_DEV_ID_I350_SGMII: - mac->type = e1000_i350; - break; - default: - return -E1000_ERR_MAC_INIT; - break; + DEBUGFUNC("e1000_init_phy_params_82575"); + + phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; } - /* Set media type */ - /* - * The 82575 uses bits 22:23 for link mode. The mode can be changed - * based on the EEPROM. We cannot rely upon device ID. There - * is no distinguishable difference between fiber and internal - * SerDes mode on the 82575. There can be an external PHY attached - * on the SGMII interface. For this, we'll set sgmii_active to true. 
- */ - phy->media_type = e1000_media_type_copper; - dev_spec->sgmii_active = false; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82575; - ctrl_ext = rd32(E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - break; - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: - hw->phy.media_type = e1000_media_type_internal_serdes; - break; - default: - break; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + if (e1000_sgmii_active_82575(hw)) { + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; } - /* Set mta register count */ - mac->mta_reg_count = 128; - /* Set rar entry count */ - mac->rar_entry_count = E1000_RAR_ENTRIES_82575; - if (mac->type == e1000_82576) - mac->rar_entry_count = E1000_RAR_ENTRIES_82576; - if (mac->type == e1000_82580) - mac->rar_entry_count = E1000_RAR_ENTRIES_82580; - if (mac->type == e1000_i350) - mac->rar_entry_count = E1000_RAR_ENTRIES_I350; - /* reset */ - if (mac->type >= e1000_82580) - mac->ops.reset_hw = igb_reset_hw_82580; - else - mac->ops.reset_hw = igb_reset_hw_82575; - /* Set if part includes ASF firmware */ - mac->asf_firmware_present = true; - /* Set if manageability features are enabled. */ - mac->arc_subsystem_valid = - (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; - /* enable EEE on i350 parts */ - if (mac->type == e1000_i350) - dev_spec->eee_disable = false; - else - dev_spec->eee_disable = true; - /* physical interface link setup */ - mac->ops.setup_physical_interface = - (hw->phy.media_type == e1000_media_type_copper) - ? igb_setup_copper_link_82575 - : igb_setup_serdes_link_82575; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + default: + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } - /* NVM initialization */ - eecd = rd32(E1000_EECD); + /* Set phy->phy_addr and phy->id. 
*/ + ret_val = e1000_get_phy_id_82575(hw); - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; break; + default: + ret_val = -E1000_ERR_PHY; + goto out; } - nvm->type = e1000_nvm_eeprom_spi; +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); - /* * Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; - /* - * Check for invalid size + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); + if (size > 15) size = 15; - } + nvm->word_size = 1 << size; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + if (nvm->word_size == (1 << 15)) nvm->page_size = 128; - /* NVM Function Pointers */ - nvm->ops.acquire = igb_acquire_nvm_82575; + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; + nvm->ops.read = e1000_read_nvm_eerd; else - nvm->ops.read = igb_read_nvm_spi; + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; - nvm->ops.release = igb_release_nvm_82575; + /* override generic family function pointers for specific descendants */ switch (hw->mac.type) { case e1000_82580: - nvm->ops.validate = igb_validate_nvm_checksum_82580; - nvm->ops.update = igb_update_nvm_checksum_82580; - break; - case e1000_i350: - nvm->ops.validate = igb_validate_nvm_checksum_i350; - nvm->ops.update = igb_update_nvm_checksum_i350; + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; break; - default: - nvm->ops.validate = igb_validate_nvm_checksum; - nvm->ops.update = igb_update_nvm_checksum; - } - nvm->ops.write = igb_write_nvm_spi; - - /* if part supports SR-IOV then initialize mailbox parameters */ - switch (mac->type) { - case e1000_82576: case e1000_i350: - igb_init_mbx_params_pf(hw); + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; break; default: break; } - /* setup PHY parameters */ - if (phy->media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - return 0; - } + return E1000_SUCCESS; +} - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; - ctrl_ext = rd32(E1000_CTRL_EXT); + DEBUGFUNC("e1000_init_mac_params_82575"); - /* PHY function pointers */ - if (igb_sgmii_active_82575(hw)) { - phy->ops.reset = igb_phy_hw_reset_sgmii_82575; - ctrl_ext |= E1000_CTRL_I2C_ENA; - } else { - phy->ops.reset = igb_phy_hw_reset; - ctrl_ext &= ~E1000_CTRL_I2C_ENA; + /* Derives media type */ + e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350) { + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + /* Enable EEE default settings for i350 */ + dev_spec->eee_disable = false; } - wr32(E1000_CTRL_EXT, ctrl_ext); - igb_reset_mdicnfg_82580(hw); + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. 
*/ + mac->arc_subsystem_valid = + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); + + /* Function pointers */ - if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; - } else if (hw->mac.type >= e1000_82580) { - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* receive address register setting */ + mac->ops.rar_set = e1000_rar_set_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + if (hw->mac.type == e1000_i350) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; } else { - phy->ops.read_reg = igb_read_phy_reg_igp; - phy->ops.write_reg = igb_write_phy_reg_igp; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; } + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* get thermal sensor data */ + mac->ops.get_thermal_sensor_data = + e1000_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + e1000_init_thermal_sensor_thresh_generic; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} - /* set lan id */ - hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> - E1000_STATUS_FUNC_SHIFT; - - /* Set phy->phy_addr and phy->id. 
*/ - ret_val = igb_get_phy_id_82575(hw); - if (ret_val) - return ret_val; - - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: - case M88E1111_I_PHY_ID: - phy->type = e1000_phy_m88; - phy->ops.get_phy_info = igb_get_phy_info_m88; - - if (phy->id == I347AT4_E_PHY_ID || - phy->id == M88E1112_E_PHY_ID) - phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; - else - phy->ops.get_cable_length = igb_get_cable_length_m88; - - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - break; - case IGP03E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; - phy->ops.get_phy_info = igb_get_phy_info_igp; - phy->ops.get_cable_length = igb_get_cable_length_igp_2; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; - break; - case I82580_I_PHY_ID: - case I350_I_PHY_ID: - phy->type = e1000_phy_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; - phy->ops.get_cable_length = igb_get_cable_length_82580; - phy->ops.get_phy_info = igb_get_phy_info_82580; - break; - default: - return -E1000_ERR_PHY; - } +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); - return 0; + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; } /** - * igb_acquire_phy_82575 - Acquire rights to access PHY + * e1000_acquire_phy_82575 - Acquire rights to access PHY * @hw: pointer to the HW structure * - * Acquire access rights to the correct PHY. This is a - * function pointer entry point called by the api module. + * Acquire access rights to the correct PHY. **/ -static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; + DEBUGFUNC("e1000_acquire_phy_82575"); + if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) @@ -385,20 +478,21 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - return igb_acquire_swfw_sync_82575(hw, mask); + return hw->mac.ops.acquire_swfw_sync(hw, mask); } /** - * igb_release_phy_82575 - Release rights to access PHY + * e1000_release_phy_82575 - Release rights to access PHY * @hw: pointer to the HW structure * - * A wrapper to release access rights to the correct PHY. This is a - * function pointer entry point called by the api module. + * A wrapper to release access rights to the correct PHY. 
**/ -static void igb_release_phy_82575(struct e1000_hw *hw) +static void e1000_release_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; + DEBUGFUNC("e1000_release_phy_82575"); + if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) @@ -406,11 +500,11 @@ static void igb_release_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - igb_release_swfw_sync_82575(hw, mask); + hw->mac.ops.release_swfw_sync(hw, mask); } /** - * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -418,13 +512,15 @@ static void igb_release_phy_82575(struct e1000_hw *hw) * Reads the PHY register at offset using the serial gigabit media independent * interface and stores the retrieved information in data. **/ -static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val = -E1000_ERR_PARAM; + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %u is out of range\n", offset); + DEBUGOUT1("PHY Address %u is out of range\n", offset); goto out; } @@ -432,7 +528,7 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, if (ret_val) goto out; - ret_val = igb_read_phy_reg_i2c(hw, offset, data); + ret_val = e1000_read_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); @@ -441,7 +537,7 @@ out: } /** - * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset @@ -449,14 +545,15 @@ out: * Writes the data to PHY register at the offset using the serial gigabit * media independent interface. **/ -static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val = -E1000_ERR_PARAM; + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %d is out of range\n", offset); + DEBUGOUT1("PHY Address %d is out of range\n", offset); goto out; } @@ -464,7 +561,7 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, if (ret_val) goto out; - ret_val = igb_write_phy_reg_i2c(hw, offset, data); + ret_val = e1000_write_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); @@ -473,20 +570,22 @@ out: } /** - * igb_get_phy_id_82575 - Retrieve PHY addr and id + * e1000_get_phy_id_82575 - Retrieve PHY addr and id * @hw: pointer to the HW structure * * Retrieves the PHY address and ID for both PHY's which do and do not use * sgmi interface. **/ -static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 phy_id; u32 ctrl_ext; u32 mdic; + DEBUGFUNC("e1000_get_phy_id_82575"); + /* * For SGMII PHYs, we try the list of possible addresses until * we find one that works. For non-SGMII PHYs @@ -494,23 +593,23 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) * work. The result of this function should mean phy->phy_addr * and phy->id are set correctly. 
*/ - if (!(igb_sgmii_active_82575(hw))) { + if (!e1000_sgmii_active_82575(hw)) { phy->addr = 1; - ret_val = igb_get_phy_id(hw); + ret_val = e1000_get_phy_id(hw); goto out; } - if (igb_sgmii_uses_mdio_82575(hw)) { + if (e1000_sgmii_uses_mdio_82575(hw)) { switch (hw->mac.type) { case e1000_82575: case e1000_82576: - mdic = rd32(E1000_MDIC); + mdic = E1000_READ_REG(hw, E1000_MDIC); mdic &= E1000_MDIC_PHY_MASK; phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; break; case e1000_82580: case e1000_i350: - mdic = rd32(E1000_MDICNFG); + mdic = E1000_READ_REG(hw, E1000_MDICNFG); mdic &= E1000_MDICNFG_PHY_MASK; phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; break; @@ -519,25 +618,26 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) goto out; break; } - ret_val = igb_get_phy_id(hw); + ret_val = e1000_get_phy_id(hw); goto out; } /* Power on sgmii phy if it is disabled */ - ctrl_ext = rd32(E1000_CTRL_EXT); - wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); - wrfl(); - msleep(300); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + E1000_WRITE_FLUSH(hw); + msec_delay(300); /* * The address field in the I2CCMD register is 3 bits and 0 is invalid. * Therefore, we need to test 1-7 */ for (phy->addr = 1; phy->addr < 8; phy->addr++) { - ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); - if (ret_val == 0) { - hw_dbg("Vendor ID 0x%08X read at address %u\n", - phy_id, phy->addr); + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == E1000_SUCCESS) { + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); /* * At the time of this writing, The M88 part is * the only supported SGMII PHY product. @@ -545,7 +645,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) if (phy_id == M88_VENDOR) break; } else { - hw_dbg("PHY address %u was unreadable\n", phy->addr); + DEBUGOUT1("PHY address %u was unreadable\n", + phy->addr); } } @@ -553,34 +654,38 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) if (phy->addr == 8) { phy->addr = 0; ret_val = -E1000_ERR_PHY; - goto out; } else { - ret_val = igb_get_phy_id(hw); + ret_val = e1000_get_phy_id(hw); } /* restore previous sfp cage power state */ - wr32(E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); out: return ret_val; } /** - * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY using the serial gigabit media independent interface. **/ -static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { - s32 ret_val; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); /* * This isn't a true "hard" reset, but is the only reset * available to us at this time. 
*/ - hw_dbg("Soft resetting SGMII attached PHY...\n"); + DEBUGOUT("Soft resetting SGMII attached PHY...\n"); + + if (!(hw->phy.ops.write_reg)) + goto out; /* * SFP documentation requires the following to configure the SPF module @@ -590,14 +695,14 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) if (ret_val) goto out; - ret_val = igb_phy_sw_reset(hw); + ret_val = hw->phy.ops.commit(hw); out: return ret_val; } /** - * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * @@ -609,12 +714,17 @@ out: * This is a function pointer entry point only called by * PHY setup routines. **/ -static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; + s32 ret_val = E1000_SUCCESS; u16 data; + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) goto out; @@ -622,22 +732,22 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) if (active) { data |= IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); + &data); data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); + data); if (ret_val) goto out; } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most @@ -646,24 +756,28 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } @@ -674,67 +788,193 @@ out: } /** - * igb_acquire_nvm_82575 - Request for access to EEPROM + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable * - * Acquire the necessary semaphores for exclusive access to the EEPROM. - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. 
LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. **/ -static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) { - s32 ret_val; + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; - ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - if (ret_val) - goto out; + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); - ret_val = igb_acquire_nvm(hw); + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); - if (ret_val) - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (active) { + data |= E1000_82580_PM_D0_LPLU; -out: + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); return ret_val; } /** - * igb_release_nvm_82575 - Release exclusive access to EEPROM + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu * - * Stop any current commands to the EEPROM and clear the EEPROM request bit, - * then release the semaphores acquired. + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. **/ -static void igb_release_nvm_82575(struct e1000_hw *hw) +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) { - igb_release_nvm(hw); - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return ret_val; } /** - * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * e1000_acquire_nvm_82575 - Request for access to EEPROM * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire * - * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + + ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_nvm_generic(hw); + + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask * will also specify which port we're acquiring the lock for. 
**/ -static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; u32 swmask = mask; u32 fwmask = mask << 16; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + while (i < timeout) { - if (igb_get_hw_semaphore(hw)) { + if (e1000_get_hw_semaphore_generic(hw)) { ret_val = -E1000_ERR_SWFW_SYNC; goto out; } - swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); if (!(swfw_sync & (fwmask | swmask))) break; @@ -742,146 +982,180 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) * Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ - igb_put_hw_semaphore(hw); - mdelay(5); + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); i++; } if (i == timeout) { - hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); ret_val = -E1000_ERR_SWFW_SYNC; goto out; } swfw_sync |= swmask; - wr32(E1000_SW_FW_SYNC, swfw_sync); + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); - igb_put_hw_semaphore(hw); + e1000_put_hw_semaphore_generic(hw); out: return ret_val; } /** - * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Release the SW/FW semaphore used to access the PHY or NVM. The mask * will also specify which port we're releasing the lock for. **/ -static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; - while (igb_get_hw_semaphore(hw) != 0); - /* Empty */ + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ - swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); swfw_sync &= ~mask; - wr32(E1000_SW_FW_SYNC, swfw_sync); + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); - igb_put_hw_semaphore(hw); + e1000_put_hw_semaphore_generic(hw); } /** - * igb_get_cfg_done_82575 - Read config done bit + * e1000_get_cfg_done_82575 - Read config done bit * @hw: pointer to the HW structure * * Read the management control register for the config done bit for * completion status. NOTE: silicon which is EEPROM-less will fail trying * to read the config done bit, so an error is *ONLY* logged and returns - * 0. If we were to return with error, EEPROM-less silicon + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon * would not be able to be reset or change link. 
**/ -static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u32 mask = E1000_NVM_CFG_DONE_PORT_0; - if (hw->bus.func == 1) + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) mask = E1000_NVM_CFG_DONE_PORT_1; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_NVM_CFG_DONE_PORT_2; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_NVM_CFG_DONE_PORT_3; - while (timeout) { - if (rd32(E1000_EEMNGCTL) & mask) + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) break; - msleep(1); + msec_delay(1); timeout--; } if (!timeout) - hw_dbg("MNG configuration cycle has not completed.\n"); + DEBUGOUT("MNG configuration cycle has not completed.\n"); /* If EEPROM is not marked present, init the PHY manually */ - if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && (hw->phy.type == e1000_phy_igp_3)) - igb_phy_init_script_igp3(hw); + e1000_phy_init_script_igp3(hw); return ret_val; } /** - * igb_check_for_link_82575 - Check for link + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link * @hw: pointer to the HW structure * * If sgmii is enabled, then use the pcs register to determine link, otherwise * use the generic interface for determining link. **/ -static s32 igb_check_for_link_82575(struct e1000_hw *hw) +static s32 e1000_check_for_link_82575(struct e1000_hw *hw) { s32 ret_val; u16 speed, duplex; + DEBUGFUNC("e1000_check_for_link_82575"); + if (hw->phy.media_type != e1000_media_type_copper) { - ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, - &duplex); + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); /* * Use this flag to determine if link needs to be checked or - * not. If we have link clear the flag so that we do not + * not. If we have link clear the flag so that we do not * continue to check for link. 
*/ hw->mac.get_link_status = !hw->mac.serdes_has_link; + } else { - ret_val = igb_check_for_copper_link(hw); + ret_val = e1000_check_for_copper_link_generic(hw); } return ret_val; } /** - * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown * @hw: pointer to the HW structure **/ -void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) { u32 reg; + DEBUGFUNC("e1000_power_up_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && - !igb_sgmii_active_82575(hw)) + !e1000_sgmii_active_82575(hw)) return; /* Enable PCS to turn on link */ - reg = rd32(E1000_PCS_CFG0); + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); reg |= E1000_PCS_CFG_PCS_EN; - wr32(E1000_PCS_CFG0, reg); + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); /* Power up the laser */ - reg = rd32(E1000_CTRL_EXT); + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg &= ~E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, reg); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* flush the write to verify completion */ - wrfl(); - msleep(1); + E1000_WRITE_FLUSH(hw); + msec_delay(1); } /** - * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex @@ -889,12 +1163,14 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw) * Using the physical coding sub-layer (PCS), retrieve the current speed and * duplex, then store the values in the pointers provided. **/ -static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, - u16 *duplex) +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) { struct e1000_mac_info *mac = &hw->mac; u32 pcs; + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + /* Set up defaults for the return values of this function */ mac->serdes_has_link = false; *speed = 0; @@ -905,173 +1181,175 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, * the status register is not accurate. The PCS status register is * used instead. */ - pcs = rd32(E1000_PCS_LSTAT); + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); /* - * The link up bit determines when link is up on autoneg. The sync ok - * gets set once both sides sync up and agree upon link. Stable link - * can be determined by checking for both link up and link sync ok + * The link up bit determines when link is up on autoneg. 
*/ - if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + if (pcs & E1000_PCS_LSTS_LINK_OK) { mac->serdes_has_link = true; /* Detect and store PCS speed */ - if (pcs & E1000_PCS_LSTS_SPEED_1000) { + if (pcs & E1000_PCS_LSTS_SPEED_1000) *speed = SPEED_1000; - } else if (pcs & E1000_PCS_LSTS_SPEED_100) { + else if (pcs & E1000_PCS_LSTS_SPEED_100) *speed = SPEED_100; - } else { + else *speed = SPEED_10; - } /* Detect and store PCS duplex */ - if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) *duplex = FULL_DUPLEX; - } else { + else *duplex = HALF_DUPLEX; - } } - return 0; + return E1000_SUCCESS; } /** - * igb_shutdown_serdes_link_82575 - Remove link during power down + * e1000_shutdown_serdes_link_82575 - Remove link during power down * @hw: pointer to the HW structure * - * In the case of fiber serdes, shut down optics and PCS on driver unload + * In the case of serdes shut down sfp and PCS on driver unload * when management pass thru is not enabled. **/ -void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) { u32 reg; - if (hw->phy.media_type != e1000_media_type_internal_serdes && - igb_sgmii_active_82575(hw)) + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) return; - if (!igb_enable_mng_pass_thru(hw)) { + if (!e1000_enable_mng_pass_thru(hw)) { /* Disable PCS to turn off link */ - reg = rd32(E1000_PCS_CFG0); + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); reg &= ~E1000_PCS_CFG_PCS_EN; - wr32(E1000_PCS_CFG0, reg); + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); /* shutdown the laser */ - reg = rd32(E1000_CTRL_EXT); + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg |= E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, reg); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* flush the write to verify completion */ - wrfl(); - msleep(1); + E1000_WRITE_FLUSH(hw); + msec_delay(1); } + + return; } /** - * igb_reset_hw_82575 - Reset hardware + * e1000_reset_hw_82575 - Reset hardware * @hw: pointer to the HW structure * - * This resets the hardware into a known state. This is a - * function pointer entry point called by the api module. + * This resets the hardware into a known state. **/ -static s32 igb_reset_hw_82575(struct e1000_hw *hw) +static s32 e1000_reset_hw_82575(struct e1000_hw *hw) { - u32 ctrl, icr; + u32 ctrl; s32 ret_val; + DEBUGFUNC("e1000_reset_hw_82575"); + /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ - ret_val = igb_disable_pcie_master(hw); + ret_val = e1000_disable_pcie_master_generic(hw); if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); + DEBUGOUT("PCI-E Master disable polling has failed.\n"); /* set the completion timeout for interface */ - ret_val = igb_set_pcie_completion_timeout(hw); - if (ret_val) { - hw_dbg("PCI-E Set completion timeout has failed.\n"); - } + ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) + DEBUGOUT("PCI-E Set completion timeout has failed.\n"); - hw_dbg("Masking off all interrupts\n"); - wr32(E1000_IMC, 0xffffffff); + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); - wr32(E1000_RCTL, 0); - wr32(E1000_TCTL, E1000_TCTL_PSP); - wrfl(); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); - msleep(10); + msec_delay(10); - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); - hw_dbg("Issuing a global reset to MAC\n"); - wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); - ret_val = igb_get_auto_rd_done(hw); + ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ - hw_dbg("Auto Read Done did not complete\n"); + DEBUGOUT("Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ - if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) - igb_reset_init_script_82575(hw); + if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) + e1000_reset_init_script_82575(hw); /* Clear any pending interrupt events. */ - wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); /* Install any alternate MAC address into RAR0 */ - ret_val = igb_check_alt_mac_addr(hw); + ret_val = e1000_check_alt_mac_addr_generic(hw); return ret_val; } /** - * igb_init_hw_82575 - Initialize hardware + * e1000_init_hw_82575 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. 
**/ -static s32 igb_init_hw_82575(struct e1000_hw *hw) +static s32 e1000_init_hw_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; u16 i, rar_count = mac->rar_entry_count; + DEBUGFUNC("e1000_init_hw_82575"); + /* Initialize identification LED */ - ret_val = igb_id_led_init(hw); + ret_val = mac->ops.id_led_init(hw); if (ret_val) { - hw_dbg("Error initializing identification LED\n"); + DEBUGOUT("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Disabling VLAN filtering */ - hw_dbg("Initializing the IEEE VLAN\n"); - if (hw->mac.type == e1000_i350) - igb_clear_vfta_i350(hw); - else - igb_clear_vfta(hw); + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); /* Setup the receive address */ - igb_init_rx_addrs(hw, rar_count); + e1000_init_rx_addrs_generic(hw, rar_count); /* Zero out the Multicast HASH table */ - hw_dbg("Zeroing the MTA\n"); + DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) - array_wr32(E1000_MTA, i, 0); + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Zero out the Unicast HASH table */ - hw_dbg("Zeroing the UTA\n"); + DEBUGOUT("Zeroing the UTA\n"); for (i = 0; i < mac->uta_reg_count; i++) - array_wr32(E1000_UTA, i, 0); + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); /* Setup link and flow control */ - ret_val = igb_setup_link(hw); + ret_val = mac->ops.setup_link(hw); + + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; /* * Clear all of the statistics registers (clear on read). It is @@ -1079,56 +1357,59 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) * because the symbol error count will increment wildly if there * is no link. */ - igb_clear_hw_cntrs_82575(hw); + e1000_clear_hw_cntrs_82575(hw); return ret_val; } /** - * igb_setup_copper_link_82575 - Configure copper link settings + * e1000_setup_copper_link_82575 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we check * for link, once link is established calls to configure collision distance * and flow control are called. 
**/ -static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; - ctrl = rd32(E1000_CTRL); + DEBUGFUNC("e1000_setup_copper_link_82575"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - ret_val = igb_setup_serdes_link_82575(hw); + ret_val = e1000_setup_serdes_link_82575(hw); if (ret_val) goto out; - if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { /* allow time for SFP cage to power up phy */ - msleep(300); + msec_delay(300); ret_val = hw->phy.ops.reset(hw); if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); + DEBUGOUT("Error resetting the PHY.\n"); goto out; } } switch (hw->phy.type) { case e1000_phy_m88: if (hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) - ret_val = igb_copper_link_setup_m88_gen2(hw); + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID) + ret_val = e1000_copper_link_setup_m88_gen2(hw); else - ret_val = igb_copper_link_setup_m88(hw); + ret_val = e1000_copper_link_setup_m88(hw); break; case e1000_phy_igp_3: - ret_val = igb_copper_link_setup_igp(hw); + ret_val = e1000_copper_link_setup_igp(hw); break; case e1000_phy_82580: - ret_val = igb_copper_link_setup_82580(hw); + ret_val = e1000_copper_link_setup_82577(hw); break; default: ret_val = -E1000_ERR_PHY; @@ -1138,13 +1419,13 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) if (ret_val) goto out; - ret_val = igb_setup_copper_link(hw); + ret_val = e1000_setup_copper_link_generic(hw); out: return ret_val; } /** - * igb_setup_serdes_link_82575 - Setup link for serdes + * e1000_setup_serdes_link_82575 - Setup link for serdes * @hw: pointer to the HW structure * * Configure the physical coding sub-layer (PCS) link. The PCS link is @@ -1152,45 +1433,40 @@ out: * interface (sgmii), or serdes fiber is being used. Configures the link * for auto-negotiation or forces speed/duplex. **/ -static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) { u32 ctrl_ext, ctrl_reg, reg; bool pcs_autoneg; s32 ret_val = E1000_SUCCESS; u16 data; + DEBUGFUNC("e1000_setup_serdes_link_82575"); + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && - !igb_sgmii_active_82575(hw)) + !e1000_sgmii_active_82575(hw)) return ret_val; - /* * On the 82575, SerDes loopback mode persists until it is * explicitly turned off or a power cycle is performed. A read to * the register does not indicate its status. Therefore, we ensure * loopback mode is disabled during initialization.
*/ - wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); /* power on the sfp cage if present */ - ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - ctrl_reg = rd32(E1000_CTRL); + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); ctrl_reg |= E1000_CTRL_SLU; - if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { - /* set both sw defined pins */ + /* set both sw defined pins on 82575/82576 */ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; - /* Set switch control to serdes energy detect */ - reg = rd32(E1000_CONNSW); - reg |= E1000_CONNSW_ENRGSRC; - wr32(E1000_CONNSW, reg); - } - - reg = rd32(E1000_PCS_LCTL); + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); /* default pcs_autoneg to the same setting as mac autoneg */ pcs_autoneg = hw->mac.autoneg; @@ -1205,12 +1481,13 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = false; + /* fall through to default case */ default: if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); if (ret_val) { - printk(KERN_DEBUG "NVM Read Error\n\n"); + DEBUGOUT("NVM Read Error\n"); return ret_val; } @@ -1224,14 +1501,14 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) * link either autoneg or be forced to 1000/Full */ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | - E1000_CTRL_FD | E1000_CTRL_FRCDPX; + E1000_CTRL_FD | E1000_CTRL_FRCDPX; /* set speed of 1000/Full if speed/duplex is forced */ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; break; } - wr32(E1000_CTRL, ctrl_reg); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); /* * New SerDes mode allows for forcing speed or autonegotiating speed @@ -1240,7 +1517,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) * However, both are supported by the hardware and some drivers/tools. */ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | - E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); /* * We force flow control to prevent the CTRL register values from being @@ -1252,174 +1529,452 @@ /* Set PCS register for autoneg */ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ - hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { /* Set PCS register for forced link */ - reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on a few settings. + * The following are taken into account: + * - link mode set in the current port Init Control Word #3 + * - current link mode settings in CSR register + * - MDIO vs.
I2C PHY control interface chosen + * - SFP module media type + **/ +static s32 e1000_get_media_type_82575(struct e1000_hw *hw) +{ + u32 lan_id = 0; + s32 ret_val = E1000_ERR_CONFIG; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + u32 ctrl_ext = 0; + u32 current_link_mode = 0; + u16 init_ctrl_wd_3 = 0; + u8 init_ctrl_wd_3_offset = 0; + u8 init_ctrl_wd_3_bit_offset = 0; + + /* Set internal phy as default */ + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + /* + * Check if NVM access method is attached already. + * If it is then Init Control Word #3 is considered + * otherwise runtime CSR register content is taken. + */ + + /* Get CSR setting */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* Get link mode setting */ + if ((hw->nvm.ops.read) && (hw->nvm.ops.read != e1000_null_read_nvm)) { + /* Take link mode from EEPROM */ + + /* + * Get LAN port ID to derive its + * adequate Init Control Word #3 + */ + lan_id = ((E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LAN_ID_MASK) >> E1000_STATUS_LAN_ID_OFFSET); + /* + * Derive Init Control Word #3 offset + * and mask to pick up link mode setting. + */ + if (hw->mac.type < e1000_82580) { + init_ctrl_wd_3_offset = lan_id ? + NVM_INIT_CONTROL3_PORT_A : NVM_INIT_CONTROL3_PORT_B; + init_ctrl_wd_3_bit_offset = NVM_WORD24_LNK_MODE_OFFSET; + } else { + init_ctrl_wd_3_offset = + NVM_82580_LAN_FUNC_OFFSET(lan_id) + + NVM_INIT_CONTROL3_PORT_A; + init_ctrl_wd_3_bit_offset = + NVM_WORD24_82580_LNK_MODE_OFFSET; + } + /* Read Init Control Word #3*/ + hw->nvm.ops.read(hw, init_ctrl_wd_3_offset, 1, &init_ctrl_wd_3); + current_link_mode = init_ctrl_wd_3; + /* + * Switch to CSR for all but internal PHY. + */ + if ((init_ctrl_wd_3 << (E1000_CTRL_EXT_LINK_MODE_OFFSET - + init_ctrl_wd_3_bit_offset)) != + E1000_CTRL_EXT_LINK_MODE_GMII) { + current_link_mode = ctrl_ext; + init_ctrl_wd_3_bit_offset = + E1000_CTRL_EXT_LINK_MODE_OFFSET; + } + } else { + /* Take link mode from CSR */ + current_link_mode = ctrl_ext; + init_ctrl_wd_3_bit_offset = E1000_CTRL_EXT_LINK_MODE_OFFSET; + } + + /* + * Align link mode bits to + * their CTRL_EXT location. + */ + current_link_mode <<= (E1000_CTRL_EXT_LINK_MODE_OFFSET - + init_ctrl_wd_3_bit_offset); + current_link_mode &= E1000_CTRL_EXT_LINK_MODE_MASK; - hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + switch (current_link_mode) { + + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + current_link_mode = E1000_CTRL_EXT_LINK_MODE_1000BASE_KX; + break; + case E1000_CTRL_EXT_LINK_MODE_GMII: + hw->phy.media_type = e1000_media_type_copper; + current_link_mode = E1000_CTRL_EXT_LINK_MODE_GMII; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* Get phy control interface type set (MDIO vs. 
I2C)*/ + if (e1000_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + current_link_mode = E1000_CTRL_EXT_LINK_MODE_SGMII; + } else { + ret_val = e1000_set_sfp_media_type_82575(hw); + if (ret_val != E1000_SUCCESS) + goto out; + if (hw->phy.media_type == + e1000_media_type_internal_serdes) { + current_link_mode = + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + } else if (hw->phy.media_type == + e1000_media_type_copper) { + current_link_mode = + E1000_CTRL_EXT_LINK_MODE_SGMII; + } + } + break; + default: + DEBUGOUT("Link mode mask doesn't fit bit field size\n"); + goto out; + } + /* + * Do not change current link mode setting + * if media type is fibre or has not been + * recognized. + */ + if ((hw->phy.media_type != e1000_media_type_unknown) && + (hw->phy.media_type != e1000_media_type_fiber)) { + /* Update link mode */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | + current_link_mode); } - wr32(E1000_PCS_LCTL, reg); + ret_val = E1000_SUCCESS; +out: + /* + * If media type was not identified then return media type + * defined by the CTRL_EXT settings. + */ + if (hw->phy.media_type == e1000_media_type_unknown) { + if (current_link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) + hw->phy.media_type = e1000_media_type_copper; + else + hw->phy.media_type = e1000_media_type_internal_serdes; + } - if (!igb_sgmii_active_82575(hw)) - igb_force_mac_fc(hw); + return ret_val; +} +/** + * e1000_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module + * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct sfp_e1000_flags eth_flags = {0}; + u8 tranceiver_type = 0; + + /* Turn I2C interface ON */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + /* Read SFP module data */ + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val != E1000_SUCCESS) + goto out; + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)&eth_flags); + if (ret_val != E1000_SUCCESS) + goto out; + /* + * Check if there is some SFP + * module plugged and powered + */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags.e1000_base_lx || eth_flags.e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags.e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + DEBUGOUT("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = E1000_SUCCESS; +out: + /* Restore I2C interface setting */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); return ret_val; } /** - * igb_sgmii_active_82575 - Return sgmii state + * e1000_valid_led_default_82575 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration.
If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82575"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * e1000_sgmii_active_82575 - Return sgmii state * @hw: pointer to the HW structure * * 82575 silicon has a serialized gigabit media independent interface (sgmii) * which can be enabled for use in the embedded applications. Simply * return the current state of the sgmii interface. **/ -static bool igb_sgmii_active_82575(struct e1000_hw *hw) +static bool e1000_sgmii_active_82575(struct e1000_hw *hw) { struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; return dev_spec->sgmii_active; } /** - * igb_reset_init_script_82575 - Inits HW defaults after reset + * e1000_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure * * Inits recommended HW defaults after a reset when there is no EEPROM * detected. This is only for the 82575. **/ -static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) { + DEBUGFUNC("e1000_reset_init_script_82575"); + if (hw->mac.type == e1000_82575) { - hw_dbg("Running reset init script for 82575\n"); + DEBUGOUT("Running reset init script for 82575\n"); /* SerDes configuration via SERDESCTRL */ - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); /* CCM configuration via CCMCTL register */ - igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); - igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); /* PCIe lanes configuration */ - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); /* PCIe PLL Configuration */ - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); } - return 0; + return E1000_SUCCESS; } /** - * 
igb_read_mac_addr_82575 - Read device MAC address + * e1000_read_mac_addr_82575 - Read device MAC address * @hw: pointer to the HW structure **/ -static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_mac_addr_82575"); /* * If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ - ret_val = igb_check_alt_mac_addr(hw); + ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) goto out; - ret_val = igb_read_mac_addr(hw); + ret_val = e1000_read_mac_addr_generic(hw); out: return ret_val; } /** - * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +static void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ -void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) { + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + /* If the management interface is not enabled, then power down */ - if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) - igb_power_down_phy_copper(hw); + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; } /** - * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. 
**/ -static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) -{ - igb_clear_hw_cntrs_base(hw); - - rd32(E1000_PRC64); - rd32(E1000_PRC127); - rd32(E1000_PRC255); - rd32(E1000_PRC511); - rd32(E1000_PRC1023); - rd32(E1000_PRC1522); - rd32(E1000_PTC64); - rd32(E1000_PTC127); - rd32(E1000_PTC255); - rd32(E1000_PTC511); - rd32(E1000_PTC1023); - rd32(E1000_PTC1522); - - rd32(E1000_ALGNERRC); - rd32(E1000_RXERRC); - rd32(E1000_TNCRS); - rd32(E1000_CEXTERR); - rd32(E1000_TSCTC); - rd32(E1000_TSCTFC); - - rd32(E1000_MGTPRC); - rd32(E1000_MGTPDC); - rd32(E1000_MGTPTC); - - rd32(E1000_IAC); - rd32(E1000_ICRXOC); - - rd32(E1000_ICRXPTC); - rd32(E1000_ICRXATC); - rd32(E1000_ICTXPTC); - rd32(E1000_ICTXATC); - rd32(E1000_ICTXQEC); - rd32(E1000_ICTXQMTC); - rd32(E1000_ICRXDMTC); - - rd32(E1000_CBTMPC); - rd32(E1000_HTDPMC); - rd32(E1000_CBRMPC); - rd32(E1000_RPTHC); - rd32(E1000_HGPTC); - rd32(E1000_HTCBDPC); - rd32(E1000_HGORCL); - rd32(E1000_HGORCH); - rd32(E1000_HGOTCL); - rd32(E1000_HGOTCH); - rd32(E1000_LENERRS); +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); /* This register should not be read in copper configurations */ - if (hw->phy.media_type == e1000_media_type_internal_serdes || - igb_sgmii_active_82575(hw)) - rd32(E1000_SCVPC); + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); } /** - * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable * @hw: pointer to the HW structure * * After rx enable if manageability is enabled then there is likely some @@ -1427,72 +1982,73 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) * function clears the fifos and flushes any packets that came in as rx was * being enabled.
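 *
 * A minimal usage sketch (hypothetical call site): a driver would run
 * the workaround right after setting RCTL.EN on an 82575 whose
 * manageability engine is active:
 *
 *	rctl = E1000_READ_REG(hw, E1000_RCTL);
 *	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
 *	e1000_rx_fifo_flush_82575(hw);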
**/ -void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) { u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; int i, ms_wait; + DEBUGFUNC("e1000_rx_fifo_workaround_82575"); if (hw->mac.type != e1000_82575 || - !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; - /* Disable all RX queues */ + /* Disable all Rx queues */ for (i = 0; i < 4; i++) { - rxdctl[i] = rd32(E1000_RXDCTL(i)); - wr32(E1000_RXDCTL(i), - rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); } /* Poll all queues to verify they have shut down */ for (ms_wait = 0; ms_wait < 10; ms_wait++) { - msleep(1); + msec_delay(1); rx_enabled = 0; for (i = 0; i < 4; i++) - rx_enabled |= rd32(E1000_RXDCTL(i)); + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) break; } if (ms_wait == 10) - hw_dbg("Queue disable timed out after 10ms\n"); + DEBUGOUT("Queue disable timed out after 10ms\n"); /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all * incoming packets are rejected. Set enable and wait 2ms so that * any packet that was coming in as RCTL.EN was set is flushed */ - rfctl = rd32(E1000_RFCTL); - wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); - rlpml = rd32(E1000_RLPML); - wr32(E1000_RLPML, 0); + rlpml = E1000_READ_REG(hw, E1000_RLPML); + E1000_WRITE_REG(hw, E1000_RLPML, 0); - rctl = rd32(E1000_RCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); temp_rctl |= E1000_RCTL_LPE; - wr32(E1000_RCTL, temp_rctl); - wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); - wrfl(); - msleep(2); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + msec_delay(2); - /* Enable RX queues that were previously enabled and restore our + /* Enable Rx queues that were previously enabled and restore our * previous state */ for (i = 0; i < 4; i++) - wr32(E1000_RXDCTL(i), rxdctl[i]); - wr32(E1000_RCTL, rctl); - wrfl(); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); - wr32(E1000_RLPML, rlpml); - wr32(E1000_RFCTL, rfctl); + E1000_WRITE_REG(hw, E1000_RLPML, rlpml); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); /* Flush receive errors generated by workaround */ - rd32(E1000_ROC); - rd32(E1000_RNBC); - rd32(E1000_MPC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_MPC); } /** - * igb_set_pcie_completion_timeout - set pci-e completion timeout + * e1000_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure * * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, @@ -1501,10 +2057,10 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) * increase the value to either 10ms to 200ms for capability version 1 config, * or 16ms to 55ms for version 2. 
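 *
 * A sketch of the decision flow, assuming the usual igb GCR defines
 * (E1000_GCR_CMPL_TMOUT_MASK, E1000_GCR_CAP_VER2 and
 * E1000_GCR_CMPL_TMOUT_10ms) and that a zero timeout field means the
 * value was never configured:
 *
 *	if (!(gcr & E1000_GCR_CMPL_TMOUT_MASK)) {
 *		if (gcr & E1000_GCR_CAP_VER2)
 *			pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
 *		else
 *			gcr |= E1000_GCR_CMPL_TMOUT_10ms;
 *	}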
**/ -static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) { - u32 gcr = rd32(E1000_GCR); - s32 ret_val = 0; + u32 gcr = E1000_READ_REG(hw, E1000_GCR); + s32 ret_val = E1000_SUCCESS; u16 pcie_devctl2; /* only take action if timeout value is defaulted to 0 */ @@ -1525,83 +2081,98 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) * directly in order to set the completion timeout value for * 16ms to 55ms */ - ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); if (ret_val) goto out; pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; - ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; - wr32(E1000_GCR, gcr); + E1000_WRITE_REG(hw, E1000_GCR, gcr); return ret_val; } /** - * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * @pf: Physical Function pool - do not set anti-spoofing for the PF * * enables/disables L2 switch anti-spoofing functionality. **/ -void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) { u32 dtxswc; switch (hw->mac.type) { case e1000_82576: - case e1000_i350: - dtxswc = rd32(E1000_DTXSWC); + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); if (enable) { dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); /* The PF can spoof - it has to in order to * support emulation mode NICs */ - dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + dtxswc ^= (1 << pf | 1 << (pf + + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); } else { dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); } - wr32(E1000_DTXSWC, dtxswc); + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); break; + case e1000_i350: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) { + dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + dtxswc ^= (1 << pf | 1 << (pf + + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + } else { + dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); default: break; } } /** - * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * * enables/disables L2 switch loopback functionality. 
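 *
 * The enable bit (E1000_DTXSWC_VMDQ_LOOPBACK_EN) is the same on both
 * parts, but i350 moved it from the DTXSWC register to TXSWC, hence
 * the per-mac-type switch below. A hypothetical call site would be:
 *
 *	e1000_vmdq_set_loopback_pf(hw, true);
 *
 * issued once the VMDq pools are configured, so that traffic between
 * pools is switched locally instead of leaving the port.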
**/ -void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) { u32 dtxswc; switch (hw->mac.type) { case e1000_82576: - dtxswc = rd32(E1000_DTXSWC); + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); if (enable) dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; else dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; - wr32(E1000_DTXSWC, dtxswc); + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); break; case e1000_i350: - dtxswc = rd32(E1000_TXSWC); + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); if (enable) dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; else dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; - wr32(E1000_TXSWC, dtxswc); + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); break; default: /* Currently no other hardware supports loopback */ @@ -1612,26 +2183,26 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) } /** - * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * * enables/disables replication of packets across multiple pools. **/ -void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) { - u32 vt_ctl = rd32(E1000_VT_CTL); + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); if (enable) vt_ctl |= E1000_VT_CTL_VM_REPL_EN; else vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; - wr32(E1000_VT_CTL, vt_ctl); + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); } /** - * igb_read_phy_reg_82580 - Read 82580 MDI control register + * e1000_read_phy_reg_82580 - Read 82580 MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -1639,16 +2210,17 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ -static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; + DEBUGFUNC("e1000_read_phy_reg_82580"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; - ret_val = igb_read_phy_reg_mdic(hw, offset, data); + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); @@ -1657,23 +2229,24 @@ out: } /** - * igb_write_phy_reg_82580 - Write 82580 MDI control register + * e1000_write_phy_reg_82580 - Write 82580 MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset * * Writes data to MDI control register in the PHY at offset. **/ -static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; + DEBUGFUNC("e1000_write_phy_reg_82580"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; - ret_val = igb_write_phy_reg_mdic(hw, offset, data); + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); @@ -1682,134 +2255,137 @@ out: } /** - * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits * @hw: pointer to the HW structure * * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on * the values found in the EEPROM.
This addresses an issue in which these * bits are not restored from EEPROM after reset. **/ -static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u32 mdicnfg; u16 nvm_data = 0; + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + if (hw->mac.type != e1000_82580) goto out; - if (!igb_sgmii_active_82575(hw)) + if (!e1000_sgmii_active_82575(hw)) goto out; ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); goto out; } - mdicnfg = rd32(E1000_MDICNFG); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); if (nvm_data & NVM_WORD24_EXT_MDIO) mdicnfg |= E1000_MDICNFG_EXT_MDIO; if (nvm_data & NVM_WORD24_COM_MDIO) mdicnfg |= E1000_MDICNFG_COM_MDIO; - wr32(E1000_MDICNFG, mdicnfg); + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); out: return ret_val; } /** - * igb_reset_hw_82580 - Reset hardware + * e1000_reset_hw_82580 - Reset hardware * @hw: pointer to the HW structure * * This resets function or entire device (all ports, etc.) * to a known state. **/ -static s32 igb_reset_hw_82580(struct e1000_hw *hw) +static s32 e1000_reset_hw_82580(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; /* BH SW mailbox bit in SW_FW_SYNC */ u16 swmbsw_mask = E1000_SW_SYNCH_MB; - u32 ctrl, icr; + u32 ctrl; bool global_device_reset = hw->dev_spec._82575.global_device_reset; + DEBUGFUNC("e1000_reset_hw_82580"); hw->dev_spec._82575.global_device_reset = false; /* Get current control state. */ - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ - ret_val = igb_disable_pcie_master(hw); + ret_val = e1000_disable_pcie_master_generic(hw); if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); + DEBUGOUT("PCI-E Master disable polling has failed.\n"); - hw_dbg("Masking off all interrupts\n"); - wr32(E1000_IMC, 0xffffffff); - wr32(E1000_RCTL, 0); - wr32(E1000_TCTL, E1000_TCTL_PSP); - wrfl(); + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); - msleep(10); + msec_delay(10); /* Determine whether or not a global dev reset is requested */ - if (global_device_reset && - igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) + if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, + swmbsw_mask)) global_device_reset = false; - if (global_device_reset && - !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STAT_DEV_RST_SET)) ctrl |= E1000_CTRL_DEV_RST; else ctrl |= E1000_CTRL_RST; - wr32(E1000_CTRL, ctrl); - wrfl(); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); /* Add delay to insure DEV_RST has time to complete */ if (global_device_reset) - msleep(5); + msec_delay(5); - ret_val = igb_get_auto_rd_done(hw); + ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. 
*/ - hw_dbg("Auto Read Done did not complete\n"); + DEBUGOUT("Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ - if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) - igb_reset_init_script_82575(hw); + if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) + e1000_reset_init_script_82575(hw); /* clear global device reset status bit */ - wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); /* Clear any pending interrupt events. */ - wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); - ret_val = igb_reset_mdicnfg_82580(hw); + ret_val = e1000_reset_mdicnfg_82580(hw); if (ret_val) - hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); /* Install any alternate MAC address into RAR0 */ - ret_val = igb_check_alt_mac_addr(hw); + ret_val = e1000_check_alt_mac_addr_generic(hw); /* Release semaphore */ if (global_device_reset) - igb_release_swfw_sync_82575(hw, swmbsw_mask); + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); return ret_val; } /** - * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size * @data: data received by reading RXPBS register * * The 82580 uses a table based approach for packet buffer allocation sizes. @@ -1818,7 +2394,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) * 0x0 36 72 144 1 2 4 8 16 * 0x8 35 70 140 rsv rsv rsv rsv rsv */ -u16 igb_rxpbs_adjust_82580(u32 data) +u16 e1000_rxpbs_adjust_82580(u32 data) { u16 ret_val = 0; @@ -1829,7 +2405,7 @@ u16 igb_rxpbs_adjust_82580(u32 data) } /** - * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region @@ -1837,24 +2413,25 @@ u16 igb_rxpbs_adjust_82580(u32 data) * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ -static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, - u16 offset) +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 checksum = 0; u16 i, nvm_data; + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); goto out; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { - hw_dbg("NVM Checksum Invalid\n"); + DEBUGOUT("NVM Checksum Invalid\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -1864,7 +2441,7 @@ out: } /** - * igb_update_nvm_checksum_with_offset - Update EEPROM + * e1000_update_nvm_checksum_with_offset - Update EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region @@ -1873,62 +2450,66 @@ out: * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. 
**/ -static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error while updating checksum.\n"); + DEBUGOUT("NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, - &checksum); + &checksum); if (ret_val) - hw_dbg("NVM Write Error while updating checksum.\n"); + DEBUGOUT("NVM Write Error while updating checksum.\n"); out: return ret_val; } /** - * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ -static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 eeprom_regions_count = 1; u16 j, nvm_data; u16 nvm_offset; + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); goto out; } if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { - /* if checksums compatibility bit is set validate checksums + /* if checksums compatibility bit is set validate checksums * for all 4 ports. */ eeprom_regions_count = 4; } for (j = 0; j < eeprom_regions_count; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_validate_nvm_checksum_with_offset(hw, - nvm_offset); - if (ret_val != 0) + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) goto out; } @@ -1937,23 +2518,24 @@ out: } /** - * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM.
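 *
 * Each LAN function owns its own checksum-protected region, located
 * through NVM_82580_LAN_FUNC_OFFSET(). Assuming the customary
 * definition NVM_82580_LAN_FUNC_OFFSET(a) == (a ? 0x40 + 0x40 * a : 0),
 * the four regions start at words 0x000, 0x080, 0x0C0 and 0x100, and
 * the compatibility bit written below tells later validation to check
 * all four of them rather than just region 0.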
**/ -static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) { s32 ret_val; u16 j, nvm_data; u16 nvm_offset; + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error while updating checksum" - " compatibility bit.\n"); + DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); goto out; } @@ -1961,17 +2543,16 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) /* set compatibility bit to validate checksums appropriately */ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, - &nvm_data); + &nvm_data); if (ret_val) { - hw_dbg("NVM Write Error while updating checksum" - " compatibility bit.\n"); + DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); goto out; } } for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val) goto out; } @@ -1981,24 +2562,26 @@ out: } /** - * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ -static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 j; u16 nvm_offset; + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_validate_nvm_checksum_with_offset(hw, - nvm_offset); - if (ret_val != 0) + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) goto out; } @@ -2007,23 +2590,25 @@ out: } /** - * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM. **/ -static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 j; u16 nvm_offset; + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); - if (ret_val != 0) + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) goto out; } @@ -2032,71 +2617,777 @@ out: } /** - * igb_set_eee_i350 - Enable/disable EEE support + * e1000_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure. 
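 *
 * A hypothetical call site sketch: clear the opt-out flag carried in
 * dev_spec and re-apply the setting; the trailing IPCNFG/EEER reads in
 * the body below flush the writes:
 *
 *	hw->dev_spec._82575.eee_disable = false;
 *	e1000_set_eee_i350(hw);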
* **/ -s32 igb_set_eee_i350(struct e1000_hw *hw) +s32 e1000_set_eee_i350(struct e1000_hw *hw) { - s32 ret_val = 0; - u32 ipcnfg, eeer, ctrl_ext; + s32 ret_val = E1000_SUCCESS; + u32 ipcnfg, eeer; + + DEBUGFUNC("e1000_set_eee_i350"); - ctrl_ext = rd32(E1000_CTRL_EXT); - if ((hw->mac.type != e1000_i350) || - (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) goto out; - ipcnfg = rd32(E1000_IPCNFG); - eeer = rd32(E1000_EEER); + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); /* enable or disable per user setting */ if (!(hw->dev_spec._82575.eee_disable)) { - ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | - E1000_IPCNFG_EEE_100M_AN); - eeer |= (E1000_EEER_TX_LPI_EN | - E1000_EEER_RX_LPI_EN | - E1000_EEER_LPI_FC); + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); } else { - ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | - E1000_IPCNFG_EEE_100M_AN); - eeer &= ~(E1000_EEER_TX_LPI_EN | - E1000_EEER_RX_LPI_EN | - E1000_EEER_LPI_FC); - } - wr32(E1000_IPCNFG, ipcnfg); - wr32(E1000_EEER, eeer); + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); out: return ret_val; } -static struct e1000_mac_operations e1000_mac_ops_82575 = { - .init_hw = igb_init_hw_82575, - .check_for_link = igb_check_for_link_82575, - .rar_set = igb_rar_set, - .read_mac_addr = igb_read_mac_addr_82575, - .get_speed_and_duplex = igb_get_speed_and_duplex_copper, -}; +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ -static struct e1000_phy_operations e1000_phy_ops_82575 = { - .acquire = igb_acquire_phy_82575, - .get_cfg_done = igb_get_cfg_done_82575, - .release = igb_release_phy_82575, -}; +/** + * e1000_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; -static struct e1000_nvm_operations e1000_nvm_ops_82575 = { - .acquire = igb_acquire_nvm_82575, - .read = igb_read_nvm_eerd, - .release = igb_release_nvm_82575, - .write = igb_write_nvm_spi, -}; + DEBUGFUNC("e1000_clear_vfta_350"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); -const struct e1000_info e1000_82575_info = { - .get_invariants = igb_get_invariants_82575, - .mac_ops = &e1000_mac_ops_82575, - .phy_ops = &e1000_phy_ops_82575, - .nvm_ops = &e1000_nvm_ops_82575, + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
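+ *
+ * A minimal sketch of how a caller derives offset and value for one
+ * VLAN id, assuming the generic e1000 layout of
+ * E1000_VLAN_FILTER_TBL_SIZE (128) 32-bit entries, one bit per id
+ * ('vid' and the vfta[] shadow copy are illustrative):
+ *
+ *	u32 index = (vid >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1);
+ *	u32 bit = 1u << (vid & 0x1f);
+ *	e1000_write_vfta_i350(hw, index, vfta[index] | bit);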
+ **/ +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + DEBUGFUNC("e1000_write_vfta_350"); + + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + + E1000_WRITE_FLUSH(hw); +} + + +/** + * e1000_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 e1000_set_i2c_bb(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("e1000_set_i2c_bb"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); + i2cparams |= E1000_I2CBB_EN; + i2cparams |= E1000_I2C_DATA_OE_N; + i2cparams |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); + E1000_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = 1; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_in_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
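+ *
+ * A usage sketch with hypothetical values, writing one thermal-limit
+ * byte to the EMC sensor addressed elsewhere in this file via
+ * E1000_I2C_THERMAL_SENSOR_ADDR:
+ *
+ *	s32 ret = e1000_write_i2c_byte_generic(hw, therm_limit_offset,
+ *			E1000_I2C_THERMAL_SENSOR_ADDR, therm_limit);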
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + 
DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = e1000_clock_out_i2c_bit(hw, bit); + + if (status != E1000_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + i2cctl |= E1000_I2C_DATA_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); + E1000_WRITE_FLUSH(hw); + + return status; +} + +/** + * e1000_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 e1000_get_i2c_ack(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u32 i = 0; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("e1000_get_i2c_ack"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (i2cctl & E1000_I2C_CLK_IN) + break; + } + if (!(i2cctl & E1000_I2C_CLK_IN)) + return E1000_ERR_I2C; + + ack = e1000_get_i2c_data(&i2cctl); + if (ack == 1) { + DEBUGOUT("I2C ack was not received.\n"); + status = E1000_ERR_I2C; + } + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return status; +} + +/** + * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_in_i2c_bit"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + *data = e1000_get_i2c_data(&i2cctl); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_out_i2c_bit"); + + status = e1000_set_i2c_data(hw, &i2cctl, data); + if (status == E1000_SUCCESS) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + usec_delay(E1000_I2C_T_LOW); + } else { + status = E1000_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * e1000_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("e1000_raise_i2c_clk"); + + *i2cctl |= E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(E1000_I2C_T_RISE); +} + +/** + * e1000_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("e1000_lower_i2c_clk"); + + *i2cctl &= ~E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(E1000_I2C_T_FALL); +} + +/** + * e1000_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_i2c_data"); + + if (data) + *i2cctl |= E1000_I2C_DATA_OUT; + else + *i2cctl &= ~E1000_I2C_DATA_OUT; + + *i2cctl &= ~E1000_I2C_DATA_OE_N; + *i2cctl |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + + *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (data != e1000_get_i2c_data(i2cctl)) { + status = E1000_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * e1000_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool e1000_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("e1000_get_i2c_data"); + + if (*i2cctl & E1000_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * e1000_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
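+ *
+ * Nine pulses suffice because a slave holding SDA low is at most in
+ * the middle of clocking out eight data bits plus an ACK; after nine
+ * SCL cycles it has released the line and the final STOP returns the
+ * bus to idle. The fail path of e1000_read_i2c_byte_generic() above
+ * uses it for recovery before retrying:
+ *
+ *	msec_delay(100);
+ *	e1000_i2c_bus_clear(hw);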
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	u32 i;
+
+	DEBUGFUNC("e1000_i2c_bus_clear");
+
+	e1000_i2c_start(hw);
+
+	e1000_set_i2c_data(hw, &i2cctl, 1);
+
+	for (i = 0; i < 9; i++) {
+		e1000_raise_i2c_clk(hw, &i2cctl);
+
+		/* Min high period of clock is 4us */
+		usec_delay(E1000_I2C_T_HIGH);
+
+		e1000_lower_i2c_clk(hw, &i2cctl);
+
+		/* Min low period of clock is 4.7us */
+		usec_delay(E1000_I2C_T_LOW);
+	}
+
+	e1000_i2c_start(hw);
+
+	/* Put the i2c bus back to default state */
+	e1000_i2c_stop(hw);
+}
+
+static const u8 e1000_emc_temp_data[4] = {
+	E1000_EMC_INTERNAL_DATA,
+	E1000_EMC_DIODE1_DATA,
+	E1000_EMC_DIODE2_DATA,
+	E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+	E1000_EMC_INTERNAL_THERM_LIMIT,
+	E1000_EMC_DIODE1_THERM_LIMIT,
+	E1000_EMC_DIODE2_THERM_LIMIT,
+	E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+	s32 status = E1000_SUCCESS;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8 num_sensors;
+	u8 sensor_index;
+	u8 sensor_location;
+	u8 i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return status;
+
+	e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > E1000_MAX_SENSORS)
+		num_sensors = E1000_MAX_SENSORS;
+
+	for (i = 1; i < num_sensors; i++) {
+		e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0)
+			hw->phy.ops.read_i2c_byte(hw,
+					e1000_emc_temp_data[sensor_index],
+					E1000_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+	}
+	return status;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map and saves
+ * the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+	s32 status = E1000_SUCCESS;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8 low_thresh_delta;
+	u8 num_sensors;
+	u8 sensor_index;
+	u8 sensor_location;
+	u8 therm_limit;
+	u8 i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+	data->sensor[0].location = 0x1;
+	data->sensor[0].caution_thresh =
+		(E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
+	data->sensor[0].max_op_thresh =
+		(E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
+
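+	/* The ETS region of the NVM, parsed below, is one config word
+	 * (sensor type, low-threshold delta, sensor count) followed by
+	 * one word per external sensor carrying its EMC register index,
+	 * location code and high temperature threshold.
+	 */
+	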
/* Return the internal sensor only if ETS is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return status; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; +} diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index b927d79ab536..61f9d3a3ac3f 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -28,89 +28,244 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ -extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); -extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); - -#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_DEF1_DEF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_OFF1_ON2)) - -#define E1000_RAR_ENTRIES_82575 16 -#define E1000_RAR_ENTRIES_82576 24 -#define E1000_RAR_ENTRIES_82580 24 -#define E1000_RAR_ENTRIES_I350 32 +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
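+ * (RAR[0] conventionally holds the adapter's own station address.)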
+ */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; -#define E1000_SW_SYNCH_MB 0x00000100 -#define E1000_STAT_DEV_RST_SET 0x00100000 -#define E1000_CTRL_DEV_RST 0x20000000 +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + } l4_setup; +}; /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_TIMESTAMP 0x40000000 - -#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 -#define E1000_MRQC_ENABLE_VMDQ 0x00000003 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 -#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) #define E1000_EICR_TX_QUEUE ( \ - E1000_EICR_TX_QUEUE0 | \ - E1000_EICR_TX_QUEUE1 | \ - E1000_EICR_TX_QUEUE2 | \ - E1000_EICR_TX_QUEUE3) + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) #define E1000_EICR_RX_QUEUE ( \ - E1000_EICR_RX_QUEUE0 | \ - E1000_EICR_RX_QUEUE1 | \ - E1000_EICR_RX_QUEUE2 | \ - E1000_EICR_RX_QUEUE3) + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ -#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { - struct { - __le16 pkt_info; /* RSS type, Packet type */ - __le16 hdr_info; /* Split Header, - * header buffer length */ + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; } lo_dword; union { - __le32 rss; /* RSS Hash */ + __le32 rss; /* RSS Hash */ struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 -#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ -#define E1000_RXDADV_STAT_TSIP 0x08000 /* 
timestamp in packet */ +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { @@ -127,16 +282,26 @@ union e1000_adv_tx_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ 
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -146,115 +311,193 @@ struct e1000_adv_tx_context_desc { __le32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ /* IPSec Encrypt Enable for ESP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF /* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF /* Additional Transmit Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wbk flushing */ /* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 /* Additional Receive Descriptor Control definitions */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ -#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ - -#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ -#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ -/* Additional DCA related definitions, note change in position of CPUID */ -#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ -#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ -#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ -#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ -/* ETQF register bit definitions */ -#define E1000_ETQF_FILTER_ENABLE (1 << 26) -#define E1000_ETQF_1588 (1 << 30) +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -/* FTQF register bit definitions */ -#define E1000_FTQF_VF_BP 0x00008000 -#define E1000_FTQF_1588_TIME_STAMP 0x08000000 -#define E1000_FTQF_MASK 0xF0000000 -#define E1000_FTQF_MASK_PROTO_BP 0x10000000 -#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ -#define E1000_NVM_APME_82575 0x0400 -#define MAX_NUM_VFS 8 +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ -#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ -#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ -#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ -#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 -#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ +/* ETQF register bit 
definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ /* Easy defines for setting default pool, would normally be left a zero */ -#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 -#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) /* Other useful VMD_CTL register defines */ -#define E1000_VT_CTL_IGNORE_MAC (1 << 28) -#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) -#define E1000_VT_CTL_VM_REPL_EN (1 << 30) +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) /* Per VM Offload register setup */ -#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ -#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ -#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ -#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ -#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ -#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ -#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ -#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ -#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ -#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ - -#define E1000_VLVF_ARRAY_SIZE 32 -#define E1000_VLVF_VLANID_MASK 0x00000FFF -#define E1000_VLVF_POOLSEL_SHIFT 12 -#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) -#define E1000_VLVF_LVLAN 0x00100000 -#define E1000_VLVF_VLANID_ENABLE 0x80000000 - -#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ - -#define E1000_IOVCTL 0x05BBC -#define E1000_IOVCTL_REUSE_VFQ 0x00000001 - -#define E1000_RPLOLR_STRVLAN 0x40000000 -#define E1000_RPLOLR_STRCRC 0x80000000 - -#define E1000_DTXCTL_8023LL 0x0004 -#define E1000_DTXCTL_VLAN_ADDED 0x0008 -#define E1000_DTXCTL_OOS_ENABLE 0x0010 -#define E1000_DTXCTL_MDP_EN 0x0020 -#define E1000_DTXCTL_SPOOF_INT 0x0040 +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ 
+#define E1000_VMOLR_ROMPE	0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE	0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM		0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME	0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC	0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE		0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE		0x20000000 /* Unicast promiscuous enable */
+#define E1000_DVMOLR_HIDVLAN	0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC	0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB	0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE	0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE		32
+#define E1000_VLVF_VLANID_MASK		0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT	12
+#define E1000_VLVF_POOLSEL_MASK		(0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN		0x00100000
+#define E1000_VLVF_VLANID_ENABLE	0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT	0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER		0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL	0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ	0x00000001
+
+#define E1000_RPLOLR_STRVLAN	0x40000000
+#define E1000_RPLOLR_STRCRC	0x80000000
+
+#define E1000_TCTL_EXT_COLD	0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT	10
+
+#define E1000_DTXCTL_8023LL	0x0004
+#define E1000_DTXCTL_VLAN_ADDED	0x0008
+#define E1000_DTXCTL_OOS_ENABLE	0x0010
+#define E1000_DTXCTL_MDP_EN	0x0020
+#define E1000_DTXCTL_SPOOF_INT	0x0040
 #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT	(1 << 14)
-#define ALL_QUEUES 0xFFFF
-
-/* RX packet buffer size defines */
-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
-void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
-void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
-u16 igb_rxpbs_adjust_82580(u32 data);
-s32 igb_set_eee_i350(struct e1000_hw *);
-
-#endif
+#define ALL_QUEUES	0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576	0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define E1000_EMC_INTERNAL_DATA		0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT	0x20
+#define E1000_EMC_DIODE1_DATA		0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT	0x19
+#define E1000_EMC_DIODE2_DATA		0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT	0x1A
+#define E1000_EMC_DIODE3_DATA		0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT	0x30
+
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA	4
+#define E1000_I2C_T_LOW		5
+#define E1000_I2C_T_HIGH	4
+#define E1000_I2C_T_SU_STA	5
+#define E1000_I2C_T_HD_DATA	5
+#define E1000_I2C_T_SU_DATA	1
+#define E1000_I2C_T_RISE	1
+#define E1000_I2C_T_FALL	1
+#define E1000_I2C_T_SU_STO	4
+#define E1000_I2C_T_BUF 5 + +s32 e1000_set_i2c_bb(struct e1000_hw *hw); +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void e1000_i2c_bus_clear(struct e1000_hw *hw); +#endif /* _E1000_82575_H_ */ diff --git a/drivers/net/igb/e1000_api.c b/drivers/net/igb/e1000_api.c new file mode 100644 index 000000000000..57a5440f7ce0 --- /dev/null +++ b/drivers/net/igb/e1000_api.c @@ -0,0 +1,1160 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
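+ * (The init_params hooks themselves are installed per family; for the
+ * parts handled here that is done by e1000_init_function_pointers_82575().)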
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->phy.ops.init_params) {
+		ret_val = hw->phy.ops.init_params(hw);
+		if (ret_val) {
+			DEBUGOUT("PHY Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("phy.init_phy_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the mailbox
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->mbx.ops.init_params) {
+		ret_val = hw->mbx.ops.init_params(hw);
+		if (ret_val) {
+			DEBUGOUT("Mailbox Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("mbx.init_mbx_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_set_mac_type");
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82575EB_COPPER:
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		mac->type = e1000_82575;
+		break;
+	case E1000_DEV_ID_82576:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
+	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+	case E1000_DEV_ID_82576_NS:
+	case E1000_DEV_ID_82576_NS_SERDES:
+	case E1000_DEV_ID_82576_SERDES_QUAD:
+		mac->type = e1000_82576;
+		break;
+	case E1000_DEV_ID_82580_COPPER:
+	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_SERDES:
+	case E1000_DEV_ID_82580_SGMII:
+	case E1000_DEV_ID_82580_COPPER_DUAL:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+	case E1000_DEV_ID_DH89XXCC_SERDES:
+	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+	case E1000_DEV_ID_DH89XXCC_SFP:
+		mac->type = e1000_82580;
+		break;
+	case E1000_DEV_ID_I350_COPPER:
+	case E1000_DEV_ID_I350_FIBER:
+	case E1000_DEV_ID_I350_SERDES:
+	case E1000_DEV_ID_I350_SGMII:
+	case E1000_DEV_ID_I350_DA4:
+		mac->type = e1000_i350;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		ret_val = -E1000_ERR_MAC_INIT;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ *		 getting the device ready for use. false will only set
+ *		 MAC type and the function pointers for the other init
+ *		 functions. Passing false will not generate any hardware
+ *		 reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+	s32 ret_val;
+
+	/* Can't do much good without knowing the MAC type.
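+	 * e1000_set_mac_type() maps the PCI device ID to one of the
+	 * e1000_mac_type values that the rest of the shared code keys off.
+	 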
*/ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + e1000_init_function_pointers_82575(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. 
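+ * (The generic version forces the TFCE/RFCE flow-control enable bits
+ * in the device control register to match hw->fc.current_mode.)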
+ **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. 
+ **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
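+ * (The collision distance only affects half-duplex operation.)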
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.config_collision_dist)
+		hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * e1000_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	if (hw->mac.ops.rar_set)
+		hw->mac.ops.rar_set(hw, addr, index);
+}
+
+/**
+ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.validate_mdi_setting)
+		return hw->mac.ops.validate_mdi_setting(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+	return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ * e1000_mng_host_if_write - Writes to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way, and accumulates the sum of the written data in *sum.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+			    u16 offset, u8 *sum)
+{
+	if (hw->mac.ops.mng_host_if_write)
+		return hw->mac.ops.mng_host_if_write(hw, buffer, length,
+						     offset, sum);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+			       struct e1000_host_mng_command_header *hdr)
+{
+	if (hw->mac.ops.mng_write_cmd_header)
+		return hw->mac.ops.mng_write_cmd_header(hw, hdr);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy-waits
+ * if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.mng_enable_host_if)
+		return hw->mac.ops.mng_enable_host_if(hw);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ * e1000_wait_autoneg - Waits for autonegotiation completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for autonegotiation to complete. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.wait_autoneg)
+		return hw->mac.ops.wait_autoneg(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.check_reset_block)
+		return hw->phy.ops.check_reset_block(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->phy.ops.read_reg)
+		return hw->phy.ops.read_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->phy.ops.write_reg)
+		return hw->phy.ops.write_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Releases the PHY semaphore; returns immediately if the silicon family
+ * does not require a semaphore when accessing the PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.release)
+		hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the PHY semaphore; returns success if the silicon family does
+ * not require a semaphore when accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.acquire)
+		return hw->phy.ops.acquire(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg - Reads register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the location to store the 16-bit value read.
+ *
+ * Reads a register out of the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_write_kmrn_reg - Writes register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes a register to the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. 
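+ * (D3 is the suspended/sleep power state; the D0 variant above applies
+ * the same policy while the device is fully powered.)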
+ **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Uses the mac.ops.read_mac_addr function pointer if one is installed; + * otherwise the generic implementation is called. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers.
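+ *
+ * A minimal usage sketch (illustrative only, not from this patch; the
+ * offset variable is a stand-in for a real NVM word offset):
+ *
+ *	u16 word;
+ *	s32 ret = e1000_read_nvm(hw, offset, 1, &word);
+ *	if (ret != E1000_SUCCESS)
+ *		return ret;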
+ **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to write + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the 8-bit control register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The PHY may be powered down to save power, to turn off link when the + * driver is unloaded, or when wake on LAN is not enabled (among other + * reasons). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The PHY may be powered down to save power, to turn off link when the + * driver is unloaded, or when wake on LAN is not enabled (among other + * reasons). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload.
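+ *
+ * Illustrative pairing (editorial note, not from this patch): this helper
+ * mirrors e1000_power_up_fiber_serdes_link() across a driver's open and
+ * close paths:
+ *
+ *	open:  e1000_power_up_fiber_serdes_link(hw);
+ *	close: e1000_shutdown_fiber_serdes_link(hw);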
+ **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + +/** + * e1000_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_thermal_sensor_data) + return hw->mac.ops.get_thermal_sensor_data(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + **/ +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_thermal_sensor_thresh) + return hw->mac.ops.init_thermal_sensor_thresh(hw); + + return E1000_SUCCESS; +} diff --git a/drivers/net/igb/e1000_api.h b/drivers/net/igb/e1000_api.h new file mode 100644 index 000000000000..98a4ba9944d0 --- /dev/null +++ b/drivers/net/igb/e1000_api.h @@ -0,0 +1,150 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); + +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_wait_autoneg(struct e1000_hw *hw); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); 
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); + + + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * errors = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_size = the maximum frame length we want to accept. + * min_frame_size = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ?
\ + (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= (max_frame_size + 1))) : \ + (((length) > min_frame_size) && \ + ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1))))) + +#endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index aed217449f0d..cf1da99dbf42 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -34,345 +34,760 @@ /* Definitions for power management and wakeup registers */ /* Wake Up Control */ -#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */ +#define E1000_WUC_PPROXYE 0x00000010 /* Protocol Proxy Enable */ +#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ -#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* all wakeup filters mask */ +#define E1000_WUFC_FLX_OFFSET 16 /* Flexible Filters bits offset */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* 4 flexible filters mask */ +/* + * For 82576 to utilize Extended filter masks in addition to + * existing (filter) masks + */ +#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. 
FLX filter mask */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC +#define E1000_WUS_ARP E1000_WUFC_ARP +#define E1000_WUS_IPV4 E1000_WUFC_IPV4 +#define E1000_WUS_IPV6 E1000_WUFC_IPV6 +#define E1000_WUS_FLX0 E1000_WUFC_FLX0 +#define E1000_WUS_FLX1 E1000_WUFC_FLX1 +#define E1000_WUS_FLX2 E1000_WUFC_FLX2 +#define E1000_WUS_FLX3 E1000_WUFC_FLX3 +#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 +/* Two Extended Flexible Filters are supported (82576) */ +#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 +#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX /* Extended Device Control */ -#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +/* Reserved (bits 4,5) in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* SW Definable Pin 5 data */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ /* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 -/* Interrupt delay cancellation */ -/* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 -/* Interrupt acknowledge Auto-mask */ -/* Clear Interrupt timers after IMS clear */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA 
Dynamic Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/ +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +/* IAME enable bit (27) was removed in >= 82575 */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ /* packet buffer parity error detection enabled */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* descriptor FIFO parity error detection enable */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_I2CCMD_REG_ADDR_SHIFT 16 -#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 -#define E1000_I2CCMD_OPCODE_READ 0x08000000 -#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 -#define E1000_I2CCMD_READY 0x20000000 -#define E1000_I2CCMD_ERROR 0x80000000 -#define E1000_MAX_SGMII_PHY_REG_ADDR 255 -#define E1000_I2CCMD_PHY_TIMEOUT 200 -#define E1000_IVAR_VALID 0x80 -#define E1000_GPIE_NSICR 0x00000001 -#define E1000_GPIE_MSIX_MODE 0x00000010 -#define E1000_GPIE_EIAME 0x40000000 -#define E1000_GPIE_PBA 0x80000000 +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_REG_ADDR 0x00FF0000 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_PHY_ADDR 0x07000000 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_RESET 0x10000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ - -#define E1000_RXDEXT_STATERR_LB 0x00040000 -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define 
E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 -#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 - + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF /* Management Control */ -#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -#define 
E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ /* Enable Neighbor Discovery Filtering */ -#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ /* Enable MAC address filtering */ -#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 +/* Enable IP address filtering */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Ena checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Ena broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ +#define E1000_MANC_MPROXYE 0x40000000 /* Mngment Proxy Enable */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OS2BMC is enabled or not */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ /* Receive Control */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -#define
E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* Rx desc min thresh size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ /* * Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & - * E1000_PSRCTL_BSIZE0_MASK) | - * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & - * E1000_PSRCTL_BSIZE1_MASK) | - * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & - * E1000_PSRCTL_BSIZE2_MASK) | - * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; - * E1000_PSRCTL_BSIZE3_MASK)) + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) * where value0 = [128..16256], default=256 * value1 = [1024..64512], default=4096 * value2 = [0..64512], default=4096 * value3 = [0..64512], default=0 */ -#define E1000_PSRCTL_BSIZE0_MASK
0x0000007F -#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 -#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 -#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 -#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ -#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ -#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ -#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* SWFW_SYNC Definitions */ -#define E1000_SWFW_EEP_SM 0x1 -#define E1000_SWFW_PHY0_SM 0x2 -#define E1000_SWFW_PHY1_SM 0x4 -#define E1000_SWFW_PHY2_SM 0x20 -#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 /* FACTPS Definitions */ +#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */ /* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* enable link status from external LINK_0 and LINK_1 pins */ -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -/* Initiate an interrupt to manageability engine */ -#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ - -/* Bit definitions for the Management Data IO (MDIO) and Management Data +#define E1000_CTRL_EXT_LINK_EN 0x00010000 +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* + * Bit definitions for the Management Data IO (MDIO) and Management Data * Clock (MDC) pins in the Device Control Register. 
*/ - -#define E1000_CONNSW_ENRGSRC 0x4 -#define E1000_PCS_CFG_PCS_EN 8 -#define E1000_PCS_LCTL_FLV_LINK_UP 1 -#define E1000_PCS_LCTL_FSV_100 2 -#define E1000_PCS_LCTL_FSV_1000 4 -#define E1000_PCS_LCTL_FDV_FULL 8 -#define E1000_PCS_LCTL_FSD 0x10 -#define E1000_PCS_LCTL_FORCE_LINK 0x20 -#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 -#define E1000_PCS_LCTL_AN_ENABLE 0x10000 -#define E1000_PCS_LCTL_AN_RESTART 0x20000 -#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 -#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 - -#define E1000_PCS_LSTS_LINK_OK 1 -#define E1000_PCS_LSTS_SPEED_100 2 -#define E1000_PCS_LSTS_SPEED_1000 4 -#define E1000_PCS_LSTS_DUPLEX_FULL 8 -#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000 +#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000 +#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000 +#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_10 0 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 +#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000 +#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000 +#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000 /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -#define E1000_STATUS_FUNC_SHIFT 2 -#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -/* Change in Dock/Undock state. Clear on write '0'. */ -/* Status of Master requests. 
*/ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +/* Change in Dock/Undock state clear on write '0'. */ +#define E1000_STATUS_DOCK_CI 0x00000800 +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66MHz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disbld */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ /* BMC external code execution disabled */ - -/* Constants used to intrepret the masked PCI-X bus speed. */ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - - -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ -#define ADVERTISE_1000_FULL 0x0020 +#define E1000_STATUS_BMC_LITE 0x01000000 +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disbld on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disbld on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 /* 1000/H is not supported, nor spec-compliant.
*/ -#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) - -#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX ( \ + ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX /* LED Control */ -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 - -#define E1000_LEDCTL_MODE_LED_ON 0xE -#define E1000_LEDCTL_MODE_LED_OFF 0xF +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF /* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data 
Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ /* Extended desc bits for Linksec and timesync */ /* Transmit Control */ -#define E1000_TCTL_EN 0x00000002 /* enable tx */ -#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ -#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ -#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ -#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ - -/* DMA Coalescing register fields */ -#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing - * Watchdog Timer */ -#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive - * Threshold */ -#define E1000_DMACR_DMACTHR_SHIFT 16 -#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe - * transactions */ -#define E1000_DMACR_DMAC_LX_SHIFT 28 -#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ - -#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit - * Threshold */ - -#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ - -#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate - * Threshold */ -#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in - * current window */ - -#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic - * Current Cnt */ - -#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold - * High val */ -#define E1000_FCRTC_RTH_COAL_SHIFT 4 -#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ /* SerDes Control */ -#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define 
E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 /* Receive Checksum Control */ -#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ -#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ -#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ -#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ -#define E1000_RFCTL_LEF 0x00040000 +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ -#define E1000_COLLISION_THRESHOLD 15 -#define E1000_CT_SHIFT 4 -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLD_SHIFT 12 +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 -/* Ethertype field values */ -#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 /* PBA constants */ -#define E1000_PBA_34K 0x0022 -#define E1000_PBA_64K 0x0040 /* 64KB */ +#define E1000_PBA_6K 0x0006 /* 
6KB */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K +#define E1000_PBS_24K E1000_PBA_24K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 /* SW Semaphore Register */ -#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ /* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ -#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ -#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ -#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min.
threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+/* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_DSW 0x00000020
/* LAN connected device generates an interrupt */
-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_PHYINT 0x00001000
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
+
+#define E1000_ITR_MASK 0x000FFFFF /* ITR value bitfield */
+#define E1000_ITR_MULT 256 /* ITR multiplier in nsec */
+
/* Extended Interrupt Cause Read */
-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
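Note: each queue owns one EICR bit, with the Rx causes in bits 3:0 and the Tx
causes in bits 11:8, so queue N's bit is simply the queue-0 bit shifted left
by N. A minimal decoding sketch, assuming only the definitions above and the
kernel's u32 type; the helper name is illustrative, not a function of this
driver:

static u32 example_rx_queues_to_service(u32 eicr)
{
        u32 i, pending = 0;

        for (i = 0; i < 4; i++)
                if (eicr & (E1000_EICR_RX_QUEUE0 << i))
                        pending |= 1u << i;     /* Rx queue i raised a cause */
        return pending;
}

The same shift pattern works for the Tx causes via E1000_EICR_TX_QUEUE0.

/* TCP Timer */
+#define E1000_TCPTIMER_KS 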
0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) /* * This defines the bits that are set in the Interrupt Mask @@ -384,47 +799,137 @@ * o LSC = Link Status Change */ #define IMS_ENABLE_MASK ( \ - E1000_IMS_RXT0 | \ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ - E1000_IMS_LSC | \ - E1000_IMS_DOUTSYNC) + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) /* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ -#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ - +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +/* Q0 Rx desc FIFO parity error */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 +/* Q0 Tx desc FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 +/* host arb read buffer parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR +/* packet buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR +/* Q1 Rx desc FIFO parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 +/* Q1 Tx desc FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_EPRST E1000_ICR_EPRST +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ /* Extended Interrupt Mask Set */ -#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ /* Interrupt Cause Set */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ -#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* Rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+/* Q0 Rx desc FIFO parity error */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0
+/* Q0 Tx desc FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0
+/* host arb read buffer parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR
+/* packet buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR
+/* Q1 Rx desc FIFO parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1
+/* Q1 Tx desc FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
-
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
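Note: PTHRESH, HTHRESH and WTHRESH above are 6-bit fields at bits 5:0, 13:8
and 21:16 of TXDCTL, and GRAN selects descriptor granularity; composing them
reproduces the canned values (GRAN=1 with WTHRESH=1 yields 0x01010000, i.e.
E1000_TXDCTL_FULL_TX_DESC_WB). A minimal sketch, assuming only the definitions
above and the kernel's u32 type; the helper name is illustrative:

static u32 example_txdctl(u32 pthresh, u32 hthresh, u32 wthresh)
{
        u32 txdctl = E1000_TXDCTL_GRAN;  /* thresholds counted in descriptors */

        txdctl |= pthresh & E1000_TXDCTL_PTHRESH;               /* bits 5:0 */
        txdctl |= (hthresh << 8) & E1000_TXDCTL_HTHRESH;        /* bits 13:8 */
        txdctl |= (wthresh << 16) & E1000_TXDCTL_WTHRESH;       /* bits 21:16 */
        return txdctl;
}

/* Enable the counting of descriptors still to be processed. 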
*/ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Flow Control Constants */ -#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -#define FLOW_CONTROL_TYPE 0x8808 +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ -#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ /* Receive Address */ /* @@ -434,296 +939,539 @@ * (RAR[15]) for our directed address used by controllers with * manageability enabled, allowing us room for 15 multicast addresses. */ -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ -#define E1000_RAL_MAC_ADDR_LEN 4 -#define E1000_RAH_MAC_ADDR_LEN 2 -#define E1000_RAH_POOL_MASK 0x03FC0000 -#define E1000_RAH_POOL_1 0x00040000 +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_SHIFT 18 +#define E1000_RAH_POOL_1 0x00040000 /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_NVM 1 -#define E1000_ERR_PHY 2 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_PARAM 4 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_RESET 9 -#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -#define E1000_BLK_PHY_RESET 12 -#define E1000_ERR_SWFW_SYNC 13 -#define E1000_NOT_IMPLEMENTED 14 -#define E1000_ERR_MBX 15 -#define E1000_ERR_INVALID_ARGUMENT 16 -#define E1000_ERR_NO_SPACE 17 -#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 /* Loop limit on how long we wait for auto-negotiation to complete */ -#define COPPER_LINK_UP_LIMIT 10 -#define PHY_AUTO_NEG_LIMIT 45 -#define PHY_FORCE_LIMIT 20 +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 /* Number of 100 microseconds we wait for PCI Express master disable */ -#define MASTER_DISABLE_TIMEOUT 800 +#define MASTER_DISABLE_TIMEOUT 800 /* Number of milliseconds we wait for PHY configuration done after MAC reset */ -#define PHY_CFG_TIMEOUT 100 +#define PHY_CFG_TIMEOUT 100 /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 /* Number of milliseconds for NVM auto read done after MAC reset. 
*/ -#define AUTO_READ_DONE_TIMEOUT 10 +#define AUTO_READ_DONE_TIMEOUT 10 /* Flow Control */ -#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ - -#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ -#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ - -#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ -#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ -#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 -#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 -#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 -#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 -#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A -#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ - -#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF -#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 -#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 -#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 - -#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 -#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 -#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 -#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 -#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 -#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 - -#define E1000_TIMINCA_16NS_SHIFT 24 - -#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ -#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ -#define E1000_MDICNFG_PHY_MASK 0x03E00000 -#define E1000_MDICNFG_PHY_SHIFT 21 +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 
0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* I350 EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in 
LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ /* PCI Express Control */ -#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 -#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define E1000_GCR_CAP_VER2 0x00040000 - -/* mPHY Address Control and Data Registers */ -#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ -#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 -#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ - -/* mPHY PCS CLK Register */ -#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ -/* mPHY Near End Digital Loopback Override Bit */ -#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 /* PHY Control Register */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ 
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Autoneg Advertisement Register */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */
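Note: the NWAY_AR/NWAY_LPAR pause bits above are the inputs to the standard
IEEE 802.3 (Table 28B-3) flow-control resolution once autonegotiation
completes. A minimal sketch of that resolution, assuming only the definitions
above and the kernel's u16 type; the enum and helper are illustrative, not
the driver's own e1000_fc types:

enum example_fc { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum example_fc example_resolve_fc(u16 adv, u16 lpa)
{
        if ((adv & NWAY_AR_PAUSE) && (lpa & NWAY_LPAR_PAUSE))
                return FC_FULL;         /* both ends symmetric */
        if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
            (lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
                return FC_TX_PAUSE;     /* we send PAUSE, do not honor it */
        if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
            !(lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
                return FC_RX_PAUSE;     /* we honor PAUSE, do not send it */
        return FC_NONE;
}

/* 1000BASE-T Control Register */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD 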
capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ /* 1000BASE-T Status Register */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ -#define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ /* NVM Control */ -#define E1000_EECD_SK 0x00000001 /* NVM Clock */ -#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ -#define E1000_EECD_DI 0x00000004 /* NVM Data In */ -#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ -#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ -#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ -#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 
0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ /* NVM Addressing bits based on type 0=small, 1=large */ -#define E1000_EECD_ADDR_BITS 0x00000400 -#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ -#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ -#define E1000_EECD_SIZE_EX_SHIFT 11 - -/* Offset to data in NVM read/write registers */ -#define E1000_NVM_RW_REG_DATA 16 -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */ +#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ -#define NVM_COMPAT 0x0003 -#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ -#define NVM_INIT_CONTROL2_REG 0x000F -#define NVM_INIT_CONTROL3_PORT_B 0x0014 -#define NVM_INIT_CONTROL3_PORT_A 0x0024 -#define NVM_ALT_MAC_ADDR_PTR 0x0037 -#define NVM_CHECKSUM_REG 0x003F -#define NVM_COMPATIBILITY_REG_3 0x0003 -#define NVM_COMPATIBILITY_BIT_MASK 0x8000 - -#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ -#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ -#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ -#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ - -#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? 
(0x40 + (0x40 * a)) : 0) +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ +#define NVM_PHY_CLASS_WORD 0x0007 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF +#define NVM_INIT_CONTROL1_REG 0x000A +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_FLASH_VERSION 0x0032 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0) /* Mask bits for fields in Word 0x24 of the NVM */ -#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ -#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + /* Mask bits for fields in Word 0x0f of the NVM */ -#define NVM_WORD0F_PAUSE_MASK 0x3000 -#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_ANE 0x0800 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 +#define NVM_WORD0F_LPLU 0x0001 /* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C -/* length of string needed to store part num */ -#define E1000_PBANUM_LENGTH 11 - -/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ -#define NVM_SUM 0xBABA +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 -#define NVM_PBA_OFFSET_0 8 -#define NVM_PBA_OFFSET_1 9 -#define NVM_PBA_PTR_GUARD 0xFAFA -#define NVM_WORD_SIZE_BASE_SHIFT 6 +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 -/* NVM Commands - Microwire */ +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +#define NVM_MAC_ADDR_OFFSET 0 +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 /* NVM Commands - SPI */ -#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ -#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ -#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ -#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ +#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */ /* SPI NVM Status Register */ -#define NVM_STATUS_RDY_SPI 0x01 +#define NVM_STATUS_RDY_SPI 0x01 +#define NVM_STATUS_WEN_SPI 0x02 +#define NVM_STATUS_BP0_SPI 0x04 +#define NVM_STATUS_BP1_SPI 0x08 +#define NVM_STATUS_WPEN_SPI 0x80 /* Word definitions for ID LED Settings */ -#define ID_LED_RESERVED_0000 0x0000 -#define ID_LED_RESERVED_FFFF 0xFFFF -#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ - (ID_LED_OFF1_OFF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) -#define ID_LED_DEF1_DEF2 0x1 -#define ID_LED_DEF1_ON2 0x2 -#define ID_LED_DEF1_OFF2 0x3 -#define ID_LED_ON1_DEF2 0x4 -#define ID_LED_ON1_ON2 0x5 -#define ID_LED_ON1_OFF2 0x6 -#define ID_LED_OFF1_DEF2 0x7 -#define ID_LED_OFF1_ON2 0x8 -#define ID_LED_OFF1_OFF2 0x9 - -#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -#define IGP_ACTIVITY_LED_ENABLE 0x0300 -#define IGP_LED3_MODE 0x07000000 +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 /* PCI/PCI-X/PCI-EX Config space */ -#define PCIE_DEVICE_CONTROL2 0x28 -#define PCIE_DEVICE_CONTROL2_16ms 0x0005 +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif -#define PHY_REVISION_MASK 0xFFFFFFF0 -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define 
MAX_PHY_MULTI_PAGE_REG 0xF /* Bit definitions for valid PHY IDs. */ /* * I = Integrated * E = External */ -#define M88E1111_I_PHY_ID 0x01410CC0 -#define M88E1112_E_PHY_ID 0x01410C90 -#define I347AT4_E_PHY_ID 0x01410DC0 -#define IGP03E1000_E_PHY_ID 0x02A80390 -#define I82580_I_PHY_ID 0x015403A0 -#define I350_I_PHY_ID 0x015403B0 -#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ -#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ -#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ - -#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ -#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Reg */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ /* M88E1000 PHY Specific Control Register */ -#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ /* 1=CLK125 low, 0=CLK125 toggling */ -#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ - /* Manual MDI configuration */ -#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ -#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ -#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold * 0=Normal 10BASE-T Rx Threshold */ +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080 /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ -#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert 
CRS on Transmit */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ /* M88E1000 PHY Specific Status Register */ -#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ /* * 0 = <50M * 1 = 50-80M @@ -731,102 +1479,191 @@ * 3 = 110-140M * 4 = >140M */ -#define M88E1000_PSSR_CABLE_LENGTH 0x0380 -#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 /* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ /* * 1 = Lost lock detect enabled. * Will assert lost lock and bring * link down if idle not seen * within 1ms in 1000BASE-T */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* * Number of times we will attempt to autonegotiate before downshifting if we * are the master */ -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 /* * Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88E1111 Specific Registers */ +#define M88E1111_PHY_PAGE_SELECT1 0x16 /* for registers 0-28 */ +#define M88E1111_PHY_PAGE_SELECT2 0x1D /* for registers 30-31 */ -/* Intel i347-AT4 Registers */ +/* M88E1111 page select register mask */ +#define M88E1111_PHY_PAGE_SELECT_MASK1 0xFF +#define M88E1111_PHY_PAGE_SELECT_MASK2 0x3F -#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ -#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ -#define I347AT4_PAGE_SELECT 0x16 +/* 
Intel I347AT4 Registers */ -/* i347-AT4 Extended PHY Specific Control Register */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ /* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the master + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ -#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 -#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 -#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 -#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 -#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 -#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 -#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 -#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 -#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 -#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 - -/* i347-AT4 PHY Cable Diagnostics Control */ -#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ - -/* Marvell 1112 only registers */ -#define M88E1112_VCT_DSP_DISTANCE 0x001A +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A /* M88EC018 Rev 2 specific DownShift settings */ -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* + * Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_SPEC_STATUS GG82563_REG(0, 17) /* PHY Spec Status */ +#define GG82563_PHY_INT_ENABLE GG82563_REG(0, 18) /* Interrupt Ena */ +#define GG82563_PHY_SPEC_STATUS_2 GG82563_REG(0, 19) /* PHY Spec Stat2 */ +#define GG82563_PHY_RX_ERR_CNTR GG82563_REG(0, 21) /* Rx Err Counter */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ +/* Test Clock Control (use reg. 
29 to select) */ +#define GG82563_PHY_TEST_CLK_CTRL GG82563_REG(0, 30) + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) +#define GG82563_PHY_MAC_SPEC_CTRL_2 GG82563_REG(2, 26) /* MAC Spec Ctrl 2 */ + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PORT_RESET GG82563_REG(193, 17) /* Port Reset */ +#define GG82563_PHY_REVISION_ID GG82563_REG(193, 18) /* Revision ID */ +#define GG82563_PHY_DEVICE_ID GG82563_REG(193, 19) /* Device ID */ +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ +/* Rate Adaptation Control */ +#define GG82563_PHY_RATE_ADAPT_CTRL GG82563_REG(193, 25) + +/* Page 194 - KMRN Registers */ +/* FIFO's Control/Status */ +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT GG82563_REG(194, 16) +#define GG82563_PHY_KMRN_CTRL GG82563_REG(194, 17) /* Control */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ +#define GG82563_PHY_KMRN_DIAGNOSTIC GG82563_REG(194, 19) /* Diagnostic */ +#define GG82563_PHY_ACK_TIMEOUTS GG82563_REG(194, 20) /* Ack Timeouts */ +#define GG82563_PHY_ADV_ABILITY GG82563_REG(194, 21) /* Adver Ability */ +/* Link Partner Advertised Ability */ +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY GG82563_REG(194, 23) +#define GG82563_PHY_ADV_NEXT_PAGE GG82563_REG(194, 24) /* Adver Next Pg */ +/* Link Partner Advertised Next page */ +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE GG82563_REG(194, 25) +#define GG82563_PHY_KMRN_MISC GG82563_REG(194, 26) /* Misc. */ /* MDI Control */ -#define E1000_MDIC_DATA_MASK 0x0000FFFF -#define E1000_MDIC_REG_MASK 0x001F0000 -#define E1000_MDIC_REG_SHIFT 16 -#define E1000_MDIC_PHY_MASK 0x03E00000 -#define E1000_MDIC_PHY_SHIFT 21 -#define E1000_MDIC_OP_WRITE 0x04000000 -#define E1000_MDIC_OP_READ 0x08000000 -#define E1000_MDIC_READY 0x10000000 -#define E1000_MDIC_INT_EN 0x20000000 -#define E1000_MDIC_ERROR 0x40000000 -#define E1000_MDIC_DEST 0x80000000 - -/* Thermal Sensor */ -#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ -#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ - -/* Energy Efficient Ethernet */ -#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ -#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ -#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ -#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ -#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 /* SerDes Control */ -#define E1000_GEN_CTL_READY 0x80000000 -#define E1000_GEN_CTL_ADDRESS_SHIFT 8 -#define E1000_GEN_POLL_TIMEOUT 640 - -#define E1000_VFTA_ENTRY_SHIFT 5 -#define E1000_VFTA_ENTRY_MASK 0x7F -#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F - -/* DMA Coalescing register fields */ -#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based - on DMA coal */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* LinkSec 
register fields */
+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT 16
+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT 16
+
+#define E1000_LSECTXCTRL_EN_MASK 0x00000003
+#define E1000_LSECTXCTRL_DISABLE 0x0
+#define E1000_LSECTXCTRL_AUTH 0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define E1000_LSECTXCTRL_AISCI 0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT 2
+#define E1000_LSECRXCTRL_DISABLE 0x0
+#define E1000_LSECRXCTRL_CHECK 0x1
+#define E1000_LSECRXCTRL_STRICT 0x2
+#define E1000_LSECRXCTRL_DROP 0x3
+#define E1000_LSECRXCTRL_PLSH 0x00000040
+#define E1000_LSECRXCTRL_RP 0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
@@ -835,4 +1672,58 @@
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
-#endif
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
+#define E1000_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define E1000_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 NBRHD Solicitation */
+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
/* VF Control */
+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define E1000_STATUS_LAN_ID_OFFSET 2
+#define E1000_VFTA_ENTRIES 128
+#endif /* _E1000_DEFINES_H_ */
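Note: with the mask and offset above, a port's LAN ID occupies bits 3:2 of the
status register. A minimal extraction sketch, assuming only the definitions
above and the kernel's u32 type; the helper name is illustrative:

static u32 example_lan_id(u32 status)
{
        /* yields 0..3, matching E1000_FUNC_0..E1000_FUNC_3 */
        return (status & E1000_STATUS_LAN_ID_MASK) >>
               E1000_STATUS_LAN_ID_OFFSET;
}

diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index f67cbd3fa307..800a13b27e08 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -28,54 +28,53 @@
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
-#include
-#include
-#include
-#include
-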
+#include "e1000_osdep.h" #include "e1000_regs.h" #include "e1000_defines.h" struct e1000_hw; -#define E1000_DEV_ID_82576 0x10C9 -#define E1000_DEV_ID_82576_FIBER 0x10E6 -#define E1000_DEV_ID_82576_SERDES 0x10E7 -#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 -#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 -#define E1000_DEV_ID_82576_NS 0x150A -#define E1000_DEV_ID_82576_NS_SERDES 0x1518 -#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D -#define E1000_DEV_ID_82575EB_COPPER 0x10A7 -#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 -#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 -#define E1000_DEV_ID_82580_COPPER 0x150E -#define E1000_DEV_ID_82580_FIBER 0x150F -#define E1000_DEV_ID_82580_SERDES 0x1510 -#define E1000_DEV_ID_82580_SGMII 0x1511 -#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 -#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 -#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 -#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A -#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C -#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 -#define E1000_DEV_ID_I350_COPPER 0x1521 -#define E1000_DEV_ID_I350_FIBER 0x1522 -#define E1000_DEV_ID_I350_SERDES 0x1523 -#define E1000_DEV_ID_I350_SGMII 0x1524 - -#define E1000_REVISION_2 2 -#define E1000_REVISION_4 4 - -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 -#define E1000_FUNC_2 2 -#define E1000_FUNC_3 3 - -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 enum e1000_mac_type { e1000_undefined = 0, @@ -89,7 +88,8 @@ enum e1000_mac_type { enum e1000_media_type { e1000_media_type_unknown = 0, e1000_media_type_copper = 1, - e1000_media_type_internal_serdes = 2, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, e1000_num_media_types }; @@ -117,6 +117,7 @@ enum e1000_phy_type { e1000_phy_igp_3, e1000_phy_ife, e1000_phy_82580, + e1000_phy_vf, }; enum e1000_bus_type { @@ -170,6 +171,172 @@ enum e1000_fc_mode { 
e1000_fc_default = 0xFF }; +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 
special; + } fields; + } upper; +}; + /* Statistics counters collected by the MAC */ struct e1000_hw_stats { u64 crcerrs; @@ -254,6 +421,7 @@ struct e1000_hw_stats { u64 b2ogprc; }; + struct e1000_phy_stats { u32 idle_errors; u32 receive_errors; @@ -278,7 +446,7 @@ struct e1000_host_command_header { u8 checksum; }; -#define E1000_HI_MAX_DATA_LENGTH 252 +#define E1000_HI_MAX_DATA_LENGTH 252 struct e1000_host_command_info { struct e1000_host_command_header command_header; u8 command_data[E1000_HI_MAX_DATA_LENGTH]; @@ -293,7 +461,7 @@ struct e1000_host_mng_command_header { u16 command_length; }; -#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 struct e1000_host_mng_command_info { struct e1000_host_mng_command_header command_header; u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; @@ -302,67 +470,135 @@ struct e1000_host_mng_command_info { #include "e1000_mac.h" #include "e1000_phy.h" #include "e1000_nvm.h" +#include "e1000_manage.h" #include "e1000_mbx.h" struct e1000_mac_operations { + /* Function pointers for the MAC. */ + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); s32 (*check_for_link)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *hw); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); s32 (*reset_hw)(struct e1000_hw *); s32 (*init_hw)(struct e1000_hw *); - bool (*check_mng_mode)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); s32 (*setup_physical_interface)(struct e1000_hw *); - void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8*, u32); s32 (*read_mac_addr)(struct e1000_hw *); - s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); -}; - + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*); + s32 (*mng_write_cmd_header)(struct e1000_hw *hw, + struct e1000_host_mng_command_header*); + s32 (*mng_enable_host_if)(struct e1000_hw *); + s32 (*wait_autoneg)(struct e1000_hw *); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* + * When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); s32 (*check_polarity)(struct e1000_hw *); s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw 
*); s32 (*force_speed_duplex)(struct e1000_hw *); s32 (*get_cfg_done)(struct e1000_hw *hw); s32 (*get_cable_length)(struct e1000_hw *); - s32 (*get_phy_info)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); void (*release)(struct e1000_hw *); s32 (*reset)(struct e1000_hw *); s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); }; struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); s32 (*read)(struct e1000_hw *, u16, u16, u16 *); void (*release)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + void (*reload)(struct e1000_hw *); s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); }; -struct e1000_info { - s32 (*get_invariants)(struct e1000_hw *); - struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; - struct e1000_nvm_operations *nvm_ops; +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; }; -extern const struct e1000_info e1000_82575_info; +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; struct e1000_mac_info { struct e1000_mac_operations ops; - - u8 addr[6]; - u8 perm_addr[6]; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; enum e1000_mac_type type; + u32 collision_delta; u32 ledctl_default; u32 ledctl_mode1; u32 ledctl_mode2; u32 mc_filter_type; + u32 tx_packet_delta; u32 txcw; + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; u16 mta_reg_count; u16 uta_reg_count; @@ -374,22 +610,21 @@ struct e1000_mac_info { u8 forced_speed_duplex; bool adaptive_ifs; + bool has_fwsm; bool arc_subsystem_valid; bool asf_firmware_present; bool autoneg; bool autoneg_failed; - bool disable_hw_init_bits; bool get_link_status; - bool ifs_params_forced; bool in_ifs_mode; - bool report_tx_early; + enum e1000_serdes_link_state serdes_link_state; bool serdes_has_link; bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; }; struct e1000_phy_info { struct e1000_phy_operations ops; - enum e1000_phy_type type; enum e1000_1000t_rx_status local_rx; @@ -442,20 +677,19 @@ struct e1000_bus_info { enum e1000_bus_speed speed; enum e1000_bus_width width; - u32 snoop; - u16 func; u16 pci_cmd_word; }; struct e1000_fc_info { - u32 high_water; /* Flow control high-water mark */ - u32 low_water; /* Flow control low-water mark */ - u16 pause_time; /* Flow control pause timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - enum e1000_fc_mode current_mode; /* Type of flow control */ - enum e1000_fc_mode requested_mode; + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control 
pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ }; struct e1000_mbx_operations { @@ -490,6 +724,13 @@ struct e1000_dev_spec_82575 { bool sgmii_active; bool global_device_reset; bool eee_disable; + bool module_plugged; + u32 mtu; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; }; struct e1000_hw { @@ -508,7 +749,8 @@ struct e1000_hw { struct e1000_host_mng_dhcp_cookie mng_cookie; union { - struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; } dev_spec; u16 device_id; @@ -519,11 +761,10 @@ struct e1000_hw { u8 revision_id; }; -extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); -#define hw_dbg(format, arg...) \ - netdev_dbg(igb_get_hw_dev(hw), format, ##arg) +#include "e1000_82575.h" /* These functions must be implemented by drivers */ -s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -#endif /* _E1000_HW_H_ */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index f57338afd71f..e54fd8f3374b 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -25,48 +25,157 @@ *******************************************************************************/ -#include -#include -#include -#include -#include +#include "e1000_api.h" -#include "e1000_mac.h" +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +static void e1000_config_collision_dist_generic(struct e1000_hw *hw); -#include "igb.h" +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + mac->ops.wait_autoneg = e1000_wait_autoneg_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic; + mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic; + mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
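+	/* (Every slot is pre-filled so shared code can call through the
+	 * ops table unconditionally; a device-specific init_params hook
+	 * later overrides the entries that matter for that MAC.)
+	 */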
+ /* VLAN, MC, etc. */ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d) +{ + DEBUGFUNC("e1000_null_link_info"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + return false; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a) +{ + DEBUGFUNC("e1000_null_update_mc"); + return; +} -static s32 igb_set_default_fc(struct e1000_hw *hw); -static s32 igb_set_fc_watermarks(struct e1000_hw *hw); +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + return; +} + +/** + * e1000_null_rar_set - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a) +{ + DEBUGFUNC("e1000_null_rar_set"); + return; +} /** - * igb_get_bus_info_pcie - Get PCIe bus information + * e1000_get_bus_info_pcie_generic - Get PCIe bus information * @hw: pointer to the HW structure * * Determines and stores the system bus information for a particular * network interface. The following bus information is determined and stored: * bus speed, bus width, type (PCIe), and PCIe function. 
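 * For example, a x4 link trained at 2.5GT/s ends up stored as
 * bus->speed == e1000_bus_speed_2500 and bus->width ==
 * e1000_bus_width_pcie_x4 (the width enum spelling is assumed here).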
**/ -s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) { + struct e1000_mac_info *mac = &hw->mac; struct e1000_bus_info *bus = &hw->bus; s32 ret_val; - u32 reg; u16 pcie_link_status; + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + bus->type = e1000_bus_type_pci_express; - ret_val = igb_read_pcie_cap_reg(hw, - PCI_EXP_LNKSTA, - &pcie_link_status); + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); if (ret_val) { bus->width = e1000_bus_width_unknown; bus->speed = e1000_bus_speed_unknown; } else { - switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { - case PCI_EXP_LNKSTA_CLS_2_5GB: + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: bus->speed = e1000_bus_speed_2500; break; - case PCI_EXP_LNKSTA_CLS_5_0GB: + case PCIE_LINK_SPEED_5000: bus->speed = e1000_bus_speed_5000; break; default: @@ -75,75 +184,69 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw) } bus->width = (enum e1000_bus_width)((pcie_link_status & - PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT); + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); } - reg = rd32(E1000_STATUS); - bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + mac->ops.set_lan_id(hw); - return 0; + return E1000_SUCCESS; } /** - * igb_clear_vfta - Clear VLAN filter table + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * * @hw: pointer to the HW structure * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. **/ -void igb_clear_vfta(struct e1000_hw *hw) +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) { - u32 offset; + struct e1000_bus_info *bus = &hw->bus; + u32 reg; - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - array_wr32(E1000_VFTA, offset, 0); - wrfl(); - } + /* + * The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; } /** - * igb_write_vfta - Write value to VLAN filter table + * e1000_set_lan_id_single_port - Set LAN id for a single port device * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. + * Sets the LAN function id to zero for a single port device. **/ -static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +void e1000_set_lan_id_single_port(struct e1000_hw *hw) { - array_wr32(E1000_VFTA, offset, value); - wrfl(); -} + struct e1000_bus_info *bus = &hw->bus; -/* Due to a hw errata, if the host tries to configure the VFTA register - * while performing queries from the BMC or DMA, then the VFTA in some - * cases won't be written. - */ + bus->func = 0; +} /** - * igb_clear_vfta_i350 - Clear VLAN filter table + * e1000_clear_vfta_generic - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by * setting all the values to 0. 
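 * (Each entry write below is followed by a register flush so the
 * clears are posted before the next access.)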
**/ -void igb_clear_vfta_i350(struct e1000_hw *hw) +void e1000_clear_vfta_generic(struct e1000_hw *hw) { u32 offset; - int i; - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, 0); + DEBUGFUNC("e1000_clear_vfta_generic"); - wrfl(); + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); } } /** - * igb_write_vfta_i350 - Write value to VLAN filter table + * e1000_write_vfta_generic - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table @@ -151,114 +254,85 @@ void igb_clear_vfta_i350(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) { - int i; - - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, value); + DEBUGFUNC("e1000_write_vfta_generic"); - wrfl(); + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); } /** - * igb_init_rx_addrs - Initialize receive address's + * e1000_init_rx_addrs_generic - Initialize receive addresses * @hw: pointer to the HW structure * @rar_count: receive address registers * - * Setups the receive address registers by setting the base receive address + * Set up the receive address registers by setting the base receive address * register to the device's MAC address and clearing all the other receive * address registers to 0. **/ -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) { u32 i; - u8 mac_addr[ETH_ALEN] = {0}; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); /* Setup the receive address */ - hw_dbg("Programming MAC Address into RAR[0]\n"); + DEBUGOUT("Programming MAC Address into RAR[0]\n"); hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ - hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); for (i = 1; i < rar_count; i++) hw->mac.ops.rar_set(hw, mac_addr, i); } /** - * igb_vfta_set - enable or disable vlan in VLAN filter table - * @hw: pointer to the HW structure - * @vid: VLAN id to add or remove - * @add: if true add filter, if false remove - * - * Sets or clears a bit in the VLAN filter table array based on VLAN id - * and if we are adding or removing the filter - **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) -{ - u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; - u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - u32 vfta; - struct igb_adapter *adapter = hw->back; - s32 ret_val = 0; - - vfta = adapter->shadow_vfta[index]; - - /* bit was set/cleared before we started */ - if ((!!(vfta & mask)) == add) { - ret_val = -E1000_ERR_CONFIG; - } else { - if (add) - vfta |= mask; - else - vfta &= ~mask; - } - if (hw->mac.type == e1000_i350) - igb_write_vfta_i350(hw, index, vfta); - else - igb_write_vfta(hw, index, vfta); - adapter->shadow_vfta[index] = vfta; - - return ret_val; -} - -/** - * igb_check_alt_mac_addr - Check for alternate MAC addr + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr * @hw: pointer to the HW structure * * Checks the nvm for an alternate
MAC address. An alternate MAC address * can be setup by pre-boot software and must be treated like a permanent - * address and must override the actual permanent MAC address. If an - * alternate MAC address is fopund it is saved in the hw struct and - * prgrammed into RAR0 and the cuntion returns success, otherwise the - * function returns an error. + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. **/ -s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) { u32 i; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 offset, nvm_alt_mac_addr_offset, nvm_data; - u8 alt_mac_addr[ETH_ALEN]; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + /* * Alternate MAC address is handled by the option ROM for 82580 * and newer. SW support not required. */ if (hw->mac.type >= e1000_82580) - goto out; + return E1000_SUCCESS; ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, - &nvm_alt_mac_addr_offset); + &nvm_alt_mac_addr_offset); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } if ((nvm_alt_mac_addr_offset == 0xFFFF) || (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ - goto out; + return E1000_SUCCESS; if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; @@ -267,12 +341,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) if (hw->bus.func == E1000_FUNC_3) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; - for (i = 0; i < ETH_ALEN; i += 2) { + for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = nvm_alt_mac_addr_offset + (i >> 1); ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } alt_mac_addr[i] = (u8)(nvm_data & 0xFF); @@ -280,9 +354,9 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) } /* if multicast bit is set, the alternate address will not be used */ - if (is_multicast_ether_addr(alt_mac_addr)) { - hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); - goto out; + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return E1000_SUCCESS; } /* @@ -292,12 +366,11 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) */ hw->mac.ops.rar_set(hw, alt_mac_addr, 0); -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_rar_set - Set receive address register + * e1000_rar_set_generic - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register @@ -305,17 +378,18 @@ out: * Sets the receive address array register at index to the address passed * in by addr. 
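 * The six address bytes are packed little-endian, as the note in the
 * function body explains: addr[0..3] form RAL and addr[4..5] the low
 * half of RAH.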
**/ -void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; + DEBUGFUNC("e1000_rar_set_generic"); + /* * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | - ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); @@ -328,61 +402,27 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) * a single burst write, which will malfunction on some parts. * The flushes avoid this. */ - wr32(E1000_RAL(index), rar_low); - wrfl(); - wr32(E1000_RAH(index), rar_high); - wrfl(); + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); } /** - * igb_mta_set - Set multicast filter table address - * @hw: pointer to the HW structure - * @hash_value: determines the MTA register and bit to set - * - * The multicast table address is a register array of 32-bit registers. - * The hash_value is used to determine what register the bit is in, the - * current value is read, the new bit is OR'd in and the new value is - * written back into the register. - **/ -void igb_mta_set(struct e1000_hw *hw, u32 hash_value) -{ - u32 hash_bit, hash_reg, mta; - - /* - * The MTA is a register array of 32-bit registers. It is - * treated like an array of (32*mta_reg_count) bits. We want to - * set bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The (hw->mac.mta_reg_count - 1) serves as a - * mask to bits 31:5 of the hash value which gives us the - * register we're modifying. The hash bit within that register - * is determined by the lower 5 bits of the hash value. - */ - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - mta = array_rd32(E1000_MTA, hash_reg); - - mta |= (1 << hash_bit); - - array_wr32(E1000_MTA, hash_reg, mta); - wrfl(); -} - -/** - * igb_hash_mc_addr - Generate a multicast hash value + * e1000_hash_mc_addr_generic - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * igb_mta_set() + * the multicast filter table array address and new table value. **/ -static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; u8 bit_shift = 0; + DEBUGFUNC("e1000_hash_mc_addr_generic"); + /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; @@ -412,7 +452,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) * values resulting from each mc_filter_type... 
* [0] [1] [2] [3] [4] [5] * 01 AA 00 12 34 56 - * LSB MSB + * LSB MSB * * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 @@ -441,7 +481,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) } /** - * igb_update_mc_addr_list - Update Multicast addresses + * e1000_update_mc_addr_list_generic - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program @@ -449,115 +489,119 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) * Updates entire Multicast Table Array. * The caller must have a packed mc_addr_list of multicast addresses. **/ -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count) +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) { u32 hash_value, hash_bit, hash_reg; int i; + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + /* clear mta_shadow */ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); /* update mta_shadow from mc_addr_list */ for (i = 0; (u32) i < mc_addr_count; i++) { - hash_value = igb_hash_mc_addr(hw, mc_addr_list); + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); - mc_addr_list += (ETH_ALEN); + mc_addr_list += (ETH_ADDR_LEN); } /* replace the entire MTA table */ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) - array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); - wrfl(); + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); } /** - * igb_clear_hw_cntrs_base - Clear base hardware counters + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters * @hw: pointer to the HW structure * * Clears the base hardware counters by reading the counter registers. 
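 * (These statistics registers are clear-on-read, so reading and
 * discarding the values is all that is needed to zero them.)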
**/ -void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) { - rd32(E1000_CRCERRS); - rd32(E1000_SYMERRS); - rd32(E1000_MPC); - rd32(E1000_SCC); - rd32(E1000_ECOL); - rd32(E1000_MCC); - rd32(E1000_LATECOL); - rd32(E1000_COLC); - rd32(E1000_DC); - rd32(E1000_SEC); - rd32(E1000_RLEC); - rd32(E1000_XONRXC); - rd32(E1000_XONTXC); - rd32(E1000_XOFFRXC); - rd32(E1000_XOFFTXC); - rd32(E1000_FCRUC); - rd32(E1000_GPRC); - rd32(E1000_BPRC); - rd32(E1000_MPRC); - rd32(E1000_GPTC); - rd32(E1000_GORCL); - rd32(E1000_GORCH); - rd32(E1000_GOTCL); - rd32(E1000_GOTCH); - rd32(E1000_RNBC); - rd32(E1000_RUC); - rd32(E1000_RFC); - rd32(E1000_ROC); - rd32(E1000_RJC); - rd32(E1000_TORL); - rd32(E1000_TORH); - rd32(E1000_TOTL); - rd32(E1000_TOTH); - rd32(E1000_TPR); - rd32(E1000_TPT); - rd32(E1000_MPTC); - rd32(E1000_BPTC); + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); } /** - * igb_check_for_copper_link - Check for link (Copper) + * e1000_check_for_copper_link_generic - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ -s32 igb_check_for_copper_link(struct e1000_hw *hw) +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; + DEBUGFUNC("e1000_check_for_copper_link"); + /* * We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ - if (!mac->get_link_status) { - ret_val = 0; - goto out; - } + if (!mac->get_link_status) + return E1000_SUCCESS; /* * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. 
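 * (A single read attempt with no wait between attempts - hence the
 * 1, 0 arguments passed to the phy_has_link helper below.)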
*/ - ret_val = igb_phy_has_link(hw, 1, 0, &link); + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) - goto out; /* No link detected */ + return E1000_SUCCESS; /* No link detected */ mac->get_link_status = false; @@ -565,23 +609,21 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) * Check if there was DownShift, must be checked * immediately after link-up */ - igb_check_downshift(hw); + e1000_check_downshift_generic(hw); /* * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } + if (!mac->autoneg) + return -E1000_ERR_CONFIG; /* * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ - igb_config_collision_dist(hw); + mac->ops.config_collision_dist(hw); /* * Configure Flow Control now that Auto-Neg has completed. @@ -589,16 +631,234 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) * settings because we may have had to re-autoneg with a * different link partner. */ - ret_val = igb_config_fc_after_link_up(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) - hw_dbg("Error configuring flow control\n"); + DEBUGOUT("Error configuring flow control\n"); -out: return ret_val; } /** - * igb_setup_link - Setup flow control and link settings + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. 
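A stale latched value could misreport sync, so wait briefly and reread RXCW below before deciding.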
*/ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow @@ -607,40 +867,42 @@ out: * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ -s32 igb_setup_link(struct e1000_hw *hw) +s32 e1000_setup_link_generic(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_generic"); /* * In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ - if (igb_check_reset_block(hw)) - goto out; + if (hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; /* * If requested flow control is set to default, set flow control * based on the EEPROM flow control settings. */ if (hw->fc.requested_mode == e1000_fc_default) { - ret_val = igb_set_default_fc(hw); + ret_val = e1000_set_default_fc_generic(hw); if (ret_val) - goto out; + return ret_val; } /* - * We want to save off the original Flow Control configuration just - * in case we get disconnected and then reconnected into a different - * hub or switch with different Flow Control capabilities. 
+ * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; - hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); /* Call the necessary media_type subroutine to configure the link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) - goto out; + return ret_val; /* * Initialize the flow control address, type, and PAUSE timer @@ -648,53 +910,229 @@ s32 igb_setup_link(struct e1000_hw *hw) * control is disabled, because it does not hurt anything to * initialize these registers. */ - hw_dbg("Initializing the Flow Control address, type and timer regs\n"); - wr32(E1000_FCT, FLOW_CONTROL_TYPE); - wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); - wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); - wr32(E1000_FCTTV, hw->fc.pause_time); + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); - ret_val = igb_set_fc_watermarks(hw); + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * based on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* + * Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* + * If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* + * AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. 
+ */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } -out: return ret_val; } /** - * igb_config_collision_dist - Configure collision distance + * e1000_config_collision_dist_generic - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used - * during link setup. Currently no func pointer exists and all - * implementations are handled in the generic version of this function. + * during link setup. **/ -void igb_config_collision_dist(struct e1000_hw *hw) +static void e1000_config_collision_dist_generic(struct e1000_hw *hw) { u32 tctl; - tctl = rd32(E1000_TCTL); + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_COLD; tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; - wr32(E1000_TCTL, tctl); - wrfl(); + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); } /** - * igb_set_fc_watermarks - Set flow control high/low watermarks + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks * @hw: pointer to the HW structure * * Sets the flow control high/low threshold (watermark) registers. If * flow control XON frame transmission is enabled, then set XON frame - * tansmission as well. + * transmission as well. **/ -static s32 igb_set_fc_watermarks(struct e1000_hw *hw) +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) { - s32 ret_val = 0; u32 fcrtl = 0, fcrth = 0; + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + /* * Set the flow control receive threshold registers. Normally, * these registers will be set to a default threshold that may be @@ -714,54 +1152,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw) fcrth = hw->fc.high_water; } - wr32(E1000_FCRTL, fcrtl); - wr32(E1000_FCRTH, fcrth); + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); - return ret_val; + return E1000_SUCCESS; } /** - * igb_set_default_fc - Set flow control default values - * @hw: pointer to the HW structure - * - * Read the EEPROM for the default values for flow control and store the - * values. - **/ -static s32 igb_set_default_fc(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 nvm_data; - - /* - * Read and store word 0x0F of the EEPROM. This word contains bits - * that determine the hardware's default PAUSE (flow control) mode, - * a bit that determines whether the HW defaults to enabling or - * disabling auto-negotiation, and the direction of the - * SW defined pins. If there is no SW over-ride of the flow - * control setting, then the variable hw->fc will - * be initialized based on a value in the EEPROM. - */ - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); - - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) - hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) - hw->fc.requested_mode = e1000_fc_tx_pause; - else - hw->fc.requested_mode = e1000_fc_full; - -out: - return ret_val; -} - -/** - * igb_force_mac_fc - Force the MAC's flow control settings + * e1000_force_mac_fc_generic - Force the MAC's flow control settings * @hw: pointer to the HW structure * * Force the MAC's flow control settings. 
Sets the TFCE and RFCE bits in the @@ -770,12 +1168,13 @@ out: * autonegotiation is managed by the PHY rather than the MAC. Software must * also configure these bits when link is forced on a fiber connection. **/ -s32 igb_force_mac_fc(struct e1000_hw *hw) +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) { u32 ctrl; - s32 ret_val = 0; - ctrl = rd32(E1000_CTRL); + DEBUGFUNC("e1000_force_mac_fc_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); /* * Because we didn't get link via the internal auto-negotiation @@ -792,10 +1191,10 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) * frames but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * frames but we do not receive pause frames). - * 3: Both Rx and TX flow control (symmetric) is enabled. + * 3: Both Rx and Tx flow control (symmetric) is enabled. * other: No other values should be possible at this point. */ - hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); switch (hw->fc.current_mode) { case e1000_fc_none: @@ -813,19 +1212,17 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); break; default: - hw_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; } - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_config_fc_after_link_up - Configures flow control after link + * e1000_config_fc_after_link_up_generic - Configures flow control after link * @hw: pointer to the HW structure * * Checks the status of auto-negotiation after link up to ensure that the @@ -834,29 +1231,32 @@ out: * and did not fail, then we configure flow control based on our link * partner. **/ -s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + /* * Check for the case where we have fiber media and auto-neg failed * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ if (mac->autoneg_failed) { - if (hw->phy.media_type == e1000_media_type_internal_serdes) - ret_val = igb_force_mac_fc(hw); + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); } else { if (hw->phy.media_type == e1000_media_type_copper) - ret_val = igb_force_mac_fc(hw); + ret_val = e1000_force_mac_fc_generic(hw); } if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); - goto out; + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; } /* @@ -871,19 +1271,16 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) * has completed. We read this twice because this reg has * some "sticky" (latched) bits. 
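The switch above reduces the four flow-control modes to two MAC control bits. A compact standalone restatement of that mapping (the enum and bit values are illustrative stand-ins for the driver's definitions):

#include <stdint.h>

#define CTRL_RFCE 0x08000000 /* Rx flow-control enable, illustrative value */
#define CTRL_TFCE 0x10000000 /* Tx flow-control enable, illustrative value */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

/* Returns -1 for an invalid mode, mirroring -E1000_ERR_CONFIG above. */
static int apply_fc_mode(uint32_t *ctrl, enum fc_mode mode)
{
	*ctrl &= ~(CTRL_RFCE | CTRL_TFCE);
	switch (mode) {
	case fc_none:
		break;
	case fc_rx_pause:
		*ctrl |= CTRL_RFCE;
		break;
	case fc_tx_pause:
		*ctrl |= CTRL_TFCE;
		break;
	case fc_full:
		*ctrl |= (CTRL_RFCE | CTRL_TFCE);
		break;
	default:
		return -1;
	}
	return 0;
}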
*/ - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, - &mii_status_reg); + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) - goto out; - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, - &mii_status_reg); + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) - goto out; + return ret_val; if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - hw_dbg("Copper PHY and Auto Neg " - "has not completed.\n"); - goto out; + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; } /* @@ -894,13 +1291,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) * flow control was negotiated. */ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, - &mii_nway_adv_reg); + &mii_nway_adv_reg); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, - &mii_nway_lp_ability_reg); + &mii_nway_lp_ability_reg); if (ret_val) - goto out; + return ret_val; /* * Two bits in the Auto Negotiation Advertisement Register @@ -939,19 +1336,18 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { /* - * Now we need to check if the user selected RX ONLY + * Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX + * FULL flow control because we could not advertise Rx * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. + * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - hw_dbg("Flow Control = FULL.\r\n"); + DEBUGOUT("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); } } /* @@ -967,7 +1363,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); } /* * For transmitting PAUSE frames ONLY. @@ -982,37 +1378,14 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); - } - /* - * Per the IEEE spec, at this point flow control should be - * disabled. However, we want to consider that we could - * be connected to a legacy switch that doesn't advertise - * desired flow control, but can be forced on the link - * partner. So if we advertised no flow control, that is - * what we will resolve to. If we advertised some kind of - * receive capability (Rx Pause Only or Full Flow Control) - * and the link partner advertised none, we will configure - * ourselves to enable Rx Flow Control only. We can do - * this safely for two reasons: If the link partner really - * didn't want flow control enabled, and we enable Rx, no - * harm done since we won't be receiving any PAUSE frames - * anyway. If the intent on the link partner was to have - * flow control enabled, then by us enabling RX only, we - * can at least receive pause frames and process them. 
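The if/else cascade above encodes the IEEE 802.3 pause-resolution table from the local PAUSE/ASM_DIR advertisement and the link partner's ability bits. The same resolution as one standalone function (bit values are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define ADV_PAUSE   0x0400 /* NWAY_AR_PAUSE-style bit, illustrative */
#define ADV_ASM_DIR 0x0800 /* NWAY_AR_ASM_DIR-style bit, illustrative */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static enum fc_mode resolve_pause(uint16_t local, uint16_t partner,
				  bool requested_full)
{
	/* Both sides advertise PAUSE: full if we asked for full,
	 * otherwise we only wanted to receive pause frames. */
	if ((local & ADV_PAUSE) && (partner & ADV_PAUSE))
		return requested_full ? fc_full : fc_rx_pause;
	/* We advertised asymmetric only; partner advertised both. */
	if (!(local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
	    (partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
		return fc_tx_pause;
	/* We advertised both; partner advertised asymmetric only. */
	if ((local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
	    !(partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
		return fc_rx_pause;
	return fc_none; /* per the IEEE spec, as in the rewritten else branch */
}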
- * This is a good idea because in most cases, since we are - * predominantly a server NIC, more times than not we will - * be asked to delay transmission of packets than asking - * our link partner to pause transmission of frames. - */ - else if ((hw->fc.requested_mode == e1000_fc_none || - hw->fc.requested_mode == e1000_fc_tx_pause) || - hw->fc.strict_ieee) { - hw->fc.current_mode = e1000_fc_none; - hw_dbg("Flow Control = NONE.\r\n"); + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); } else { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); } /* @@ -1020,10 +1393,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) * negotiated to HALF DUPLEX, flow control should not be * enabled per IEEE 802.3 spec. */ - ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { - hw_dbg("Error getting link speed and duplex\n"); - goto out; + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; } if (duplex == HALF_DUPLEX) @@ -1033,19 +1406,18 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) * Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. */ - ret_val = igb_force_mac_fc(hw); + ret_val = e1000_force_mac_fc_generic(hw); if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); - goto out; + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; } } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex @@ -1053,172 +1425,185 @@ out: * Read the status register for the current speed/duplex and store the current * speed and duplex for copper connections. **/ -s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, - u16 *duplex) +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) { u32 status; - status = rd32(E1000_STATUS); + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); if (status & E1000_STATUS_SPEED_1000) { *speed = SPEED_1000; - hw_dbg("1000 Mbs, "); + DEBUGOUT("1000 Mbs, "); } else if (status & E1000_STATUS_SPEED_100) { *speed = SPEED_100; - hw_dbg("100 Mbs, "); + DEBUGOUT("100 Mbs, "); } else { *speed = SPEED_10; - hw_dbg("10 Mbs, "); + DEBUGOUT("10 Mbs, "); } if (status & E1000_STATUS_FD) { *duplex = FULL_DUPLEX; - hw_dbg("Full Duplex\n"); + DEBUGOUT("Full Duplex\n"); } else { *duplex = HALF_DUPLEX; - hw_dbg("Half Duplex\n"); + DEBUGOUT("Half Duplex\n"); } - return 0; + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. 
+ **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; } /** - * igb_get_hw_semaphore - Acquire hardware semaphore + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM **/ -s32 igb_get_hw_semaphore(struct e1000_hw *hw) +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) { u32 swsm; - s32 ret_val = 0; s32 timeout = hw->nvm.word_size + 1; s32 i = 0; + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + /* Get the SW semaphore */ while (i < timeout) { - swsm = rd32(E1000_SWSM); + swsm = E1000_READ_REG(hw, E1000_SWSM); if (!(swsm & E1000_SWSM_SMBI)) break; - udelay(50); + usec_delay(50); i++; } if (i == timeout) { - hw_dbg("Driver can't access device - SMBI bit is set.\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; } /* Get the FW semaphore. */ for (i = 0; i < timeout; i++) { - swsm = rd32(E1000_SWSM); - wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); /* Semaphore acquired if bit latched */ - if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) break; - udelay(50); + usec_delay(50); } if (i == timeout) { /* Release semaphores */ - igb_put_hw_semaphore(hw); - hw_dbg("Driver can't access the NVM\n"); - ret_val = -E1000_ERR_NVM; - goto out; + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_put_hw_semaphore - Release hardware semaphore + * e1000_put_hw_semaphore_generic - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM **/ -void igb_put_hw_semaphore(struct e1000_hw *hw) +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) { u32 swsm; - swsm = rd32(E1000_SWSM); + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); - wr32(E1000_SWSM, swsm); + E1000_WRITE_REG(hw, E1000_SWSM, swsm); } /** - * igb_get_auto_rd_done - Check for auto read completion + * e1000_get_auto_rd_done_generic - Check for auto read completion * @hw: pointer to the HW structure * * Check EEPROM for Auto Read done bit. **/ -s32 igb_get_auto_rd_done(struct e1000_hw *hw) +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) { s32 i = 0; - s32 ret_val = 0; + DEBUGFUNC("e1000_get_auto_rd_done_generic"); while (i < AUTO_READ_DONE_TIMEOUT) { - if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) break; - msleep(1); + msec_delay(1); i++; } if (i == AUTO_READ_DONE_TIMEOUT) { - hw_dbg("Auto read by HW from NVM has not completed.\n"); - ret_val = -E1000_ERR_RESET; - goto out; + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_valid_led_default - Verify a valid default LED config + * e1000_valid_led_default_generic - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * * Read the EEPROM for the current default LED configuration. 
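e1000_get_hw_semaphore_generic() above is a two-stage handshake: wait for the SW bit (SMBI) to clear, then set the SW/FW bit (SWESMBI) and read it back, since it only latches for the true owner. A standalone sketch of that latch-and-verify idiom (register access is stubbed; bit values are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define SWSM_SMBI    0x01 /* SW semaphore bit, illustrative value */
#define SWSM_SWESMBI 0x02 /* SW/FW semaphore bit, illustrative value */

extern uint32_t swsm_read(void);
extern void swsm_write(uint32_t val);
extern void delay_us(unsigned int us);

static bool acquire_hw_semaphore(int timeout)
{
	int i;

	/* Stage 1: wait for firmware/another agent to release SMBI. */
	for (i = 0; i < timeout && (swsm_read() & SWSM_SMBI); i++)
		delay_us(50);
	if (i == timeout)
		return false;

	/* Stage 2: set SWESMBI and read it back; it only latches if
	 * we actually own the semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm_write(swsm_read() | SWSM_SWESMBI);
		if (swsm_read() & SWSM_SWESMBI)
			return true;
		delay_us(50);
	}
	return false;
}

On a stage-2 timeout the driver additionally releases both bits before erroring out, as the hunk above shows.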
If the * LED configuration is not valid, set to a valid LED configuration. **/ -static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) { s32 ret_val; + DEBUGFUNC("e1000_valid_led_default_generic"); + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } - if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { - switch(hw->phy.media_type) { - case e1000_media_type_internal_serdes: - *data = ID_LED_DEFAULT_82575_SERDES; - break; - case e1000_media_type_copper: - default: - *data = ID_LED_DEFAULT; - break; - } - } -out: - return ret_val; + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return E1000_SUCCESS; } /** - * igb_id_led_init - + * e1000_id_led_init_generic - * @hw: pointer to the HW structure * **/ -s32 igb_id_led_init(struct e1000_hw *hw) +s32 e1000_id_led_init_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; @@ -1228,11 +1613,13 @@ s32 igb_id_led_init(struct e1000_hw *hw) u16 data, i, temp; const u16 led_mask = 0x0F; - ret_val = igb_valid_led_default(hw, &data); + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) - goto out; + return ret_val; - mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; mac->ledctl_mode2 = mac->ledctl_default; @@ -1274,135 +1661,298 @@ s32 igb_id_led_init(struct e1000_hw *hw) } } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_cleanup_led - Set LED config to default operation + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->mac.ops.setup_led != e1000_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_generic - Set LED config to default operation * @hw: pointer to the HW structure * * Remove the current LED configuration and set the LED configuration * to the default value, saved from the EEPROM. **/ -s32 igb_cleanup_led(struct e1000_hw *hw) +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) { - wr32(E1000_LEDCTL, hw->mac.ledctl_default); - return 0; + DEBUGFUNC("e1000_cleanup_led_generic"); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; } /** - * igb_blink_led - Blink LED + * e1000_blink_led_generic - Blink LED * @hw: pointer to the HW structure * - * Blink the led's which are set to be on. + * Blink the LEDs which are set to be on. 
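e1000_blink_led_generic(), whose body appears in the hunk that follows, treats LEDCTL as four one-byte LED mode fields and ORs a blink bit into each byte whose mode is "on". A standalone sketch of that per-byte scan (mode and bit values are illustrative):

#include <stdint.h>

#define LED_MODE_ON 0x0E       /* "LED on" mode value, illustrative */
#define LED0_BLINK  0x00000080 /* blink bit within LED0's byte, illustrative */

static uint32_t add_blink_bits(uint32_t ledctl_mode2)
{
	uint32_t ledctl = ledctl_mode2;
	unsigned int i;

	/* LEDCTL holds one mode byte per LED; set the blink bit in
	 * each byte whose mode is "on". */
	for (i = 0; i < 4; i++)
		if (((ledctl_mode2 >> (i * 8)) & 0xFF) == LED_MODE_ON)
			ledctl |= LED0_BLINK << (i * 8);

	return ledctl;
}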
**/ -s32 igb_blink_led(struct e1000_hw *hw) +s32 e1000_blink_led_generic(struct e1000_hw *hw) { u32 ledctl_blink = 0; u32 i; - /* - * set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 - */ - ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); + DEBUGFUNC("e1000_blink_led_generic"); - wr32(E1000_LEDCTL, ledctl_blink); + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); - return 0; + return E1000_SUCCESS; } /** - * igb_led_off - Turn LED off + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_generic - Turn LED off * @hw: pointer to the HW structure * * Turn LED off. **/ -s32 igb_led_off(struct e1000_hw *hw) +s32 e1000_led_off_generic(struct e1000_hw *hw) { + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; case e1000_media_type_copper: - wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); break; default: break; } - return 0; + return E1000_SUCCESS; +} + +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } } /** - * igb_disable_pcie_master - Disables PCI-express master access + * e1000_disable_pcie_master_generic - Disables PCI-express master access * @hw: pointer to the HW structure * - * Returns 0 (0) if successful, else returns -10 - * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued + * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused * the master requests to be disabled. * * Disables PCI-Express master access and verifies there are no pending * requests. 
**/ -s32 igb_disable_pcie_master(struct e1000_hw *hw) +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) { u32 ctrl; s32 timeout = MASTER_DISABLE_TIMEOUT; - s32 ret_val = 0; - if (hw->bus.type != e1000_bus_type_pci_express) - goto out; + DEBUGFUNC("e1000_disable_pcie_master_generic"); - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); while (timeout) { - if (!(rd32(E1000_STATUS) & + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; - udelay(100); + usec_delay(100); timeout--; } if (!timeout) { - hw_dbg("Master requests are pending.\n"); - ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; - goto out; + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; } -out: - return ret_val; + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); } /** - * igb_validate_mdi_setting - Verify MDI/MDIx settings + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing * @hw: pointer to the HW structure * - * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +} + +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation that MDI/MDIx is correctly * set, which is forced to MDI mode only. 
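e1000_update_adaptive_generic() above is a small throttle state machine: when weighted collisions exceed transmissions, grow the inter-frame spacing from a floor toward a ceiling in fixed steps; when traffic recovers, snap back to zero. A standalone restatement (field names and the register write are stand-ins for the driver's):

#include <stdbool.h>
#include <stdint.h>

struct ifs_state {
	bool active;              /* mirrors mac->in_ifs_mode */
	uint32_t current;         /* current IFS value programmed into the NIC */
	uint32_t min, max, step;
	uint32_t ratio;           /* collision-to-transmit sensitivity */
};

extern void write_ait(uint32_t val); /* stub for the E1000_AIT write */

static void update_ifs(struct ifs_state *s, uint32_t collisions,
		       uint32_t tx_packets, uint32_t min_xmits)
{
	if (collisions * s->ratio > tx_packets) {
		if (tx_packets > min_xmits) {
			s->active = true;
			if (s->current < s->max) {
				s->current = s->current ? s->current + s->step
							: s->min;
				write_ait(s->current);
			}
		}
	} else if (s->active && tx_packets <= min_xmits) {
		s->current = 0;
		s->active = false;
		write_ait(0);
	}
}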
**/ -s32 igb_validate_mdi_setting(struct e1000_hw *hw) +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) { - s32 ret_val = 0; + DEBUGFUNC("e1000_validate_mdi_setting_generic"); if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { - hw_dbg("Invalid MDI setting detected\n"); + DEBUGOUT("Invalid MDI setting detected\n"); hw->phy.mdix = 1; - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register * @hw: pointer to the HW structure * @reg: 32bit register offset such as E1000_SCTL * @offset: register offset to write to @@ -1412,72 +1962,28 @@ out: * and they all have the format address << 8 | data and bit 31 is polled for * completion. **/ -s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data) +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) { u32 i, regvalue = 0; - s32 ret_val = 0; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); /* Set up the address and data */ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); - wr32(reg, regvalue); + E1000_WRITE_REG(hw, reg, regvalue); /* Poll the ready bit to see if the MDI read completed */ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { - udelay(5); - regvalue = rd32(reg); + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); if (regvalue & E1000_GEN_CTL_READY) break; } if (!(regvalue & E1000_GEN_CTL_READY)) { - hw_dbg("Reg %08x did not indicate ready\n", reg); - ret_val = -E1000_ERR_PHY; - goto out; + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + return -E1000_ERR_PHY; } -out: - return ret_val; -} - -/** - * igb_enable_mng_pass_thru - Enable processing of ARP's - * @hw: pointer to the HW structure - * - * Verifies the hardware needs to leave interface enabled so that frames can - * be directed to and from the management interface. 
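e1000_write_8bit_ctrl_reg_generic() above packs an address and a data byte into a single register word, then polls bit 31 for completion. The pack-and-poll shape, standalone (timeout and the ready-bit value are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define GEN_CTL_ADDRESS_SHIFT 8          /* address field offset, per the code above */
#define GEN_CTL_READY         0x80000000 /* completion bit 31, illustrative value */
#define GEN_POLL_TIMEOUT      640        /* iteration count, illustrative */

extern uint32_t ctrl_reg_read(void);
extern void ctrl_reg_write(uint32_t val);
extern void delay_us(unsigned int us);

static bool write_8bit_ctrl(uint32_t offset, uint8_t data)
{
	uint32_t i;

	/* Format is (address << 8) | data; bit 31 signals completion. */
	ctrl_reg_write((uint32_t)data | (offset << GEN_CTL_ADDRESS_SHIFT));

	for (i = 0; i < GEN_POLL_TIMEOUT; i++) {
		delay_us(5);
		if (ctrl_reg_read() & GEN_CTL_READY)
			return true;
	}
	return false;
}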
- **/ -bool igb_enable_mng_pass_thru(struct e1000_hw *hw) -{ - u32 manc; - u32 fwsm, factps; - bool ret_val = false; - - if (!hw->mac.asf_firmware_present) - goto out; - - manc = rd32(E1000_MANC); - - if (!(manc & E1000_MANC_RCV_TCO_EN)) - goto out; - - if (hw->mac.arc_subsystem_valid) { - fwsm = rd32(E1000_FWSM); - factps = rd32(E1000_FACTPS); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = true; - goto out; - } - } else { - if ((manc & E1000_MANC_SMBUS_EN) && - !(manc & E1000_MANC_ASF_EN)) { - ret_val = true; - goto out; - } - } - -out: - return ret_val; + return E1000_SUCCESS; } diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index cbddc4e51e30..36bba5f1ef03 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h @@ -28,64 +28,58 @@ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ -#include "e1000_hw.h" - -#include "e1000_phy.h" -#include "e1000_nvm.h" -#include "e1000_defines.h" - /* * Functions that should not be called directly from drivers but can be used * by other files in this 'shared code' */ -s32 igb_blink_led(struct e1000_hw *hw); -s32 igb_check_for_copper_link(struct e1000_hw *hw); -s32 igb_cleanup_led(struct e1000_hw *hw); -s32 igb_config_fc_after_link_up(struct e1000_hw *hw); -s32 igb_disable_pcie_master(struct e1000_hw *hw); -s32 igb_force_mac_fc(struct e1000_hw *hw); -s32 igb_get_auto_rd_done(struct e1000_hw *hw); -s32 igb_get_bus_info_pcie(struct e1000_hw *hw); -s32 igb_get_hw_semaphore(struct e1000_hw *hw); -s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, - u16 *duplex); -s32 igb_id_led_init(struct e1000_hw *hw); -s32 igb_led_off(struct e1000_hw *hw); -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count); -s32 igb_setup_link(struct e1000_hw *hw); -s32 igb_validate_mdi_setting(struct e1000_hw *hw); -s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data); - -void igb_clear_hw_cntrs_base(struct e1000_hw *hw); -void igb_clear_vfta(struct e1000_hw *hw); -void igb_clear_vfta_i350(struct e1000_hw *hw); -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); -void igb_config_collision_dist(struct e1000_hw *hw); -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); -void igb_mta_set(struct e1000_hw *hw, u32 hash_value); -void igb_put_hw_semaphore(struct e1000_hw *hw); -void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); -s32 igb_check_alt_mac_addr(struct e1000_hw *hw); - -bool igb_enable_mng_pass_thru(struct e1000_hw *hw); - -enum e1000_mng_mode { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_if_only -}; - -#define E1000_FACTPS_MNGCG 0x20000000 - -#define E1000_FWSM_MODE_MASK 0xE -#define E1000_FWSM_MODE_SHIFT 1 - -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 - -extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw 
*hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); #endif diff --git a/drivers/net/igb/e1000_manage.c b/drivers/net/igb/e1000_manage.c new file mode 100644 index 000000000000..91942dc1738c --- /dev/null +++ b/drivers/net/igb/e1000_manage.c @@ -0,0 +1,441 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum over a buffer of the specified length. The + * checksum calculated is returned. + **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy-waits + * if the previous command is not completed. + **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* Check that the previous command is completed. */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = hw->mac.ops.mng_enable_host_if(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords.
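e1000_calculate_checksum() above returns the two's complement of the byte sum, so summing a buffer together with its checksum byte wraps to zero. A standalone copy with a small self-check:

#include <stdint.h>
#include <stdio.h>

static uint8_t calc_checksum(const uint8_t *buf, uint32_t len)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];

	return (uint8_t)(0 - sum); /* two's complement of the byte sum */
}

int main(void)
{
	uint8_t cookie[4] = { 0x12, 0x34, 0x56, 0x00 };

	cookie[3] = calc_checksum(cookie, 3);
	/* The sum over data plus checksum wraps to zero (prints 0x00). */
	printf("sum incl. checksum = 0x%02x\n",
	       (uint8_t)(cookie[0] + cookie[1] + cookie[2] + cookie[3]));
	return 0;
}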
*/ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after computing its checksum. + **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the given offset on the host + * interface. It handles alignment so the writes are done in the most + * efficient way, and accumulates the byte sum of the data in the *sum + * parameter. + **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area.
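The prev_bytes block above handles an unaligned start: read-modify-write the partial leading dword, then stream whole dwords, then zero-pad the tail. The core packing idea in standalone form (this models the little-endian byte lanes that the driver's tmp-pointer trick relies on; a simplified sketch, not the device path):

#include <stdint.h>

/* Pack 'len' bytes into a dword array starting at byte 'offset',
 * preserving the bytes outside the written range - the same
 * read-modify-write idea as the prev_bytes block above. */
static void pack_bytes(uint32_t *words, uint32_t offset,
		       const uint8_t *src, uint32_t len)
{
	while (len--) {
		uint32_t word = offset >> 2;       /* dword index */
		uint32_t shift = (offset & 3) * 8; /* byte lane within dword */

		words[word] &= ~(0xFFu << shift);
		words[word] |= (uint32_t)*src++ << shift;
		offset++;
	}
}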
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = hw->mac.ops.mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
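The new e1000_enable_mng_pass_thru() above boils down to one predicate: TCO receive must be enabled, and then either the FWSM reports pass-through mode with MNGCG clear, or, on parts without an FWSM, SMBus is enabled while ASF is not. The same predicate standalone (bit values are illustrative stand-ins):

#include <stdbool.h>
#include <stdint.h>

#define MANC_RCV_TCO_EN 0x00020000 /* illustrative bit values throughout */
#define MANC_SMBUS_EN   0x00000001
#define MANC_ASF_EN     0x00000002
#define FACTPS_MNGCG    0x20000000
#define FWSM_MODE_MASK  0xE
#define FWSM_MODE_PT    (2 << 1)   /* pass-through mode in the mode field */

static bool mng_pass_thru(bool asf_present, bool has_fwsm,
			  uint32_t manc, uint32_t fwsm, uint32_t factps)
{
	if (!asf_present || !(manc & MANC_RCV_TCO_EN))
		return false;

	if (has_fwsm)
		return !(factps & FACTPS_MNGCG) &&
		       (fwsm & FWSM_MODE_MASK) == FWSM_MODE_PT;

	return (manc & MANC_SMBUS_EN) && !(manc & MANC_ASF_EN);
}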
+ **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} + diff --git a/drivers/net/igb/e1000_manage.h b/drivers/net/igb/e1000_manage.h new file mode 100644 index 000000000000..0d8c3f92b51c --- /dev/null +++ b/drivers/net/igb/e1000_manage.h @@ -0,0 +1,82 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index b7b99699c1cb..ae57bbc4457e 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -28,7 +28,30 @@ #include "e1000_mbx.h" /** - * igb_read_mbx - Reads a message from the mailbox + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_check_flag"); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -36,11 +59,13 @@ * * returns SUCCESS if it successfuly read message from buffer **/ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_read_mbx"); + /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; @@ -52,7 +77,7 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 
size, u16 mbx_id) } /** - * igb_write_mbx - Write a message to the mailbox + * e1000_write_mbx - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -60,10 +85,12 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) * * returns SUCCESS if it successfully copied message into the buffer **/ -s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); if (size > mbx->size) ret_val = -E1000_ERR_MBX; @@ -75,17 +102,19 @@ s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) } /** - * igb_check_for_msg - checks to see if someone sent us mail + * e1000_check_for_msg - checks to see if someone sent us mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_check_for_msg"); + if (mbx->ops.check_for_msg) ret_val = mbx->ops.check_for_msg(hw, mbx_id); @@ -93,17 +122,19 @@ s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) } /** - * igb_check_for_ack - checks to see if someone sent us ACK + * e1000_check_for_ack - checks to see if someone sent us ACK * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_check_for_ack"); + if (mbx->ops.check_for_ack) ret_val = mbx->ops.check_for_ack(hw, mbx_id); @@ -111,17 +142,19 @@ s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) } /** - * igb_check_for_rst - checks to see if other side has reset + * e1000_check_for_rst - checks to see if other side has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_check_for_rst"); + if (mbx->ops.check_for_rst) ret_val = mbx->ops.check_for_rst(hw, mbx_id); @@ -129,17 +162,19 @@ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) } /** - * igb_poll_for_msg - Wait for message notification + * e1000_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification **/ -static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; + DEBUGFUNC("e1000_poll_for_msg"); + if (!countdown || !mbx->ops.check_for_msg) goto out; @@ -147,28 +182,30 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) countdown--; if (!countdown) break; - udelay(mbx->usec_delay); + usec_delay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: - return 
countdown ? 0 : -E1000_ERR_MBX; + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** - * igb_poll_for_ack - Wait for message acknowledgement + * e1000_poll_for_ack - Wait for message acknowledgement * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message acknowledgement **/ -static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; + DEBUGFUNC("e1000_poll_for_ack"); + if (!countdown || !mbx->ops.check_for_ack) goto out; @@ -176,18 +213,18 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) countdown--; if (!countdown) break; - udelay(mbx->usec_delay); + usec_delay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: - return countdown ? 0 : -E1000_ERR_MBX; + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** - * igb_read_posted_mbx - Wait for message notification and receive message + * e1000_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -196,16 +233,19 @@ out: * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. **/ -static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_read_posted_mbx"); + if (!mbx->ops.read) goto out; - ret_val = igb_poll_for_msg(hw, mbx_id); + ret_val = e1000_poll_for_msg(hw, mbx_id); + /* if ack received read message, otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size, mbx_id); out: @@ -213,7 +253,7 @@ out: } /** - * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -222,11 +262,13 @@ out: * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ -static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_write_posted_mbx"); + /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; @@ -236,37 +278,58 @@ static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx /* if msg sent wait until we receive an ack */ if (!ret_val) - ret_val = igb_poll_for_ack(hw, mbx_id); + ret_val = e1000_poll_for_ack(hw, mbx_id); out: return ret_val; } -static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) { - u32 mbvficr = rd32(E1000_MBVFICR); + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = 
e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; +} + +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); s32 ret_val = -E1000_ERR_MBX; if (mbvficr & mask) { - ret_val = 0; - wr32(E1000_MBVFICR, mask); + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); } return ret_val; } /** - * igb_check_for_msg_pf - checks to see if the VF has sent mail + * e1000_check_for_msg_pf - checks to see if the VF has sent mail * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; - if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { - ret_val = 0; + DEBUGFUNC("e1000_check_for_msg_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; hw->mbx.stats.reqs++; } @@ -274,18 +337,20 @@ static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) } /** - * igb_check_for_ack_pf - checks to see if the VF has ACKed + * e1000_check_for_ack_pf - checks to see if the VF has ACKed * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; - if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { - ret_val = 0; + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; hw->mbx.stats.acks++; } @@ -293,20 +358,22 @@ static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) } /** - * igb_check_for_rst_pf - checks to see if the VF has reset + * e1000_check_for_rst_pf - checks to see if the VF has reset * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) { - u32 vflre = rd32(E1000_VFLRE); + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); s32 ret_val = -E1000_ERR_MBX; + DEBUGFUNC("e1000_check_for_rst_pf"); + if (vflre & (1 << vf_number)) { - ret_val = 0; - wr32(E1000_VFLRE, (1 << vf_number)); + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); hw->mbx.stats.rsts++; } @@ -314,31 +381,32 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) } /** - * igb_obtain_mbx_lock_pf - obtain mailbox lock + * e1000_obtain_mbx_lock_pf - obtain mailbox lock * @hw: pointer to the HW structure * @vf_number: the VF index * * return SUCCESS if we obtained the mailbox lock **/ -static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + E1000_WRITE_REG(hw, 
E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); /* reserve mailbox for vf use */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = 0; + ret_val = E1000_SUCCESS; return ret_val; } /** - * igb_write_mbx_pf - Places a message in the mailbox + * e1000_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -346,27 +414,29 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) * * returns SUCCESS if it successfully copied message into the buffer **/ -static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) { s32 ret_val; u16 i; + DEBUGFUNC("e1000_write_mbx_pf"); + /* lock the mailbox to prevent pf/vf race condition */ - ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ - igb_check_for_msg_pf(hw, vf_number); - igb_check_for_ack_pf(hw, vf_number); + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) - array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); /* Interrupt VF to tell it a message has been sent and release buffer*/ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); /* update stats */ hw->mbx.stats.msgs_tx++; @@ -377,7 +447,7 @@ out_no_write: } /** - * igb_read_mbx_pf - Read a message from the mailbox + * e1000_read_mbx_pf - Read a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -387,23 +457,25 @@ out_no_write: * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
**/ -static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) { s32 ret_val; u16 i; + DEBUGFUNC("e1000_read_mbx_pf"); + /* lock the mailbox to prevent pf/vf race condition */ - ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_read; /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) - msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); /* Acknowledge the message and release buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; @@ -418,29 +490,33 @@ out_no_read: * * Initializes the hw->mbx struct to correct values for pf mailbox */ -s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->size = E1000_VFMAILBOX_SIZE; - - mbx->ops.read = igb_read_mbx_pf; - mbx->ops.write = igb_write_mbx_pf; - mbx->ops.read_posted = igb_read_posted_mbx; - mbx->ops.write_posted = igb_write_posted_mbx; - mbx->ops.check_for_msg = igb_check_for_msg_pf; - mbx->ops.check_for_ack = igb_check_for_ack_pf; - mbx->ops.check_for_rst = igb_check_for_rst_pf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - return 0; + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } } diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index dbcfa3d5caec..d686e16162aa 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h @@ -28,50 +28,60 @@ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ -#include "e1000_hw.h" +#include "e1000_api.h" -#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ #define E1000_MBVFICR_VFACK_MASK 
0x00FF0000 /* bits for VF acks */ -#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ -#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ -#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) - -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ - -s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_check_for_msg(struct e1000_hw *, u16); -s32 igb_check_for_ack(struct e1000_hw *, u16); -s32 igb_check_for_rst(struct e1000_hw *, u16); -s32 igb_init_mbx_params_pf(struct e1000_hw *); +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); +s32 e1000_check_for_rst(struct e1000_hw *, u16); 
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); #endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index fa2c6ba62139..0d894628ef77 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c @@ -25,44 +25,105 @@ *******************************************************************************/ -#include -#include +#include "e1000_api.h" -#include "e1000_mac.h" -#include "e1000_nvm.h" +static void e1000_reload_nvm_generic(struct e1000_hw *hw); /** - * igb_raise_eec_clk - Raise EEPROM clock + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_read_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data) +{ + DEBUGFUNC("e1000_null_led_default"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Enable/Raise the EEPROM clock bit. **/ -static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd | E1000_EECD_SK; - wr32(E1000_EECD, *eecd); - wrfl(); - udelay(hw->nvm.delay_usec); + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); } /** - * igb_lower_eec_clk - Lower EEPROM clock + * e1000_lower_eec_clk - Lower EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Clear/Lower the EEPROM clock bit.
**/ -static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd & ~E1000_EECD_SK; - wr32(E1000_EECD, *eecd); - wrfl(); - udelay(hw->nvm.delay_usec); + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); } /** - * igb_shift_out_eec_bits - Shift data bits our to the EEPROM + * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM * @hw: pointer to the HW structure * @data: data to send to the EEPROM * @count: number of bits to shift out @@ -71,12 +132,14 @@ static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) * "data" parameter will be shifted out to the EEPROM one bit at a time. * In order to do this, "data" must be broken down into bits. **/ -static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) { struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); + u32 eecd = E1000_READ_REG(hw, E1000_EECD); u32 mask; + DEBUGFUNC("e1000_shift_out_eec_bits"); + mask = 0x01 << (count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; @@ -87,23 +150,23 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) if (data & mask) eecd |= E1000_EECD_DI; - wr32(E1000_EECD, eecd); - wrfl(); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); - udelay(nvm->delay_usec); + usec_delay(nvm->delay_usec); - igb_raise_eec_clk(hw, &eecd); - igb_lower_eec_clk(hw, &eecd); + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); mask >>= 1; } while (mask); eecd &= ~E1000_EECD_DI; - wr32(E1000_EECD, eecd); + E1000_WRITE_REG(hw, E1000_EECD, eecd); } /** - * igb_shift_in_eec_bits - Shift data bits in from the EEPROM + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM * @hw: pointer to the HW structure * @count: number of bits to shift in * @@ -113,121 +176,124 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) * "DO" bit. During this "shifting in" process the data in "DI" bit should * always be clear. **/ -static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) { u32 eecd; u32 i; u16 data; - eecd = rd32(E1000_EECD); + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; for (i = 0; i < count; i++) { data <<= 1; - igb_raise_eec_clk(hw, &eecd); + e1000_raise_eec_clk(hw, &eecd); - eecd = rd32(E1000_EECD); + eecd = E1000_READ_REG(hw, E1000_EECD); eecd &= ~E1000_EECD_DI; if (eecd & E1000_EECD_DO) data |= 1; - igb_lower_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); } return data; } /** - * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion * @hw: pointer to the HW structure * @ee_reg: EEPROM flag for polling * * Polls the EEPROM status bit for either read or write completion based * upon the value of 'ee_reg'.
**/ -static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) { u32 attempts = 100000; u32 i, reg = 0; - s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); for (i = 0; i < attempts; i++) { if (ee_reg == E1000_NVM_POLL_READ) - reg = rd32(E1000_EERD); + reg = E1000_READ_REG(hw, E1000_EERD); else - reg = rd32(E1000_EEWR); + reg = E1000_READ_REG(hw, E1000_EEWR); - if (reg & E1000_NVM_RW_REG_DONE) { - ret_val = 0; - break; - } + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; - udelay(5); + usec_delay(5); } - return ret_val; + return -E1000_ERR_NVM; } /** - * igb_acquire_nvm - Generic request for access to EEPROM + * e1000_acquire_nvm_generic - Generic request for access to EEPROM * @hw: pointer to the HW structure * * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). **/ -s32 igb_acquire_nvm(struct e1000_hw *hw) +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) { - u32 eecd = rd32(E1000_EECD); + u32 eecd = E1000_READ_REG(hw, E1000_EECD); s32 timeout = E1000_NVM_GRANT_ATTEMPTS; - s32 ret_val = 0; + DEBUGFUNC("e1000_acquire_nvm_generic"); - wr32(E1000_EECD, eecd | E1000_EECD_REQ); - eecd = rd32(E1000_EECD); + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); while (timeout) { if (eecd & E1000_EECD_GNT) break; - udelay(5); - eecd = rd32(E1000_EECD); + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); timeout--; } if (!timeout) { eecd &= ~E1000_EECD_REQ; - wr32(E1000_EECD, eecd); - hw_dbg("Could not acquire NVM grant\n"); - ret_val = -E1000_ERR_NVM; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; } - return ret_val; + return E1000_SUCCESS; } /** - * igb_standby_nvm - Return EEPROM to standby state + * e1000_standby_nvm - Return EEPROM to standby state * @hw: pointer to the HW structure * * Return the EEPROM to a standby state. **/ -static void igb_standby_nvm(struct e1000_hw *hw) +static void e1000_standby_nvm(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); if (nvm->type == e1000_nvm_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= E1000_EECD_CS; - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); eecd &= ~E1000_EECD_CS; - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); } } @@ -241,53 +307,57 @@ static void e1000_stop_nvm(struct e1000_hw *hw) { u32 eecd; - eecd = rd32(E1000_EECD); + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); if (hw->nvm.type == e1000_nvm_eeprom_spi) { /* Pull CS high */ eecd |= E1000_EECD_CS; - igb_lower_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); } } /** - * igb_release_nvm - Release exclusive access to EEPROM + * e1000_release_nvm_generic - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
**/ -void igb_release_nvm(struct e1000_hw *hw) +void e1000_release_nvm_generic(struct e1000_hw *hw) { u32 eecd; + DEBUGFUNC("e1000_release_nvm_generic"); + e1000_stop_nvm(hw); - eecd = rd32(E1000_EECD); + eecd = E1000_READ_REG(hw, E1000_EECD); eecd &= ~E1000_EECD_REQ; - wr32(E1000_EECD, eecd); + E1000_WRITE_REG(hw, E1000_EECD, eecd); } /** - * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write * @hw: pointer to the HW structure * * Setups the EEPROM for reading and writing. **/ -static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); - s32 ret_val = 0; - u16 timeout = 0; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); u8 spi_stat_reg; + DEBUGFUNC("e1000_ready_nvm_eeprom"); if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - wr32(E1000_EECD, eecd); - wrfl(); - udelay(1); - timeout = NVM_MAX_RETRY_SPI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(1); /* * Read "Status Register" repeatedly until the LSB is cleared. @@ -296,30 +366,28 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) * not cleared within 'timeout', then error out. */ while (timeout) { - igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, hw->nvm.opcode_bits); - spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) break; - udelay(5); - igb_standby_nvm(hw); + usec_delay(5); + e1000_standby_nvm(hw); timeout--; } if (!timeout) { - hw_dbg("SPI NVM Status error\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("SPI NVM Status error\n"); + return -E1000_ERR_NVM; } } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_read_nvm_spi - Read EEPROM's using SPI + * e1000_read_nvm_spi - Read EEPROM's using SPI * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read @@ -327,7 +395,7 @@ out: * * Reads a 16 bit word from the EEPROM. **/ -s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i = 0; @@ -335,33 +403,34 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) u16 word_in; u8 read_opcode = NVM_READ_OPCODE_SPI; + DEBUGFUNC("e1000_read_nvm_spi"); + /* * A check for invalid values: offset too large, too many words, * and not enough words. 
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; } ret_val = nvm->ops.acquire(hw); if (ret_val) - goto out; + return ret_val; - ret_val = igb_ready_nvm_eeprom(hw); + ret_val = e1000_ready_nvm_eeprom(hw); if (ret_val) goto release; - igb_standby_nvm(hw); + e1000_standby_nvm(hw); if ((nvm->address_bits == 8) && (offset >= 128)) read_opcode |= NVM_A8_OPCODE_SPI; /* Send the READ command (opcode + addr) */ - igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); - igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); /* * Read the data. SPI NVMs increment the address with each byte @@ -369,19 +438,18 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) * us to read the whole NVM from any offset */ for (i = 0; i < words; i++) { - word_in = igb_shift_in_eec_bits(hw, 16); + word_in = e1000_shift_in_eec_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); } release: nvm->ops.release(hw); -out: return ret_val; } /** - * igb_read_nvm_eerd - Reads EEPROM using EERD register + * e1000_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read @@ -389,42 +457,42 @@ out: * * Reads a 16 bit word from the EEPROM using the EERD register. **/ -s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i, eerd = 0; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); /* * A check for invalid values: offset too large, too many words, - * and not enough words. + * too many words for the offset, and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; } for (i = 0; i < words; i++) { eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + E1000_NVM_RW_REG_START; - wr32(E1000_EERD, eerd); - ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); if (ret_val) break; - data[i] = (rd32(E1000_EERD) >> - E1000_NVM_RW_REG_DATA); + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); } -out: return ret_val; } /** - * igb_write_nvm_spi - Write to EEPROM using SPI + * e1000_write_nvm_spi - Write to EEPROM using SPI * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write @@ -433,45 +501,44 @@ out: * Writes data to EEPROM at offset using SPI interface. * * If e1000_update_nvm_checksum is not called after this function , the - * EEPROM will most likley contain an invalid checksum. + * EEPROM will most likely contain an invalid checksum. 
**/ -s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val; u16 widx = 0; + DEBUGFUNC("e1000_write_nvm_spi"); + /* * A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; } - ret_val = hw->nvm.ops.acquire(hw); + ret_val = nvm->ops.acquire(hw); if (ret_val) - goto out; - - msleep(10); + return ret_val; while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; - ret_val = igb_ready_nvm_eeprom(hw); + ret_val = e1000_ready_nvm_eeprom(hw); if (ret_val) goto release; - igb_standby_nvm(hw); + e1000_standby_nvm(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ - igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, nvm->opcode_bits); - igb_standby_nvm(hw); + e1000_standby_nvm(hw); /* * Some SPI eeproms use the 8th address bit embedded in the @@ -481,160 +548,218 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) write_opcode |= NVM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ - igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); - igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), nvm->address_bits); /* Loop to allow for up to whole page write of eeprom */ while (widx < words) { u16 word_out = data[widx]; word_out = (word_out >> 8) | (word_out << 8); - igb_shift_out_eec_bits(hw, word_out, 16); + e1000_shift_out_eec_bits(hw, word_out, 16); widx++; if ((((offset + widx) * 2) % nvm->page_size) == 0) { - igb_standby_nvm(hw); + e1000_standby_nvm(hw); break; } } } - msleep(10); + msec_delay(10); release: - hw->nvm.ops.release(hw); + nvm->ops.release(hw); -out: return ret_val; } /** - * igb_read_part_string - Read device part number + * e1000_read_pba_string_generic - Read device part number * @hw: pointer to the HW structure - * @part_num: pointer to device part number - * @part_num_size: size of part number buffer + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores - * the value in part_num. + * the value in pba_num. 
**/ -s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) { s32 ret_val; u16 nvm_data; - u16 pointer; + u16 pba_ptr; u16 offset; u16 length; - if (part_num == NULL) { - hw_dbg("PBA string buffer was null\n"); - ret_val = E1000_ERR_INVALID_ARGUMENT; - goto out; + DEBUGFUNC("e1000_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } - ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } /* * if nvm_data is not ptr guard the PBA must be in legacy format which - * means pointer is actually our second data word for the PBA number + * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (nvm_data != NVM_PBA_PTR_GUARD) { - hw_dbg("NVM PBA number is not stored as string\n"); + DEBUGOUT("NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ - if (part_num_size < 11) { - hw_dbg("PBA string buffer too small\n"); + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); return E1000_ERR_NO_SPACE; } - /* extract hex string from data and pointer */ - part_num[0] = (nvm_data >> 12) & 0xF; - part_num[1] = (nvm_data >> 8) & 0xF; - part_num[2] = (nvm_data >> 4) & 0xF; - part_num[3] = nvm_data & 0xF; - part_num[4] = (pointer >> 12) & 0xF; - part_num[5] = (pointer >> 8) & 0xF; - part_num[6] = '-'; - part_num[7] = 0; - part_num[8] = (pointer >> 4) & 0xF; - part_num[9] = pointer & 0xF; + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; /* put a null character on the end of our string */ - part_num[10] = '\0'; + pba_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { - if (part_num[offset] < 0xA) - part_num[offset] += '0'; - else if (part_num[offset] < 0x10) - part_num[offset] += 'A' - 0xA; + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; } - goto out; + return E1000_SUCCESS; } - ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } if (length == 0xFFFF || length == 0) { - hw_dbg("NVM PBA number section invalid length\n"); - ret_val = E1000_ERR_NVM_PBA_SECTION; - goto out; + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; } - /* check if part_num buffer is big enough */ - if (part_num_size < (((u32)length * 2) - 1)) { - hw_dbg("PBA string buffer too small\n"); - ret_val = E1000_ERR_NO_SPACE; - goto out; + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + 
DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; } /* trim pba length from start of string */ - pointer++; + pba_ptr++; length--; for (offset = 0; offset < length; offset++) { - ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } - part_num[offset * 2] = (u8)(nvm_data >> 8); - part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); } - part_num[offset * 2] = '\0'; + pba_num[offset * 2] = '\0'; -out: - return ret_val; + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = 11; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* + * Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; } /** - * igb_read_mac_addr - Read device MAC address + * e1000_read_mac_addr_generic - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we increment the * last bit in the MAC address for the second port. 
**/ -s32 igb_read_mac_addr(struct e1000_hw *hw) +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) { u32 rar_high; u32 rar_low; u16 i; - rar_high = rd32(E1000_RAH(0)); - rar_low = rd32(E1000_RAL(0)); + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); @@ -642,72 +767,93 @@ s32 igb_read_mac_addr(struct e1000_hw *hw) for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; - return 0; + return E1000_SUCCESS; } /** - * igb_validate_nvm_checksum - Validate EEPROM checksum + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ -s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val; u16 checksum = 0; u16 i, nvm_data; + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; + DEBUGOUT("NVM Read Error\n"); + return ret_val; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { - hw_dbg("NVM Checksum Invalid\n"); - ret_val = -E1000_ERR_NVM; - goto out; + DEBUGOUT("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_update_nvm_checksum - Update EEPROM checksum + * e1000_update_nvm_checksum_generic - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. **/ -s32 igb_update_nvm_checksum(struct e1000_hw *hw) +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) { - s32 ret_val; + s32 ret_val; u16 checksum = 0; u16 i, nvm_data; + DEBUGFUNC("e1000_update_nvm_checksum"); + for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error while updating checksum.\n"); - goto out; + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) - hw_dbg("NVM Write Error while updating checksum.\n"); + DEBUGOUT("NVM Write Error while updating checksum.\n"); -out: return ret_val; } +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +static void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h index 825b0228cac0..6ca9565657f5 100644 --- a/drivers/net/igb/e1000_nvm.h +++ b/drivers/net/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2012 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,16 +28,27 @@ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ -s32 igb_acquire_nvm(struct e1000_hw *hw); -void igb_release_nvm(struct e1000_hw *hw); -s32 igb_read_mac_addr(struct e1000_hw *hw); -s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); -s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, - u32 part_num_size); -s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_validate_nvm_checksum(struct e1000_hw *hw); -s32 igb_update_nvm_checksum(struct e1000_hw *hw); - +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 #endif diff --git a/drivers/net/igb/e1000_osdep.h b/drivers/net/igb/e1000_osdep.h new file mode 100644 index 000000000000..22847a49a8c8 --- /dev/null +++ b/drivers/net/igb/e1000_osdep.h @@ -0,0 +1,122 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + +#define usec_delay(x) udelay(x) +#ifndef msec_delay +#define msec_delay(x) do { \ + /* Don't mdelay in interrupt context! */ \ + if (in_interrupt()) \ + BUG(); \ + else \ + msleep(x); \ +} while (0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN + +#ifdef __BIG_ENDIAN +#define E1000_BIG_ENDIAN __BIG_ENDIAN +#endif + + +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A...) + +#define DEBUGFUNC(F) +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT7 DEBUGOUT3 + +#define E1000_REGISTER(a, reg) reg + +#define E1000_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg)))) + +#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) + +#define E1000_WRITE_REG_IO(a, reg, offset) do { \ + outl(reg, ((a)->io_base)); \ + outl(offset, ((a)->io_base + 4)); } while (0) + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) + +#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index 789de5b83aad..f6e4503c5fc5 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c @@ -25,127 
+25,243 @@ *******************************************************************************/ -#include -#include - -#include "e1000_mac.h" -#include "e1000_phy.h" - -static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); -static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, - u16 *phy_ctrl); -static s32 igb_wait_autoneg(struct e1000_hw *hw); +#include "e1000_api.h" +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); /* Cable length tables */ -static const u16 e1000_m88_cable_length_table[] = - { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) - -static const u16 e1000_igp_2_cable_length_table[] = - { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, - 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, - 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, - 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, - 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, - 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, - 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, - 104, 109, 114, 118, 121, 124}; + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ (sizeof(e1000_igp_2_cable_length_table) / \ sizeof(e1000_igp_2_cable_length_table[0])) /** - * igb_check_reset_block - Check if PHY reset is blocked + * e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.set_page = e1000_null_set_page; + phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.read_reg_page = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = e1000_null_write_reg; + phy->ops.write_reg_page = e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; +
phy->ops.power_down = e1000_null_phy_generic; + phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; +} + +/** + * e1000_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data) +{ + DEBUGFUNC("e1000_null_set_page"); + return E1000_SUCCESS; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_phy_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + DEBUGFUNC("e1000_null_write_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + DEBUGFUNC("e1000_read_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + DEBUGFUNC("e1000_write_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Read the PHY management control register and check whether a PHY reset - * is blocked. If a reset is not blocked return 0, otherwise + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise * return E1000_BLK_PHY_RESET (12). **/ -s32 igb_check_reset_block(struct e1000_hw *hw) +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) { u32 manc; - manc = rd32(E1000_MANC); + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? - E1000_BLK_PHY_RESET : 0; + E1000_BLK_PHY_RESET : E1000_SUCCESS; } /** - * igb_get_phy_id - Retrieve the PHY ID and revision + * e1000_get_phy_id - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. 
**/ -s32 igb_get_phy_id(struct e1000_hw *hw) +s32 e1000_get_phy_id(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 phy_id; + DEBUGFUNC("e1000_get_phy_id"); + + if (!phy->ops.read_reg) + return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) - goto out; + return ret_val; phy->id = (u32)(phy_id << 16); - udelay(20); + usec_delay(20); ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); if (ret_val) - goto out; + return ret_val; phy->id |= (u32)(phy_id & PHY_REVISION_MASK); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); -out: - return ret_val; + + return E1000_SUCCESS; } /** - * igb_phy_reset_dsp - Reset PHY DSP + * e1000_phy_reset_dsp_generic - Reset PHY DSP * @hw: pointer to the HW structure * * Reset the digital signal processor. **/ -static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val; - if (!(hw->phy.ops.write_reg)) - goto out; + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return E1000_SUCCESS; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) - goto out; - - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + return ret_val; -out: - return ret_val; + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); } /** - * igb_read_phy_reg_mdic - Read MDI control register + * e1000_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * - * Reads the MDI control regsiter in the PHY at offset and stores the + * Reads the MDI control register in the PHY at offset and stores the * information read to data. 
**/ -s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; - s32 ret_val = 0; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); - ret_val = -E1000_ERR_PARAM; - goto out; + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; } /* @@ -157,7 +273,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_READ)); - wr32(E1000_MDIC, mdic); + E1000_WRITE_REG(hw, E1000_MDIC, mdic); /* * Poll the ready bit to see if the MDI read completed @@ -165,45 +281,42 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = rd32(E1000_MDIC); + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Read did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; } *data = (u16) mdic; -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_write_phy_reg_mdic - Write MDI control register + * e1000_write_phy_reg_mdic - Write MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset * * Writes data to MDI control register in the PHY at offset. **/ -s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; - s32 ret_val = 0; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); - ret_val = -E1000_ERR_PARAM; - goto out; + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; } /* @@ -216,7 +329,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); - wr32(E1000_MDIC, mdic); + E1000_WRITE_REG(hw, E1000_MDIC, mdic); /* * Poll the ready bit to see if the MDI read completed @@ -224,28 +337,25 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = rd32(E1000_MDIC); + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Write did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_read_phy_reg_i2c - Read PHY register using i2c + * e1000_read_phy_reg_i2c - Read PHY register using i2c * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -253,11 +363,12 @@ out: * Reads the PHY register at offset using the i2c interface and stores 
the * retrieved information in data. **/ -s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, i2ccmd = 0; + DEBUGFUNC("e1000_read_phy_reg_i2c"); /* * Set up Op-code, Phy Address, and register address in the I2CCMD @@ -265,50 +376,52 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) * PHY to retrieve the desired data. */ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - (E1000_I2CCMD_OPCODE_READ)); + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); - wr32(E1000_I2CCMD, i2ccmd); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); /* Poll the ready bit to see if the I2C read completed */ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); if (i2ccmd & E1000_I2CCMD_READY) break; } if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Read did not complete\n"); + DEBUGOUT("I2CCMD Read did not complete\n"); return -E1000_ERR_PHY; } if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); + DEBUGOUT("I2CCMD Error bit set\n"); return -E1000_ERR_PHY; } /* Need to byte-swap the 16-bit value. */ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); - return 0; + return E1000_SUCCESS; } /** - * igb_write_phy_reg_i2c - Write PHY register using i2c + * e1000_write_phy_reg_i2c - Write PHY register using i2c * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset using the i2c interface. **/ -s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, i2ccmd = 0; u16 phy_data_swapped; + DEBUGFUNC("e1000_write_phy_reg_i2c"); + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { - hw_dbg("PHY I2C Address %d is out of range.\n", + DEBUGOUT1("PHY I2C Address %d is out of range.\n", hw->phy.addr); return -E1000_ERR_CONFIG; } @@ -322,33 +435,166 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) * PHY to retrieve the desired data. */ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_WRITE | - phy_data_swapped); + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); - wr32(E1000_I2CCMD, i2ccmd); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); /* Poll the ready bit to see if the I2C read completed */ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); if (i2ccmd & E1000_I2CCMD_READY) break; } if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Write did not complete\n"); + DEBUGOUT("I2CCMD Write did not complete\n"); return -E1000_ERR_PHY; } if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_sfp_data_byte - Reads SFP module data. 
+ * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in the SFP-resident EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access or + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); return -E1000_ERR_PHY; } - /* + * Set up Op-code, EEPROM Address, in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; } /** - * igb_read_phy_reg_igp - Read igp PHY register + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in the SFP-resident EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access or + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* + * The programming interface is 16 bits wide + * so we need to read the whole word first, + * then update the appropriate byte lane and write + * the updated word back. + */ + /* + * Set up Op-code, EEPROM Address, in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given.
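+ *
+ * As a worked example of the byte-lane update described above
+ * (illustrative only): if the word read back is 0xBEEF and the new
+ * byte is 0x5A, the value written back is
+ * (0xBEEF & 0xFF00) | 0x5A = 0xBE5A, i.e. the upper byte lane is
+ * preserved and only the lower lane is replaced.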
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read a single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* + * Poll the ready bit to see if the last + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* + * Write the selected byte + * lane and update the whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -357,38 +603,29 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ -s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) { - s32 ret_val = 0; + s32 ret_val; - if (!(hw->phy.ops.acquire)) - goto out; + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = igb_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - } + return ret_val; - ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); hw->phy.ops.release(hw); -out: return ret_val; } /** - * igb_write_phy_reg_igp - Write igp PHY register + * e1000_write_phy_reg_m88 - Write m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset @@ -396,100 +633,433 @@ out: * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ -s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) { - s32 ret_val = 0; + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); - if (!(hw->phy.ops.acquire)) - goto out; + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = igb_write_phy_reg_mdic(hw, + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired.
Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - } + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); - ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + return ret_val; +} - hw->phy.ops.release(hw); +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
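+ *
+ * For illustration (sketch only, not verbatim driver code): the _locked
+ * variants below let a caller that already holds the SW/FW semaphore
+ * batch several accesses without re-acquiring it:
+ *
+ *	if (!hw->phy.ops.acquire(hw)) {
+ *		e1000_write_phy_reg_igp_locked(hw, reg1, val1);
+ *		e1000_write_phy_reg_igp_locked(hw, reg2, val2);
+ *		hw->phy.ops.release(hw);
+ *	}
+ *
+ * where reg1/val1 and reg2/val2 stand in for real register/value pairs.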
+ **/ +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); -out: return ret_val; } /** - * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * e1000_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset * - * Sets up Carrier-sense on Transmit and downshift values. + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. **/ -s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. 
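+ *
+ * For illustration (sketch only, not verbatim driver code):
+ *
+ *	u16 kmrn_data;
+ *	ret_val = e1000_read_kmrn_reg_generic(hw, offset, &kmrn_data);
+ *
+ * where offset is any valid Kumeran register offset; the helper packs it
+ * into E1000_KMRNCTRLSTA together with the read-enable bit and reads the
+ * result back after a short delay.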
+ **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then writes the data to the PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to the PHY register at the offset using the kumeran + * interface. Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) { - struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; - if (phy->reset_disable) { - ret_val = 0; - goto out; + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; } - if (phy->type == e1000_phy_82580) { + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.reset_disable) + return E1000_SUCCESS; + + if (hw->phy.type == e1000_phy_82580) { ret_val = hw->phy.ops.reset(hw); if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; } } - /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); if (ret_val) - goto out; + return ret_val; - phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ - phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; -out: - return ret_val; + return e1000_set_master_slave_mode(hw); } /** - * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock * and downshift values are set also. **/ -s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; - if (phy->reset_disable) { - ret_val = 0; - goto out; - } + DEBUGFUNC("e1000_copper_link_setup_m88"); - /* Enable CRS on TX. This must be set for half-duplex operation. */ + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; @@ -532,7 +1102,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; if (phy->revision < E1000_REVISION_4) { /* @@ -540,9 +1110,9 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) * to 25MHz clock. 
*/ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); + &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; @@ -554,49 +1124,48 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) } else { /* Configure Master and Slave downshift values */ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); } ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; } /* Commit the changes. */ - ret_val = igb_phy_sw_reset(hw); + ret_val = phy->ops.commit(hw); if (ret_val) { - hw_dbg("Error committing the PHY changes\n"); - goto out; + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. * Also enables and sets the downshift parameters. **/ -s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; - if (phy->reset_disable) { - ret_val = 0; - goto out; - } + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + if (phy->reset_disable) + return E1000_SUCCESS; /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; /* * Options: @@ -645,73 +1214,60 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; /* Commit the changes. */ - ret_val = igb_phy_sw_reset(hw); + ret_val = phy->ops.commit(hw); if (ret_val) { - hw_dbg("Error committing the PHY changes\n"); - goto out; + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link * @hw: pointer to the HW structure * * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for * igp PHY's. **/ -s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; - if (phy->reset_disable) { - ret_val = 0; - goto out; - } + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) + return E1000_SUCCESS; - ret_val = phy->ops.reset(hw); + ret_val = hw->phy.ops.reset(hw); if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; } /* * Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. */ - msleep(100); + msec_delay(100); - /* - * The NVM settings will configure LPLU in D3 for - * non-IGP1 PHYs. 
- */ - if (phy->type == e1000_phy_igp) { - /* disable lplu d3 during driver init */ - if (phy->ops.set_d3_lplu_state) - ret_val = phy->ops.set_d3_lplu_state(hw, false); + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); if (ret_val) { - hw_dbg("Error Disabling LPLU D3\n"); - goto out; + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; } } - - /* disable lplu d0 during driver init */ - ret_val = phy->ops.set_d0_lplu_state(hw, false); - if (ret_val) { - hw_dbg("Error Disabling LPLU D0\n"); - goto out; - } /* Configure mdi-mdix settings */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCR_AUTO_MDIX; @@ -729,7 +1285,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) } ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); if (ret_val) - goto out; + return ret_val; /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { @@ -744,129 +1300,34 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) - goto out; + return ret_val; /* Set auto Master/Slave resolution process */ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~CR_1000T_MS_ENABLE; ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); if (ret_val) - goto out; + return ret_val; } - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); - if (ret_val) - goto out; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); - if (ret_val) - goto out; + ret_val = e1000_set_master_slave_mode(hw); } -out: return ret_val; } /** - * igb_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. - **/ -static s32 igb_copper_link_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_ctrl; - - /* - * Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* - * If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. 
- */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - hw_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = igb_phy_setup_autoneg(hw); - if (ret_val) { - hw_dbg("Error Setting up Auto-Negotiation\n"); - goto out; - } - hw_dbg("Restarting Auto-Neg\n"); - - /* - * Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - /* - * Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = igb_wait_autoneg(hw); - if (ret_val) { - hw_dbg("Error while waiting for " - "autoneg to complete\n"); - goto out; - } - } - - hw->mac.get_link_status = true; - -out: - return ret_val; -} - -/** - * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * * Reads the MII auto-neg advertisement register and/or the 1000T control @@ -874,26 +1335,28 @@ out: * return successful. Otherwise, setup advertisement and flow control to * the appropriate values for the wanted auto-negotiation. **/ -static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 mii_autoneg_adv_reg; u16 mii_1000t_ctrl_reg = 0; + DEBUGFUNC("e1000_phy_setup_autoneg"); + phy->autoneg_advertised &= phy->autoneg_mask; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); if (ret_val) - goto out; + return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). */ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); if (ret_val) - goto out; + return ret_val; } /* @@ -915,39 +1378,39 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) NWAY_AR_10T_HD_CAPS); mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); - hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { - hw_dbg("Advertise 10mb Half duplex\n"); + DEBUGOUT("Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; } /* Do we want to advertise 10 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { - hw_dbg("Advertise 10mb Full duplex\n"); + DEBUGOUT("Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { - hw_dbg("Advertise 100mb Half duplex\n"); + DEBUGOUT("Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; } /* Do we want to advertise 100 Mb Full Duplex? 
*/ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { - hw_dbg("Advertise 100mb Full duplex\n"); + DEBUGOUT("Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ if (phy->autoneg_advertised & ADVERTISE_1000_HALF) - hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { - hw_dbg("Advertise 1000mb Full duplex\n"); + DEBUGOUT("Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } @@ -965,26 +1428,26 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * but we do not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. + * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: No software override. The flow control configuration * in the EEPROM is used. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* - * Flow control (RX & TX) is completely disabled by a + * Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_rx_pause: /* - * RX Flow control is enabled, and TX Flow control is + * Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. * * Since there really isn't a way to advertise that we are - * capable of RX Pause ONLY, we will advertise that we - * support both symmetric and asymmetric RX PAUSE. Later + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later * (in e1000_config_fc_after_link_up) we will disable the * hw's ability to send PAUSE frames. */ @@ -992,7 +1455,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) break; case e1000_fc_tx_pause: /* - * TX Flow control is enabled, and RX Flow control is + * Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; @@ -1000,37 +1463,99 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) break; case e1000_fc_full: /* - * Flow control (both RX and TX) is enabled by a software + * Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; default: - hw_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on the autoneg advertisement parameter, + * then configures to advertise the full capability. Sets up the PHY for + * autoneg and restarts the negotiation process with the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
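+ *
+ * For illustration (sketch only, not verbatim driver code): a caller
+ * restricting the advertisement to 100 Mb/s full duplex would set the
+ * mask before invoking this routine:
+ *
+ *	hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+ *	hw->phy.autoneg_wait_to_complete = true;
+ *
+ * Leaving autoneg_advertised at zero falls back to the full
+ * autoneg_mask, per the bounds check described above.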
+ **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; } + DEBUGOUT("Restarting Auto-Neg\n"); - ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); if (ret_val) - goto out; + return ret_val; - hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - ret_val = phy->ops.write_reg(hw, - PHY_1000T_CTRL, - mii_1000t_ctrl_reg); - if (ret_val) - goto out; + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = hw->mac.ops.wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } } -out: + hw->mac.get_link_status = true; + return ret_val; } /** - * igb_setup_copper_link - Configure copper link settings + * e1000_setup_copper_link_generic - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced @@ -1038,30 +1563,31 @@ out: * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ -s32 igb_setup_copper_link(struct e1000_hw *hw) +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) { s32 ret_val; bool link; + DEBUGFUNC("e1000_setup_copper_link_generic"); if (hw->mac.autoneg) { /* * Setup autoneg and flow control advertisement and perform * autonegotiation. */ - ret_val = igb_copper_link_autoneg(hw); + ret_val = e1000_copper_link_autoneg(hw); if (ret_val) - goto out; + return ret_val; } else { /* * PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ - hw_dbg("Forcing Speed and Duplex\n"); + DEBUGOUT("Forcing Speed and Duplex\n"); ret_val = hw->phy.ops.force_speed_duplex(hw); if (ret_val) { - hw_dbg("Error Forcing Speed and Duplex\n"); - goto out; + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; } } @@ -1069,49 +1595,48 @@ s32 igb_setup_copper_link(struct e1000_hw *hw) * Check link status. Wait up to 100 microseconds for link to become * valid. 
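 *
 * (For illustration: COPPER_LINK_UP_LIMIT polls at a 10 usec interval;
 * with the customary value of 10 that is 10 * 10 = 100 usec, which is
 * where the figure above comes from.)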
*/ - ret_val = igb_phy_has_link(hw, - COPPER_LINK_UP_LIMIT, - 10, - &link); + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); if (ret_val) - goto out; + return ret_val; if (link) { - hw_dbg("Valid link established!!!\n"); - igb_config_collision_dist(hw); - ret_val = igb_config_fc_after_link_up(hw); + DEBUGOUT("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); } else { - hw_dbg("Unable to establish link!!!\n"); + DEBUGOUT("Unable to establish link!!!\n"); } -out: return ret_val; } /** - * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Waits for link and returns * successful if link up is successful, else -E1000_ERR_PHY (-2). **/ -s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; - igb_phy_force_speed_duplex_setup(hw, &phy_data); + e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; /* * Clear Auto-Crossover to force MDI manually. IGP requires MDI @@ -1119,136 +1644,145 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; - hw_dbg("IGP PSCR: %X\n", phy_data); + DEBUGOUT1("IGP PSCR: %X\n", phy_data); - udelay(1); + usec_delay(1); if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) - hw_dbg("Link taking longer than expected.\n"); + DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } -out: return ret_val; } /** - * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Resets the PHY to commit the * changes. If time expires while waiting for link up, we reset the DSP. - * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon * successful completion, else return corresponding error code. 
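 *
 * For illustration (sketch only, not verbatim driver code): a forced
 * link is normally requested through the MAC fields consumed here,
 *
 *	hw->mac.autoneg = 0;
 *	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
 *	ret_val = hw->phy.ops.force_speed_duplex(hw);
 *
 * after which e1000_phy_force_speed_duplex_setup() maps the request onto
 * the E1000_CTRL and PHY_CONTROL bits.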
**/ -s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + /* * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; - hw_dbg("M88E1000 PSCR: %X\n", phy_data); + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; - igb_phy_force_speed_duplex_setup(hw, &phy_data); + e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; /* Reset the phy to commit changes. */ - ret_val = igb_phy_sw_reset(hw); + ret_val = hw->phy.ops.commit(hw); if (ret_val) - goto out; + return ret_val; if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); - ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - if (hw->phy.type != e1000_phy_m88 || - hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) { - hw_dbg("Link taking longer than expected.\n"); - } else { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { /* * We didn't get link. * Reset the DSP and cross our fingers. 
*/ ret_val = phy->ops.write_reg(hw, - M88E1000_PHY_PAGE_SELECT, - 0x001d); + M88E1000_PHY_PAGE_SELECT, + 0x001d); if (ret_val) - goto out; - ret_val = igb_phy_reset_dsp(hw); + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); if (ret_val) - goto out; + return ret_val; } } /* Try once more */ - ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, - 100000, &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; } - if (hw->phy.type != e1000_phy_m88 || - hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) - goto out; + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; /* * Resetting the phy means we need to re-force TX_CLK in the @@ -1258,7 +1792,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; /* * In addition, we must re-enable CRS on Tx for both half and full @@ -1266,17 +1800,80 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); -out: return ret_val; } /** - * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure * @phy_ctrl: pointer to current value of PHY_CONTROL * @@ -1287,17 +1884,18 @@ out: * caller must write to the PHY_CONTROL register for these settings to * take affect. 
**/ -static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, - u16 *phy_ctrl) +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl; + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + /* Turn off flow control when forcing speed/duplex */ hw->fc.current_mode = e1000_fc_none; /* Force speed/duplex on the mac */ - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~E1000_CTRL_SPD_SEL; @@ -1311,11 +1909,11 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; *phy_ctrl &= ~MII_CR_FULL_DUPLEX; - hw_dbg("Half Duplex\n"); + DEBUGOUT("Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; *phy_ctrl |= MII_CR_FULL_DUPLEX; - hw_dbg("Full Duplex\n"); + DEBUGOUT("Full Duplex\n"); } /* Forcing 10mb or 100mb? */ @@ -1323,21 +1921,21 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= MII_CR_SPEED_100; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); - hw_dbg("Forcing 100mb\n"); + DEBUGOUT("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); *phy_ctrl |= MII_CR_SPEED_10; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); - hw_dbg("Forcing 10mb\n"); + DEBUGOUT("Forcing 10mb\n"); } - igb_config_collision_dist(hw); + hw->mac.ops.config_collision_dist(hw); - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); } /** - * igb_set_d3_lplu_state - Sets low power link up state for D3 + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * @@ -1350,25 +1948,27 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, * During driver activity, SmartSpeed should be enabled so performance is * maintained. **/ -s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val; u16 data; - if (!(hw->phy.ops.read_reg)) - goto out; + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) - goto out; + return ret_val; if (!active) { data &= ~IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) - goto out; + return ret_val; /* * LPLU and SmartSpeed are mutually exclusive. 
LPLU is used * during Dx states where the power conservation is most @@ -1380,108 +1980,108 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) - goto out; + return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) - goto out; + return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) - goto out; + return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); if (ret_val) - goto out; + return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); + &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); + data); } -out: return ret_val; } /** - * igb_check_downshift - Checks whether a downshift in speed occurred + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * * Success returns 0, Failure returns 1 * * A downshift is detected by querying the PHY link health. **/ -s32 igb_check_downshift(struct e1000_hw *hw) +s32 e1000_check_downshift_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; + DEBUGFUNC("e1000_check_downshift_generic"); + switch (phy->type) { case e1000_phy_m88: case e1000_phy_gg82563: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: - case e1000_phy_igp: case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ phy->speed_downgraded = false; - ret_val = 0; - goto out; + return E1000_SUCCESS; } ret_val = phy->ops.read_reg(hw, offset, &phy_data); if (!ret_val) - phy->speed_downgraded = (phy_data & mask) ? true : false; + phy->speed_downgraded = !!(phy_data & mask); -out: return ret_val; } /** - * igb_check_polarity_m88 - Checks the polarity. + * e1000_check_polarity_m88 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ -static s32 igb_check_polarity_m88(struct e1000_hw *hw) +s32 e1000_check_polarity_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; + DEBUGFUNC("e1000_check_polarity_m88"); + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) @@ -1493,7 +2093,7 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw) } /** - * igb_check_polarity_igp - Checks the polarity. + * e1000_check_polarity_igp - Checks the polarity. 
* @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) @@ -1501,31 +2101,33 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw) * Polarity is determined based on the PHY port status register, and the * current speed (since there is no polarity at 100Mbps). **/ -static s32 igb_check_polarity_igp(struct e1000_hw *hw) +s32 e1000_check_polarity_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data, offset, mask; + DEBUGFUNC("e1000_check_polarity_igp"); + /* * Polarity is determined based on the speed of * our connection. */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) - goto out; + return ret_val; if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - offset = IGP01E1000_PHY_PCS_INIT_REG; - mask = IGP01E1000_PHY_POLARITY_MASK; + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; } else { /* * This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). */ - offset = IGP01E1000_PHY_PORT_STATUS; - mask = IGP01E1000_PSSR_POLARITY_REVERSED; + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = phy->ops.read_reg(hw, offset, &data); @@ -1535,22 +2137,61 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; -out: return ret_val; } /** - * igb_wait_autoneg - Wait for auto-neg compeletion + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_ife"); + + /* + * Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = (phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_wait_autoneg_generic - Wait for auto-neg completion * @hw: pointer to the HW structure * * Waits for auto-negotiation to complete or for the auto-negotiation time * limit to expire, which ever happens first. **/ -static s32 igb_wait_autoneg(struct e1000_hw *hw) +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 i, phy_status; + DEBUGFUNC("e1000_wait_autoneg_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. 
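+ * (For illustration: with the customary PHY_AUTO_NEG_LIMIT of 45 and
+ * the 100 msec poll below, the wait is bounded at roughly
+ * 45 * 100 msec = 4.5 seconds.)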
*/ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); @@ -1561,7 +2202,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) break; if (phy_status & MII_SR_AUTONEG_COMPLETE) break; - msleep(100); + msec_delay(100); } /* @@ -1572,7 +2213,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) } /** - * igb_phy_has_link - Polls PHY for link + * e1000_phy_has_link_generic - Polls PHY for link * @hw: pointer to the HW structure * @iterations: number of times to poll for link * @usec_interval: delay between polling attempts @@ -1580,12 +2221,17 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) * * Polls the PHY status register for link, 'iterations' number of times. **/ -s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success) { - s32 ret_val = 0; + s32 ret_val = E1000_SUCCESS; u16 i, phy_status; + DEBUGFUNC("e1000_phy_has_link_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + for (i = 0; i < iterations; i++) { /* * Some PHYs require the PHY_STATUS register to be read @@ -1593,32 +2239,31 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * it across the board. */ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) { + if (ret_val) /* * If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ - udelay(usec_interval); - } + usec_delay(usec_interval); ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_LINK_STATUS) break; if (usec_interval >= 1000) - mdelay(usec_interval/1000); + msec_delay_irq(usec_interval/1000); else - udelay(usec_interval); + usec_delay(usec_interval); } - *success = (i < iterations) ? 
true : false; + *success = (i < iterations); return ret_val; } /** - * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY * @hw: pointer to the HW structure * * Reads the PHY specific status register to retrieve the cable length @@ -1632,60 +2277,63 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * 3 110 - 140 meters * 4 > 140 meters **/ -s32 igb_get_cable_length_m88(struct e1000_hw *hw) +s32 e1000_get_cable_length_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, index; + DEBUGFUNC("e1000_get_cable_length_m88"); + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return E1000_SUCCESS; } -s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, phy_data2, index, default_page, is_cm; + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + switch (hw->phy.id) { + case M88E1340M_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &default_page); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); if (ret_val) - goto out; + return ret_val; /* Get cable length from PHY Cable Diagnostics Control Reg */ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), &phy_data); if (ret_val) - goto out; + return ret_val; /* Check if the unit of cable length is meters or cm */ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); if (ret_val) - goto out; + return ret_val; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); @@ -1694,34 +2342,34 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) phy->max_cable_length = phy_data / (is_cm ? 100 : 1); phy->cable_length = phy_data / (is_cm ? 
100 : 1); - /* Reset the page selec to its original value */ + /* Reset the page select to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, default_page); if (ret_val) - goto out; + return ret_val; break; + case M88E1112_E_PHY_ID: /* Remember the original page select and set it to 5 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &default_page); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, &phy_data); if (ret_val) - goto out; + return ret_val; index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; @@ -1733,20 +2381,18 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, default_page); if (ret_val) - goto out; + return ret_val; break; default: - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; } -out: return ret_val; } /** - * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the @@ -1756,25 +2402,27 @@ out: * into a lookup table to obtain the approximate cable length * for each channel. **/ -s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { - IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D }; + DEBUGFUNC("e1000_get_cable_length_igp_2"); + /* Read the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) - goto out; + return ret_val; /* * Getting bits 15:9, which represent the combination of @@ -1787,10 +2435,8 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || - (cur_agc_index == 0)) { - ret_val = -E1000_ERR_PHY; - goto out; - } + (cur_agc_index == 0)) + return -E1000_ERR_PHY; /* Remove min & max AGC values from calculation. */ if (e1000_igp_2_cable_length_table[min_agc_index] > @@ -1814,12 +2460,11 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_get_phy_info_m88 - Retrieve PHY information + * e1000_get_phy_info_m88 - Retrieve PHY information * @hw: pointer to the HW structure * * Valid for only copper links. Read the PHY status register (sticky read) @@ -1828,54 +2473,54 @@ out: * special status register to determine MDI/MDIx and current speed. If * speed is 1000, then determine cable length, local and remote receiver. 
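 *
 * Illustrative caller sketch (an assumption added for illustration, not
 * taken from this patch): the shared code normally reaches this function
 * through the phy.ops.get_info pointer filled in at init time, e.g.
 *
 *	ret_val = hw->phy.ops.get_info(hw);
 *	if (ret_val == E1000_SUCCESS)
 *		DEBUGOUT1("cable length: %d\n", hw->phy.cable_length);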
**/ -s32 igb_get_phy_info_m88(struct e1000_hw *hw) +s32 e1000_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; + DEBUGFUNC("e1000_get_phy_info_m88"); + if (phy->media_type != e1000_media_type_copper) { - hw_dbg("Phy info is only valid for copper media\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; } - ret_val = igb_phy_has_link(hw, 1, 0, &link); + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; } ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; - phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) - ? true : false; + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); - ret_val = igb_check_polarity_m88(hw); + ret_val = e1000_check_polarity_m88(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { - ret_val = phy->ops.get_cable_length(hw); + ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) ? e1000_1000t_rx_status_ok @@ -1891,12 +2536,11 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw) phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: return ret_val; } /** - * igb_get_phy_info_igp - Retrieve igp PHY information + * e1000_get_phy_info_igp - Retrieve igp PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then @@ -1904,44 +2548,45 @@ out: * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ -s32 igb_get_phy_info_igp(struct e1000_hw *hw) +s32 e1000_get_phy_info_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; - ret_val = igb_phy_has_link(hw, 1, 0, &link); + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; } phy->polarity_correction = true; - ret_val = igb_check_polarity_igp(hw); + ret_val = e1000_check_polarity_igp(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? 
true : false;
+	phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);

 	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
 	    IGP01E1000_PSSR_SPEED_1000MBPS) {
 		ret_val = phy->ops.get_cable_length(hw);
 		if (ret_val)
-			goto out;
+			return ret_val;

 		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
-			goto out;
+			return ret_val;

 		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
 				? e1000_1000t_rx_status_ok
@@ -1956,93 +2601,160 @@ out:
 	return ret_val;
 }

 /**
- * igb_phy_sw_reset - PHY software reset
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_ife");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+	phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
  * @hw: pointer to the HW structure
  *
  * Does a software reset of the PHY by reading the PHY control register and
 * setting/writing the control register reset bit to the PHY.
  **/
-s32 igb_phy_sw_reset(struct e1000_hw *hw)
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
 {
-	s32 ret_val = 0;
+	s32 ret_val;
 	u16 phy_ctrl;

-	if (!(hw->phy.ops.read_reg))
-		goto out;
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	if (!hw->phy.ops.read_reg)
+		return E1000_SUCCESS;

 	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
 	if (ret_val)
-		goto out;
+		return ret_val;

 	phy_ctrl |= MII_CR_RESET;
 	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
 	if (ret_val)
-		goto out;
+		return ret_val;

-	udelay(1);
+	usec_delay(1);

-out:
 	return ret_val;
 }

 /**
- * igb_phy_hw_reset - PHY hardware reset
+ * e1000_phy_hw_reset_generic - PHY hardware reset
  * @hw: pointer to the HW structure
  *
  * Verify the reset block is not blocking us from resetting.  Acquire
  * semaphore (if necessary) and read/set/write the device control reset
  * bit in the PHY.  Wait the appropriate delay time for the device to
- * reset and relase the semaphore (if necessary).
+ * reset and release the semaphore (if necessary).
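+ *
+ * Illustrative caller sketch (an assumption added for illustration, not
+ * taken from this patch): drivers normally reach this reset through the
+ * phy.ops.reset function pointer rather than calling it directly, e.g.
+ *
+ *	if (hw->phy.ops.reset)
+ *		ret_val = hw->phy.ops.reset(hw);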
 **/
-s32 igb_phy_hw_reset(struct e1000_hw *hw)
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
-	s32  ret_val;
+	s32 ret_val;
 	u32 ctrl;

-	ret_val = igb_check_reset_block(hw);
-	if (ret_val) {
-		ret_val = 0;
-		goto out;
-	}
+	DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+	ret_val = phy->ops.check_reset_block(hw);
+	if (ret_val)
+		return E1000_SUCCESS;

 	ret_val = phy->ops.acquire(hw);
 	if (ret_val)
-		goto out;
+		return ret_val;

-	ctrl = rd32(E1000_CTRL);
-	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
-	wrfl();
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);

-	udelay(phy->reset_delay_us);
+	usec_delay(phy->reset_delay_us);

-	wr32(E1000_CTRL, ctrl);
-	wrfl();
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);

-	udelay(150);
+	usec_delay(150);

 	phy->ops.release(hw);

-	ret_val = phy->ops.get_cfg_done(hw);
+	return phy->ops.get_cfg_done(hw);
+}

-out:
-	return ret_val;
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milli-seconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_cfg_done_generic");
+
+	msec_delay_irq(10);
+
+	return E1000_SUCCESS;
 }

 /**
- * igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
  * @hw: pointer to the HW structure
  *
 * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
  **/
-s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
 {
-	hw_dbg("Running IGP 3 PHY init script\n");
+	DEBUGOUT("Running IGP 3 PHY init script\n");

 	/* PHY init IGP 3 */
 	/* Enable rise/fall, 10-mode work in class-A */
@@ -2053,7 +2765,7 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 	hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
 	/* Increase Hybrid poly bias */
 	hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
-	/* Add 4% to TX amplitude in Giga mode */
+	/* Add 4% to Tx amplitude in Gig mode */
 	hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
 	/* Disable trimming (TTT) */
 	hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
@@ -2116,17 +2828,101 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 	/* Restart AN, Speed selection is 1000 */
 	hw->phy.ops.write_reg(hw, 0x0000, 0x1340);

-	return 0;
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+	case I347AT4_E_PHY_ID:
+	case M88E1112_E_PHY_ID:
+	case M88E1340M_E_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	case I82580_I_PHY_ID:
+		phy_type = e1000_phy_82580;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+	u32 phy_addr = 0;
+	u32 i;
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
+		do {
+			e1000_get_phy_id(hw);
+			phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+			/*
+			 * If phy_type is valid, break - we found our
+			 * PHY address
+			 */
+			if (phy_type != e1000_phy_unknown)
+				return E1000_SUCCESS;
+
+			msec_delay(1);
+			i++;
+		} while (i < 10);
+	}
+
+	return -E1000_ERR_PHY_TYPE;
+}

 /**
- * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
  *
  * In the case of a PHY power down to save power, or to turn off link during a
- * driver unload, restore the link to previous settings.
+ * driver unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
  **/
-void igb_power_up_phy_copper(struct e1000_hw *hw)
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;

@@ -2137,13 +2933,14 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
 }

 /**
- * igb_power_down_phy_copper - Power down copper PHY
+ * e1000_power_down_phy_copper - Power down copper PHY
  * @hw: pointer to the HW structure
  *
  * Power down PHY to save power when interface is down and wake on lan
  * is not enabled.
  **/
-void igb_power_down_phy_copper(struct e1000_hw *hw)
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;

@@ -2151,107 +2948,83 @@ void igb_power_down_phy_copper(struct e1000_hw *hw)
 	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
 	mii_reg |= MII_CR_POWER_DOWN;
 	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-	msleep(1);
+	msec_delay(1);
 }

 /**
- * igb_check_polarity_82580 - Checks the polarity.
+ * e1000_check_polarity_82577 - Checks the polarity.
  * @hw: pointer to the HW structure
  *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
  *
  * Polarity is determined based on the PHY specific status register.
  **/
-static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 data;

+	DEBUGFUNC("e1000_check_polarity_82577");

-	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);

 	if (!ret_val)
-		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
-				      ? e1000_rev_polarity_reversed
-				      : e1000_rev_polarity_normal;
+		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;

 	return ret_val;
 }

 /**
- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
  * @hw: pointer to the HW structure
  *
- * Calls the PHY setup function to force speed and duplex.  Clears the
- * auto-crossover to force MDI manually.  Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex. **/ -s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; + DEBUGFUNC("e1000_phy_force_speed_duplex_82577"); ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; - igb_phy_force_speed_duplex_setup(hw, &phy_data); + e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; - - /* - * Clear Auto-Crossover to force MDI manually. 82580 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; - phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; - - ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); - if (ret_val) - goto out; - - hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + return ret_val; - udelay(1); + usec_delay(1); if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) - hw_dbg("Link taking longer than expected.\n"); + DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } -out: return ret_val; } /** - * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then @@ -2259,89 +3032,88 @@ out: * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ -s32 igb_get_phy_info_82580(struct e1000_hw *hw) +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; + DEBUGFUNC("e1000_get_phy_info_82577"); - ret_val = igb_phy_has_link(hw, 1, 0, &link); + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; } phy->polarity_correction = true; - ret_val = igb_check_polarity_82580(hw); + ret_val = e1000_check_polarity_82577(hw); if (ret_val) - goto out; + return ret_val; - ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? 
true : false; + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); - if ((data & I82580_PHY_STATUS2_SPEED_MASK) == - I82580_PHY_STATUS2_SPEED_1000MBPS) { + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: - return ret_val; + return E1000_SUCCESS; } /** - * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY * @hw: pointer to the HW structure * * Reads the diagnostic status register and verifies result is valid before * placing it in the phy_cable_length field. **/ -s32 igb_get_cable_length_82580(struct e1000_hw *hw) +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, length; + DEBUGFUNC("e1000_get_cable_length_82577"); - ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; - length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> - I82580_DSTATUS_CABLE_LENGTH_SHIFT; + length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT; if (length == E1000_CABLE_LENGTH_UNDEFINED) ret_val = -E1000_ERR_PHY; phy->cable_length = length; -out: - return ret_val; + return E1000_SUCCESS; } diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h index 4c32ac66ff39..16518ad3eba4 100644 --- a/drivers/net/igb/e1000_phy.h +++ b/drivers/net/igb/e1000_phy.h @@ -28,109 +28,217 @@ #ifndef _E1000_PHY_H_ #define _E1000_PHY_H_ -enum e1000_ms_type { - e1000_ms_hw_default = 0, - e1000_ms_force_master, - e1000_ms_force_slave, - e1000_ms_auto -}; - -enum e1000_smart_speed { - e1000_smart_speed_default = 0, - e1000_smart_speed_on, - e1000_smart_speed_off -}; - -s32 igb_check_downshift(struct e1000_hw *hw); -s32 igb_check_reset_block(struct e1000_hw *hw); -s32 igb_copper_link_setup_igp(struct e1000_hw *hw); -s32 igb_copper_link_setup_m88(struct e1000_hw *hw); -s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); -s32 igb_get_cable_length_m88(struct e1000_hw *hw); -s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); -s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); -s32 igb_get_phy_id(struct e1000_hw *hw); -s32 igb_get_phy_info_igp(struct e1000_hw *hw); -s32 igb_get_phy_info_m88(struct e1000_hw *hw); -s32 igb_phy_sw_reset(struct e1000_hw *hw); -s32 igb_phy_hw_reset(struct e1000_hw *hw); -s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); -s32 igb_setup_copper_link(struct e1000_hw *hw); -s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); -s32 
igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success); -void igb_power_up_phy_copper(struct e1000_hw *hw); -void igb_power_down_phy_copper(struct e1000_hw *hw); -s32 igb_phy_init_script_igp3(struct e1000_hw *hw); -s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); -s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); -s32 igb_copper_link_setup_82580(struct e1000_hw *hw); -s32 
igb_get_phy_info_82580(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); -s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 /* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 - -#define I82580_ADDR_REG 16 -#define I82580_CFG_REG 22 -#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ -#define I82580_CTRL_REG 23 -#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) - -/* 82580 specific PHY registers */ -#define I82580_PHY_CTRL_2 18 -#define I82580_PHY_LBK_CTRL 19 -#define I82580_PHY_STATUS_2 26 -#define I82580_PHY_DIAG_STATUS 31 - -/* I82580 PHY Status 2 */ -#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 -#define I82580_PHY_STATUS2_MDIX 0x0800 -#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 -#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 -#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 - -/* I82580 PHY Control 2 */ -#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 - -/* I82580 PHY Diagnostics Status */ -#define I82580_DSTATUS_CABLE_LENGTH 0x03FC -#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define HV_INTC_FC_PAGE_START 768 +#define 
I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + /* Enable flexible speed on link-up */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 -#define IGP02E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F -#define IGP02E1000_AGC_RANGE 15 - -#define E1000_CABLE_LENGTH_UNDEFINED 0xFF +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define IGP03E1000_PHY_MISC_CTRL 0x1B +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define 
E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 #endif diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index ccdf36d503fd..2191640b428c 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -28,99 +28,110 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -#define E1000_MDIC 0x00020 /* MDI Control - RW */ -#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ -#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ -#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ -#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ -#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ -#define E1000_RCTL 0x00100 /* RX Control - RW */ -#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ -#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. 
Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ -#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -#define E1000_TCTL 0x00400 /* TX Control - RW */ -#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ -#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ -#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ -#define E1000_PBS 0x01008 /* Packet Buffer Size */ -#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ -#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ -#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ -#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ -#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ -#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ - -/* IEEE 1588 TIMESYNCH */ -#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ -#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ -#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ -#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ -#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ -#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ -#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ -#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ -#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ -#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ -#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ -#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ -#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ - -/* Filtering Registers */ -#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) -#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) -#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) -#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) -#define E1000_SAQF0 E1000_SAQF(0) -#define E1000_DAQF0 E1000_DAQF(0) -#define E1000_SPQF0 E1000_SPQF(0) -#define E1000_FTQF0 E1000_FTQF(0) -#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ -#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ - -#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) - -/* DMA Coalescing registers */ -#define E1000_DMACR 0x02508 /* Control Register */ -#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ -#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ -#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ -#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ -#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ -#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ - -/* TX Rate Limit Registers */ -#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ -#define E1000_RTTBCNRC 
0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ - -/* Split and Replication RX Control - RW */ -#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n))) +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control 
- RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ /* * Convenience macros * @@ -129,227 +140,455 @@ * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ - : (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ - : (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ - : (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ - : (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ - : (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ - : (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ - : (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ - : (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ - : (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ - : (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ - : (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ - : (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ - : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ - : (0x0E038 + ((_n) * 0x40))) -#define E1000_TDWBAH(_n) ((_n) < 4 ? 
(0x0383C + ((_n) * 0x100)) \ - : (0x0E03C + ((_n) * 0x40))) -#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ -#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ -#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ -#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ -#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ -#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ -#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ -#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ -#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ -#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ -#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ -#define E1000_COLC 0x04028 /* Collision Count - R/clr */ -#define E1000_DC 0x04030 /* Defer Count - R/clr */ -#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ -#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ -#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ -#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ -#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ -#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ -#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ -#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ -#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ -#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ -#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ -#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ -#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ -#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ -#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ -#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ -#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ -#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ -#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ -#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ -#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ -#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ -#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ -#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ -#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ -#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ -#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ -#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ -#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ -#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ -#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ -#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ -#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ -#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ -#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ -#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ -#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ -#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ -#define E1000_PTC127 
0x040DC /* Packets TX (65-127 bytes) - R/clr */ -#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ -#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ -#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ -#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ -#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ -#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ -#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ -#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ -#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ -/* Interrupt Cause Rx Packet Timer Expire Count */ -#define E1000_ICRXPTC 0x04104 -/* Interrupt Cause Rx Absolute Timer Expire Count */ -#define E1000_ICRXATC 0x04108 -/* Interrupt Cause Tx Packet Timer Expire Count */ -#define E1000_ICTXPTC 0x0410C -/* Interrupt Cause Tx Absolute Timer Expire Count */ -#define E1000_ICTXATC 0x04110 -/* Interrupt Cause Tx Queue Empty Count */ -#define E1000_ICTXQEC 0x04118 -/* Interrupt Cause Tx Queue Minimum Threshold Count */ -#define E1000_ICTXQMTC 0x0411C -/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ -#define E1000_ICRXDMTC 0x04120 -#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ -#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ -#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ -#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ -#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ -#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ -#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ -#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ -#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ -#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ -#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ -#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ -#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ -#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ -#define E1000_LENERRS 0x04138 /* Length Errors Count */ -#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ -#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ -#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ -#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ -#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ -#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ -#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ -#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ -#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -#define E1000_RA 0x05400 /* Receive Address - RW Array */ -#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ -#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) -#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) -#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) -#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) -#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) -#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) -#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) -#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ -#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -#define E1000_MANC 0x05820 /* Management Control - RW */ -#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+/* Same as TXPBS, renamed for newer Si - RW */
+#define E1000_ITPBS 0x03404
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+/* DMA Tx Max Total Allow Size Reqs - RW */
+#define E1000_DTXMXSZRQ 0x03540
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
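Editorial aside: nearly every counter in the block above is tagged R/clr, meaning a read returns the count accumulated since the previous read and clears the register as a side effect. A driver therefore folds each read into a wider software accumulator rather than treating the register as a running total. A minimal userspace-style sketch of that pattern follows; rd32(), hw_addr and struct soft_stats are illustrative stand-ins, not the driver's own names (the kernel driver reads through readl() into its e1000_hw_stats mirror):

#include <stdint.h>

/* Illustrative software mirror for two of the R/clr counters. */
struct soft_stats {
	uint64_t crcerrs;   /* CRC Error Count */
	uint64_t prc1522;   /* Packets Rx (1024-1522 bytes) */
};

/* Stand-in MMIO read from a mapped BAR0. */
static inline uint32_t rd32(volatile uint8_t *hw_addr, uint32_t reg)
{
	return *(volatile uint32_t *)(hw_addr + reg);
}

/* Called periodically: because the registers clear on read, "+=" is the
 * only correct way to consume them, and nothing else may read these
 * offsets, or counts are silently lost. */
static void update_stats(volatile uint8_t *hw_addr, struct soft_stats *s)
{
	s->crcerrs += rd32(hw_addr, 0x04000);   /* E1000_CRCERRS */
	s->prc1522 += rd32(hw_addr, 0x04070);   /* E1000_PRC1522 */
}
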
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ -#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ -#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ -#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ -#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ -#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -#define E1000_SWSM 0x05B50 /* SW Semaphore */ -#define E1000_FWSM 0x05B54 /* FW Semaphore */ -#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + 
(0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) -/* RSS registers */ -#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ -#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ -#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ -#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ -/* MSI-X Allocation Register (_i) - RW */ -#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) -/* Redirection Table - RW Array */ -#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) -#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* + * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Pkt Cnt - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet 
Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +/* MSI-X Table entry addr low reg - RW */ +#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) +/* MSI-X Table entry addr upper reg - RW */ +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) +/* MSI-X Table entry message reg - RW */ +#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) +/* MSI-X Table entry vector ctrl reg - RW */ +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) +#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* VT Registers */ -#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ -#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ -#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ -#define E1000_VFRE 0x00C8C /* VF Receive Enables */ -#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ -#define E1000_QDE 
0x02408 /* Queue Drop Enable - RW */ -#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ -#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ -#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ -#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ -#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ -#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ /* These act per VF so an array friendly macro is used */ -#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) -#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) -#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine - * Filter - RW */ -#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ -#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) -#define 
rd32(reg) (readl(hw->hw_addr + reg)) -#define wrfl() ((void)rd32(E1000_STATUS)) +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ -#define array_wr32(reg, offset, value) \ - (writel(value, hw->hw_addr + reg + ((offset) << 2))) -#define array_rd32(reg, offset) \ - (readl(hw->hw_addr + reg + ((offset) << 2))) +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ /* DMA Coalescing registers */ -#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high 
watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ -/* Energy Efficient Ethernet "EEE" register */ -#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ -#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 -/* Thermal Sensor Register */ -#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ /* OS2BMC Registers */ -#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ -#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ -#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ -#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + #endif diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 127147753a63..dd0e2c116446 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -25,29 +25,75 @@ *******************************************************************************/ - /* Linux PRO/1000 Ethernet Driver main header file */ #ifndef _IGB_H_ #define _IGB_H_ -#include "e1000_mac.h" -#include "e1000_82575.h" +#include + +#ifndef IGB_NO_LRO +#include +#endif +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif + +#ifdef HAVE_HW_TIME_STAMP #include #include #include -#include -#include +#endif struct igb_adapter; +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IGB_DCA +#endif +#ifdef IGB_DCA +#include +#endif + +#ifndef HAVE_HW_TIME_STAMP +#undef IGB_PER_PKT_TIMESTAMP +#endif + + +#include "kcompat.h" + +#ifdef HAVE_SCTP +#include +#endif + +#include "e1000_api.h" +#include "e1000_82575.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +#define IGB_ERR(args...) printk(KERN_ERR "igb: " args) + +#define PFX "igb: " +#define DPRINTK(nlevel, klevel, fmt, args...) 
\
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+	__FUNCTION__ , ## args))
+
 /* Interrupt defines */
 #define IGB_START_ITR 648 /* ~6000 ints/sec */
 #define IGB_4K_ITR 980
 #define IGB_20K_ITR 196
 #define IGB_70K_ITR 56
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGB_INT_MODE_LEGACY 0
+#define IGB_INT_MODE_MSI 1
+#define IGB_INT_MODE_MSIX 2
+
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
 #define IGB_DEFAULT_TX_WORK 128
@@ -58,33 +104,41 @@ struct igb_adapter;
 #define IGB_MIN_RXD 80
 #define IGB_MAX_RXD 4096
-#define IGB_DEFAULT_ITR 3 /* dynamic */
-#define IGB_MAX_ITR_USECS 10000
-#define IGB_MIN_ITR_USECS 10
+#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
+#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
+
 #define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 8
+#define MAX_Q_VECTORS 10
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4))
-#define IGB_MAX_TX_QUEUES 16
+#define IGB_MAX_RX_QUEUES 16
+#define IGB_MAX_TX_QUEUES 16
-#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_MC_ENTRIES 30
 #define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_MAX_VFTA_ENTRIES 128
-#define IGB_82576_VF_DEV_ID 0x10CA
-#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_MAX_UTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define OUI_LEN 3
+#define IGB_MAX_VMDQ_QUEUES 8
+
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
 	u16 vlans_enabled;
+	unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
+	u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
 	u32 flags;
 	unsigned long last_nack;
+#ifdef IFLA_VF_MAX
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 	u16 tx_rate;
+#endif
 	struct pci_dev *vfdev;
 };
@@ -109,25 +163,41 @@ struct vf_data_storage {
 #define IGB_TX_PTHRESH 8
 #define IGB_TX_HTHRESH 1
 #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ adapter->msix_entries) ? 1 : 4)
 #define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ adapter->msix_entries) ? 1 : 16)
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 more,
+ * this adds roughly 448 bytes of extra data meaning the smallest
+ * allocation we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_512 512
 #define IGB_RXBUFFER_16384 16384
 #define IGB_RX_HDR_LEN IGB_RXBUFFER_512
+
+/* Packet Buffer allocations */
+#define IGB_PBA_BYTES_SHIFT 0xA
+#define IGB_TX_HEAD_ADDR_SHIFT 7
+#define IGB_PBA_TX_MASK 0xFFFF0000
+
+#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
+#define IGB_TX_QUEUE_WAKE 32
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
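To make the slab arithmetic in the NOTE above concrete, take the comment's own overhead figures at face value: 512 (IGB_RXBUFFER_512) + 16 (netdev_alloc_skb reserve) + 2 (NET_IP_ALIGN) + 384 (skb_shared_info) = 914 bytes, and the smallest power-of-two kmalloc slab that can hold 914 bytes is 1024. That is why the comment maps RXBUFFER_512 to the size-1024 slab rather than to a 512-byte allocation.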
-#define AUTO_ALL_MODES 0
 #define IGB_EEPROM_APME 0x0400
+#ifndef ETH_TP_MDI_X
+#define AUTO_ALL_MODES 0
+#endif
 #ifndef IGB_MASTER_SLAVE
 /* Switch to override PHY master/slave setting */
@@ -136,13 +206,61 @@ struct vf_data_storage {
 #define IGB_MNG_VLAN_NONE -1
+#ifndef IGB_NO_LRO
+#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/
+struct igb_lro_stats {
+	u32 flushed;
+	u32 coal;
+	u32 recycled;
+};
+
+/*
+ * igb_lro_header - header format to be aggregated by LRO
+ * @iph: IP header without options
+ * @tcp: TCP header
+ * @ts: Optional TCP timestamp data in TCP options
+ *
+ * This structure relies on the check above that verifies that the header
+ * is IPv4 and does not contain any options.
+ */
+struct igb_lrohdr {
+	struct iphdr iph;
+	struct tcphdr th;
+	__be32 ts[0];
+};
+
+struct igb_lro_list {
+	struct sk_buff_head active;
+	struct igb_lro_stats stats;
+};
+
+#endif /* IGB_NO_LRO */
+struct igb_cb {
+#ifndef IGB_NO_LRO
+	union { /* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
+	__be32 tsecr; /* timestamp echo response */
+	u32 tsval; /* timestamp value in host order */
+	u32 next_seq; /* next expected sequence number */
+	u16 free; /* 65521 minus total size */
+	u16 mss; /* size of data portion of packet */
+	u16 append_cnt; /* number of skb's appended */
+#endif /* IGB_NO_LRO */
+#ifdef HAVE_VLAN_RX_REGISTER
+	u16 vid; /* VLAN tag */
+#endif
+};
+#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
+
 #define IGB_TX_FLAGS_CSUM 0x00000001
 #define IGB_TX_FLAGS_VLAN 0x00000002
 #define IGB_TX_FLAGS_TSO 0x00000004
 #define IGB_TX_FLAGS_IPV4 0x00000008
 #define IGB_TX_FLAGS_TSTAMP 0x00000010
 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT 16
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
@@ -153,24 +271,25 @@ struct igb_tx_buffer {
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
-	dma_addr_t dma;
-	u32 length;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
 };
 struct igb_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
 	struct page *page;
 	dma_addr_t page_dma;
 	u32 page_offset;
+#endif
 };
 struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 restart_queue;
-	u64 restart_queue2;
 };
 struct igb_rx_queue_stats {
@@ -204,25 +323,31 @@ struct igb_q_vector {
 	u8 set_itr;
 	void __iomem *itr_register;
+#ifndef IGB_NO_LRO
+	struct igb_lro_list *lrolist; /* LRO list for queue vector*/
+#endif
 	char name[IFNAMSIZ + 9];
-};
+#ifndef HAVE_NETDEV_NAPI_LIST
+	struct net_device poll_dev;
+#endif
+} ____cacheline_internodealigned_in_smp;
 struct igb_ring {
-	struct igb_q_vector *q_vector; /* backlink to q_vector */
-	struct net_device *netdev; /* back pointer to net_device */
-	struct device *dev; /* device pointer for dma mapping */
+	struct igb_q_vector *q_vector; /* backlink to q_vector */
+	struct net_device *netdev; /* back pointer to net_device */
+	struct device *dev; /* device for dma mapping */
 	union { /* array of buffer info structs */
 		struct igb_tx_buffer *tx_buffer_info;
 		struct igb_rx_buffer *rx_buffer_info;
 	};
-	void *desc; /* descriptor ring memory */
-	unsigned long flags; /* ring specific flags */
-	void __iomem *tail; /* pointer to ring tail register */
+	void *desc; /* descriptor ring memory */
+	unsigned long flags; /* ring specific flags */
+	void __iomem *tail; /* pointer to ring tail register */
-	u16 count; /* number of desc. in the ring */
-	u8 queue_index; /* logical index of the ring*/
-	u8 reg_idx; /* physical index of the ring */
-	u32 size; /* length of desc. ring in bytes */
+	u16 count; /* number of desc. in the ring */
+	u8 queue_index; /* logical index of the ring*/
+	u8 reg_idx; /* physical index of the ring */
+	u32 size; /* length of desc. ring in bytes */
 	/* everything past this point are written often */
 	u16 next_to_clean ____cacheline_aligned_in_smp;
@@ -232,26 +357,42 @@ struct igb_ring {
 	/* TX */
 	struct {
 		struct igb_tx_queue_stats tx_stats;
-		struct u64_stats_sync tx_syncp;
-		struct u64_stats_sync tx_syncp2;
 	};
 	/* RX */
 	struct {
 		struct igb_rx_queue_stats rx_stats;
-		struct u64_stats_sync rx_syncp;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+		u16 rx_buffer_len;
+#endif
 	};
 	};
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+	struct net_device *vmdq_netdev;
+	int vqueue_index; /* queue index for virtual netdev */
+#endif
 	/* Items past this point are only used during ring alloc / free */
-	dma_addr_t dma; /* phys address of the ring */
+	dma_addr_t dma; /* phys address of the ring */
 	int numa_node; /* node to alloc ring memory on */
-};
+
+} ____cacheline_internodealigned_in_smp;
 enum e1000_ring_flags_t {
+#ifndef HAVE_NDO_SET_FEATURES
+	IGB_RING_FLAG_RX_CSUM,
+#endif
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGB_RING_FLAG_TX_CTX_IDX,
-	IGB_RING_FLAG_TX_DETECT_HANG
+	IGB_RING_FLAG_TX_DETECT_HANG,
 };
+struct igb_mac_addr {
+	u8 addr[ETH_ALEN];
+	u16 queue;
+	u16 state; /* bitmask */
+};
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_MODIFIED 0x2
+#define IGB_MAC_STATE_IN_USE 0x4
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
@@ -262,6 +403,16 @@ enum e1000_ring_flags_t {
 #define IGB_TX_CTXTDESC(R, i) \
 	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+#define netdev_ring(ring) \
+	((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
+#define ring_queue_index(ring) \
+	((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
+#else
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
 /* igb_test_staterr - tests bits within Rx descriptor status and error fields */
 static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
 				      const u32 stat_err_bits)
@@ -270,18 +421,40 @@ static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
 }
 /* igb_desc_unused - calculate if we have unused descriptors */
-static inline int igb_desc_unused(struct igb_ring *ring)
+static inline u16 igb_desc_unused(const struct igb_ring *ring)
 {
-	if (ring->next_to_clean > ring->next_to_use)
-		return ring->next_to_clean - ring->next_to_use - 1;
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
-	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
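The rewritten igb_desc_unused() above folds the old two-branch version into one expression: when next_to_clean has wrapped past next_to_use no count bias is needed, and one slot is always held back so that equal indices unambiguously mean an empty ring. A standalone sketch (hypothetical 256-entry ring, not driver code) checking the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Same expression as the new igb_desc_unused(), lifted out of the
 * ring struct for illustration. */
static uint16_t desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	assert(desc_unused(256, 10, 200) == 65);  /* no wrap: 256 + 10 - 200 - 1 */
	assert(desc_unused(256, 200, 10) == 189); /* wrapped: 0 + 200 - 10 - 1 */
	assert(desc_unused(256, 5, 5) == 255);    /* empty ring keeps one slot back */
	return 0;
}
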
+#ifdef CONFIG_BQL
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+#endif /* CONFIG_BQL */
+
+// #ifdef EXT_THERMAL_SENSOR_SUPPORT
+// #ifdef IGB_PROCFS
+struct igb_therm_proc_data
+{
+	struct e1000_hw *hw;
+	struct e1000_thermal_diode_data *sensor_data;
+};
+
+// #endif /* IGB_PROCFS */
+// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
+
 /* board specific private data structure */
 struct igb_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+	/* vlgrp must be first member of structure */
+	struct vlan_group *vlgrp;
+#else
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
+#endif
 	struct net_device *netdev;
 	unsigned long state;
@@ -290,51 +463,54 @@ struct igb_adapter {
 	unsigned int num_q_vectors;
 	struct msix_entry *msix_entries;
-	/* Interrupt Throttle Rate */
-	u32 rx_itr_setting;
-	u32 tx_itr_setting;
-	u16 tx_itr;
-	u16 rx_itr;
 	/* TX */
 	u16 tx_work_limit;
 	u32 tx_timeout_count;
 	int num_tx_queues;
-	struct igb_ring *tx_ring[16];
+	struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
 	/* RX */
 	int num_rx_queues;
-	struct igb_ring *rx_ring[16];
-
-	u32 max_frame_size;
-	u32 min_frame_size;
+	struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
 	struct timer_list watchdog_timer;
+	struct timer_list dma_err_timer;
 	struct timer_list phy_info_timer;
-	u16 mng_vlan_id;
 	u32 bd_number;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
 	u16 link_duplex;
+	u8 port_num;
+
+	/* Interrupt Throttle Rate */
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
 	struct work_struct reset_task;
 	struct work_struct watchdog_task;
+	struct work_struct dma_err_task;
 	bool fc_autoneg;
 	u8 tx_timeout_factor;
-	struct timer_list blink_timer;
-	unsigned long led_status;
+
+	u32 max_frame_size;
 	/* OS defined structs */
 	struct pci_dev *pdev;
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+	struct net_device_stats net_stats;
+#endif
+#ifndef IGB_NO_LRO
+	struct igb_lro_stats lro_stats;
+#endif
+#ifdef HAVE_HW_TIME_STAMP
 	struct cyclecounter cycles;
 	struct timecounter clock;
 	struct timecompare compare;
 	struct hwtstamp_config hwtstamp_config;
-
-	spinlock_t stats64_lock;
-	struct rtnl_link_stats64 stats64;
+#endif
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
@@ -342,9 +518,11 @@ struct igb_adapter {
 	struct e1000_phy_info phy_info;
 	struct e1000_phy_stats phy_stats;
+#ifdef ETHTOOL_TEST
 	u32 test_icr;
 	struct igb_ring test_tx_ring;
 	struct igb_ring test_rx_ring;
+#endif
 	int msg_enable;
@@ -355,41 +533,135 @@ struct igb_adapter {
 	/* to not mess up cache alignment, always add to the bottom */
 	u32 eeprom_wol;
+	u32 *config_space;
 	u16 tx_ring_count;
 	u16 rx_ring_count;
-	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
+#ifdef IFLA_VF_MAX
 	int vf_rate_link_speed;
+#endif
+	u32 lli_port;
+	u32 lli_size;
+	unsigned int vfs_allocated_count;
+	/* Malicious Driver Detection flag.
Valid only when SR-IOV is enabled */ + bool mdd; + int int_mode; u32 rss_queues; - u32 wvbr; + u32 vmdq_pools; + u16 fw_version; int node; + u32 wvbr; + struct igb_mac_addr *mac_table; +#ifdef CONFIG_IGB_VMDQ_NETDEV + struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; +#endif + int vferr_refcount; + int dmac; u32 *shadow_vfta; + + /* External Thermal Sensor support flag */ + bool ets; +#ifdef IGB_SYSFS + struct kobject *info_kobj; + struct kobject *therm_kobj[E1000_MAX_SENSORS]; +#else /* IGB_SYSFS */ +#ifdef IGB_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; + struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ }; +#ifdef CONFIG_IGB_VMDQ_NETDEV +struct igb_vmdq_adapter { +#ifdef HAVE_VLAN_RX_REGISTER + /* vlgrp must be first member of structure */ + struct vlan_group *vlgrp; +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + struct igb_adapter *real_adapter; + struct net_device *vnetdev; + struct net_device_stats net_stats; + struct igb_ring *tx_ring; + struct igb_ring *rx_ring; +}; +#endif + + #define IGB_FLAG_HAS_MSI (1 << 0) -#define IGB_FLAG_DCA_ENABLED (1 << 1) -#define IGB_FLAG_QUAD_PORT_A (1 << 2) -#define IGB_FLAG_QUEUE_PAIRS (1 << 3) -#define IGB_FLAG_DMAC (1 << 4) +#define IGB_FLAG_MSI_ENABLE (1 << 1) +#define IGB_FLAG_DCA_ENABLED (1 << 2) +#define IGB_FLAG_LLI_PUSH (1 << 3) +#define IGB_FLAG_QUAD_PORT_A (1 << 4) +#define IGB_FLAG_QUEUE_PAIRS (1 << 5) +#define IGB_FLAG_EEE (1 << 6) +#define IGB_FLAG_DMAC (1 << 7) +#define IGB_FLAG_DETECT_BAD_DMA (1 << 8) -/* DMA Coalescing defines */ #define IGB_MIN_TXPBSIZE 20408 #define IGB_TX_BUF_4096 4096 + #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ +/* DMA Coalescing defines */ +#define IGB_DMAC_DISABLE 0 +#define IGB_DMAC_MIN 250 +#define IGB_DMAC_500 500 +#define IGB_DMAC_EN_DEFAULT 1000 +#define IGB_DMAC_2000 2000 +#define IGB_DMAC_3000 3000 +#define IGB_DMAC_4000 4000 +#define IGB_DMAC_5000 5000 +#define IGB_DMAC_6000 6000 +#define IGB_DMAC_7000 7000 +#define IGB_DMAC_8000 8000 +#define IGB_DMAC_9000 9000 +#define IGB_DMAC_MAX 10000 + + #define IGB_82576_TSYNC_SHIFT 19 #define IGB_82580_TSYNC_SHIFT 24 #define IGB_TS_HDR_LEN 16 + +/* CEM Support */ +#define FW_HDR_LEN 0x4 +#define FW_CMD_DRV_INFO 0xDD +#define FW_CMD_DRV_INFO_LEN 0x5 +#define FW_CMD_RESERVED 0X0 +#define FW_RESP_SUCCESS 0x1 +#define FW_UNUSED_VER 0x0 +#define FW_MAX_RETRIES 3 +#define FW_STATUS_SUCCESS 0x1 +#define FW_FAMILY_DRV_VER 0Xffffffff + +#define IGB_MAX_LINK_TRIES 20 + +struct e1000_fw_hdr { + u8 cmd; + u8 buf_len; + union + { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; +struct e1000_fw_drv_info { + struct e1000_fw_hdr hdr; + u8 port_num; + u32 drv_version; + u16 pad; /* end spacing to ensure length is mult. of dword */ + u8 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, __IGB_DOWN }; -enum igb_boards { - board_82575, -}; - extern char igb_driver_name[]; extern char igb_driver_version[]; @@ -397,7 +669,7 @@ extern int igb_up(struct igb_adapter *); extern void igb_down(struct igb_adapter *); extern void igb_reinit_locked(struct igb_adapter *); extern void igb_reset(struct igb_adapter *); -extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +extern int igb_set_spd_dplx(struct igb_adapter *, u16); extern int igb_setup_tx_resources(struct igb_ring *); extern int igb_setup_rx_resources(struct igb_ring *); extern void igb_free_tx_resources(struct igb_ring *); @@ -408,43 +680,38 @@ extern void igb_setup_tctl(struct igb_adapter *); extern void igb_setup_rctl(struct igb_adapter *); extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); extern void igb_unmap_and_free_tx_resource(struct igb_ring *, - struct igb_tx_buffer *); + struct igb_tx_buffer *); extern void igb_alloc_rx_buffers(struct igb_ring *, u16); -extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +extern void igb_clean_rx_ring(struct igb_ring *); +extern void igb_update_stats(struct igb_adapter *); extern bool igb_has_link(struct igb_adapter *adapter); extern void igb_set_ethtool_ops(struct net_device *); +extern void igb_check_options(struct igb_adapter *); extern void igb_power_up_link(struct igb_adapter *); +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *); +#endif +extern int igb_write_mc_addr_list(struct net_device *netdev); +extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); +extern int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue); +extern int igb_available_rars(struct igb_adapter *adapter); +extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32); +extern void igb_configure_vt_default_pool(struct igb_adapter *adapter); +extern void igb_enable_vlan_tags(struct igb_adapter *adapter); +#ifndef HAVE_VLAN_RX_REGISTER +extern void igb_vlan_mode(struct net_device *, u32); +#endif -static inline s32 igb_reset_phy(struct e1000_hw *hw) -{ - if (hw->phy.ops.reset) - return hw->phy.ops.reset(hw); - - return 0; -} - -static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) -{ - if (hw->phy.ops.read_reg) - return hw->phy.ops.read_reg(hw, offset, data); - - return 0; -} - -static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) -{ - if (hw->phy.ops.write_reg) - return hw->phy.ops.write_reg(hw, offset, data); - - return 0; -} - -static inline s32 igb_get_phy_info(struct e1000_hw *hw) -{ - if (hw->phy.ops.get_phy_info) - return hw->phy.ops.get_phy_info(hw); - - return 0; -} +#ifdef IGB_SYSFS +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#else +#ifdef IGB_PROCFS +int igb_procfs_init(struct igb_adapter* adapter); +void igb_procfs_exit(struct igb_adapter* adapter); +int igb_procfs_topdir_init(void); +void igb_procfs_topdir_exit(void); +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ #endif /* _IGB_H_ */ diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 9f100f372128..01d85b92e109 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -27,19 +27,23 @@ /* ethtool support for igb */ -#include #include -#include -#include -#include -#include +#include + +#ifdef SIOCETHTOOL #include -#include -#include +#ifdef CONFIG_PM_RUNTIME #include +#endif /* 
CONFIG_PM_RUNTIME */ #include "igb.h" +#include "igb_regtest.h" +#include +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif +#ifdef ETHTOOL_GSTATS struct igb_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -84,6 +88,11 @@ static const struct igb_stats igb_gstrings_stats[] = { IGB_STAT("tx_flow_control_xoff", stats.xofftxc), IGB_STAT("rx_long_byte_count", stats.gorc), IGB_STAT("tx_dma_out_of_sync", stats.doosync), +#ifndef IGB_NO_LRO + IGB_STAT("lro_aggregated", lro_stats.coal), + IGB_STAT("lro_flushed", lro_stats.flushed), + IGB_STAT("lro_recycled", lro_stats.recycled), +#endif /* IGB_LRO */ IGB_STAT("tx_smbus", stats.mgptc), IGB_STAT("rx_smbus", stats.mgprc), IGB_STAT("dropped_smbus", stats.mgpdc), @@ -94,9 +103,9 @@ static const struct igb_stats igb_gstrings_stats[] = { }; #define IGB_NETDEV_STAT(_net_stat) { \ - .stat_string = __stringify(_net_stat), \ - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ - .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ + .stat_string = #_net_stat, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ } static const struct igb_stats igb_gstrings_net_stats[] = { IGB_NETDEV_STAT(rx_errors), @@ -110,15 +119,12 @@ static const struct igb_stats igb_gstrings_net_stats[] = { IGB_NETDEV_STAT(tx_heartbeat_errors) }; -#define IGB_GLOBAL_STATS_LEN \ - (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) -#define IGB_NETDEV_STATS_LEN \ - (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats) +#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats) #define IGB_RX_QUEUE_STATS_LEN \ (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) - -#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ - +#define IGB_TX_QUEUE_STATS_LEN \ + (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) #define IGB_QUEUE_STATS_LEN \ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ IGB_RX_QUEUE_STATS_LEN) + \ @@ -127,12 +133,15 @@ static const struct igb_stats igb_gstrings_net_stats[] = { #define IGB_STATS_LEN \ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -171,21 +180,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ADVERTISED_Pause); ecmd->port = PORT_FIBRE; - } + } ecmd->transceiver = XCVR_INTERNAL; - status = rd32(E1000_STATUS); + status = E1000_READ_REG(hw, E1000_STATUS); if (status & E1000_STATUS_LU) { if ((status & E1000_STATUS_SPEED_1000) || hw->phy.media_type != e1000_media_type_copper) - ethtool_cmd_speed_set(ecmd, SPEED_1000); + ecmd->speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) - ethtool_cmd_speed_set(ecmd, SPEED_100); + ecmd->speed = SPEED_100; else - ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->speed = SPEED_10; if ((status & E1000_STATUS_FD) || hw->phy.media_type != e1000_media_type_copper) @@ -193,11 +202,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) else ecmd->duplex = DUPLEX_HALF; } else { - 
ethtool_cmd_speed_set(ecmd, -1); + ecmd->speed = -1; ecmd->duplex = -1; } ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; +#ifdef ETH_TP_MDI_X + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + +#endif /* ETH_TP_MDI_X */ return 0; } @@ -208,14 +228,14 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ - if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " + if (e1000_check_reset_block(hw)) { + dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link " "characteristics when SoL/IDER is active.\n"); return -EINVAL; } while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (ecmd->autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; @@ -226,13 +246,30 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; } else { - u32 speed = ethtool_cmd_speed(ecmd); - if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { + if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { clear_bit(__IGB_RESETTING, &adapter->state); return -EINVAL; } } +#ifdef ETH_TP_MDI_X + /* MDI-X =>2; MDI=>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (ecmd->eth_tp_mdix) { + case ETH_TP_MDI_X: + hw->phy.mdix = 2; + break; + case ETH_TP_MDI: + hw->phy.mdix = 1; + break; + case ETH_TP_MDI_INVALID: + default: + hw->phy.mdix = 0; + break; + } + } + +#endif /* ETH_TP_MDI_X */ /* reset the link */ if (netif_running(adapter->netdev)) { igb_down(adapter); @@ -291,7 +328,7 @@ static int igb_set_pauseparam(struct net_device *netdev, adapter->fc_autoneg = pause->autoneg; while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; @@ -314,7 +351,7 @@ static int igb_set_pauseparam(struct net_device *netdev, hw->fc.current_mode = hw->fc.requested_mode; retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
- igb_force_mac_fc(hw) : igb_setup_link(hw)); + e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw)); } clear_bit(__IGB_RESETTING, &adapter->state); @@ -335,7 +372,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data) static int igb_get_regs_len(struct net_device *netdev) { -#define IGB_REGS_LEN 551 +#define IGB_REGS_LEN 555 return IGB_REGS_LEN * sizeof(u32); } @@ -352,78 +389,78 @@ static void igb_get_regs(struct net_device *netdev, regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ - regs_buff[0] = rd32(E1000_CTRL); - regs_buff[1] = rd32(E1000_STATUS); - regs_buff[2] = rd32(E1000_CTRL_EXT); - regs_buff[3] = rd32(E1000_MDIC); - regs_buff[4] = rd32(E1000_SCTL); - regs_buff[5] = rd32(E1000_CONNSW); - regs_buff[6] = rd32(E1000_VET); - regs_buff[7] = rd32(E1000_LEDCTL); - regs_buff[8] = rd32(E1000_PBA); - regs_buff[9] = rd32(E1000_PBS); - regs_buff[10] = rd32(E1000_FRTIMER); - regs_buff[11] = rd32(E1000_TCPTIMER); + regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL); + regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS); + regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT); + regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC); + regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL); + regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW); + regs_buff[6] = E1000_READ_REG(hw, E1000_VET); + regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL); + regs_buff[8] = E1000_READ_REG(hw, E1000_PBA); + regs_buff[9] = E1000_READ_REG(hw, E1000_PBS); + regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER); + regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER); /* NVM Register */ - regs_buff[12] = rd32(E1000_EECD); + regs_buff[12] = E1000_READ_REG(hw, E1000_EECD); /* Interrupt */ /* Reading EICS for EICR because they read the * same but EICS does not clear on read */ - regs_buff[13] = rd32(E1000_EICS); - regs_buff[14] = rd32(E1000_EICS); - regs_buff[15] = rd32(E1000_EIMS); - regs_buff[16] = rd32(E1000_EIMC); - regs_buff[17] = rd32(E1000_EIAC); - regs_buff[18] = rd32(E1000_EIAM); + regs_buff[13] = E1000_READ_REG(hw, E1000_EICS); + regs_buff[14] = E1000_READ_REG(hw, E1000_EICS); + regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS); + regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC); + regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC); + regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM); /* Reading ICS for ICR because they read the * same but ICS does not clear on read */ - regs_buff[19] = rd32(E1000_ICS); - regs_buff[20] = rd32(E1000_ICS); - regs_buff[21] = rd32(E1000_IMS); - regs_buff[22] = rd32(E1000_IMC); - regs_buff[23] = rd32(E1000_IAC); - regs_buff[24] = rd32(E1000_IAM); - regs_buff[25] = rd32(E1000_IMIRVP); + regs_buff[19] = E1000_READ_REG(hw, E1000_ICS); + regs_buff[20] = E1000_READ_REG(hw, E1000_ICS); + regs_buff[21] = E1000_READ_REG(hw, E1000_IMS); + regs_buff[22] = E1000_READ_REG(hw, E1000_IMC); + regs_buff[23] = E1000_READ_REG(hw, E1000_IAC); + regs_buff[24] = E1000_READ_REG(hw, E1000_IAM); + regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP); /* Flow Control */ - regs_buff[26] = rd32(E1000_FCAL); - regs_buff[27] = rd32(E1000_FCAH); - regs_buff[28] = rd32(E1000_FCTTV); - regs_buff[29] = rd32(E1000_FCRTL); - regs_buff[30] = rd32(E1000_FCRTH); - regs_buff[31] = rd32(E1000_FCRTV); + regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL); + regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH); + regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV); + regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL); + regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH); + regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV); /* Receive */ - 
regs_buff[32] = rd32(E1000_RCTL); - regs_buff[33] = rd32(E1000_RXCSUM); - regs_buff[34] = rd32(E1000_RLPML); - regs_buff[35] = rd32(E1000_RFCTL); - regs_buff[36] = rd32(E1000_MRQC); - regs_buff[37] = rd32(E1000_VT_CTL); + regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL); + regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM); + regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML); + regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL); + regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC); + regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL); /* Transmit */ - regs_buff[38] = rd32(E1000_TCTL); - regs_buff[39] = rd32(E1000_TCTL_EXT); - regs_buff[40] = rd32(E1000_TIPG); - regs_buff[41] = rd32(E1000_DTXCTL); + regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL); + regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT); + regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG); + regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL); /* Wake Up */ - regs_buff[42] = rd32(E1000_WUC); - regs_buff[43] = rd32(E1000_WUFC); - regs_buff[44] = rd32(E1000_WUS); - regs_buff[45] = rd32(E1000_IPAV); - regs_buff[46] = rd32(E1000_WUPL); + regs_buff[42] = E1000_READ_REG(hw, E1000_WUC); + regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC); + regs_buff[44] = E1000_READ_REG(hw, E1000_WUS); + regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV); + regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL); /* MAC */ - regs_buff[47] = rd32(E1000_PCS_CFG0); - regs_buff[48] = rd32(E1000_PCS_LCTL); - regs_buff[49] = rd32(E1000_PCS_LSTAT); - regs_buff[50] = rd32(E1000_PCS_ANADV); - regs_buff[51] = rd32(E1000_PCS_LPAB); - regs_buff[52] = rd32(E1000_PCS_NPTX); - regs_buff[53] = rd32(E1000_PCS_LPABNP); + regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0); + regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL); + regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT); + regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV); + regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB); + regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX); + regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP); /* Statistics */ regs_buff[54] = adapter->stats.crcerrs; @@ -489,73 +526,75 @@ static void igb_get_regs(struct net_device *netdev, regs_buff[120] = adapter->stats.hrmpc; for (i = 0; i < 4; i++) - regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i)); for (i = 0; i < 4; i++) - regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i)); for (i = 0; i < 4; i++) - regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i)); for (i = 0; i < 4; i++) - regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i)); for (i = 0; i < 4; i++) - regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i)); for (i = 0; i < 4; i++) - regs_buff[141 + i] = rd32(E1000_RDH(i)); + regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i)); for (i = 0; i < 4; i++) - regs_buff[145 + i] = rd32(E1000_RDT(i)); + regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i)); for (i = 0; i < 4; i++) - regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); for (i = 0; i < 10; i++) - regs_buff[153 + i] = rd32(E1000_EITR(i)); + regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i)); for (i = 0; i < 8; i++) - regs_buff[163 + i] = rd32(E1000_IMIR(i)); + regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i)); for (i = 0; i < 8; i++) - regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + regs_buff[171 + i] = 
E1000_READ_REG(hw, E1000_IMIREXT(i)); for (i = 0; i < 16; i++) - regs_buff[179 + i] = rd32(E1000_RAL(i)); + regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i)); for (i = 0; i < 16; i++) - regs_buff[195 + i] = rd32(E1000_RAH(i)); + regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i)); for (i = 0; i < 4; i++) - regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i)); for (i = 0; i < 4; i++) - regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i)); for (i = 0; i < 4; i++) - regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i)); for (i = 0; i < 4; i++) - regs_buff[223 + i] = rd32(E1000_TDH(i)); + regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i)); for (i = 0; i < 4; i++) - regs_buff[227 + i] = rd32(E1000_TDT(i)); + regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i)); for (i = 0; i < 4; i++) - regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i)); for (i = 0; i < 4; i++) - regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i)); for (i = 0; i < 4; i++) - regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i)); for (i = 0; i < 4; i++) - regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); for (i = 0; i < 4; i++) - regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i)); for (i = 0; i < 4; i++) - regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i)); for (i = 0; i < 32; i++) - regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i)); for (i = 0; i < 128; i++) - regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i)); for (i = 0; i < 128; i++) - regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i)); for (i = 0; i < 4; i++) - regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); - - regs_buff[547] = rd32(E1000_TDFH); - regs_buff[548] = rd32(E1000_TDFT); - regs_buff[549] = rd32(E1000_TDFHS); - regs_buff[550] = rd32(E1000_TDFPC); - regs_buff[551] = adapter->stats.o2bgptc; - regs_buff[552] = adapter->stats.b2ospc; - regs_buff[553] = adapter->stats.o2bspc; - regs_buff[554] = adapter->stats.b2ogprc; + regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i)); + + regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH); + regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT); + regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS); + regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC); + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } } static int igb_get_eeprom_len(struct net_device *netdev) @@ -588,13 +627,13 @@ static int igb_get_eeprom(struct net_device *netdev, return -ENOMEM; if (hw->nvm.type == e1000_nvm_eeprom_spi) - ret_val = hw->nvm.ops.read(hw, first_word, - last_word - first_word + 1, - eeprom_buff); + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); else { for (i = 0; i < last_word - first_word + 1; i++) { - ret_val = hw->nvm.ops.read(hw, first_word + i, 1, - &eeprom_buff[i]); + ret_val = e1000_read_nvm(hw, first_word + i, 1, + 
&eeprom_buff[i]); if (ret_val) break; } @@ -602,7 +641,7 @@ static int igb_get_eeprom(struct net_device *netdev, /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); + eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]); memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); @@ -640,15 +679,15 @@ static int igb_set_eeprom(struct net_device *netdev, if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ - ret_val = hw->nvm.ops.read(hw, first_word, 1, + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); ptr++; } if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ - ret_val = hw->nvm.ops.read(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); } /* Device's eeprom is always little-endian, word addressable */ @@ -658,15 +697,15 @@ static int igb_set_eeprom(struct net_device *netdev, memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + cpu_to_le16s(&eeprom_buff[i]); - ret_val = hw->nvm.ops.write(hw, first_word, - last_word - first_word + 1, eeprom_buff); + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); /* Update the checksum over the first part of the EEPROM if needed * and flush shadow RAM for 82573 controllers */ if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) - hw->nvm.ops.update(hw); + e1000_update_nvm_checksum(hw); kfree(eeprom_buff); return ret_val; @@ -676,25 +715,18 @@ static void igb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igb_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - u16 eeprom_data; strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, igb_driver_version, - sizeof(drvinfo->version) - 1); + strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1); /* EEPROM image version # is reported as firmware version # for * 82575 controllers */ - adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); - sprintf(firmware_version, "%d.%d-%d", - (eeprom_data & 0xF000) >> 12, - (eeprom_data & 0x0FF0) >> 4, - eeprom_data & 0x000F); - - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); + snprintf(drvinfo->fw_version, 32, "%d.%d-%d", + (adapter->fw_version & 0xF000) >> 12, + (adapter->fw_version & 0x0FF0) >> 4, + adapter->fw_version & 0x000F); + + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1); drvinfo->n_stats = IGB_STATS_LEN; drvinfo->testinfo_len = IGB_TEST_LEN; drvinfo->regdump_len = igb_get_regs_len(netdev); @@ -708,8 +740,12 @@ static void igb_get_ringparam(struct net_device *netdev, ring->rx_max_pending = IGB_MAX_RXD; ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; ring->rx_pending = adapter->rx_ring_count; ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; } static int igb_set_ringparam(struct net_device *netdev, @@ -723,12 +759,12 @@ static int igb_set_ringparam(struct 
net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); - new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD); + new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); - new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD); + new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring_count) && @@ -738,7 +774,7 @@ static int igb_set_ringparam(struct net_device *netdev, } while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -826,192 +862,6 @@ clear_reset: clear_bit(__IGB_RESETTING, &adapter->state); return err; } - -/* ethtool register test data */ -struct igb_reg_test { - u16 reg; - u16 reg_offset; - u16 array_len; - u16 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x100 bytes apart, or in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. - */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* i350 reg test */ -static struct igb_reg_test reg_test_i350[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* RDH is read-only for i350, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, - 0xC3FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 16, TABLE64_TEST_HI, - 0xC3FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, - 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82580 reg test */ -static struct igb_reg_test reg_test_82580[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - /* RDH is read-only for 82580, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, - 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_HI, - 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, - 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82576 reg test */ -static struct igb_reg_test reg_test_82576[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - /* Enable all RX queues before testing. */ - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - /* RDH is read-only for 82576, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, - { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82575 register test */ -static struct igb_reg_test reg_test_82575[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* Enable all four RX queues before testing. */ - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - /* RDH is read-only for 82575, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { @@ -1020,13 +870,13 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, static const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { - wr32(reg, (_test[pat] & write)); - val = rd32(reg) & mask; + E1000_WRITE_REG(hw, reg, (_test[pat] & write)); + val = E1000_READ_REG(hw, reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " + dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X " "failed: got 0x%08X expected 0x%08X\n", - reg, val, (_test[pat] & write & mask)); - *data = reg; + E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask)); + *data = E1000_REGISTER(hw, reg); return 1; } } @@ -1039,13 +889,13 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, { struct e1000_hw *hw = &adapter->hw; u32 val; - wr32(reg, write & mask); - val = rd32(reg); + E1000_WRITE_REG(hw, reg, write & mask); + val = E1000_READ_REG(hw, reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" + dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:" " got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); - *data = reg; + *data = E1000_REGISTER(hw, reg); return 1; } @@ -1095,18 +945,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) * tests. Some bits are read-only, some toggle, and some * are writable on newer MACs. */ - before = rd32(E1000_STATUS); - value = (rd32(E1000_STATUS) & toggle); - wr32(E1000_STATUS, toggle); - after = rd32(E1000_STATUS) & toggle; + before = E1000_READ_REG(hw, E1000_STATUS); + value = (E1000_READ_REG(hw, E1000_STATUS) & toggle); + E1000_WRITE_REG(hw, E1000_STATUS, toggle); + after = E1000_READ_REG(hw, E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " + dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test " "got: 0x%08X expected: 0x%08X\n", after, value); *data = 1; return 1; } /* restore previous status */ - wr32(E1000_STATUS, before); + E1000_WRITE_REG(hw, E1000_STATUS, before); /* Perform the remainder of the register test, looping through * the test table until we either fail or reach the null entry. 
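Both before and after this patch, the ethtool register self-test walks tables of {reg, offset, len, type, mask, write} entries and checks each one through reg_pattern_test()/reg_set_and_check(); the hunks here only switch the raw rd32()/wr32() accessors to the E1000_READ_REG()/E1000_WRITE_REG() macros. For reference, a minimal, self-contained sketch of the same mask-and-compare idea, using a simulated register in place of the driver's MMIO accessors (everything below is illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Simulated 32-bit register whose upper 16 bits are unimplemented,
 * standing in for an MMIO read/write pair. */
static uint32_t reg_storage;

static void sim_write(uint32_t val) { reg_storage = val & 0x0000FFFF; }
static uint32_t sim_read(void)      { return reg_storage; }

/* Same idea as reg_pattern_test(): write each pattern (limited to the
 * writable bits), read back, and compare under the implemented-bit mask. */
static int pattern_test(uint32_t mask, uint32_t write)
{
	static const uint32_t pat[] =
		{ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
	unsigned int i;

	for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
		sim_write(pat[i] & write);
		uint32_t val = sim_read() & mask;
		if (val != (pat[i] & write & mask)) {
			fprintf(stderr,
				"pattern test failed: got 0x%08X expected 0x%08X\n",
				val, pat[i] & write & mask);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* Mask covers only the 16 implemented bits, so the test passes. */
	printf("16-bit mask: %s\n",
	       pattern_test(0x0000FFFF, 0xFFFFFFFF) ? "FAIL" : "ok");
	/* Claiming all 32 bits are implemented exposes the stuck bits. */
	printf("32-bit mask: %s\n",
	       pattern_test(0xFFFFFFFF, 0xFFFFFFFF) ? "FAIL" : "ok");
	return 0;
}

The per-MAC tables differ only in their masks and array strides (0x100-spaced queue registers for queues 0-3, 0x40-spaced for queues 4 and up), which is why one generic test loop can serve 82575 through i350.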
@@ -1128,7 +978,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) break; case WRITE_NO_TEST: writel(test->write, - (adapter->hw.hw_addr + test->reg) + (adapter->hw.hw_addr + test->reg) + (i * test->reg_offset)); break; case TABLE32_TEST: @@ -1164,7 +1014,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) *data = 0; /* Read and add up the contents of the EEPROM */ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { *data = 1; break; } @@ -1183,7 +1033,7 @@ static irqreturn_t igb_test_intr(int irq, void *data) struct igb_adapter *adapter = (struct igb_adapter *) data; struct e1000_hw *hw = &adapter->hw; - adapter->test_icr |= rd32(E1000_ICR); + adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR); return IRQ_HANDLED; } @@ -1192,7 +1042,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - u32 mask, ics_mask, i = 0, shared_int = true; + u32 mask, ics_mask, i = 0, shared_int = TRUE; u32 irq = adapter->pdev->irq; *data = 0; @@ -1200,12 +1050,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) /* Hook up test interrupt handler just for this test */ if (adapter->msix_entries) { if (request_irq(adapter->msix_entries[0].vector, - igb_test_intr, 0, netdev->name, adapter)) { + &igb_test_intr, 0, netdev->name, adapter)) { *data = 1; return -1; } } else if (adapter->flags & IGB_FLAG_HAS_MSI) { - shared_int = false; + shared_int = FALSE; if (request_irq(irq, igb_test_intr, 0, netdev->name, adapter)) { *data = 1; @@ -1213,19 +1063,19 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) } } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, netdev->name, adapter)) { - shared_int = false; - } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + shared_int = FALSE; + } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, netdev->name, adapter)) { *data = 1; return -1; } - dev_info(&adapter->pdev->dev, "testing %s interrupt\n", - (shared_int ? "shared" : "unshared")); + dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); /* Disable all the interrupts */ - wr32(E1000_IMC, ~0); - wrfl(); - msleep(10); + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); /* Define all writable bits for ICS */ switch (hw->mac.type) { @@ -1264,12 +1114,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) adapter->test_icr = 0; /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); + E1000_WRITE_REG(hw, E1000_ICR, ~0); - wr32(E1000_IMC, mask); - wr32(E1000_ICS, mask); - wrfl(); - msleep(10); + E1000_WRITE_REG(hw, E1000_IMC, mask); + E1000_WRITE_REG(hw, E1000_ICS, mask); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); if (adapter->test_icr & mask) { *data = 3; @@ -1286,12 +1136,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) adapter->test_icr = 0; /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); + E1000_WRITE_REG(hw, E1000_ICR, ~0); - wr32(E1000_IMS, mask); - wr32(E1000_ICS, mask); - wrfl(); - msleep(10); + E1000_WRITE_REG(hw, E1000_IMS, mask); + E1000_WRITE_REG(hw, E1000_ICS, mask); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); if (!(adapter->test_icr & mask)) { *data = 4; @@ -1308,12 +1158,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) adapter->test_icr = 0; /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); + E1000_WRITE_REG(hw, E1000_ICR, ~0); - wr32(E1000_IMC, ~mask); - wr32(E1000_ICS, ~mask); - wrfl(); - msleep(10); + E1000_WRITE_REG(hw, E1000_IMC, ~mask); + E1000_WRITE_REG(hw, E1000_ICS, ~mask); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); if (adapter->test_icr & mask) { *data = 5; @@ -1323,9 +1173,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) } /* Disable all the interrupts */ - wr32(E1000_IMC, ~0); - wrfl(); - msleep(10); + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); /* Unhook test interrupt handler */ if (adapter->msix_entries) @@ -1351,7 +1201,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter) /* Setup Tx descriptor ring and Tx buffers */ tx_ring->count = IGB_DEFAULT_TXD; - tx_ring->dev = &adapter->pdev->dev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->vfs_allocated_count; @@ -1365,17 +1215,20 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter) /* Setup Rx descriptor ring and Rx buffers */ rx_ring->count = IGB_DEFAULT_RXD; - rx_ring->dev = &adapter->pdev->dev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); rx_ring->netdev = adapter->netdev; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + rx_ring->rx_buffer_len = IGB_RXBUFFER_512; +#endif rx_ring->reg_idx = adapter->vfs_allocated_count; if (igb_setup_rx_resources(rx_ring)) { - ret_val = 3; + ret_val = 2; goto err_nomem; } /* set the default queue to queue 0 of PF */ - wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3); /* enable receive ring */ igb_setup_rctl(adapter); @@ -1395,10 +1248,10 @@ static void igb_phy_disable_receiver(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; /* Write out to PHY registers 29 and 30 to disable the Receiver. 
*/ - igb_write_phy_reg(hw, 29, 0x001F); - igb_write_phy_reg(hw, 30, 0x8FFC); - igb_write_phy_reg(hw, 29, 0x001A); - igb_write_phy_reg(hw, 30, 0x8FF0); + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); } static int igb_integrated_phy_loopback(struct igb_adapter *adapter) @@ -1406,27 +1259,27 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; - hw->mac.autoneg = false; + hw->mac.autoneg = FALSE; if (hw->phy.type == e1000_phy_m88) { - /* Auto-MDI/MDIX Off */ - igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); - } else if (hw->phy.type == e1000_phy_82580) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { /* enable MII loopback */ - igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + if (hw->phy.type == e1000_phy_82580) + e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041); } - ctrl_reg = rd32(E1000_CTRL); - - /* force 1000, set loopback */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); /* Now set up the MAC to the same speed/duplex as the PHY. */ - ctrl_reg = rd32(E1000_CTRL); + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ @@ -1437,16 +1290,15 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) if (hw->phy.type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - wr32(E1000_CTRL, ctrl_reg); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); /* Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
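For the integrated PHY loopback above, writing 0x4140 to PHY_CONTROL sets bit 14 (loopback), bit 8 (full duplex) and bit 6 (the speed MSB, selecting 1000 Mb/s); the MAC must then be forced to the same speed and duplex through CTRL, which is what the read-modify-write in the hunk does. A compilable sketch of that CTRL update (the bit values mirror the driver's E1000_CTRL_* constants, reproduced here for illustration):

#include <stdio.h>
#include <stdint.h>

#define CTRL_FD       0x00000001  /* full duplex */
#define CTRL_SLU      0x00000040  /* set link up */
#define CTRL_SPD_SEL  0x00000300  /* speed select field */
#define CTRL_SPD_1000 0x00000200  /* force 1000 Mb/s */
#define CTRL_FRCSPD   0x00000800  /* force speed */
#define CTRL_FRCDPX   0x00001000  /* force duplex */

static uint32_t force_1000_full(uint32_t ctrl)
{
	ctrl &= ~CTRL_SPD_SEL;               /* clear the speed-select bits */
	ctrl |= CTRL_FRCSPD | CTRL_FRCDPX |  /* override PHY-derived values */
		CTRL_SPD_1000 | CTRL_FD |    /* 1000 Mb/s, full duplex */
		CTRL_SLU;                    /* report link up */
	return ctrl;
}

int main(void)
{
	uint32_t ctrl = 0x00000100; /* pretend the MAC was at 100 Mb/s */
	printf("CTRL: %08X -> %08X\n", ctrl, force_1000_full(ctrl));
	return 0;
}

Disabling the PHY receiver afterwards (registers 29/30) keeps a reconnected cable from restarting autonegotiation and disturbing the forced-loopback state mid-test.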
*/ if (hw->phy.type == e1000_phy_m88) igb_phy_disable_receiver(adapter); - + udelay(500); - return 0; } @@ -1460,54 +1312,54 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 reg; - reg = rd32(E1000_CTRL_EXT); + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); /* use CTRL_EXT to identify link type as SGMII can appear as copper */ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { - - /* Enable DH89xxCC MPHY for near end loopback */ - reg = rd32(E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | - E1000_MPHY_PCS_CLK_REG_OFFSET; - wr32(E1000_MPHY_ADDR_CTL, reg); - - reg = rd32(E1000_MPHY_DATA); - reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; - wr32(E1000_MPHY_DATA, reg); - } - - reg = rd32(E1000_RCTL); + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + + /* Enable DH89xxCC MPHY for near end loopback */ + reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + + reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); + } + + reg = E1000_READ_REG(hw, E1000_RCTL); reg |= E1000_RCTL_LBM_TCVR; - wr32(E1000_RCTL, reg); + E1000_WRITE_REG(hw, E1000_RCTL, reg); - wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); - reg = rd32(E1000_CTRL); + reg = E1000_READ_REG(hw, E1000_CTRL); reg &= ~(E1000_CTRL_RFCE | E1000_CTRL_TFCE | E1000_CTRL_LRST); reg |= E1000_CTRL_SLU | E1000_CTRL_FD; - wr32(E1000_CTRL, reg); + E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Unset switch control to serdes energy detect */ - reg = rd32(E1000_CONNSW); + reg = E1000_READ_REG(hw, E1000_CONNSW); reg &= ~E1000_CONNSW_ENRGSRC; - wr32(E1000_CONNSW, reg); + E1000_WRITE_REG(hw, E1000_CONNSW, reg); /* Set PCS register for forced speed */ - reg = rd32(E1000_PCS_LCTL); + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ E1000_PCS_LCTL_FSD | /* Force Speed */ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ - wr32(E1000_PCS_LCTL, reg); + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); return 0; } @@ -1521,36 +1373,35 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter) u32 rctl; u16 phy_reg; - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { u32 reg; /* Disable near end loopback on DH89xxCC */ - reg = rd32(E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | - E1000_MPHY_PCS_CLK_REG_OFFSET; - wr32(E1000_MPHY_ADDR_CTL, reg); - - reg = rd32(E1000_MPHY_DATA); - reg &= 
~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; - wr32(E1000_MPHY_DATA, reg); + reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + + reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); } - - rctl = rd32(E1000_RCTL); + + rctl = E1000_READ_REG(hw, E1000_RCTL); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - wr32(E1000_RCTL, rctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); - hw->mac.autoneg = true; - igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + hw->mac.autoneg = TRUE; + e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg); if (phy_reg & MII_CR_LOOPBACK) { phy_reg &= ~MII_CR_LOOPBACK; - igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); - igb_phy_sw_reset(hw); + e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg); + e1000_phy_commit(hw); } } - static void igb_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { @@ -1573,13 +1424,18 @@ static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) return 13; } -static int igb_clean_test_rings(struct igb_ring *rx_ring, +static u16 igb_clean_test_rings(struct igb_ring *rx_ring, struct igb_ring *tx_ring, unsigned int size) { union e1000_adv_rx_desc *rx_desc; struct igb_rx_buffer *rx_buffer_info; struct igb_tx_buffer *tx_buffer_info; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buffer_len; +#else + const int bufsz = IGB_RX_HDR_LEN; +#endif u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ @@ -1593,8 +1449,8 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, /* unmap rx buffer, will be remapped by alloc_rx_buffers */ dma_unmap_single(rx_ring->dev, - rx_buffer_info->dma, - IGB_RX_HDR_LEN, + rx_buffer_info->dma, + bufsz, DMA_FROM_DEVICE); rx_buffer_info->dma = 0; @@ -1632,7 +1488,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) struct igb_ring *rx_ring = &adapter->test_rx_ring; u16 i, j, lc, good_cnt; int ret_val = 0; - unsigned int size = IGB_RX_HDR_LEN; + unsigned int size = IGB_RXBUFFER_512; netdev_tx_t tx_ret_val; struct sk_buff *skb; @@ -1693,8 +1549,8 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) { /* PHY loopback cannot be performed if SoL/IDER * sessions are active */ - if (igb_check_reset_block(&adapter->hw)) { - dev_err(&adapter->pdev->dev, + if (e1000_check_reset_block(&adapter->hw)) { + dev_err(pci_dev_to_dev(adapter->pdev), "Cannot do PHY loopback test " "when SoL/IDER is active.\n"); *data = 0; @@ -1707,6 +1563,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) if (*data) goto err_loopback; *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); err_loopback: @@ -1717,31 +1574,40 @@ out: static int igb_link_test(struct igb_adapter *adapter, u64 *data) { - struct e1000_hw *hw = &adapter->hw; + u32 link; + int i, time; + *data = 0; - if (hw->phy.media_type == e1000_media_type_internal_serdes) { + time = 0; + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { int i = 0; - hw->mac.serdes_has_link = false; + adapter->hw.mac.serdes_has_link = FALSE; /* On some blade server designs, link establishment * could take as long as 2-3 minutes */ do { - hw->mac.ops.check_for_link(&adapter->hw); - if (hw->mac.serdes_has_link) - return *data; + e1000_check_for_link(&adapter->hw); + if (adapter->hw.mac.serdes_has_link) + goto out; msleep(20); } while (i++ < 3750); 
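The serdes branch of igb_link_test() just below polls e1000_check_for_link() every 20 ms for up to 3750 iterations, a budget of 3750 x 20 ms = 75 seconds, to ride out the slow link establishment seen on some blade backplanes. The generic shape of that bounded poll, with a stub probe standing in for the real link check (hypothetical example, not driver code):

#include <stdio.h>
#include <stdbool.h>

static bool poll_until(bool (*probe)(void), unsigned int tries,
		       unsigned int interval_ms)
{
	unsigned int i;

	for (i = 0; i < tries; i++) {
		if (probe())
			return true;
		/* msleep(interval_ms) in kernel context; elided here */
		(void)interval_ms;
	}
	return false;
}

static unsigned int calls;
static bool fake_link(void) { return ++calls >= 5; } /* up on 5th poll */

int main(void)
{
	printf("budget: %u ms\n", 3750u * 20u);  /* 75000 ms */
	printf("link: %s after %u polls\n",
	       poll_until(fake_link, 3750, 20) ? "up" : "down", calls);
	return 0;
}

The copper branch the patch introduces takes the same shape at a coarser grain: IGB_MAX_LINK_TRIES attempts of igb_has_link() separated by msleep(1000).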
*data = 1; } else { - hw->mac.ops.check_for_link(&adapter->hw); - if (hw->mac.autoneg) - msleep(4000); - - if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + for (i=0; i < IGB_MAX_LINK_TRIES; i++) { + link = igb_has_link(adapter); + if (link) + goto out; + else { + time++; + msleep(1000); + } + } + if (!link) *data = 1; } - return *data; + out: + return *data; } static void igb_diag_test(struct net_device *netdev, @@ -1761,11 +1627,11 @@ static void igb_diag_test(struct net_device *netdev, forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; autoneg = adapter->hw.mac.autoneg; - dev_info(&adapter->pdev->dev, "offline testing starting\n"); + dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n"); /* power up link for link test */ - igb_power_up_link(adapter); - + igb_power_up_link(adapter); + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igb_link_test(adapter, &data[4])) @@ -1790,7 +1656,8 @@ static void igb_diag_test(struct net_device *netdev, igb_reset(adapter); /* power up link for loopback test */ - igb_power_up_link(adapter); + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1800,15 +1667,15 @@ static void igb_diag_test(struct net_device *netdev, adapter->hw.mac.autoneg = autoneg; /* force this routine to wait until autoneg complete/timeout */ - adapter->hw.phy.autoneg_wait_to_complete = true; + adapter->hw.phy.autoneg_wait_to_complete = TRUE; igb_reset(adapter); - adapter->hw.phy.autoneg_wait_to_complete = false; + adapter->hw.phy.autoneg_wait_to_complete = FALSE; clear_bit(__IGB_TESTING, &adapter->state); if (if_running) dev_open(netdev); } else { - dev_info(&adapter->pdev->dev, "online testing starting\n"); + dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n"); /* PHY is powered down when interface is down */ if (if_running && igb_link_test(adapter, &data[4])) @@ -1842,7 +1709,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: /* Wake events not supported on port B */ - if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) { wol->supported = 0; break; } @@ -1863,7 +1730,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, /* dual port cards only support WoL on port A from now on * unless it was enabled in the eeprom for port B * so exclude FUNC_1 ports from having WoL enabled */ - if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && + if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_MASK) && !adapter->eeprom_wol) { wol->supported = 0; break; @@ -1918,7 +1785,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (igb_wol_exclusion(adapter, wol) || !device_can_wakeup(&adapter->pdev->dev)) return wol->wolopts ? 
-EOPNOTSUPP : 0; - /* these settings will always override what we currently have */ adapter->wol = 0; @@ -1938,33 +1804,56 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) } /* bit defines for adapter->led_status */ -#define IGB_LED_ON 0 - +#ifdef HAVE_ETHTOOL_SET_PHYS_ID static int igb_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + e1000_led_off(hw); + e1000_cleanup_led(hw); + break; + } + + return 0; +} +#else +static int igb_phys_id(struct net_device *netdev, u32 data) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + unsigned long timeout; - switch (state) { - case ETHTOOL_ID_ACTIVE: - igb_blink_led(hw); - return 2; - case ETHTOOL_ID_ON: - igb_blink_led(hw); - break; - case ETHTOOL_ID_OFF: - igb_led_off(hw); - break; - case ETHTOOL_ID_INACTIVE: - igb_led_off(hw); - clear_bit(IGB_LED_ON, &adapter->led_status); - igb_cleanup_led(hw); - break; - } + timeout = data * 1000; + + /* + * msleep_interruptable only accepts unsigned int so we are limited + * in how long a duration we can wait + */ + if (!timeout || timeout > UINT_MAX) + timeout = UINT_MAX; + + e1000_blink_led(hw); + msleep_interruptible(timeout); + + e1000_led_off(hw); + e1000_cleanup_led(hw); return 0; } +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ static int igb_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) @@ -1976,7 +1865,10 @@ static int igb_set_coalesce(struct net_device *netdev, ((ec->rx_coalesce_usecs > 3) && (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || (ec->rx_coalesce_usecs == 2)) + { + printk("set_coalesce:invalid parameter.."); return -EINVAL; + } if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->tx_coalesce_usecs > 3) && @@ -1987,12 +1879,14 @@ static int igb_set_coalesce(struct net_device *netdev, if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) return -EINVAL; + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + /* If ITR is disabled, disable DMAC */ if (ec->rx_coalesce_usecs == 0) { - if (adapter->flags & IGB_FLAG_DMAC) - adapter->flags &= ~IGB_FLAG_DMAC; + adapter->dmac = IGB_DMAC_DISABLE; } - + /* convert to rate of irq's per second */ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) adapter->rx_itr_setting = ec->rx_coalesce_usecs; @@ -2032,6 +1926,8 @@ static int igb_get_coalesce(struct net_device *netdev, else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { if (adapter->tx_itr_setting <= 3) ec->tx_coalesce_usecs = adapter->tx_itr_setting; @@ -2050,6 +1946,7 @@ static int igb_nway_reset(struct net_device *netdev) return 0; } +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT static int igb_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { @@ -2061,19 +1958,32 @@ static int igb_get_sset_count(struct net_device *netdev, int sset) return -ENOTSUPP; } } +#else +static int igb_get_stats_count(struct net_device *netdev) +{ + return IGB_STATS_LEN; +} + +static int igb_diag_test_count(struct net_device *netdev) +{ + return IGB_TEST_LEN; +} +#endif static void 
igb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct igb_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 *net_stats = &adapter->stats64; - unsigned int start; - struct igb_ring *ring; - int i, j; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + u64 *queue_stat; + int i, j, k; char *p; - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, net_stats); + igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { p = (char *)adapter + igb_gstrings_stats[i].stat_offset; @@ -2086,36 +1996,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < adapter->num_tx_queues; j++) { - u64 restart2; - - ring = adapter->tx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); - data[i] = ring->tx_stats.packets; - data[i+1] = ring->tx_stats.bytes; - data[i+2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); - restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); - data[i+2] += restart2; - - i += IGB_TX_QUEUE_STATS_LEN; + queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; + for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) + data[i] = queue_stat[k]; } for (j = 0; j < adapter->num_rx_queues; j++) { - ring = adapter->rx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); - data[i] = ring->rx_stats.packets; - data[i+1] = ring->rx_stats.bytes; - data[i+2] = ring->rx_stats.drops; - data[i+3] = ring->rx_stats.csum_err; - data[i+4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); - i += IGB_RX_QUEUE_STATS_LEN; + queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; + for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) + data[i] = queue_stat[k]; } - spin_unlock(&adapter->stats64_lock); } static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) @@ -2165,20 +2054,221 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +#ifdef CONFIG_PM_RUNTIME static int igb_ethtool_begin(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; } static void igb_ethtool_complete(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); } +#endif /* CONFIG_PM_RUNTIME */ + +#ifndef HAVE_NDO_SET_FEATURES +static u32 igb_get_rx_csum(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); +} + +static int igb_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < adapter->rss_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + if (data) + set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + else + clear_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + } + + return 0; +} + +static u32 igb_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +static int igb_set_tx_csum(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (data) { +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= 
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + if (adapter->hw.mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CSUM; + } else { + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SCTP_CSUM); +#else + netdev->features |= NETIF_F_IP_CSUM; + if (adapter->hw.mac.type == e1000_82576) + netdev->features |= NETIF_F_SCTP_CSUM; + } else { + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM); +#endif + } + + return 0; +} + +#ifdef NETIF_F_TSO +static int igb_set_tso(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEV_VLAN_FEATURES + int i; + struct net_device *v_netdev; +#endif + + if (data) { + netdev->features |= NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features |= NETIF_F_TSO6; +#endif + } else { + netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features &= ~NETIF_F_TSO6; +#endif +#ifndef HAVE_NETDEV_VLAN_FEATURES + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + v_netdev->features &= ~NETIF_F_TSO6; +#endif + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } + +#ifndef HAVE_NETDEV_VLAN_FEATURES +tso_out: +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + dev_info(pci_dev_to_dev(adapter->pdev), "TSO is %s\n", + data ? "Enabled" : "Disabled"); + return 0; +} + +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS +static int igb_set_flags(struct net_device *netdev, u32 data) +{ + u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | + ETH_FLAG_RXHASH; +#ifndef HAVE_VLAN_RX_REGISTER + u32 changed = netdev->features ^ data; +#endif + int rc; +#ifndef IGB_NO_LRO + + supported_flags |= ETH_FLAG_LRO; +#endif + /* + * Since there is no support for separate tx vlan accel + * enabled make sure tx flag is cleared if rx is. 
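The compatibility set_flags path in this hunk encodes one constraint called out in its comment: the driver does not support enabling TX VLAN tag acceleration independently, so a request that clears ETH_FLAG_RXVLAN must also drop ETH_FLAG_TXVLAN before the result reaches ethtool_op_set_flags(). The rule in isolation (the flag values below are illustrative stand-ins, not the real ETH_FLAG_* constants):

#include <stdio.h>
#include <stdint.h>

#define FLAG_RXVLAN 0x1
#define FLAG_TXVLAN 0x2

/* TX VLAN acceleration cannot stay on once RX VLAN acceleration
 * is turned off. */
static uint32_t normalize_vlan_flags(uint32_t requested)
{
	if (!(requested & FLAG_RXVLAN))
		requested &= ~FLAG_TXVLAN;
	return requested;
}

int main(void)
{
	printf("%X -> %X\n", FLAG_TXVLAN,
	       normalize_vlan_flags(FLAG_TXVLAN));              /* 2 -> 0 */
	printf("%X -> %X\n", FLAG_RXVLAN | FLAG_TXVLAN,
	       normalize_vlan_flags(FLAG_RXVLAN | FLAG_TXVLAN)); /* 3 -> 3 */
	return 0;
}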
+ */ + if (!(data & ETH_FLAG_RXVLAN)) + data &= ~ETH_FLAG_TXVLAN; + + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; +#ifndef HAVE_VLAN_RX_REGISTER + + if (changed & ETH_FLAG_RXVLAN) + igb_vlan_mode(netdev, data); +#endif -static const struct ethtool_ops igb_ethtool_ops = { + return 0; +} + +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_SADV_COAL +static int igb_set_adv_coal(struct net_device *netdev, struct ethtool_value *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + switch (edata->data) { + case IGB_DMAC_DISABLE: + adapter->dmac = edata->data; + break; + case IGB_DMAC_MIN: + adapter->dmac = edata->data; + break; + case IGB_DMAC_500: + adapter->dmac = edata->data; + break; + case IGB_DMAC_EN_DEFAULT: + adapter->dmac = edata->data; + break; + case IGB_DMAC_2000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_3000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_4000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_5000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_6000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_7000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_8000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_9000: + adapter->dmac = edata->data; + break; + case IGB_DMAC_MAX: + adapter->dmac = edata->data; + break; + default: + adapter->dmac = IGB_DMAC_DISABLE; + printk("set_dmac: invalid setting, setting DMAC to %d\n", + adapter->dmac); + } + printk("%s: setting DMAC to %d\n", netdev->name, adapter->dmac); + return 0; +} +#endif /* ETHTOOL_SADV_COAL */ +#ifdef ETHTOOL_GADV_COAL +static void igb_get_dmac(struct net_device *netdev, + struct ethtool_value *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + edata->data = adapter->dmac; + + return; +} +#endif +static struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, .get_drvinfo = igb_get_drvinfo, @@ -2199,16 +2289,52 @@ static const struct ethtool_ops igb_ethtool_ops = { .set_pauseparam = igb_set_pauseparam, .self_test = igb_diag_test, .get_strings = igb_get_strings, +#ifdef HAVE_ETHTOOL_SET_PHYS_ID .set_phys_id = igb_set_phys_id, +#else + .phys_id = igb_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT .get_sset_count = igb_get_sset_count, +#else + .get_stats_count = igb_get_stats_count, + .self_test_count = igb_diag_test_count, +#endif .get_ethtool_stats = igb_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif .get_coalesce = igb_get_coalesce, .set_coalesce = igb_set_coalesce, +#ifdef CONFIG_PM_RUNTIME .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, +#endif /* CONFIG_PM_RUNTIME */ +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = igb_get_rx_csum, + .set_rx_csum = igb_set_rx_csum, + .get_tx_csum = igb_get_tx_csum, + .set_tx_csum = igb_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = igb_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = igb_set_flags, +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GADV_COAL + .get_advcoal = igb_get_adv_coal, + .set_advcoal = igb_set_dmac_coal +#endif /* ETHTOOL_GADV_COAL */ }; void igb_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); } + +#endif /* SIOCETHTOOL */ diff --git 
a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 4f23ab079651..2153a529c7bc 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -25,81 +25,75 @@ *******************************************************************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include -#include #include #include #include -#include -#include +#include +#ifdef NETIF_F_TSO #include +#ifdef NETIF_F_TSO6 +#include #include -#include +#endif +#endif +#ifdef SIOCGMIIPHY #include +#endif +#ifdef SIOCETHTOOL #include -#include +#endif #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#ifdef CONFIG_PM_RUNTIME #include -#ifdef CONFIG_IGB_DCA -#include -#endif +#endif /* CONFIG_PM_RUNTIME */ + #include "igb.h" +#include "igb_vmdq.h" + +#define DRV_DEBUG +#define DRV_HW_PERF +#define VERSION_SUFFIX #define MAJ 3 -#define MIN 2 -#define BUILD 10 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ -__stringify(BUILD) "-k" +#define MIN 4 +#define BUILD 8 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF + char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = - "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; - -static const struct e1000_info *igb_info_tbl[] = { - [board_82575] = &e1000_82575_info, -}; + "Intel(R) Gigabit Ethernet Network Driver"; +static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) 
}, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) }, /* required last entry */ {0, } }; @@ -112,9 +106,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); +void igb_update_stats(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); +#ifdef HAVE_HW_TIME_STAMP static void igb_init_hw_timer(struct igb_adapter *adapter); +#endif static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); @@ -123,73 +120,93 @@ static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); +static void igb_dma_err_task(struct work_struct *); +static void igb_dma_err_timer(unsigned long data); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static struct net_device_stats *igb_get_stats(struct net_device *); static int igb_change_mtu(struct net_device *, int); +void igb_full_sync_mac_table(struct igb_adapter *adapter); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_ring(int irq, void *); -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); -#endif /* CONFIG_IGB_DCA */ +#endif /* IGB_DCA */ static int igb_poll(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_q_vector *); static bool igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, 
struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); -static void igb_vlan_mode(struct net_device *netdev, u32 features); +#ifdef HAVE_VLAN_RX_REGISTER +static void igb_vlan_mode(struct net_device *, struct vlan_group *); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +static int igb_vlan_rx_add_vid(struct net_device *, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, u16); +#else static void igb_vlan_rx_add_vid(struct net_device *, u16); static void igb_vlan_rx_kill_vid(struct net_device *, u16); +#endif static void igb_restore_vlan(struct igb_adapter *); -static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); +void igb_rar_set(struct igb_adapter *adapter, u32 index); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static void igb_vmm_control(struct igb_adapter *); static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); -static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static void igb_process_mdd_event(struct igb_adapter *); +#ifdef IFLA_VF_MAX +static int igb_ndo_set_vf_mac( struct net_device *netdev, int vf, u8 *mac); static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); - -#ifdef CONFIG_PCI_IOV +#endif static int igb_vf_configure(struct igb_adapter *adapter, int vf); -static int igb_find_enabled_vfs(struct igb_adapter *adapter); static int igb_check_vf_assignment(struct igb_adapter *adapter); +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED +static int igb_find_enabled_vfs(struct igb_adapter *adapter); #endif - #ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP -static int igb_suspend(struct device *); -#endif -static int igb_resume(struct device *); +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS +static int igb_suspend(struct device *dev); +static int igb_resume(struct device *dev); #ifdef CONFIG_PM_RUNTIME static int igb_runtime_suspend(struct device *dev); static int igb_runtime_resume(struct device *dev); static int igb_runtime_idle(struct device *dev); -#endif +#endif /* CONFIG_PM_RUNTIME */ static const struct dev_pm_ops igb_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) +#ifdef CONFIG_PM_RUNTIME SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, igb_runtime_idle) +#endif /* CONFIG_PM_RUNTIME */ }; -#endif +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM */ +#ifndef USE_REBOOT_NOTIFIER static void igb_shutdown(struct pci_dev *); -#ifdef CONFIG_IGB_DCA +#else +static int igb_notify_reboot(struct notifier_block *, unsigned long, void *); +static struct notifier_block igb_notifier_reboot = { + .notifier_call = igb_notify_reboot, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { .notifier_call = igb_notify_dca, @@ -201,13 +218,8 @@ static struct notifier_block dca_notifier = { /* for netdump / net console */ static void igb_netpoll(struct net_device *); #endif -#ifdef CONFIG_PCI_IOV -static unsigned int max_vfs = 0; -module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " - "per physical 
function"); -#endif /* CONFIG_PCI_IOV */ +#ifdef HAVE_PCI_ERS static pci_ers_result_t igb_io_error_detected(struct pci_dev *, pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); @@ -218,7 +230,9 @@ static struct pci_error_handlers igb_err_handler = { .slot_reset = igb_io_slot_reset, .resume = igb_io_resume, }; +#endif +static void igb_init_fw(struct igb_adapter *adapter); static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); static struct pci_driver igb_driver = { @@ -227,10 +241,16 @@ static struct pci_driver igb_driver = { .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS .driver.pm = &igb_pm_ops, -#endif +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM */ +#ifndef USE_REBOOT_NOTIFIER .shutdown = igb_shutdown, +#endif +#ifdef HAVE_PCI_ERS .err_handler = &igb_err_handler +#endif }; MODULE_AUTHOR("Intel Corporation, "); @@ -238,329 +258,34 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -struct igb_reg_info { - u32 ofs; - char *name; -}; - -static const struct igb_reg_info igb_reg_info_tbl[] = { - - /* General Registers */ - {E1000_CTRL, "CTRL"}, - {E1000_STATUS, "STATUS"}, - {E1000_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {E1000_ICR, "ICR"}, - - /* RX Registers */ - {E1000_RCTL, "RCTL"}, - {E1000_RDLEN(0), "RDLEN"}, - {E1000_RDH(0), "RDH"}, - {E1000_RDT(0), "RDT"}, - {E1000_RXDCTL(0), "RXDCTL"}, - {E1000_RDBAL(0), "RDBAL"}, - {E1000_RDBAH(0), "RDBAH"}, - - /* TX Registers */ - {E1000_TCTL, "TCTL"}, - {E1000_TDBAL(0), "TDBAL"}, - {E1000_TDBAH(0), "TDBAH"}, - {E1000_TDLEN(0), "TDLEN"}, - {E1000_TDH(0), "TDH"}, - {E1000_TDT(0), "TDT"}, - {E1000_TXDCTL(0), "TXDCTL"}, - {E1000_TDFH, "TDFH"}, - {E1000_TDFT, "TDFT"}, - {E1000_TDFHS, "TDFHS"}, - {E1000_TDFPC, "TDFPC"}, - - /* List Terminator */ - {} -}; - -/* - * igb_regdump - register printout routine - */ -static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) -{ - int n = 0; - char rname[16]; - u32 regs[8]; - - switch (reginfo->ofs) { - case E1000_RDLEN(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDLEN(n)); - break; - case E1000_RDH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDH(n)); - break; - case E1000_RDT(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDT(n)); - break; - case E1000_RXDCTL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RXDCTL(n)); - break; - case E1000_RDBAL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAL(n)); - break; - case E1000_RDBAH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAH(n)); - break; - case E1000_TDBAL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAL(n)); - break; - case E1000_TDBAH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDBAH(n)); - break; - case E1000_TDLEN(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDLEN(n)); - break; - case E1000_TDH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDH(n)); - break; - case E1000_TDT(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDT(n)); - break; - case E1000_TXDCTL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TXDCTL(n)); - break; - default: - pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); - return; - } - - snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); - pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], - regs[2], regs[3]); -} - -/* - * igb_dump - Print registers, tx-rings and rx-rings - */ -static void igb_dump(struct 
igb_adapter *adapter) +static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add) { - struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - struct igb_reg_info *reginfo; - struct igb_ring *tx_ring; - union e1000_adv_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; - struct igb_ring *rx_ring; - union e1000_adv_rx_desc *rx_desc; - u32 staterr; - u16 i, n; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start " - "last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); - for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; - reginfo->name; reginfo++) { - igb_regdump(hw, reginfo); - } - - /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) - goto exit; - - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); - for (n = 0; n < adapter->num_tx_queues; n++) { - struct igb_tx_buffer *buffer_info; - tx_ring = adapter->tx_ring[n]; - buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp); - } - - /* Print TX Rings */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); - - /* Transmit Descriptor Formats - * - * Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 38 36 35 32 31 24 15 0 - */ - - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " - "[bi->dma ] leng ntw timestamp " - "bi->skb\n"); - - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - const char *next_desc; - struct igb_tx_buffer *buffer_info; - tx_desc = IGB_TX_DESC(tx_ring, i); - buffer_info = &tx_ring->tx_buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - next_desc = " NTC/U"; - else if (i == tx_ring->next_to_use) - next_desc = " NTU"; - else if (i == tx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p%s\n", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp, - buffer_info->skb, next_desc); - - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); - } - } - - /* Print RX Rings Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue 
[NTU] [NTC]\n"); - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info(" %5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); - } - - /* Print RX Rings */ - if (!netif_msg_rx_status(adapter)) - goto exit; + struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie; + u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; + u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + u32 vfta; - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - - /* Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 17 16 4 3 0 - * +------------------------------------------------------+ - * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | - * | Checksum Ident | | | | Type | Type | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended Error | Extended Status | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 + /* + * if this is the management vlan the only option is to add it in so + * that the management pass through will continue to work */ + if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == mng_cookie->vlan_id)) + add = TRUE; + + vfta = adapter->shadow_vfta[index]; + + if (add) + vfta |= mask; + else + vfta &= ~mask; - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " - "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----" - "----------- [bi->skb] <-- Adv Rx Write-Back format\n"); - - for (i = 0; i < rx_ring->count; i++) { - const char *next_desc; - struct igb_rx_buffer *buffer_info; - buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IGB_RX_DESC(rx_ring, i); - u0 = (struct my_u0 *)rx_desc; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - if (i == rx_ring->next_to_use) - next_desc = " NTU"; - else if (i == rx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - - if (staterr & E1000_RXD_STAT_DD) { - /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX -------" - "--------- %p%s\n", "RWB", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - buffer_info->skb, next_desc); - } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX" - " %p%s\n", "R ", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->skb, next_desc); - - if (netif_msg_pktdata(adapter)) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), - PAGE_SIZE/2, true); - } - } - } - } - -exit: - return; + e1000_write_vfta(hw, index, vfta); + adapter->shadow_vfta[index] = vfta; } - +#ifdef HAVE_HW_TIME_STAMP /** * igb_read_clock - read raw cycle counter (to be used by time counter) */ @@ -578,24 +303,19 @@ static cycle_t igb_read_clock(const struct 
cyclecounter *tc) * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. */ if (hw->mac.type >= e1000_82580) { - stamp = rd32(E1000_SYSTIMR) >> 8; + stamp = E1000_READ_REG(hw, E1000_SYSTIMR) >> 8; shift = IGB_82580_TSYNC_SHIFT; } - stamp |= (u64)rd32(E1000_SYSTIML) << shift; - stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); + stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift; + stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32); return stamp; } -/** - * igb_get_hw_dev - return device - * used by hardware layer to print debugging information - **/ -struct net_device *igb_get_hw_dev(struct e1000_hw *hw) -{ - struct igb_adapter *adapter = hw->back; - return adapter->netdev; -} +#endif /* SIOCSHWTSTAMP */ +static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); /** * igb_init_module - Driver Registration Routine @@ -606,15 +326,29 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) static int __init igb_init_module(void) { int ret; - pr_info("%s - version %s\n", + + printk(KERN_INFO "%s - version %s\n", igb_driver_string, igb_driver_version); - pr_info("%s\n", igb_copyright); + printk(KERN_INFO "%s\n", igb_copyright); +#ifdef IGB_SYSFS +/* only use IGB_PROCFS if IGB_SYSFS is not defined */ +#else +#ifdef IGB_PROCFS + if (igb_procfs_topdir_init()) + printk(KERN_INFO "Procfs failed to initialize topdir\n"); +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA dca_register_notify(&dca_notifier); #endif ret = pci_register_driver(&igb_driver); +#ifdef USE_REBOOT_NOTIFIER + if (ret >= 0) { + register_reboot_notifier(&igb_notifier_reboot); + } +#endif return ret; } @@ -628,10 +362,21 @@ module_init(igb_init_module); **/ static void __exit igb_exit_module(void) { -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA dca_unregister_notify(&dca_notifier); +#endif +#ifdef USE_REBOOT_NOTIFIER + unregister_reboot_notifier(&igb_notifier_reboot); #endif pci_unregister_driver(&igb_driver); + +#ifdef IGB_SYSFS +/* only compile IGB_PROCFS if IGB_SYSFS is not defined */ +#else +#ifdef IGB_PROCFS + igb_procfs_topdir_exit(); +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ } module_exit(igb_exit_module); @@ -656,7 +401,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) * In order to avoid collision we start at the first free queue * and continue consuming queues in the same sequence */ - if (adapter->vfs_allocated_count) { + if ((adapter->rss_queues > 1) && adapter->vmdq_pools) { for (; i < adapter->rss_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); @@ -700,24 +445,28 @@ static int igb_alloc_queues(struct igb_adapter *adapter) { struct igb_ring *ring; int i; +#ifdef HAVE_DEVICE_NUMA_NODE int orig_node = adapter->node; +#endif /* HAVE_DEVICE_NUMA_NODE */ for (i = 0; i < adapter->num_tx_queues; i++) { +#ifdef HAVE_DEVICE_NUMA_NODE if (orig_node == -1) { int cur_node = next_online_node(adapter->node); if (cur_node == MAX_NUMNODES) cur_node = first_online_node; adapter->node = cur_node; } +#endif /* HAVE_DEVICE_NUMA_NODE */ ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); + adapter->node); if (!ring) ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->tx_ring_count; ring->queue_index = i; - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; ring->numa_node = adapter->node; /* For 82575, context 
index must be unique per ring. */ @@ -725,27 +474,39 @@ static int igb_alloc_queues(struct igb_adapter *adapter) set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); adapter->tx_ring[i] = ring; } +#ifdef HAVE_DEVICE_NUMA_NODE /* Restore the adapter's original node */ adapter->node = orig_node; +#endif /* HAVE_DEVICE_NUMA_NODE */ for (i = 0; i < adapter->num_rx_queues; i++) { +#ifdef HAVE_DEVICE_NUMA_NODE if (orig_node == -1) { int cur_node = next_online_node(adapter->node); if (cur_node == MAX_NUMNODES) cur_node = first_online_node; adapter->node = cur_node; } +#endif /* HAVE_DEVICE_NUMA_NODE */ ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); + adapter->node); if (!ring) ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->rx_ring_count; ring->queue_index = i; - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; ring->numa_node = adapter->node; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; +#endif +#ifndef HAVE_NDO_SET_FEATURES + /* enable rx checksum */ + set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + +#endif /* set flag indicating ring supports SCTP checksum offload */ if (adapter->hw.mac.type >= e1000_82576) set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); @@ -756,21 +517,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter) adapter->rx_ring[i] = ring; } +#ifdef HAVE_DEVICE_NUMA_NODE /* Restore the adapter's original node */ adapter->node = orig_node; +#endif /* HAVE_DEVICE_NUMA_NODE */ igb_cache_ring_register(adapter); - return 0; + return E1000_SUCCESS; err: +#ifdef HAVE_DEVICE_NUMA_NODE /* Restore the adapter's original node */ adapter->node = orig_node; +#endif /* HAVE_DEVICE_NUMA_NODE */ igb_free_queues(adapter); return -ENOMEM; } +static void igb_configure_lli(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 port; + + /* LLI should only be enabled for MSI-X or MSI interrupts */ + if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI)) + return; + + if (adapter->lli_port) { + /* use filter 0 for port */ + port = htons((u16)adapter->lli_port); + E1000_WRITE_REG(hw, E1000_IMIR(0), + (port | E1000_IMIR_PORT_IM_EN)); + E1000_WRITE_REG(hw, E1000_IMIREXT(0), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + } + + if (adapter->flags & IGB_FLAG_LLI_PUSH) { + /* use filter 1 for push flag */ + E1000_WRITE_REG(hw, E1000_IMIR(1), + (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); + E1000_WRITE_REG(hw, E1000_IMIREXT(1), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH)); + } + + if (adapter->lli_size) { + /* use filter 2 for size */ + E1000_WRITE_REG(hw, E1000_IMIR(2), + (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); + E1000_WRITE_REG(hw, E1000_IMIREXT(2), + (adapter->lli_size | E1000_IMIREXT_CTRL_BP)); + } + +} + /** * igb_write_ivar - configure ivar for given MSI-X vector * @hw: pointer to the HW structure @@ -786,7 +587,7 @@ err: static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, int index, int offset) { - u32 ivar = array_rd32(E1000_IVAR0, index); + u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); /* clear any bits that are currently set */ ivar &= ~((u32)0xFF << offset); @@ -794,7 +595,7 @@ static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, /* write vector and valid bit */ ivar |= (msix_vector | E1000_IVAR_VALID) << offset; - array_wr32(E1000_IVAR0, index, ivar); + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); } #define 
IGB_N0_QUEUE -1 @@ -823,14 +624,14 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; if (!adapter->msix_entries && msix_vector == 0) msixbm |= E1000_EIMS_OTHER; - array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; case e1000_82576: /* * 82576 uses a table that essentially consists of 2 columns * with 8 rows. The ordering is column-major so we use the - * lower 3 bits as the row index, and the 4th bit as the + * lower 3 bits as the row index, and the 4th bit as the * column offset. */ if (rx_queue > IGB_N0_QUEUE) @@ -891,7 +692,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) /* set vector for other causes, i.e. link changes */ switch (hw->mac.type) { case e1000_82575: - tmp = rd32(E1000_CTRL_EXT); + tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); /* enable MSI-X PBA support*/ tmp |= E1000_CTRL_EXT_PBA_CLR; @@ -899,10 +700,10 @@ static void igb_configure_msix(struct igb_adapter *adapter) tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; - wr32(E1000_CTRL_EXT, tmp); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); /* enable msix_other interrupt */ - array_wr32(E1000_MSIXBM(0), vector++, + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); adapter->eims_other = E1000_EIMS_OTHER; @@ -913,7 +714,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) case e1000_i350: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. */ - wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_PBA | E1000_GPIE_EIAME | E1000_GPIE_NSICR); @@ -921,7 +722,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) adapter->eims_other = 1 << vector; tmp = (vector++ | E1000_IVAR_VALID) << 8; - wr32(E1000_IVAR_MISC, tmp); + E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp); break; default: /* do nothing, since nothing else supports MSI-X */ @@ -933,7 +734,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) igb_assign_vector(adapter->q_vector[i], vector++); - wrfl(); + E1000_WRITE_FLUSH(hw); } /** @@ -949,7 +750,7 @@ static int igb_request_msix(struct igb_adapter *adapter) int i, err = 0, vector = 0; err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_other, 0, netdev->name, adapter); + &igb_msix_other, 0, netdev->name, adapter); if (err) goto out; vector++; @@ -961,13 +762,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (q_vector->rx.ring && q_vector->tx.ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, - q_vector->rx.ring->queue_index); + q_vector->rx.ring->queue_index); else if (q_vector->tx.ring) sprintf(q_vector->name, "%s-tx-%u", netdev->name, - q_vector->tx.ring->queue_index); + q_vector->tx.ring->queue_index); else if (q_vector->rx.ring) sprintf(q_vector->name, "%s-rx-%u", netdev->name, - q_vector->rx.ring->queue_index); + q_vector->rx.ring->queue_index); else sprintf(q_vector->name, "%s-unused", netdev->name); @@ -1014,6 +815,13 @@ static void igb_free_q_vectors(struct igb_adapter *adapter) if (!q_vector) continue; netif_napi_del(&q_vector->napi); +#ifndef IGB_NO_LRO + if (q_vector->lrolist) { + __skb_queue_purge(&q_vector->lrolist->active); + vfree(q_vector->lrolist); + q_vector->lrolist = NULL; + } +#endif kfree(q_vector); } adapter->num_q_vectors = 0; @@ -1033,180 +841,388 @@ static void 
igb_clear_interrupt_scheme(struct igb_adapter *adapter) } /** - * igb_set_interrupt_capability - set MSI or MSI-X if supported + * igb_process_mdd_event + * @adapter - board private structure * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -static int igb_set_interrupt_capability(struct igb_adapter *adapter) + * Identify a malicious VF, disable the VF TX/RX queues and log a message. + */ +static void igb_process_mdd_event(struct igb_adapter *adapter) { - int err; - int numvecs, i; - - /* Number of supported queues. */ - adapter->num_rx_queues = adapter->rss_queues; - if (adapter->vfs_allocated_count) - adapter->num_tx_queues = 1; - else - adapter->num_tx_queues = adapter->rss_queues; - - /* start with one vector for every rx queue */ - numvecs = adapter->num_rx_queues; - - /* if tx handler is separate add 1 for every tx queue */ - if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) - numvecs += adapter->num_tx_queues; + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc, vfte, vfre, mdfb; + u8 vf_queue; - /* store the number of vectors reserved for queues */ - adapter->num_q_vectors = numvecs; + lvmmc = E1000_READ_REG(hw, E1000_LVMMC); + vf_queue = lvmmc >> 29; - /* add 1 vector for link status interrupts */ - numvecs++; - adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), - GFP_KERNEL); - if (!adapter->msix_entries) - goto msi_only; + /* VF index cannot be bigger or equal to VFs allocated */ + if (vf_queue >= adapter->vfs_allocated_count) + return; - for (i = 0; i < numvecs; i++) - adapter->msix_entries[i].entry = i; + netdev_info(adapter->netdev, + "VF %d misbehaved. VF queues are disabled. " + "VM misbehavior code is 0x%x\n", vf_queue, lvmmc); - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - numvecs); - if (err == 0) - goto out; + /* Disable VFTE and VFRE related bits */ + vfte = E1000_READ_REG(hw, E1000_VFTE); + vfte &= ~(1 << vf_queue); + E1000_WRITE_REG(hw, E1000_VFTE, vfte); - igb_reset_interrupt_capability(adapter); + vfre = E1000_READ_REG(hw, E1000_VFRE); + vfre &= ~(1 << vf_queue); + E1000_WRITE_REG(hw, E1000_VFRE, vfre); - /* If we can't do MSI-X, try MSI */ -msi_only: -#ifdef CONFIG_PCI_IOV - /* disable SR-IOV for non MSI-X configurations */ - if (adapter->vf_data) { - struct e1000_hw *hw = &adapter->hw; - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(adapter->pdev); - msleep(500); + /* Disable MDFB related bit */ + mdfb = E1000_READ_REG(hw, E1000_MDFB); + mdfb &= ~(1 << vf_queue); + E1000_WRITE_REG(hw, E1000_MDFB, mdfb); - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - wrfl(); - msleep(100); - dev_info(&adapter->pdev->dev, "IOV Disabled\n"); - } -#endif - adapter->vfs_allocated_count = 0; - adapter->rss_queues = 1; - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_q_vectors = 1; - if (!pci_enable_msi(adapter->pdev)) - adapter->flags |= IGB_FLAG_HAS_MSI; -out: - /* Notify the stack of the (possibly) reduced queue counts. 
*/ - rtnl_lock(); - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - err = netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); - rtnl_unlock(); - return err; + /* Reset the specific VF */ + E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST); } /** - * igb_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize + * igb_disable_mdd + * @adapter - board private structure * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. + * Disable MDD behavior in the HW **/ -static int igb_alloc_q_vectors(struct igb_adapter *adapter) +static void igb_disable_mdd(struct igb_adapter *adapter) { - struct igb_q_vector *q_vector; struct e1000_hw *hw = &adapter->hw; - int v_idx; - int orig_node = adapter->node; - - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - if ((adapter->num_q_vectors == (adapter->num_rx_queues + - adapter->num_tx_queues)) && - (adapter->num_rx_queues == v_idx)) - adapter->node = orig_node; - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL, - adapter->node); - if (!q_vector) - q_vector = kzalloc(sizeof(struct igb_q_vector), - GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - q_vector->itr_register = hw->hw_addr + E1000_EITR(0); - q_vector->itr_val = IGB_START_ITR; - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); - adapter->q_vector[v_idx] = q_vector; - } - /* Restore the adapter's original node */ - adapter->node = orig_node; + u32 reg; - return 0; + if (hw->mac.type != e1000_i350) + return; -err_out: - /* Restore the adapter's original node */ - adapter->node = orig_node; - igb_free_q_vectors(adapter); - return -ENOMEM; + reg = E1000_READ_REG(hw, E1000_DTXCTL); + reg &= (~E1000_DTXCTL_MDP_EN); + E1000_WRITE_REG(hw, E1000_DTXCTL, reg); } -static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) +/** + * igb_enable_mdd + * @adapter - board private structure + * + * Enable the HW to detect a malicious driver and send an interrupt to + * the driver. 
+ * + * Only available on i350 device + **/ +static void igb_enable_mdd(struct igb_adapter *adapter) { - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - - q_vector->rx.ring = adapter->rx_ring[ring_idx]; - q_vector->rx.ring->q_vector = q_vector; - q_vector->rx.count++; - q_vector->itr_val = adapter->rx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; -} + struct e1000_hw *hw = &adapter->hw; + u32 reg; -static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + if (hw->mac.type != e1000_i350) + return; - q_vector->tx.ring = adapter->tx_ring[ring_idx]; - q_vector->tx.ring->q_vector = q_vector; - q_vector->tx.count++; - q_vector->itr_val = adapter->tx_itr_setting; - q_vector->tx.work_limit = adapter->tx_work_limit; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; + reg = E1000_READ_REG(hw, E1000_DTXCTL); + reg |= E1000_DTXCTL_MDP_EN; + E1000_WRITE_REG(hw, E1000_DTXCTL, reg); } /** - * igb_map_ring_to_vector - maps allocated queues to vectors + * igb_reset_sriov_capability - disable SR-IOV if enabled * - * This function maps the recently allocated queues to vectors. + * Attempt to disable single root IO virtualization capabilities present in the + * kernel. **/ -static int igb_map_ring_to_vector(struct igb_adapter *adapter) +static void igb_reset_sriov_capability(struct igb_adapter *adapter) { - int i; - int v_idx = 0; - - if ((adapter->num_q_vectors < adapter->num_rx_queues) || - (adapter->num_q_vectors < adapter->num_tx_queues)) - return -ENOMEM; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; - if (adapter->num_q_vectors >= - (adapter->num_rx_queues + adapter->num_tx_queues)) { + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + if (!igb_check_vf_assignment(adapter)) { + /* + * disable iov and allow time for transactions to + * clear + */ + pci_disable_sriov(pdev); + msleep(500); + + dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n"); + } else { + dev_info(pci_dev_to_dev(pdev), "IOV Not Disabled\n " + "VF(s) are assigned to guests!\n"); + } + /* Disable Malicious Driver Detection */ + igb_disable_mdd(adapter); + + /* free vf data storage */ + kfree(adapter->vf_data); + adapter->vf_data = NULL; + + /* switch rings back to PF ownership */ + E1000_WRITE_REG(hw, E1000_IOVCTL, + E1000_IOVCTL_REUSE_VFQ); + E1000_WRITE_FLUSH(hw); + msleep(100); + } + + adapter->vfs_allocated_count = 0; +} + +/** + * igb_set_sriov_capability - setup SR-IOV if supported + * + * Attempt to enable single root IO virtualization capabilities present in the + * kernel. 
+ **/ +static void igb_set_sriov_capability(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int old_vfs = 0; + int i; + +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + old_vfs = igb_find_enabled_vfs(adapter); +#endif + if (old_vfs) { + dev_info(pci_dev_to_dev(pdev), + "%d pre-allocated VFs found - override " + "max_vfs setting of %d\n", old_vfs, + adapter->vfs_allocated_count); + adapter->vfs_allocated_count = old_vfs; + } + /* no VFs requested, do nothing */ + if (!adapter->vfs_allocated_count) + return; + + /* allocate vf data storage */ + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), + GFP_KERNEL); + + if (adapter->vf_data) { + if (!old_vfs) { + if (pci_enable_sriov(pdev, + adapter->vfs_allocated_count)) + goto err_out; + } + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. */ + if (adapter->hw.mac.type >= e1000_i350) + adapter->dmac = IGB_DMAC_DISABLE; + if (adapter->hw.mac.type < e1000_i350) + adapter->flags |= IGB_FLAG_DETECT_BAD_DMA; + return; + + } + +err_out: + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + dev_warn(pci_dev_to_dev(pdev), + "Failed to initialize SR-IOV virtualization\n"); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + int numvecs, i; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + if (adapter->vmdq_pools > 1) + adapter->num_rx_queues += adapter->vmdq_pools - 1; + +#ifdef HAVE_TX_MQ + if (adapter->vmdq_pools) + adapter->num_tx_queues = adapter->vmdq_pools; + else + adapter->num_tx_queues = adapter->num_rx_queues; +#else + adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools); +#endif + + switch (adapter->int_mode) { + case IGB_INT_MODE_MSIX: + /* start with one vector for every rx queue */ + numvecs = adapter->num_rx_queues; + + /* if tx handler is separate add 1 for every tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + adapter->msix_entries = kcalloc(numvecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(pdev, + adapter->msix_entries, numvecs); + if (err == 0) + break; + } + /* MSI-X failed, so fall through and try MSI */ + dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); + igb_reset_interrupt_capability(adapter); + case IGB_INT_MODE_MSI: + if (!pci_enable_msi(pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; + else + dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI " + "interrupts. 
Falling back to legacy " + "interrupts.\n"); + /* Fall through */ + case IGB_INT_MODE_LEGACY: + /* disable advanced features and set number of queues to 1 */ + igb_reset_sriov_capability(adapter); + adapter->vmdq_pools = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + /* Don't do anything; this is system default */ + break; + } + +#ifdef HAVE_TX_MQ + /* Notify the stack of the (possibly) reduced Tx Queue count. */ +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; +#else + adapter->netdev->real_num_tx_queues = + (adapter->vmdq_pools ? 1 : adapter->num_tx_queues); +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* HAVE_TX_MQ */ +} + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + struct igb_q_vector *q_vector; + struct e1000_hw *hw = &adapter->hw; + int v_idx; +#ifdef HAVE_DEVICE_NUMA_NODE + int orig_node = adapter->node; +#endif /* HAVE_DEVICE_NUMA_NODE */ + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { +#ifdef HAVE_DEVICE_NUMA_NODE + if ((adapter->num_q_vectors == (adapter->num_rx_queues + + adapter->num_tx_queues)) && + (adapter->num_rx_queues == v_idx)) + adapter->node = orig_node; + if (orig_node == -1) { + int cur_node = next_online_node(adapter->node); + if (cur_node == MAX_NUMNODES) + cur_node = first_online_node; + adapter->node = cur_node; + } +#endif /* HAVE_DEVICE_NUMA_NODE */ + q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL, + adapter->node); + if (!q_vector) + q_vector = kzalloc(sizeof(struct igb_q_vector), + GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->itr_register = hw->hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); + adapter->q_vector[v_idx] = q_vector; +#ifndef IGB_NO_LRO + if (v_idx < adapter->num_rx_queues) { + int size = sizeof(struct igb_lro_list); + q_vector->lrolist = vzalloc_node(size, q_vector->numa_node); + if (!q_vector->lrolist) + q_vector->lrolist = vzalloc(size); + if (!q_vector->lrolist) + goto err_out; + __skb_queue_head_init(&q_vector->lrolist->active); + } +#endif /* IGB_NO_LRO */ + } +#ifdef HAVE_DEVICE_NUMA_NODE + /* Restore the adapter's original node */ + adapter->node = orig_node; +#endif /* HAVE_DEVICE_NUMA_NODE */ + + return 0; + +err_out: +#ifdef HAVE_DEVICE_NUMA_NODE + /* Restore the adapter's original node */ + adapter->node = orig_node; +#endif /* HAVE_DEVICE_NUMA_NODE */ + igb_free_q_vectors(adapter); + return -ENOMEM; +} + +static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + q_vector->rx.ring = adapter->rx_ring[ring_idx]; + q_vector->rx.ring->q_vector = q_vector; + q_vector->rx.count++; + q_vector->itr_val = adapter->rx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + q_vector->tx.ring = adapter->tx_ring[ring_idx]; + q_vector->tx.ring->q_vector = q_vector; + 
q_vector->tx.count++; + q_vector->itr_val = adapter->tx_itr_setting; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +/** + * igb_map_ring_to_vector - maps allocated queues to vectors + * + * This function maps the recently allocated queues to vectors. + **/ +static int igb_map_ring_to_vector(struct igb_adapter *adapter) +{ + int i; + int v_idx = 0; + + if ((adapter->num_q_vectors < adapter->num_rx_queues) || + (adapter->num_q_vectors < adapter->num_tx_queues)) + return -ENOMEM; + + if (adapter->num_q_vectors >= + (adapter->num_rx_queues + adapter->num_tx_queues)) { for (i = 0; i < adapter->num_rx_queues; i++) igb_map_rx_ring_to_vector(adapter, i, v_idx++); for (i = 0; i < adapter->num_tx_queues; i++) @@ -1233,25 +1249,23 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter) struct pci_dev *pdev = adapter->pdev; int err; - err = igb_set_interrupt_capability(adapter); - if (err) - return err; + igb_set_interrupt_capability(adapter); err = igb_alloc_q_vectors(adapter); if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); goto err_alloc_q_vectors; } err = igb_alloc_queues(adapter); if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); goto err_alloc_queues; } err = igb_map_ring_to_vector(adapter); if (err) { - dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); + dev_err(pci_dev_to_dev(pdev), "Invalid q_vector to ring mapping\n"); goto err_map_queues; } @@ -1284,6 +1298,7 @@ static int igb_request_irq(struct igb_adapter *adapter) goto request_done; /* fall back to MSI */ igb_clear_interrupt_scheme(adapter); + igb_reset_sriov_capability(adapter); if (!pci_enable_msi(pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); @@ -1293,13 +1308,13 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->num_q_vectors = 1; err = igb_alloc_q_vectors(adapter); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); goto request_done; } err = igb_alloc_queues(adapter); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); igb_free_q_vectors(adapter); goto request_done; @@ -1311,7 +1326,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); if (adapter->flags & IGB_FLAG_HAS_MSI) { - err = request_irq(pdev->irq, igb_intr_msi, 0, + err = request_irq(pdev->irq, &igb_intr_msi, 0, netdev->name, adapter); if (!err) goto request_done; @@ -1321,11 +1336,11 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_HAS_MSI; } - err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED, netdev->name, adapter); if (err) - dev_err(&pdev->dev, "Error %d getting interrupt\n", + dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n", err); request_done: @@ -1341,7 +1356,7 @@ static void igb_free_irq(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) free_irq(adapter->msix_entries[vector++].vector, - adapter->q_vector[i]); + adapter->q_vector[i]); } else { free_irq(adapter->pdev->irq, adapter); } @@ -1361,20 +1376,24 @@ static void igb_irq_disable(struct igb_adapter *adapter) * issues on the VF drivers so we only need 
to clear what we set */ if (adapter->msix_entries) { - u32 regval = rd32(E1000_EIAM); - wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); - wr32(E1000_EIMC, adapter->eims_enable_mask); - regval = rd32(E1000_EIAC); - wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + u32 regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIAM, regval & ~adapter->eims_enable_mask); + E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask); + regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval & ~adapter->eims_enable_mask); } - wr32(E1000_IAM, 0); - wr32(E1000_IMC, ~0); - wrfl(); + E1000_WRITE_REG(hw, E1000_IAM, 0); + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); + if (adapter->msix_entries) { - int i; + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + for (i = 0; i < adapter->num_q_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); + synchronize_irq(adapter->msix_entries[vector++].vector); } else { synchronize_irq(adapter->pdev->irq); } @@ -1390,20 +1409,24 @@ static void igb_irq_enable(struct igb_adapter *adapter) if (adapter->msix_entries) { u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; - u32 regval = rd32(E1000_EIAC); - wr32(E1000_EIAC, regval | adapter->eims_enable_mask); - regval = rd32(E1000_EIAM); - wr32(E1000_EIAM, regval | adapter->eims_enable_mask); - wr32(E1000_EIMS, adapter->eims_enable_mask); + u32 regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval | adapter->eims_enable_mask); + regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIAM, regval | adapter->eims_enable_mask); + E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask); if (adapter->vfs_allocated_count) { - wr32(E1000_MBVFIMR, 0xFF); + E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF); ims |= E1000_IMS_VMMB; + /* For I350 device only enable MDD interrupts*/ + if ((adapter->mdd) && + (adapter->hw.mac.type == e1000_i350)) + ims |= E1000_IMS_MDDET; } - wr32(E1000_IMS, ims); + E1000_WRITE_REG(hw, E1000_IMS, ims); } else { - wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK | E1000_IMS_DRSTA); - wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK | E1000_IMS_DRSTA); } } @@ -1416,7 +1439,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(adapter, vid, TRUE); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; @@ -1424,9 +1447,13 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && (vid != old_vid) && +#ifdef HAVE_VLAN_RX_REGISTER + !vlan_group_get_device(adapter->vlgrp, old_vid)) { +#else !test_bit(old_vid, adapter->active_vlans)) { +#endif /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, false); + igb_vfta_set(adapter, old_vid, FALSE); } } @@ -1445,8 +1472,8 @@ static void igb_release_hw_control(struct igb_adapter *adapter) u32 ctrl_ext; /* Let firmware take over control of h/w */ - ctrl_ext = rd32(E1000_CTRL_EXT); - wr32(E1000_CTRL_EXT, + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } @@ -1465,8 +1492,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter) u32 ctrl_ext; /* Let firmware know the driver has taken over */ - ctrl_ext = rd32(E1000_CTRL_EXT); - 
wr32(E1000_CTRL_EXT, + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } @@ -1491,7 +1518,14 @@ static void igb_configure(struct igb_adapter *adapter) igb_configure_tx(adapter); igb_configure_rx(adapter); - igb_rx_fifo_flush_82575(&adapter->hw); + e1000_rx_fifo_flush_82575(&adapter->hw); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + + if (adapter->num_tx_queues > 1) + netdev->features |= NETIF_F_MULTI_QUEUE; + else + netdev->features &= ~NETIF_F_MULTI_QUEUE; +#endif /* call igb_desc_unused which always leaves * at least 1 descriptor unused to make sure @@ -1509,10 +1543,11 @@ static void igb_configure(struct igb_adapter *adapter) void igb_power_up_link(struct igb_adapter *adapter) { if (adapter->hw.phy.media_type == e1000_media_type_copper) - igb_power_up_phy_copper(&adapter->hw); + e1000_power_up_phy(&adapter->hw); else - igb_power_up_serdes_link_82575(&adapter->hw); - igb_reset_phy(&adapter->hw); + e1000_power_up_fiber_serdes_link(&adapter->hw); + + e1000_phy_hw_reset(&adapter->hw); } /** @@ -1522,9 +1557,9 @@ void igb_power_up_link(struct igb_adapter *adapter) static void igb_power_down_link(struct igb_adapter *adapter) { if (adapter->hw.phy.media_type == e1000_media_type_copper) - igb_power_down_phy_copper_82575(&adapter->hw); + e1000_power_down_phy(&adapter->hw); else - igb_shutdown_serdes_link_82575(&adapter->hw); + e1000_shutdown_fiber_serdes_link(&adapter->hw); } /** @@ -1549,19 +1584,23 @@ int igb_up(struct igb_adapter *adapter) else igb_assign_vector(adapter->q_vector[0], 0); + igb_configure_lli(adapter); + /* Clear any pending interrupts. */ - rd32(E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); igb_irq_enable(adapter); /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { - u32 reg_data = rd32(E1000_CTRL_EXT); + u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); reg_data |= E1000_CTRL_EXT_PFRSTD; - wr32(E1000_CTRL_EXT, reg_data); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); } netif_tx_start_all_queues(adapter->netdev); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + schedule_work(&adapter->dma_err_task); /* start the watchdog. 
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1581,19 +1620,19 @@ void igb_down(struct igb_adapter *adapter) set_bit(__IGB_DOWN, &adapter->state); /* disable receives in the hardware */ - rctl = rd32(E1000_RCTL); - wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ - tctl = rd32(E1000_TCTL); + tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_EN; - wr32(E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_TCTL, tctl); /* flush both disables and wait for them to finish */ - wrfl(); - msleep(10); + E1000_WRITE_FLUSH(hw); + usleep_range(10000, 20000); for (i = 0; i < adapter->num_q_vectors; i++) napi_disable(&(adapter->q_vector[i]->napi)); @@ -1601,23 +1640,27 @@ void igb_down(struct igb_adapter *adapter) igb_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + del_timer_sync(&adapter->dma_err_timer); del_timer_sync(&adapter->phy_info_timer); netif_carrier_off(netdev); /* record the stats before reset*/ - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - spin_unlock(&adapter->stats64_lock); + igb_update_stats(adapter); adapter->link_speed = 0; adapter->link_duplex = 0; +#ifdef HAVE_PCI_ERS if (!pci_channel_offline(adapter->pdev)) igb_reset(adapter); +#else + igb_reset(adapter); +#endif igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA /* since we reset the hardware DCA settings were cleared */ igb_setup_dca(adapter); @@ -1628,7 +1671,7 @@ void igb_reinit_locked(struct igb_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igb_down(adapter); igb_up(adapter); clear_bit(__IGB_RESETTING, &adapter->state); @@ -1649,11 +1692,11 @@ void igb_reset(struct igb_adapter *adapter) switch (mac->type) { case e1000_i350: case e1000_82580: - pba = rd32(E1000_RXPBS); - pba = igb_rxpbs_adjust_82580(pba); + pba = E1000_READ_REG(hw, E1000_RXPBS); + pba = e1000_rxpbs_adjust_82580(pba); break; case e1000_82576: - pba = rd32(E1000_RXPBS); + pba = E1000_READ_REG(hw, E1000_RXPBS); pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82575: @@ -1665,7 +1708,7 @@ void igb_reset(struct igb_adapter *adapter) if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && (mac->type < e1000_82576)) { /* adjust PBA for jumbo frames */ - wr32(E1000_PBA, pba); + E1000_WRITE_REG(hw, E1000_PBA, pba); /* To maintain wire speed transmits, the Tx FIFO should be * large enough to accommodate two full transmit packets, @@ -1673,7 +1716,7 @@ void igb_reset(struct igb_adapter *adapter) * the Rx FIFO should be large enough to accommodate at least * one full receive packet and is similarly rounded up and * expressed in KB. 
*/ - pba = rd32(E1000_PBA); + pba = E1000_READ_REG(hw, E1000_PBA); /* upper 16 bits has Tx packet buffer allocation size in KB */ tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */ @@ -1702,7 +1745,7 @@ void igb_reset(struct igb_adapter *adapter) if (pba < min_rx_space) pba = min_rx_space; } - wr32(E1000_PBA, pba); + E1000_WRITE_REG(hw, E1000_PBA, pba); } /* flow control settings */ @@ -1723,6 +1766,10 @@ void igb_reset(struct igb_adapter *adapter) /* disable receive for all VFs and wait one second */ if (adapter->vfs_allocated_count) { int i; + /* + * Clear all flags except indication that the PF has set + * the VF MAC addresses administratively + */ for (i = 0 ; i < adapter->vfs_allocated_count; i++) adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; @@ -1730,44 +1777,59 @@ void igb_reset(struct igb_adapter *adapter) igb_ping_all_vfs(adapter); /* disable transmits and receives */ - wr32(E1000_VFRE, 0); - wr32(E1000_VFTE, 0); + E1000_WRITE_REG(hw, E1000_VFRE, 0); + E1000_WRITE_REG(hw, E1000_VFTE, 0); } /* Allow time for pending master requests to run */ - hw->mac.ops.reset_hw(hw); - wr32(E1000_WUC, 0); + e1000_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); - if (hw->mac.ops.init_hw(hw)) - dev_err(&pdev->dev, "Hardware Error\n"); + if (e1000_init_hw(hw)) + dev_err(pci_dev_to_dev(pdev), "Hardware Error\n"); igb_init_dmac(adapter, pba); + /* Re-initialize the thermal sensor on i350 devices. */ + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* + * If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + e1000_set_i2c_bb(hw); + e1000_init_thermal_sensor_thresh(hw); + } if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); igb_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ - wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); - igb_get_phy_info(hw); + e1000_get_phy_info(hw); } -static u32 igb_fix_features(struct net_device *netdev, u32 features) +#ifdef HAVE_NDO_SET_FEATURES +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) { /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. + * Since there is no support for separate tx vlan accel + * enable, make sure the tx flag is cleared if rx is. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else + if (!(features & NETIF_F_HW_VLAN_RX)) features &= ~NETIF_F_HW_VLAN_TX; + /* If Rx checksum is disabled, then LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + return features; } -static int igb_set_features(struct net_device *netdev, u32 features) +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) { u32 changed = netdev->features ^ features; @@ -1777,13 +1839,14 @@ static int igb_set_features(struct net_device *netdev, u32 features) return 0; } +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, .ndo_start_xmit = igb_xmit_frame, - .ndo_get_stats64 = igb_get_stats64, + .ndo_get_stats = igb_get_stats, .ndo_set_rx_mode = igb_set_rx_mode, - .ndo_set_multicast_list = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, .ndo_change_mtu = igb_change_mtu, .ndo_do_ioctl = igb_ioctl, @@ -1791,17 +1854,119 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, +#ifdef IFLA_VF_MAX .ndo_set_vf_mac = igb_ndo_set_vf_mac, .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, .ndo_get_vf_config = igb_ndo_get_vf_config, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = igb_netpoll, #endif +#ifdef HAVE_NDO_SET_FEATURES .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, +#endif +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = igb_vlan_mode, +#endif }; +#ifdef CONFIG_IGB_VMDQ_NETDEV +static const struct net_device_ops igb_vmdq_ops = { + .ndo_open = &igb_vmdq_open, + .ndo_stop = &igb_vmdq_close, + .ndo_start_xmit = &igb_vmdq_xmit_frame, + .ndo_get_stats = &igb_vmdq_get_stats, + .ndo_set_rx_mode = &igb_vmdq_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = &igb_vmdq_set_mac, + .ndo_change_mtu = &igb_vmdq_change_mtu, + .ndo_tx_timeout = &igb_vmdq_tx_timeout, + .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register, + .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid, +}; + +#endif /* CONFIG_IGB_VMDQ_NETDEV */ +#endif /* HAVE_NET_DEVICE_OPS */ +#ifdef CONFIG_IGB_VMDQ_NETDEV +void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev) +{ +#ifdef HAVE_NET_DEVICE_OPS + vnetdev->netdev_ops = &igb_vmdq_ops; +#else + dev->open = &igb_vmdq_open; + dev->stop = &igb_vmdq_close; + dev->hard_start_xmit = &igb_vmdq_xmit_frame; + dev->get_stats = &igb_vmdq_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &igb_vmdq_set_rx_mode; +#endif + dev->set_multicast_list = &igb_vmdq_set_rx_mode; + dev->set_mac_address = &igb_vmdq_set_mac; + dev->change_mtu = &igb_vmdq_change_mtu; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &igb_vmdq_tx_timeout; +#endif +#ifdef NETIF_F_HW_VLAN_TX + dev->vlan_rx_register = &igb_vmdq_vlan_rx_register; + dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid; +#endif +#endif + igb_vmdq_set_ethtool_ops(vnetdev); + vnetdev->watchdog_timeo = 5 * HZ; + +} + +int igb_init_vmdq_netdevs(struct igb_adapter *adapter) +{ + int pool, err = 0, base_queue; + struct net_device *vnetdev; + struct igb_vmdq_adapter *vmdq_adapter; + + for (pool = 1; pool < adapter->vmdq_pools; pool++) { + int qpp = (!adapter->rss_queues 
? 1 : adapter->rss_queues); + base_queue = pool * qpp; + vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter)); + if (!vnetdev) { + err = -ENOMEM; + break; + } + vmdq_adapter = netdev_priv(vnetdev); + vmdq_adapter->vnetdev = vnetdev; + vmdq_adapter->real_adapter = adapter; + vmdq_adapter->rx_ring = adapter->rx_ring[base_queue]; + vmdq_adapter->tx_ring = adapter->tx_ring[base_queue]; + igb_assign_vmdq_netdev_ops(vnetdev); + snprintf(vnetdev->name, IFNAMSIZ, "%sv%d", + adapter->netdev->name, pool); + vnetdev->features = adapter->netdev->features; +#ifdef HAVE_NETDEV_VLAN_FEATURES + vnetdev->vlan_features = adapter->netdev->vlan_features; +#endif + adapter->vmdq_netdev[pool-1] = vnetdev; + err = register_netdev(vnetdev); + if (err) + break; + } + return err; +} + +int igb_remove_vmdq_netdevs(struct igb_adapter *adapter) +{ + int pool, err = 0; + + for (pool = 1; pool < adapter->vmdq_pools; pool++) { + unregister_netdev(adapter->vmdq_netdev[pool-1]); + free_netdev(adapter->vmdq_netdev[pool-1]); + adapter->vmdq_netdev[pool-1] = NULL; + } + return err; +} +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -1820,47 +1985,49 @@ static int __devinit igb_probe(struct pci_dev *pdev, struct igb_adapter *adapter; struct e1000_hw *hw; u16 eeprom_data = 0; + u8 pba_str[E1000_PBANUM_LENGTH]; s32 ret_val; static int global_quad_port_a; /* global quad port a indication */ - const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; - unsigned long mmio_start, mmio_len; - int err, pci_using_dac; - u16 eeprom_apme_mask = IGB_EEPROM_APME; - u8 part_str[E1000_PBANUM_LENGTH]; - - /* Catch broken hardware that put the wrong VF device ID in - * the PCIe SR-IOV capability. - */ - if (pdev->is_virtfn) { - WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); - return -EINVAL; - } + int i, err, pci_using_dac; + static int cards_found; err = pci_enable_device_mem(pdev); if (err) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); if (!err) pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + IGB_ERR("No usable DMA configuration, " + "aborting\n"); goto err_dma; } } } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), +#ifndef HAVE_ASPM_QUIRKS + /* 82575 requires that the pci-e link partner disable the L0s state */ + switch (pdev->device) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + default: + break; + } + +#endif /* HAVE_ASPM_QUIRKS */ + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, + IORESOURCE_MEM), igb_driver_name); if (err) goto err_pci_reg; @@ -1868,14 +2035,18 @@ static int __devinit igb_probe(struct pci_dev *pdev, pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); - pci_save_state(pdev); err = -ENOMEM; +#ifdef HAVE_TX_MQ netdev = 
alloc_etherdev_mq(sizeof(struct igb_adapter), - IGB_MAX_TX_QUEUES); + IGB_MAX_TX_QUEUES); +#else + netdev = alloc_etherdev(sizeof(struct igb_adapter)); +#endif /* HAVE_TX_MQ */ if (!netdev) goto err_alloc_etherdev; + SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -1884,59 +2055,76 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); + adapter->port_num = hw->bus.func; + adapter->msg_enable = (1 << debug) - 1; +#ifdef HAVE_PCI_ERS + err = pci_save_state(pdev); + if (err) + goto err_ioremap; +#endif err = -EIO; - hw->hw_addr = ioremap(mmio_start, mmio_len); + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); if (!hw->hw_addr) goto err_ioremap; +#ifdef HAVE_NET_DEVICE_OPS netdev->netdev_ops = &igb_netdev_ops; +#else /* HAVE_NET_DEVICE_OPS */ + netdev->open = &igb_open; + netdev->stop = &igb_close; + netdev->get_stats = &igb_get_stats; +#ifdef HAVE_SET_RX_MODE + netdev->set_rx_mode = &igb_set_rx_mode; +#endif + netdev->set_multicast_list = &igb_set_rx_mode; + netdev->set_mac_address = &igb_set_mac; + netdev->change_mtu = &igb_change_mtu; + netdev->do_ioctl = &igb_ioctl; +#ifdef HAVE_TX_TIMEOUT + netdev->tx_timeout = &igb_tx_timeout; +#endif + netdev->vlan_rx_register = igb_vlan_mode; + netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; + netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = igb_netpoll; +#endif + netdev->hard_start_xmit = &igb_xmit_frame; +#endif /* HAVE_NET_DEVICE_OPS */ igb_set_ethtool_ops(netdev); +#ifdef HAVE_TX_TIMEOUT netdev->watchdog_timeo = 5 * HZ; +#endif strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; - - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - - /* Copy the default MAC, PHY and NVM function pointers */ - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); - /* Initialize skew-specific constants */ - err = ei->get_invariants(hw); - if (err) - goto err_sw_init; + adapter->bd_number = cards_found; /* setup the private structure */ err = igb_sw_init(adapter); if (err) goto err_sw_init; - igb_get_bus_info_pcie(hw); + e1000_get_bus_info(hw); - hw->phy.autoneg_wait_to_complete = false; + hw->phy.autoneg_wait_to_complete = FALSE; + hw->mac.adaptive_ifs = FALSE; /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { +#ifdef ETH_TP_MDI_X + hw->phy.mdix = ETH_TP_MDI_INVALID; +#else hw->phy.mdix = AUTO_ALL_MODES; - hw->phy.disable_polarity_correction = false; +#endif /* ETH_TP_MDI_X */ + hw->phy.disable_polarity_correction = FALSE; hw->phy.ms_type = e1000_ms_hw_default; } - if (igb_check_reset_block(hw)) - dev_info(&pdev->dev, + if (e1000_check_reset_block(hw)) + dev_info(pci_dev_to_dev(pdev), "PHY reset is blocked due to SOL/IDER session.\n"); /* @@ -1946,71 +2134,106 @@ static int __devinit igb_probe(struct pci_dev *pdev, */ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | +#ifdef NETIF_F_IPV6_CSUM NETIF_F_IPV6_CSUM | 
+#endif +#ifdef NETIF_F_TSO NETIF_F_TSO | +#ifdef NETIF_F_TSO6 NETIF_F_TSO6 | +#endif +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_RXHASH NETIF_F_RXHASH | +#endif +#ifdef HAVE_NDO_SET_FEATURES NETIF_F_RXCSUM | +#endif NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; +#ifdef HAVE_NDO_SET_FEATURES /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; +#ifndef IGB_NO_LRO + + /* give us the option of enabling LRO later */ + netdev->hw_features |= NETIF_F_LRO; +#endif +#else +#ifdef NETIF_F_GRO + + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif +#endif /* set this bit last since it cannot be part of hw_features */ netdev->features |= NETIF_F_HW_VLAN_FILTER; +#ifdef HAVE_NETDEV_VLAN_FEATURES netdev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG; - if (pci_using_dac) { +#endif + if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CSUM; + if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CSUM; - } - - netdev->priv_flags |= IFF_UNICAST_FLT; - adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); /* before reading the NVM, reset the controller to put the device in a * known good starting state */ - hw->mac.ops.reset_hw(hw); + e1000_reset_hw(hw); /* make sure the NVM is good */ - if (hw->nvm.ops.validate(hw) < 0) { - dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + if (e1000_validate_nvm_checksum(hw) < 0) { + dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not" + " Valid\n"); err = -EIO; goto err_eeprom; } /* copy the MAC address out of the NVM */ - if (hw->mac.ops.read_mac_addr(hw)) - dev_err(&pdev->dev, "NVM Read Error\n"); - + if (e1000_read_mac_addr(hw)) + dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n"); memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); +#ifdef ETHTOOL_GPERMADDR memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { - dev_err(&pdev->dev, "Invalid MAC Address\n"); +#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +#endif + dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; } - setup_timer(&adapter->watchdog_timer, igb_watchdog, + memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len); + adapter->mac_table[0].queue = adapter->vfs_allocated_count; + adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE); + igb_rar_set(adapter, 0); + + /* get firmware version for ethtool -i */ + e1000_read_nvm(&adapter->hw, 5, 1, &adapter->fw_version); + setup_timer(&adapter->watchdog_timer, &igb_watchdog, (unsigned long) adapter); - setup_timer(&adapter->phy_info_timer, igb_update_phy_info, + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer, + (unsigned long) adapter); + setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, (unsigned long) adapter); INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + INIT_WORK(&adapter->dma_err_task, igb_dma_err_task); /* Initialize link properties that are user-changeable */ adapter->fc_autoneg = true; @@ -2020,22 +2243,22 @@ static int __devinit igb_probe(struct pci_dev *pdev, hw->fc.requested_mode = e1000_fc_default; 
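	/* note: e1000_fc_default leaves the actual flow-control mode to be
	 * resolved from the NVM/autoneg defaults at link setup; current_mode
	 * then tracks whatever gets negotiated */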
hw->fc.current_mode = e1000_fc_default; - igb_validate_mdi_setting(hw); + e1000_validate_mdi_setting(hw); /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, * enable the ACPI Magic Packet filter */ if (hw->bus.func == 0) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); else if (hw->mac.type >= e1000_82580) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &eeprom_data); else if (hw->bus.func == 1) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - if (eeprom_data & eeprom_apme_mask) + if (eeprom_data & IGB_EEPROM_APME) adapter->eeprom_wol |= E1000_WUFC_MAG; /* now that we have the eeprom settings, apply the special cases where @@ -2050,7 +2273,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, case E1000_DEV_ID_82576_SERDES: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ - if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: @@ -2068,7 +2291,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); /* reset the hardware with the new settings */ igb_reset(adapter); @@ -2077,54 +2300,103 @@ static int __devinit igb_probe(struct pci_dev *pdev, * driver. */ igb_get_hw_control(adapter); - strcpy(netdev->name, "eth%d"); + strncpy(netdev->name, "eth%d", IFNAMSIZ); err = register_netdev(netdev); if (err) goto err_register; +#ifdef CONFIG_IGB_VMDQ_NETDEV + err = igb_init_vmdq_netdevs(adapter); + if (err) + goto err_register; +#endif /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); -#ifdef CONFIG_IGB_DCA - if (dca_add_requester(&pdev->dev) == 0) { +#ifdef IGB_DCA + if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) { adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&pdev->dev, "DCA enabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); igb_setup_dca(adapter); } #endif +#ifdef HAVE_HW_TIME_STAMP /* do hw tstamp init after resetting */ igb_init_hw_timer(adapter); - dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); +#endif + dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ - dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", - netdev->name, - ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : - (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : - "unknown"), - ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : - (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : - "unknown"), - netdev->dev_addr); - - ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); + dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4\n" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2\n" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? 
"Width x1\n" : + "unknown")); + dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name); + for (i = 0; i < 6; i++) + printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':'); + + ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH); if (ret_val) - strcpy(part_str, "Unknown"); - dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); - dev_info(&pdev->dev, - "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", - adapter->msix_entries ? "MSI-X" : - (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", - adapter->num_rx_queues, adapter->num_tx_queues); + strcpy(pba_str, "Unknown"); + dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name, + pba_str); + + + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* + * Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; +#ifdef IGB_SYSFS + igb_sysfs_init(adapter); +#else +#ifdef IGB_PROCFS + igb_procfs_init(adapter); +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ + } else { + adapter->ets = false; + } + switch (hw->mac.type) { case e1000_i350: - igb_set_eee_i350(hw); + /* Enable EEE for internal copper PHY devices */ + if (hw->phy.media_type == e1000_media_type_copper) + e1000_set_eee_i350(hw); + + /* send driver version info to firmware */ + igb_init_fw(adapter); break; default: break; } +#ifndef IGB_NO_LRO + if (netdev->features & NETIF_F_LRO) + dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n"); + else + dev_info(pci_dev_to_dev(pdev), "LRO is disabled \n"); +#endif + dev_info(pci_dev_to_dev(pdev), + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + adapter->msix_entries ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + + cards_found++; pm_runtime_put_noidle(&pdev->dev); return 0; @@ -2132,13 +2404,14 @@ static int __devinit igb_probe(struct pci_dev *pdev, err_register: igb_release_hw_control(adapter); err_eeprom: - if (!igb_check_reset_block(hw)) - igb_reset_phy(hw); + if (!e1000_check_reset_block(hw)) + e1000_phy_hw_reset(hw); if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: igb_clear_interrupt_scheme(adapter); + igb_reset_sriov_capability(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -2168,23 +2441,22 @@ static void __devexit igb_remove(struct pci_dev *pdev) pm_runtime_get_noresume(&pdev->dev); - /* - * The watchdog timer may be rescheduled, so explicitly - * disable watchdog from being rescheduled. 
- */ + /* flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled */ set_bit(__IGB_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + del_timer_sync(&adapter->dma_err_timer); del_timer_sync(&adapter->phy_info_timer); - cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->watchdog_task); + flush_scheduled_work(); -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA if (adapter->flags & IGB_FLAG_DCA_ENABLED) { - dev_info(&pdev->dev, "DCA disabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); dca_remove_requester(&pdev->dev); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); } #endif @@ -2193,28 +2465,12 @@ static void __devexit igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); unregister_netdev(netdev); +#ifdef CONFIG_IGB_VMDQ_NETDEV + igb_remove_vmdq_netdevs(adapter); +#endif igb_clear_interrupt_scheme(adapter); - -#ifdef CONFIG_PCI_IOV - /* reclaim resources allocated to VFs */ - if (adapter->vf_data) { - /* disable iov and allow time for transactions to clear */ - if (!igb_check_vf_assignment(adapter)) { - pci_disable_sriov(pdev); - msleep(500); - } else { - dev_info(&pdev->dev, "VF(s) assigned to guests!\n"); - } - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - wrfl(); - msleep(100); - dev_info(&pdev->dev, "IOV Disabled\n"); - } -#endif + igb_reset_sriov_capability(adapter); iounmap(hw->hw_addr); if (hw->flash_address) @@ -2222,70 +2478,24 @@ static void __devexit igb_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(adapter->mac_table); kfree(adapter->shadow_vfta); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); -} - -/** - * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space - * @adapter: board private structure to initialize - * - * This function initializes the vf specific data storage and then attempts to - * allocate the VFs. The reason for ordering it this way is because it is much - * mor expensive time wise to disable SR-IOV than it is to allocate and free - * the memory for the VFs. - **/ -static void __devinit igb_probe_vfs(struct igb_adapter * adapter) -{ -#ifdef CONFIG_PCI_IOV - struct pci_dev *pdev = adapter->pdev; - int old_vfs = igb_find_enabled_vfs(adapter); - int i; - - if (old_vfs) { - dev_info(&pdev->dev, "%d pre-allocated VFs found - override " - "max_vfs setting of %d\n", old_vfs, max_vfs); - adapter->vfs_allocated_count = old_vfs; - } - - if (!adapter->vfs_allocated_count) - return; - - adapter->vf_data = kcalloc(adapter->vfs_allocated_count, - sizeof(struct vf_data_storage), GFP_KERNEL); - /* if allocation failed then we do not support SR-IOV */ - if (!adapter->vf_data) { - adapter->vfs_allocated_count = 0; - dev_err(&pdev->dev, "Unable to allocate memory for VF " - "Data Storage\n"); - goto out; - } - - if (!old_vfs) { - if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) - goto err_out; - } - dev_info(&pdev->dev, "%d VFs allocated\n", - adapter->vfs_allocated_count); - for (i = 0; i < adapter->vfs_allocated_count; i++) - igb_vf_configure(adapter, i); - /* DMA Coalescing is not supported in IOV mode. 
*/ - adapter->flags &= ~IGB_FLAG_DMAC; - goto out; -err_out: - kfree(adapter->vf_data); - adapter->vf_data = NULL; - adapter->vfs_allocated_count = 0; -out: - return; -#endif /* CONFIG_PCI_IOV */ +#ifdef IGB_SYSFS + igb_sysfs_exit(adapter); +#else +#ifdef IGB_PROCFS + igb_procfs_exit(adapter); +#endif /* IGB_PROCFS */ +#endif /* IGB_SYSFS */ } +#ifdef HAVE_HW_TIME_STAMP /** * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp * @adapter: board private structure to initialize @@ -2313,18 +2523,18 @@ static void igb_init_hw_timer(struct igb_adapter *adapter) */ adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; /* disable system timer temporarily by setting bit 31 */ - wr32(E1000_TSAUXC, 0x80000000); - wrfl(); + E1000_WRITE_REG(hw, E1000_TSAUXC, 0x80000000); + E1000_WRITE_FLUSH(hw); /* Set registers so that rollover occurs soon to test this. */ - wr32(E1000_SYSTIMR, 0x00000000); - wr32(E1000_SYSTIML, 0x80000000); - wr32(E1000_SYSTIMH, 0x000000FF); - wrfl(); + E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x00000000); + E1000_WRITE_REG(hw, E1000_SYSTIML, 0x80000000); + E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x000000FF); + E1000_WRITE_FLUSH(hw); /* enable system timer by clearing bit 31 */ - wr32(E1000_TSAUXC, 0x0); - wrfl(); + E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + E1000_WRITE_FLUSH(hw); timecounter_init(&adapter->clock, &adapter->cycles, @@ -2360,14 +2570,14 @@ static void igb_init_hw_timer(struct igb_adapter *adapter) * 19 so we can fit a value of 16 into the TIMINCA register. */ adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; - wr32(E1000_TIMINCA, + E1000_WRITE_REG(hw, E1000_TIMINCA, (1 << E1000_TIMINCA_16NS_SHIFT) | (16 << IGB_82576_TSYNC_SHIFT)); /* Set registers so that rollover occurs soon to test this. */ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0xFF800000); - wrfl(); + E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000); + E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000); + E1000_WRITE_FLUSH(hw); timecounter_init(&adapter->clock, &adapter->cycles, @@ -2389,9 +2599,9 @@ static void igb_init_hw_timer(struct igb_adapter *adapter) default: break; } - } +#endif /* HAVE_HW_TIME_STAMP */ /** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize @@ -2406,74 +2616,54 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); /* set default ring sizes */ adapter->tx_ring_count = IGB_DEFAULT_TXD; adapter->rx_ring_count = IGB_DEFAULT_RXD; - /* set default ITR values */ - adapter->rx_itr_setting = IGB_DEFAULT_ITR; - adapter->tx_itr_setting = IGB_DEFAULT_ITR; - /* set default work limits */ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + VLAN_HLEN; - adapter->node = -1; - - spin_lock_init(&adapter->stats64_lock); -#ifdef CONFIG_PCI_IOV - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - if (max_vfs > 7) { - dev_warn(&pdev->dev, - "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; - } else - adapter->vfs_allocated_count = 
max_vfs; - break; - default: - break; + /* Initialize the hardware-specific values */ + if (e1000_setup_init_funcs(hw, TRUE)) { + dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n"); + return -EIO; } -#endif /* CONFIG_PCI_IOV */ - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); - /* i350 cannot do RSS and SR-IOV at the same time */ - if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) - adapter->rss_queues = 1; - /* - * if rss_queues > 4 or vfs are going to be allocated with rss_queues - * then we should combine the queues into a queue pair in order to - * conserve interrupts due to limited supply - */ - if ((adapter->rss_queues > 4) || - ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + igb_check_options(adapter); + + adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * + hw->mac.rar_entry_count, + GFP_ATOMIC); /* Setup and initialize a copy of the hw vlan table array */ - adapter->shadow_vfta = kzalloc(sizeof(u32) * - E1000_VLAN_FILTER_TBL_SIZE, - GFP_ATOMIC); + adapter->shadow_vfta = (u32 *)kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES, + GFP_ATOMIC); + + /* These calls may decrease the number of queues */ + igb_set_sriov_capability(adapter); - /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); return -ENOMEM; } - igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); - if (hw->mac.type == e1000_i350) - adapter->flags &= ~IGB_FLAG_DMAC; - set_bit(__IGB_DOWN, &adapter->state); return 0; } @@ -2494,7 +2684,9 @@ static int __igb_open(struct net_device *netdev, bool resuming) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; +#ifdef CONFIG_PM_RUNTIME struct pci_dev *pdev = adapter->pdev; +#endif /* CONFIG_PM_RUNTIME */ int err; int i; @@ -2504,8 +2696,10 @@ static int __igb_open(struct net_device *netdev, bool resuming) return -EBUSY; } +#ifdef CONFIG_PM_RUNTIME if (!resuming) pm_runtime_get_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ netif_carrier_off(netdev); @@ -2536,29 +2730,30 @@ static int __igb_open(struct net_device *netdev, bool resuming) for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); + igb_configure_lli(adapter); /* Clear any pending interrupts. */ - rd32(E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); igb_irq_enable(adapter); /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { - u32 reg_data = rd32(E1000_CTRL_EXT); + u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); reg_data |= E1000_CTRL_EXT_PFRSTD; - wr32(E1000_CTRL_EXT, reg_data); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); } netif_tx_start_all_queues(netdev); - if (!resuming) - pm_runtime_put(&pdev->dev); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + schedule_work(&adapter->dma_err_task); /* start the watchdog. 
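Forcing get_link_status makes the first watchdog pass poll the PHY instead of assuming link, so carrier state is reported accurately right after open.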
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); - return 0; + return E1000_SUCCESS; err_req_irq: igb_release_hw_control(adapter); @@ -2568,8 +2763,11 @@ err_setup_rx: igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); + +#ifdef CONFIG_PM_RUNTIME if (!resuming) pm_runtime_put(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ return err; } @@ -2593,21 +2791,31 @@ static int igb_open(struct net_device *netdev) static int __igb_close(struct net_device *netdev, bool suspending) { struct igb_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM_RUNTIME struct pci_dev *pdev = adapter->pdev; +#endif /* CONFIG_PM_RUNTIME */ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); +#ifdef CONFIG_PM_RUNTIME if (!suspending) pm_runtime_get_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ igb_down(adapter); + + igb_release_hw_control(adapter); + igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); +#ifdef CONFIG_PM_RUNTIME if (!suspending) pm_runtime_put_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ + return 0; } @@ -2681,7 +2889,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { err = igb_setup_tx_resources(adapter->tx_ring[i]); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Allocation for Tx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_tx_resources(adapter->tx_ring[i]); @@ -2702,20 +2910,20 @@ void igb_setup_tctl(struct igb_adapter *adapter) u32 tctl; /* disable queue 0 which is enabled by default on 82575 and 82576 */ - wr32(E1000_TXDCTL(0), 0); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0); /* Program the Transmit Control Register */ - tctl = rd32(E1000_TCTL); + tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - igb_config_collision_dist(hw); + e1000_config_collision_dist(hw); /* Enable transmits */ tctl |= E1000_TCTL_EN; - wr32(E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_TCTL, tctl); } /** @@ -2734,18 +2942,18 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, int reg_idx = ring->reg_idx; /* disable the queue */ - wr32(E1000_TXDCTL(reg_idx), 0); - wrfl(); + E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0); + E1000_WRITE_FLUSH(hw); mdelay(10); - wr32(E1000_TDLEN(reg_idx), + E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); - wr32(E1000_TDBAL(reg_idx), + E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx), tdba & 0x00000000ffffffffULL); - wr32(E1000_TDBAH(reg_idx), tdba >> 32); + E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32); ring->tail = hw->hw_addr + E1000_TDT(reg_idx); - wr32(E1000_TDH(reg_idx), 0); + E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0); writel(0, ring->tail); txdctl |= IGB_TX_PTHRESH; @@ -2753,7 +2961,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= IGB_TX_WTHRESH << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; - wr32(E1000_TXDCTL(reg_idx), txdctl); + E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl); } /** @@ -2838,7 +3046,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { err = igb_setup_rx_resources(adapter->rx_ring[i]); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Allocation for Rx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_rx_resources(adapter->rx_ring[i]); @@ -2874,12 +3082,12 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) rsskey 
|= rsshash[(j * 4) + 1] << 8; rsskey |= rsshash[(j * 4) + 2] << 16; rsskey |= rsshash[(j * 4) + 3] << 24; - array_wr32(E1000_RSSRK(0), j, rsskey); + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey); } num_rx_queues = adapter->rss_queues; - if (adapter->vfs_allocated_count) { + if (adapter->vfs_allocated_count || adapter->vmdq_pools) { /* 82575 and 82576 supports 2 RSS queues for VMDq */ switch (hw->mac.type) { case e1000_i350: @@ -2907,7 +3115,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) if (shift2) reta.bytes[j & 3] |= num_rx_queues << shift2; if ((j & 3) == 3) - wr32(E1000_RETA(j >> 2), reta.dword); + E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword); } /* @@ -2915,7 +3123,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) * descriptor on writeback. No need to enable TCP/UDP/IP checksum * offloads as they are enabled by default */ - rxcsum = rd32(E1000_RXCSUM); + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); rxcsum |= E1000_RXCSUM_PCSD; if (adapter->hw.mac.type >= e1000_82576) @@ -2923,20 +3131,24 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) rxcsum |= E1000_RXCSUM_CRCOFL; /* Don't need to set TUOFL or IPOFL, they default to 1 */ - wr32(E1000_RXCSUM, rxcsum); + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* If VMDq is enabled then we set the appropriate mode for that, else * we default to RSS so that an RSS hash is calculated per packet even * if we are only using one queue */ - if (adapter->vfs_allocated_count) { + if (adapter->vfs_allocated_count || adapter->vmdq_pools) { if (hw->mac.type > e1000_82575) { /* Set the default pool for the PF's first queue */ - u32 vtctl = rd32(E1000_VT_CTL); + u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL); vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | E1000_VT_CTL_DISABLE_DEF_POOL); vtctl |= adapter->vfs_allocated_count << E1000_VT_CTL_DEFAULT_POOL_SHIFT; - wr32(E1000_VT_CTL, vtctl); + E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); + } else if (adapter->rss_queues > 1) { + /* set default queue for pool 1 to queue 2 */ + E1000_WRITE_REG(hw, E1000_VT_CTL, + adapter->rss_queues << 7); } if (adapter->rss_queues > 1) mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; @@ -2945,6 +3157,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) } else { mrqc = E1000_MRQC_ENABLE_RSS_4Q; } + igb_vmm_control(adapter); /* @@ -2958,7 +3171,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) E1000_MRQC_RSS_FIELD_IPV6_TCP | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; - wr32(E1000_MRQC, mrqc); + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); } /** @@ -2970,7 +3183,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rctl; - rctl = rd32(E1000_RCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); @@ -2992,7 +3205,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) rctl |= E1000_RCTL_LPE; /* disable queue 0 to prevent tail write w/o re-config */ - wr32(E1000_RXDCTL(0), 0); + E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0); /* Attention!!! 
For SR-IOV PF driver operations you must enable * queue drop for all VF and PF queues to prevent head of line blocking @@ -3000,10 +3213,10 @@ void igb_setup_rctl(struct igb_adapter *adapter) */ if (adapter->vfs_allocated_count) { /* set all queue drop enable bits */ - wr32(E1000_QDE, ALL_QUEUES); + E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES); } - wr32(E1000_RCTL, rctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); } static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, @@ -3016,12 +3229,22 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, * increase the size to support vlan tags */ if (vfn < adapter->vfs_allocated_count && adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; + size += VLAN_HLEN; + +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (vfn >= adapter->vfs_allocated_count) { + int queue = vfn - adapter->vfs_allocated_count; + struct igb_vmdq_adapter *vadapter; - vmolr = rd32(E1000_VMOLR(vfn)); + vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]); + if (vadapter->vlgrp) + size += VLAN_HLEN; + } +#endif + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); vmolr &= ~E1000_VMOLR_RLPML_MASK; vmolr |= size | E1000_VMOLR_LPE; - wr32(E1000_VMOLR(vfn), vmolr); + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); return 0; } @@ -3038,8 +3261,10 @@ static void igb_rlpml_set(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u16 pf_id = adapter->vfs_allocated_count; - if (pf_id) { - igb_set_vf_rlpml(adapter, max_frame_size, pf_id); + if (adapter->vmdq_pools && hw->mac.type != e1000_82575) { + int i; + for (i = 0; i < adapter->vmdq_pools; i++) + igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i); /* * If we're in VMDQ or SR-IOV mode, then set global RLPML * to our max jumbo frame size, in case we need to enable @@ -3049,10 +3274,35 @@ static void igb_rlpml_set(struct igb_adapter *adapter) */ max_frame_size = MAX_JUMBO_FRAME_SIZE; } + /* Set VF RLPML for the PF device. 
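The PF's pool follows the VF pools, so pf_id (== vfs_allocated_count) doubles as the PF pool index here.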
*/ + if (adapter->vfs_allocated_count) + igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - wr32(E1000_RLPML, max_frame_size); + E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size); } +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + void __iomem *reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = hw->hw_addr + E1000_DVMOLR(vfn); + else + reg = hw->hw_addr + E1000_VMOLR(vfn); + + val = readl(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + writel(val, reg); +} static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn, bool aupe) { @@ -3066,26 +3316,23 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, if (hw->mac.type < e1000_82576) return; - vmolr = rd32(E1000_VMOLR(vfn)); - vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); + if (aupe) vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ else vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ /* clear all bits that might not be set */ - vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + vmolr &= ~E1000_VMOLR_RSSE; if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ - /* - * for VMDq only allow the VFs and pool 0 to accept broadcast and - * multicast packets - */ - if (vfn <= adapter->vfs_allocated_count) - vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ - wr32(E1000_VMOLR(vfn), vmolr); + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + vmolr |= E1000_VMOLR_LPE; /* Accept long packets */ + + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); } /** @@ -3104,21 +3351,22 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, u32 srrctl = 0, rxdctl = 0; /* disable the queue */ - wr32(E1000_RXDCTL(reg_idx), 0); + E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0); /* Set DMA base address registers */ - wr32(E1000_RDBAL(reg_idx), - rdba & 0x00000000ffffffffULL); - wr32(E1000_RDBAH(reg_idx), rdba >> 32); - wr32(E1000_RDLEN(reg_idx), + E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32); + E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx), ring->count * sizeof(union e1000_adv_rx_desc)); /* initialize head and tail */ ring->tail = hw->hw_addr + E1000_RDT(reg_idx); - wr32(E1000_RDH(reg_idx), 0); + E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0); writel(0, ring->tail); /* set descriptor configuration */ +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT; @@ -3126,13 +3374,31 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; #endif srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; +#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + srrctl = ALIGN(ring->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ +#ifdef IGB_PER_PKT_TIMESTAMP if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; - /* Only set Drop Enable if we are supporting multiple queues */ - if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) +#endif + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Flow Control is disabled and number of RX queues > 1 
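+	 *     (with flow control active, PAUSE frames already provide the
+	 *      backpressure, so per-queue drop would work against it)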
+ * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->vfs_allocated_count || + (adapter->num_rx_queues > 1 && + (hw->fc.requested_mode == e1000_fc_none || + hw->fc.requested_mode == e1000_fc_rx_pause))) srrctl |= E1000_SRRCTL_DROP_EN; - wr32(E1000_SRRCTL(reg_idx), srrctl); + E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl); /* set filtering for VMDQ pools */ igb_set_vmolr(adapter, reg_idx & 0x7, true); @@ -3143,7 +3409,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, /* enable receive descriptor fetching */ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; - wr32(E1000_RXDCTL(reg_idx), rxdctl); + E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl); } /** @@ -3159,10 +3425,7 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* set UTA to appropriate mode */ igb_set_uta(adapter); - /* set the correct pool for the PF default MAC address in entry 0 */ - igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, - adapter->vfs_allocated_count); - + igb_full_sync_mac_table(adapter); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -3211,20 +3474,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring, { if (tx_buffer->skb) { dev_kfree_skb_any(tx_buffer->skb); - if (tx_buffer->dma) + if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); - } else if (tx_buffer->dma) { + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); } tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; - tx_buffer->dma = 0; + dma_unmap_len_set(tx_buffer, len, 0); /* buffer_info must be completely set up in the transmit path */ } @@ -3247,6 +3510,10 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } +#ifdef CONFIG_BQL + netdev_tx_reset_queue(txring_txq(tx_ring)); +#endif /* CONFIG_BQL */ + size = sizeof(struct igb_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -3310,9 +3577,14 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * igb_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +void igb_clean_rx_ring(struct igb_ring *rx_ring) { unsigned long size; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buffer_len; +#else + const int bufsz = IGB_RX_HDR_LEN; +#endif u16 i; if (!rx_ring->rx_buffer_info) @@ -3324,7 +3596,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) if (buffer_info->dma) { dma_unmap_single(rx_ring->dev, buffer_info->dma, - IGB_RX_HDR_LEN, + bufsz, DMA_FROM_DEVICE); buffer_info->dma = 0; } @@ -3333,6 +3605,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT if (buffer_info->page_dma) { dma_unmap_page(rx_ring->dev, buffer_info->page_dma, @@ -3345,6 +3618,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) buffer_info->page = NULL; buffer_info->page_offset = 0; } +#endif } size = sizeof(struct igb_rx_buffer) * rx_ring->count; @@ -3385,14 +3659,14 @@ static int igb_set_mac(struct 
net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + igb_del_mac_filter(adapter, hw->mac.addr, + adapter->vfs_allocated_count); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ - igb_rar_set_qsel(adapter, hw->mac.addr, 0, - adapter->vfs_allocated_count); - - return 0; + return igb_add_mac_filter(adapter, hw->mac.addr, + adapter->vfs_allocated_count); } /** @@ -3404,36 +3678,130 @@ static int igb_set_mac(struct net_device *netdev, void *p) * 0 on no addresses written * X on writing X addresses to MTA **/ -static int igb_write_mc_addr_list(struct net_device *netdev) +int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; +#ifdef NETDEV_HW_ADDR_T_MULTICAST struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif u8 *mta_list; - int i; + int i, count; +#ifdef CONFIG_IGB_VMDQ_NETDEV + int vm; +#endif + count = netdev_mc_count(netdev); +#ifdef CONFIG_IGB_VMDQ_NETDEV + for (vm = 1; vm < adapter->vmdq_pools; vm++) { + if (!adapter->vmdq_netdev[vm]) + break; + if (!netif_running(adapter->vmdq_netdev[vm])) + continue; + count += netdev_mc_count(adapter->vmdq_netdev[vm]); + } +#endif - if (netdev_mc_empty(netdev)) { - /* nothing to program, so clear mc list */ - igb_update_mc_addr_list(hw, NULL, 0); - igb_restore_vf_multicasts(adapter); + if (!count) { + e1000_update_mc_addr_list(hw, NULL, 0); return 0; } - - mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + mta_list = kzalloc(count * 6, GFP_ATOMIC); if (!mta_list) return -ENOMEM; /* The shared function expects a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) +#ifdef NETDEV_HW_ADDR_T_MULTICAST memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - igb_update_mc_addr_list(hw, mta_list, i); +#else + memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); +#endif +#ifdef CONFIG_IGB_VMDQ_NETDEV + for (vm = 1; vm < adapter->vmdq_pools; vm++) { + if (!adapter->vmdq_netdev[vm]) + break; + if (!netif_running(adapter->vmdq_netdev[vm]) || + !netdev_mc_count(adapter->vmdq_netdev[vm])) + continue; + netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm]) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + memcpy(mta_list + (i++ * ETH_ALEN), + ha->addr, ETH_ALEN); +#else + memcpy(mta_list + (i++ * ETH_ALEN), + ha->dmi_addr, ETH_ALEN); +#endif + } +#endif + e1000_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); - return netdev_mc_count(netdev); + return count; +} + +void igb_rar_set(struct igb_adapter *adapter, u32 index) +{ + u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; + u8 *addr = adapter->mac_table[index].addr; + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Indicate to hardware the Address is Valid. 
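RAH.AV (bit 31, E1000_RAH_AV) gates the entry; the RAL/RAH pair takes no part in receive address filtering until it is set.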
*/ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) + rar_high |= E1000_RAH_AV; + + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; + else + rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); +} + +void igb_full_sync_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + igb_rar_set(adapter, i); + } +} + +void igb_sync_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED) + igb_rar_set(adapter, i); + adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED); + } +} + +int igb_available_rars(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i, count = 0; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; } +#ifdef HAVE_SET_RX_MODE /** * igb_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure @@ -3446,37 +3814,33 @@ static int igb_write_mc_addr_list(struct net_device *netdev) static int igb_write_uc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; - unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); int count = 0; /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) + if (netdev_uc_count(netdev) > igb_available_rars(adapter)) return -ENOMEM; - - if (!netdev_uc_empty(netdev) && rar_entries) { + if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST struct netdev_hw_addr *ha; - +#else + struct dev_mc_list *ha; +#endif netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - igb_rar_set_qsel(adapter, ha->addr, - rar_entries--, - vfn); +#ifdef NETDEV_HW_ADDR_T_UNICAST + igb_del_mac_filter(adapter, ha->addr, vfn); + igb_add_mac_filter(adapter, ha->addr, vfn); +#else + igb_del_mac_filter(adapter, ha->da_addr, vfn); + igb_add_mac_filter(adapter, ha->da_addr, vfn); +#endif count++; } } - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) { - wr32(E1000_RAH(rar_entries), 0); - wr32(E1000_RAL(rar_entries), 0); - } - wrfl(); - return count; } +#endif /* HAVE_SET_RX_MODE */ /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3495,7 +3859,7 @@ static void igb_set_rx_mode(struct net_device *netdev) int count; /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); /* clear the effected bits */ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); @@ -3521,6 +3885,7 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= E1000_VMOLR_ROMPE; } } +#ifdef HAVE_SET_RX_MODE /* * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable @@ -3531,9 +3896,10 @@ static void igb_set_rx_mode(struct net_device *netdev) rctl |= E1000_RCTL_UPE; vmolr |= E1000_VMOLR_ROPE; } +#endif /* HAVE_SET_RX_MODE */ rctl |= E1000_RCTL_VFE; } - 
wr32(E1000_RCTL, rctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); /* * In order to support SR-IOV and eventually VMDq it is necessary to set @@ -3544,9 +3910,9 @@ static void igb_set_rx_mode(struct net_device *netdev) if (hw->mac.type < e1000_82576) return; - vmolr |= rd32(E1000_VMOLR(vfn)) & + vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); - wr32(E1000_VMOLR(vfn), vmolr); + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); igb_restore_vf_multicasts(adapter); } @@ -3558,7 +3924,7 @@ static void igb_check_wvbr(struct igb_adapter *adapter) switch (hw->mac.type) { case e1000_82576: case e1000_i350: - if (!(wvbr = rd32(E1000_WVBR))) + if (!(wvbr = E1000_READ_REG(hw, E1000_WVBR))) return; break; default: @@ -3580,7 +3946,7 @@ static void igb_spoof_check(struct igb_adapter *adapter) for(j = 0; j < adapter->vfs_allocated_count; j++) { if (adapter->wvbr & (1 << j) || adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { - dev_warn(&adapter->pdev->dev, + DPRINTK(DRV, WARNING, "Spoof event(s) detected on VF %d\n", j); adapter->wvbr &= ~((1 << j) | @@ -3594,7 +3960,7 @@ static void igb_spoof_check(struct igb_adapter *adapter) static void igb_update_phy_info(unsigned long data) { struct igb_adapter *adapter = (struct igb_adapter *) data; - igb_get_phy_info(&adapter->hw); + e1000_get_phy_info(&adapter->hw); } /** @@ -3604,8 +3970,7 @@ static void igb_update_phy_info(unsigned long data) bool igb_has_link(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - bool link_active = false; - s32 ret_val = 0; + bool link_active = FALSE; /* get_link_status is set on LSC (link status) interrupt or * rx sequence error interrupt. get_link_status will stay @@ -3614,49 +3979,25 @@ bool igb_has_link(struct igb_adapter *adapter) */ switch (hw->phy.media_type) { case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = true; - } - break; + if (!hw->mac.get_link_status) + return true; case e1000_media_type_internal_serdes: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = hw->mac.serdes_has_link; + e1000_check_for_link(hw); + link_active = !hw->mac.get_link_status; break; - default: case e1000_media_type_unknown: + default: break; } return link_active; } -static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) -{ - bool ret = false; - u32 ctrl_ext, thstat; - - /* check for thermal sensor event on i350, copper only */ - if (hw->mac.type == e1000_i350) { - thstat = rd32(E1000_THSTAT); - ctrl_ext = rd32(E1000_CTRL_EXT); - - if ((hw->phy.media_type == e1000_media_type_copper) && - !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { - ret = !!(thstat & event); - } - } - - return ret; -} - -/** - * igb_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void igb_watchdog(unsigned long data) +/** + * igb_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igb_watchdog(unsigned long data) { struct igb_adapter *adapter = (struct igb_adapter *)data; /* Do the rest outside of interrupt context */ @@ -3672,6 +4013,8 @@ static void igb_watchdog_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; u32 link; int i; + u32 thstat, ctrl_ext; + link = igb_has_link(adapter); if (link) { @@ -3680,31 +4023,22 @@ static void igb_watchdog_task(struct work_struct *work) if (!netif_carrier_ok(netdev)) { u32 ctrl; - 
hw->mac.ops.get_speed_and_duplex(hw, - &adapter->link_speed, - &adapter->link_duplex); + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " - "Duplex, Flow Control: %s\n", + printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? - "Full" : "Half", - (ctrl & E1000_CTRL_TFCE) && - (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : - (ctrl & E1000_CTRL_RFCE) ? "RX" : - (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); - - /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, - E1000_THSTAT_LINK_THROTTLE)) { - netdev_info(netdev, "The network adapter link " - "speed was downshifted because it " - "overheated\n"); - } - + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE)) ? "RX/TX": + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { @@ -3717,9 +4051,12 @@ static void igb_watchdog_task(struct work_struct *work) } netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); igb_ping_all_vfs(adapter); +#ifdef IFLA_VF_MAX igb_check_vf_rate_limit(adapter); +#endif /* IFLA_VF_MAX */ /* link state has changed, schedule phy info update */ if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -3730,18 +4067,38 @@ static void igb_watchdog_task(struct work_struct *work) if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; - - /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, - E1000_THSTAT_PWR_DOWN)) { - netdev_err(netdev, "The network adapter was " - "stopped because it overheated\n"); + /* check for thermal sensor event on i350 */ + if (hw->mac.type == e1000_i350) { + thstat = E1000_READ_REG(hw, E1000_THSTAT); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + if ((hw->phy.media_type == + e1000_media_type_copper) && + !(ctrl_ext & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + if (thstat & E1000_THSTAT_PWR_DOWN) { + printk(KERN_ERR "igb: %s The " + "network adapter was stopped " + "because it overheated.\n", + netdev->name); + } + if (thstat & E1000_THSTAT_LINK_THROTTLE) { + printk(KERN_INFO + "igb: %s The network " + "adapter supported " + "link speed " + "was downshifted " + "because it " + "overheated.\n", + netdev->name); + } + } } /* Links status message must follow this format */ printk(KERN_INFO "igb: %s NIC Link is Down\n", netdev->name); netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); igb_ping_all_vfs(adapter); @@ -3755,9 +4112,7 @@ static void igb_watchdog_task(struct work_struct *work) } } - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - spin_unlock(&adapter->stats64_lock); + igb_update_stats(adapter); for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = adapter->tx_ring[i]; @@ -3783,9 +4138,9 @@ static void igb_watchdog_task(struct work_struct *work) u32 eics = 0; for (i = 0; i < adapter->num_q_vectors; i++) eics |= adapter->q_vector[i]->eims_value; - wr32(E1000_EICS, eics); + E1000_WRITE_REG(hw, E1000_EICS, eics); } else { - wr32(E1000_ICS, E1000_ICS_RXDMT0); + E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0); } igb_spoof_check(adapter); @@ -3796,6 +4151,70 @@ static void igb_watchdog_task(struct 
work_struct *work) round_jiffies(jiffies + 2 * HZ)); } +static void igb_dma_err_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + dma_err_task); + int vf; + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 hgptc; + u32 ciaa, ciad; + + hgptc = E1000_READ_REG(hw, E1000_HGPTC); + if (hgptc) /* If incrementing then no need for the check below */ + goto dma_timer_reset; + /* + * Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. + */ + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + ciaa = (vf << 16) | 0x80000000; + /* 32 bit read so align, we really want status at offset 6 */ + ciaa |= PCI_COMMAND; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + ciad = E1000_READ_REG(hw, E1000_CIAD); + ciaa &= 0x7FFFFFFF; + /* disable debug mode asap after reading data */ + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + /* Get the upper 16 bits which will be the PCI status reg */ + ciad >>= 16; + if (ciad & (PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR)) { + netdev_err(netdev, "VF %d suffered error\n", vf); + /* Issue VFLR */ + ciaa = (vf << 16) | 0x80000000; + ciaa |= 0xA8; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + ciad = 0x00008000; /* VFLR */ + E1000_WRITE_REG(hw, E1000_CIAD, ciad); + ciaa &= 0x7FFFFFFF; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + } + } +dma_timer_reset: + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->dma_err_timer, + round_jiffies(jiffies + HZ / 10)); +} + +/** + * igb_dma_err_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igb_dma_err_timer(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->dma_err_task); +} + enum latency_range { lowest_latency = 0, low_latency = 1, @@ -3841,7 +4260,7 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) packets = q_vector->tx.total_packets; if (packets) avg_wire_size = max_t(u32, avg_wire_size, - q_vector->tx.total_bytes / packets); + q_vector->tx.total_bytes / packets); /* if avg_wire_size isn't set no work was done */ if (!avg_wire_size) @@ -4006,8 +4425,8 @@ set_itr_now: } } -static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -4034,12 +4453,15 @@ static int igb_tso(struct igb_ring *tx_ring, struct igb_tx_buffer *first, u8 *hdr_len) { +#ifdef NETIF_F_TSO struct sk_buff *skb = first->skb; u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; if (!skb_is_gso(skb)) +#endif /* NETIF_F_TSO */ return 0; +#ifdef NETIF_F_TSO if (skb_header_cloned(skb)) { int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); @@ -4062,6 +4484,7 @@ static int igb_tso(struct igb_ring *tx_ring, first->tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_IPV4; +#ifdef NETIF_F_TSO6 } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -4069,6 +4492,7 @@ static int igb_tso(struct igb_ring *tx_ring, 0, IPPROTO_TCP, 0); first->tx_flags 
|= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM; +#endif } /* compute header lengths */ @@ -4091,6 +4515,7 @@ static int igb_tso(struct igb_ring *tx_ring, igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; +#endif /* NETIF_F_TSO */ } static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) @@ -4111,10 +4536,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; +#ifdef NETIF_F_IPV6_CSUM case __constant_htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; +#endif default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, @@ -4130,11 +4557,13 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) mss_l4len_idx = tcp_hdrlen(skb) << E1000_ADVTXD_L4LEN_SHIFT; break; +#ifdef HAVE_SCTP case IPPROTO_SCTP: type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; mss_l4len_idx = sizeof(struct sctphdr) << E1000_ADVTXD_L4LEN_SHIFT; break; +#endif case IPPROTO_UDP: mss_l4len_idx = sizeof(struct udphdr) << E1000_ADVTXD_L4LEN_SHIFT; @@ -4215,11 +4644,13 @@ static void igb_tx_map(struct igb_ring *tx_ring, const u8 hdr_len) { struct sk_buff *skb = first->skb; - struct igb_tx_buffer *tx_buffer_info; + struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; dma_addr_t dma; +#ifdef MAX_SKB_FRAGS struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned int data_len = skb->data_len; +#endif unsigned int size = skb_headlen(skb); unsigned int paylen = skb->len - hdr_len; __le32 cmd_type; @@ -4236,11 +4667,13 @@ static void igb_tx_map(struct igb_ring *tx_ring, goto dma_error; /* record length, and DMA address */ - first->length = size; - first->dma = dma; + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma); tx_desc->read.buffer_addr = cpu_to_le64(dma); +#ifdef MAX_SKB_FRAGS for (;;) { +#endif while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD); @@ -4259,6 +4692,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_desc->read.buffer_addr = cpu_to_le64(dma); } +#ifdef MAX_SKB_FRAGS if (likely(!data_len)) break; @@ -4271,17 +4705,17 @@ static void igb_tx_map(struct igb_ring *tx_ring, i = 0; } - size = frag->size; + size = skb_frag_size(frag); data_len -= size; - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, - size, DMA_TO_DEVICE); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_buffer_info->length = size; - tx_buffer_info->dma = dma; + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); tx_desc->read.olinfo_status = 0; tx_desc->read.buffer_addr = cpu_to_le64(dma); @@ -4289,6 +4723,11 @@ static void igb_tx_map(struct igb_ring *tx_ring, frag++; } +#endif /* MAX_SKB_FRAGS */ +#ifdef CONFIG_BQL + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +#endif /* CONFIG_BQL */ + /* write last descriptor with RS and EOP bits */ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); tx_desc->read.cmd_type_len = cmd_type; @@ -4328,9 +4767,9 @@ dma_error: /* clear dma mappings for failed tx_buffer_info map */ for (;;) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - if (tx_buffer_info == first) + tx_buffer= 
&tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) break; if (i == 0) i = tx_ring->count; @@ -4342,9 +4781,12 @@ dma_error: static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) { - struct net_device *netdev = tx_ring->netdev; + struct net_device *netdev = netdev_ring(tx_ring); - netif_stop_subqueue(netdev, tx_ring->queue_index); + if (netif_is_multiqueue(netdev)) + netif_stop_subqueue(netdev, ring_queue_index(tx_ring)); + else + netif_stop_queue(netdev); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); @@ -4357,11 +4799,12 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) return -EBUSY; /* A reprieve! */ - netif_wake_subqueue(netdev, tx_ring->queue_index); + if (netif_is_multiqueue(netdev)) + netif_wake_subqueue(netdev, ring_queue_index(tx_ring)); + else + netif_wake_queue(netdev); - u64_stats_update_begin(&tx_ring->tx_syncp2); - tx_ring->tx_stats.restart_queue2++; - u64_stats_update_end(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue++; return 0; } @@ -4398,11 +4841,20 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; +#ifdef HAVE_HW_TIME_STAMP +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_shinfo(skb)->tx_flags.flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags.flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + } +#else if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; } +#endif +#endif if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -4420,6 +4872,10 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, igb_tx_map(tx_ring, first, hdr_len); +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = jiffies; + +#endif /* Make sure there is space in the ring for the next send. 
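(A worst-case reservation, as I read it: MAX_SKB_FRAGS + 4 leaves one descriptor per possible page fragment plus slack for the linear data and a context descriptor.)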
*/ igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); @@ -4431,8 +4887,9 @@ out_drop: return NETDEV_TX_OK; } +#ifdef HAVE_TX_MQ static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, - struct sk_buff *skb) + struct sk_buff *skb) { unsigned int r_idx = skb->queue_mapping; @@ -4441,9 +4898,12 @@ static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, return adapter->tx_ring[r_idx]; } +#else +#define igb_tx_queue_mapping(_adapter, _skb) (_adapter)->tx_ring[0] +#endif static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) + struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -4486,8 +4946,8 @@ static void igb_tx_timeout(struct net_device *netdev) hw->dev_spec._82575.global_device_reset = true; schedule_work(&adapter->reset_task); - wr32(E1000_EICS, - (adapter->eims_enable_mask & ~adapter->eims_other)); + E1000_WRITE_REG(hw, E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); } static void igb_reset_task(struct work_struct *work) @@ -4495,28 +4955,30 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); - igb_dump(adapter); - netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); } /** - * igb_get_stats64 - Get System Network Statistics + * igb_get_stats - Get System Network Statistics * @netdev: network interface device structure - * @stats: rtnl_link_stats64 pointer * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. **/ -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static struct net_device_stats *igb_get_stats(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - memcpy(stats, &adapter->stats64, sizeof(*stats)); - spin_unlock(&adapter->stats64_lock); + if (!test_bit(__IGB_RESETTING, &adapter->state)) + igb_update_stats(adapter); - return stats; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ } /** @@ -4531,31 +4993,56 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) struct igb_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + u32 rx_buffer_len, i; +#endif if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&pdev->dev, "Invalid MTU setting\n"); + dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n"); return -EINVAL; } #define MAX_STD_JUMBO_FRAME_SIZE 9238 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); + dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n"); return -EINVAL; } while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igb_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT +#ifdef IGB_PER_PKT_TIMESTAMP + if (adapter->hw.mac.type >= e1000_82580) + max_frame += IGB_TS_HDR_LEN; + +#endif + /* + * RLPML prevents us from receiving a frame larger than max_frame so + * it is safe to just set the 
rx_buffer_len to max_frame without the + * risk of an skb over panic. + */ + if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buffer_len = max_frame; + +#endif if (netif_running(netdev)) igb_down(adapter); - dev_info(&pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len; + +#endif if (netif_running(netdev)) igb_up(adapter); else @@ -4571,17 +5058,25 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) * @adapter: board private structure **/ -void igb_update_stats(struct igb_adapter *adapter, - struct rtnl_link_stats64 *net_stats) +void igb_update_stats(struct igb_adapter *adapter) { +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ struct e1000_hw *hw = &adapter->hw; +#ifdef HAVE_PCI_ERS struct pci_dev *pdev = adapter->pdev; +#endif u32 reg, mpc; u16 phy_tmp; int i; u64 bytes, packets; - unsigned int start; - u64 _bytes, _packets; +#ifndef IGB_NO_LRO + u32 flushed = 0, coal = 0, recycled = 0; + struct igb_q_vector *q_vector; +#endif #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF @@ -4591,25 +5086,41 @@ void igb_update_stats(struct igb_adapter *adapter, */ if (adapter->link_speed == 0) return; +#ifdef HAVE_PCI_ERS if (pci_channel_offline(pdev)) return; +#endif +#ifndef IGB_NO_LRO + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (!q_vector || !q_vector->lrolist) + continue; + flushed += q_vector->lrolist->stats.flushed; + coal += q_vector->lrolist->stats.coal; + recycled += q_vector->lrolist->stats.recycled; + } + adapter->lro_stats.flushed = flushed; + adapter->lro_stats.coal = coal; + adapter->lro_stats.recycled = recycled; + +#endif bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF; struct igb_ring *ring = adapter->rx_ring[i]; - ring->rx_stats.drops += rqdpc_tmp; net_stats->rx_fifo_errors += rqdpc_tmp; - - do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); - _bytes = ring->rx_stats.bytes; - _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); - bytes += _bytes; - packets += _packets; +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (!ring->vmdq_netdev) { + bytes += ring->rx_stats.bytes; + packets += ring->rx_stats.packets; + } +#else + bytes += ring->rx_stats.bytes; + packets += ring->rx_stats.packets; +#endif } net_stats->rx_bytes = bytes; @@ -4619,93 +5130,95 @@ void igb_update_stats(struct igb_adapter *adapter, packets = 0; for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); - _bytes = ring->tx_stats.bytes; - _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); - bytes += _bytes; - packets += _packets; +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (!ring->vmdq_netdev) { + bytes += ring->tx_stats.bytes; + packets += ring->tx_stats.packets; + } +#else + bytes += ring->tx_stats.bytes; + packets += ring->tx_stats.packets; +#endif } net_stats->tx_bytes = bytes; 
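/* rings claimed by a VMDq child netdev (ring->vmdq_netdev set) are skipped above so their traffic is not counted twice in the PF totals */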
net_stats->tx_packets = packets; /* read stats registers */ - adapter->stats.crcerrs += rd32(E1000_CRCERRS); - adapter->stats.gprc += rd32(E1000_GPRC); - adapter->stats.gorc += rd32(E1000_GORCL); - rd32(E1000_GORCH); /* clear GORCL */ - adapter->stats.bprc += rd32(E1000_BPRC); - adapter->stats.mprc += rd32(E1000_MPRC); - adapter->stats.roc += rd32(E1000_ROC); - - adapter->stats.prc64 += rd32(E1000_PRC64); - adapter->stats.prc127 += rd32(E1000_PRC127); - adapter->stats.prc255 += rd32(E1000_PRC255); - adapter->stats.prc511 += rd32(E1000_PRC511); - adapter->stats.prc1023 += rd32(E1000_PRC1023); - adapter->stats.prc1522 += rd32(E1000_PRC1522); - adapter->stats.symerrs += rd32(E1000_SYMERRS); - adapter->stats.sec += rd32(E1000_SEC); - - mpc = rd32(E1000_MPC); + adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC); + adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC); + adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC); + adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC); + + adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64); + adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127); + adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255); + adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511); + adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS); + adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC); + + mpc = E1000_READ_REG(hw, E1000_MPC); adapter->stats.mpc += mpc; net_stats->rx_fifo_errors += mpc; - adapter->stats.scc += rd32(E1000_SCC); - adapter->stats.ecol += rd32(E1000_ECOL); - adapter->stats.mcc += rd32(E1000_MCC); - adapter->stats.latecol += rd32(E1000_LATECOL); - adapter->stats.dc += rd32(E1000_DC); - adapter->stats.rlec += rd32(E1000_RLEC); - adapter->stats.xonrxc += rd32(E1000_XONRXC); - adapter->stats.xontxc += rd32(E1000_XONTXC); - adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); - adapter->stats.xofftxc += rd32(E1000_XOFFTXC); - adapter->stats.fcruc += rd32(E1000_FCRUC); - adapter->stats.gptc += rd32(E1000_GPTC); - adapter->stats.gotc += rd32(E1000_GOTCL); - rd32(E1000_GOTCH); /* clear GOTCL */ - adapter->stats.rnbc += rd32(E1000_RNBC); - adapter->stats.ruc += rd32(E1000_RUC); - adapter->stats.rfc += rd32(E1000_RFC); - adapter->stats.rjc += rd32(E1000_RJC); - adapter->stats.tor += rd32(E1000_TORH); - adapter->stats.tot += rd32(E1000_TOTH); - adapter->stats.tpr += rd32(E1000_TPR); - - adapter->stats.ptc64 += rd32(E1000_PTC64); - adapter->stats.ptc127 += rd32(E1000_PTC127); - adapter->stats.ptc255 += rd32(E1000_PTC255); - adapter->stats.ptc511 += rd32(E1000_PTC511); - adapter->stats.ptc1023 += rd32(E1000_PTC1023); - adapter->stats.ptc1522 += rd32(E1000_PTC1522); - - adapter->stats.mptc += rd32(E1000_MPTC); - adapter->stats.bptc += rd32(E1000_BPTC); - - adapter->stats.tpt += rd32(E1000_TPT); - adapter->stats.colc += rd32(E1000_COLC); - - adapter->stats.algnerrc += rd32(E1000_ALGNERRC); - /* read internal phy specific stats */ - reg = rd32(E1000_CTRL_EXT); + adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC); + adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL); + adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC); + adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL); + adapter->stats.dc += E1000_READ_REG(hw, E1000_DC); + adapter->stats.rlec 
+= E1000_READ_REG(hw, E1000_RLEC); + adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC); + adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC); + adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC); + adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC); + adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC); + adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC); + adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC); + adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC); + adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH); + adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH); + adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR); + + adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64); + adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127); + adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255); + adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511); + adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + + adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC); + adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC); + + adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT); + adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC); + + adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { - adapter->stats.rxerrc += rd32(E1000_RXERRC); - adapter->stats.tncrs += rd32(E1000_TNCRS); + adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); } - adapter->stats.tsctc += rd32(E1000_TSCTC); - adapter->stats.tsctfc += rd32(E1000_TSCTFC); + adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC); + adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); - adapter->stats.iac += rd32(E1000_IAC); - adapter->stats.icrxoc += rd32(E1000_ICRXOC); - adapter->stats.icrxptc += rd32(E1000_ICRXPTC); - adapter->stats.icrxatc += rd32(E1000_ICRXATC); - adapter->stats.ictxptc += rd32(E1000_ICTXPTC); - adapter->stats.ictxatc += rd32(E1000_ICTXATC); - adapter->stats.ictxqec += rd32(E1000_ICTXQEC); - adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); - adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC); + adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); /* Fill out the OS statistics structure */ net_stats->multicast = adapter->stats.mprc; @@ -4737,24 +5250,20 @@ void igb_update_stats(struct igb_adapter *adapter, /* Phy Stats */ if (hw->phy.media_type == e1000_media_type_copper) { if ((adapter->link_speed == SPEED_1000) && - (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 
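/* the low byte of PHY_1000T_STATUS holds the 1000BASE-T idle error count, hence the 0x00FF mask above */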
adapter->phy_stats.idle_errors += phy_tmp; } } /* Management Stats */ - adapter->stats.mgptc += rd32(E1000_MGTPTC); - adapter->stats.mgprc += rd32(E1000_MGTPRC); - adapter->stats.mgpdc += rd32(E1000_MGTPDC); - - /* OS2BMC Stats */ - reg = rd32(E1000_MANC); - if (reg & E1000_MANC_EN_BMC2OS) { - adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); - adapter->stats.o2bspc += rd32(E1000_O2BSPC); - adapter->stats.b2ospc += rd32(E1000_B2OSPC); - adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC); + adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC); + if (hw->mac.type > e1000_82580) { + adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC); + adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC); + adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC); + adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC); } } @@ -4762,7 +5271,7 @@ static irqreturn_t igb_msix_other(int irq, void *data) { struct igb_adapter *adapter = data; struct e1000_hw *hw = &adapter->hw; - u32 icr = rd32(E1000_ICR); + u32 icr = E1000_READ_REG(hw, E1000_ICR); /* reading ICR causes bit 31 of EICR to be cleared */ if (icr & E1000_ICR_DRSTA) @@ -4788,7 +5297,11 @@ static irqreturn_t igb_msix_other(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - wr32(E1000_EIMS, adapter->eims_other); + /* Check for MDD event */ + if (icr & E1000_ICR_MDDET) + igb_process_mdd_event(adapter); + + E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other); return IRQ_HANDLED; } @@ -4825,7 +5338,7 @@ static irqreturn_t igb_msix_ring(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA static void igb_update_dca(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; @@ -4837,33 +5350,33 @@ static void igb_update_dca(struct igb_q_vector *q_vector) if (q_vector->tx.ring) { int q = q_vector->tx.ring->reg_idx; - u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); + u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q)); if (hw->mac.type == e1000_82575) { dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); } else { dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_TXCTRL_CPUID_SHIFT; + E1000_DCA_TXCTRL_CPUID_SHIFT_82576; } dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; - wr32(E1000_DCA_TXCTRL(q), dca_txctrl); + E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl); } if (q_vector->rx.ring) { int q = q_vector->rx.ring->reg_idx; - u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); + u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q)); if (hw->mac.type == e1000_82575) { dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); } else { dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_RXCTRL_CPUID_SHIFT; + E1000_DCA_RXCTRL_CPUID_SHIFT_82576; } dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; - wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); + E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl); } q_vector->cpu = cpu; out_no_update: @@ -4879,7 +5392,7 @@ static void igb_setup_dca(struct igb_adapter *adapter) return; /* Always use CB2 mode, difference is masked in the CB driver. 
*/ - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); for (i = 0; i < adapter->num_q_vectors; i++) { adapter->q_vector[i]->cpu = -1; @@ -4900,9 +5413,9 @@ static int __igb_notify_dca(struct device *dev, void *data) /* if already enabled, don't do it again */ if (adapter->flags & IGB_FLAG_DCA_ENABLED) break; - if (dca_add_requester(dev) == 0) { + if (dca_add_requester(dev) == E1000_SUCCESS) { adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&pdev->dev, "DCA enabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); igb_setup_dca(adapter); break; } @@ -4912,14 +5425,14 @@ static int __igb_notify_dca(struct device *dev, void *data) /* without this a class_device is left * hanging around in the sysfs model */ dca_remove_requester(dev); - dev_info(&pdev->dev, "DCA disabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); } break; } - return 0; + return E1000_SUCCESS; } static int igb_notify_dca(struct notifier_block *nb, unsigned long event, @@ -4932,21 +5445,23 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event, return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } -#endif /* CONFIG_IGB_DCA */ +#endif /* IGB_DCA */ -#ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf) { unsigned char mac_addr[ETH_ALEN]; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; struct pci_dev *pvfdev; unsigned int device_id; u16 thisvf_devfn; +#endif random_ether_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr); +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED switch (adapter->hw.mac.type) { case e1000_82576: device_id = IGB_82576_VF_DEV_ID; @@ -4981,8 +5496,12 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf) "Couldn't find pci dev ptr for VF %4.4x\n", thisvf_devfn); return pvfdev != NULL; +#else + return true; +#endif } +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED static int igb_find_enabled_vfs(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -5013,8 +5532,7 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter) vf_devfn = pdev->devfn + 0x80; pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); while (pvfdev) { - if (pvfdev->devfn == vf_devfn && - (pvfdev->bus->number >= pdev->bus->number)) + if (pvfdev->devfn == vf_devfn) vfs_found++; vf_devfn += vf_stride; pvfdev = pci_get_device(hw->vendor_id, @@ -5023,9 +5541,11 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter) return vfs_found; } +#endif static int igb_check_vf_assignment(struct igb_adapter *adapter) { +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED int i; for (i = 0; i < adapter->vfs_allocated_count; i++) { if (adapter->vf_data[i].vfdev) { @@ -5034,10 +5554,10 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter) return true; } } +#endif return false; } -#endif static void igb_ping_all_vfs(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -5048,20 +5568,64 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter) ping = E1000_PF_CONTROL_MSG; if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) ping |= E1000_VT_MSGTYPE_CTS; - igb_write_mbx(hw, &ping, 1, i); + e1000_write_mbx(hw, &ping, 1, i); } } +/** + * igb_mta_set_ - Set multicast filter table address + * @adapter: pointer to the adapter structure + * @hash_value: determines the MTA 
register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct igb_adapter *adapter, u32 hash_value) +{ + struct e1000_hw *hw = &adapter->hw; + u32 hash_bit, hash_reg, mta; + + /* + * The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + E1000_WRITE_FLUSH(hw); +} + static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { + struct e1000_hw *hw = &adapter->hw; - u32 vmolr = rd32(E1000_VMOLR(vf)); + u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); struct vf_data_storage *vf_data = &adapter->vf_data[vf]; vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | IGB_VF_FLAG_MULTI_PROMISC); vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); +#ifdef IGB_ENABLE_VF_PROMISC + if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) { + vmolr |= E1000_VMOLR_ROPE; + vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST; + } +#endif if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { vmolr |= E1000_VMOLR_MPME; vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; @@ -5078,11 +5642,11 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) int j; vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); } } - wr32(E1000_VMOLR(vf), vmolr); + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); /* there are flags left unprocessed, likely not supported */ if (*msgbuf & E1000_VT_MSGINFO_MASK) @@ -5127,7 +5691,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) int i, j; for (i = 0; i < adapter->vfs_allocated_count; i++) { - u32 vmolr = rd32(E1000_VMOLR(i)); + u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); vf_data = &adapter->vf_data[i]; @@ -5138,9 +5702,9 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) } else if (vf_data->num_vf_mc_hashes) { vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); } - wr32(E1000_VMOLR(i), vmolr); + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); } } @@ -5148,13 +5712,14 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; u32 pool_mask, reg, vid; + u16 vlan_default; int i; pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); /* Find the vlan filter for this id */ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + reg = E1000_READ_REG(hw, E1000_VLVF(i)); /* remove the vf from the pool */ reg &= ~pool_mask; @@ -5164,16 
+5729,20 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) (reg & E1000_VLVF_VLANID_ENABLE)) { reg = 0; vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, false); + igb_vfta_set(adapter, vid, FALSE); } - wr32(E1000_VLVF(i), reg); + E1000_WRITE_REG(hw, E1000_VLVF(i), reg); } adapter->vf_data[vf].vlans_enabled = 0; + + vlan_default = adapter->vf_data[vf].default_vf_vlan_id; + if (vlan_default) + igb_vlvf_set(adapter, vlan_default, true, vf); } -static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) +s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) { struct e1000_hw *hw = &adapter->hw; u32 reg, i; @@ -5183,12 +5752,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) return -1; /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) + if (!adapter->vmdq_pools) return -1; /* Find the vlan filter for this id */ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + reg = E1000_READ_REG(hw, E1000_VLVF(i)); if ((reg & E1000_VLVF_VLANID_ENABLE) && vid == (reg & E1000_VLVF_VLANID_MASK)) break; @@ -5201,7 +5770,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) * one without the enable bit set */ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + reg = E1000_READ_REG(hw, E1000_VLVF(i)); if (!(reg & E1000_VLVF_VLANID_ENABLE)) break; } @@ -5213,25 +5782,25 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) /* if !enabled we need to set this up in vfta */ if (!(reg & E1000_VLVF_VLANID_ENABLE)) { /* add VID to filter table */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(adapter, vid, TRUE); reg |= E1000_VLVF_VLANID_ENABLE; } reg &= ~E1000_VLVF_VLANID_MASK; reg |= vid; - wr32(E1000_VLVF(i), reg); + E1000_WRITE_REG(hw, E1000_VLVF(i), reg); /* do not modify RLPML for PF devices */ if (vf >= adapter->vfs_allocated_count) - return 0; + return E1000_SUCCESS; if (!adapter->vf_data[vf].vlans_enabled) { u32 size; - reg = rd32(E1000_VMOLR(vf)); + reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); size = reg & E1000_VMOLR_RLPML_MASK; size += 4; reg &= ~E1000_VMOLR_RLPML_MASK; reg |= size; - wr32(E1000_VMOLR(vf), reg); + E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); } adapter->vf_data[vf].vlans_enabled++; @@ -5243,37 +5812,38 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) /* if pool is empty then remove entry from vfta */ if (!(reg & E1000_VLVF_POOLSEL_MASK)) { reg = 0; - igb_vfta_set(hw, vid, false); + igb_vfta_set(adapter, vid, FALSE); } - wr32(E1000_VLVF(i), reg); + E1000_WRITE_REG(hw, E1000_VLVF(i), reg); /* do not modify RLPML for PF devices */ if (vf >= adapter->vfs_allocated_count) - return 0; + return E1000_SUCCESS; adapter->vf_data[vf].vlans_enabled--; if (!adapter->vf_data[vf].vlans_enabled) { u32 size; - reg = rd32(E1000_VMOLR(vf)); + reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); size = reg & E1000_VMOLR_RLPML_MASK; size -= 4; reg &= ~E1000_VMOLR_RLPML_MASK; reg |= size; - wr32(E1000_VMOLR(vf), reg); + E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); } } } - return 0; + return E1000_SUCCESS; } +#ifdef IFLA_VF_MAX static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) { struct e1000_hw *hw = &adapter->hw; if (vid) - wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + E1000_WRITE_REG(hw, E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); else - wr32(E1000_VMVIR(vf), 0); + E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); 
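/* VMVIR with E1000_VMVIR_VLANA_DEFAULT has the hardware insert the given VLAN tag on the VF's untagged transmits (a port VLAN); writing 0 disables the insertion */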
} static int igb_ndo_set_vf_vlan(struct net_device *netdev, @@ -5282,7 +5852,8 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int err = 0; struct igb_adapter *adapter = netdev_priv(netdev); - if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) return -EINVAL; if (vlan || qos) { err = igb_vlvf_set(adapter, vlan, !!vlan, vf); @@ -5292,6 +5863,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, igb_set_vmolr(adapter, vf, !vlan); adapter->vf_data[vf].pf_vlan = vlan; adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); dev_info(&adapter->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__IGB_DOWN, &adapter->state)) { @@ -5303,28 +5875,40 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, " attempting to use the VF device.\n"); } } else { + if (adapter->vf_data[vf].pf_vlan) + dev_info(&adapter->pdev->dev, + "Clearing VLAN on VF %d\n", vf); igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, false, vf); igb_set_vmvir(adapter, vlan, vf); igb_set_vmolr(adapter, vf, true); + igb_set_vf_vlan_strip(adapter, vf, false); adapter->vf_data[vf].pf_vlan = 0; adapter->vf_data[vf].pf_qos = 0; } out: return err; } +#endif static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + if (vid) + igb_set_vf_vlan_strip(adapter, vf, true); + else + igb_set_vf_vlan_strip(adapter, vf, false); + return igb_vlvf_set(adapter, vid, add, vf); } static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) { - /* clear flags - except flag that indicates PF has set the MAC */ + struct e1000_hw *hw = &adapter->hw; + + /* clear flags except flag that the PF has set the MAC */ adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; adapter->vf_data[vf].last_nack = jiffies; @@ -5333,18 +5917,31 @@ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); +#ifdef IFLA_VF_MAX if (adapter->vf_data[vf].pf_vlan) igb_ndo_set_vf_vlan(adapter->netdev, vf, adapter->vf_data[vf].pf_vlan, adapter->vf_data[vf].pf_qos); else igb_clear_vf_vfta(adapter, vf); +#endif /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; /* Flush and reset the mta with the new values */ igb_set_rx_mode(adapter->netdev); + + /* + * Reset the VFs TDWBAL and TDWBAH registers which are not + * cleared by a VFLR + */ + E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0); + E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0); + if (hw->mac.type == e1000_82576) { + E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0); + E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0); + } } static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) @@ -5363,7 +5960,6 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - int rar_entry = hw->mac.rar_entry_count - (vf + 1); u32 reg, msgbuf[3]; u8 *addr = (u8 *)(&msgbuf[1]); @@ -5371,20 +5967,21 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) igb_vf_reset(adapter, vf); /* set vf mac address */ - igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + igb_del_mac_filter(adapter, vf_mac, vf); + igb_add_mac_filter(adapter, 
vf_mac, vf); /* enable transmit and receive for vf */ - reg = rd32(E1000_VFTE); - wr32(E1000_VFTE, reg | (1 << vf)); - reg = rd32(E1000_VFRE); - wr32(E1000_VFRE, reg | (1 << vf)); + reg = E1000_READ_REG(hw, E1000_VFTE); + E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf)); + reg = E1000_READ_REG(hw, E1000_VFRE); + E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf)); adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; /* reply to reset with ack and vf mac address */ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; memcpy(addr, vf_mac, 6); - igb_write_mbx(hw, msgbuf, 3, vf); + e1000_write_mbx(hw, msgbuf, 3, vf); } static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) @@ -5393,7 +5990,7 @@ static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) * The VF MAC Address is stored in a packed array of bytes * starting at the second 32 bit word of the msg array */ - unsigned char *addr = (char *)&msg[1]; + unsigned char *addr = (unsigned char *)&msg[1]; int err = -1; if (is_valid_ether_addr(addr)) @@ -5411,7 +6008,7 @@ static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) /* if device isn't clear to send it shouldn't be reading either */ if (!(vf_data->flags & IGB_VF_FLAG_CTS) && time_after(jiffies, vf_data->last_nack + (2 * HZ))) { - igb_write_mbx(hw, &msg, 1, vf); + e1000_write_mbx(hw, &msg, 1, vf); vf_data->last_nack = jiffies; } } @@ -5424,15 +6021,11 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) struct vf_data_storage *vf_data = &adapter->vf_data[vf]; s32 retval; - retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); if (retval) { - /* if receive failed revoke VF CTS stats and restart init */ - dev_err(&pdev->dev, "Error receiving message from VF\n"); - vf_data->flags &= ~IGB_VF_FLAG_CTS; - if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; - goto out; + dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n"); + return; } /* this is a message we already processed, do nothing */ @@ -5450,22 +6043,26 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) } if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { - if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; - retval = -1; - goto out; + msgbuf[0] = E1000_VT_MSGTYPE_NACK; + if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + e1000_write_mbx(hw, msgbuf, 1, vf); + vf_data->last_nack = jiffies; + } + return; } switch ((msgbuf[0] & 0xFFFF)) { case E1000_VF_SET_MAC_ADDR: retval = -EINVAL; +#ifndef IGB_DISABLE_VF_MAC_SET if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); else - dev_warn(&pdev->dev, - "VF %d attempted to override administratively " - "set MAC address\nReload the VF driver to " - "resume operations\n", vf); + DPRINTK(DRV, INFO, + "VF %d attempted to override administratively " + "set MAC address\nReload the VF driver to " + "resume operations\n", vf); +#endif break; case E1000_VF_SET_PROMISC: retval = igb_set_vf_promisc(adapter, msgbuf, vf); @@ -5478,29 +6075,31 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) break; case E1000_VF_SET_VLAN: retval = -1; +#ifdef IFLA_VF_MAX if (vf_data->pf_vlan) - dev_warn(&pdev->dev, - "VF %d attempted to override administratively " - "set VLAN tag\nReload the VF driver to " - "resume operations\n", vf); + DPRINTK(DRV, INFO, + "VF %d attempted to override administratively " + "set VLAN tag\nReload the VF driver to " + "resume 
operations\n", vf); else +#endif retval = igb_set_vf_vlan(adapter, msgbuf, vf); break; default: - dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); - retval = -1; + dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", msgbuf[0]); + retval = -E1000_ERR_MBX; break; } - msgbuf[0] |= E1000_VT_MSGTYPE_CTS; -out: /* notify the VF of the results of what it sent us */ if (retval) msgbuf[0] |= E1000_VT_MSGTYPE_NACK; else msgbuf[0] |= E1000_VT_MSGTYPE_ACK; - igb_write_mbx(hw, msgbuf, 1, vf); + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; + + e1000_write_mbx(hw, msgbuf, 1, vf); } static void igb_msg_task(struct igb_adapter *adapter) @@ -5510,15 +6109,15 @@ static void igb_msg_task(struct igb_adapter *adapter) for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { /* process any reset requests */ - if (!igb_check_for_rst(hw, vf)) + if (!e1000_check_for_rst(hw, vf)) igb_vf_reset_event(adapter, vf); /* process any messages pending */ - if (!igb_check_for_msg(hw, vf)) + if (!e1000_check_for_msg(hw, vf)) igb_rcv_msg_from_vf(adapter, vf); /* process any acks */ - if (!igb_check_for_ack(hw, vf)) + if (!e1000_check_for_ack(hw, vf)) igb_rcv_ack_from_vf(adapter, vf); } } @@ -5543,11 +6142,11 @@ static void igb_set_uta(struct igb_adapter *adapter) return; /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) + if (!adapter->vmdq_pools) return; for (i = 0; i < hw->mac.uta_reg_count; i++) - array_wr32(E1000_UTA, i, ~0); + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0); } /** @@ -5561,7 +6160,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) struct igb_q_vector *q_vector = adapter->q_vector[0]; struct e1000_hw *hw = &adapter->hw; /* read ICR disables interrupts using IAM */ - u32 icr = rd32(E1000_ICR); + u32 icr = E1000_READ_REG(hw, E1000_ICR); igb_write_itr(q_vector); @@ -5596,7 +6195,7 @@ static irqreturn_t igb_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No * need for the IMC write */ - u32 icr = rd32(E1000_ICR); + u32 icr = E1000_READ_REG(hw, E1000_ICR); /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ @@ -5625,7 +6224,7 @@ static irqreturn_t igb_intr(int irq, void *data) return IRQ_HANDLED; } -static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +void igb_ring_irq_enable(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; @@ -5640,7 +6239,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector) if (!test_bit(__IGB_DOWN, &adapter->state)) { if (adapter->msix_entries) - wr32(E1000_EIMS, q_vector->eims_value); + E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value); else igb_irq_enable(adapter); } @@ -5653,12 +6252,10 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector) **/ static int igb_poll(struct napi_struct *napi, int budget) { - struct igb_q_vector *q_vector = container_of(napi, - struct igb_q_vector, - napi); + struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi); bool clean_complete = true; -#ifdef CONFIG_IGB_DCA +#ifdef IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); #endif @@ -5668,6 +6265,12 @@ static int igb_poll(struct napi_struct *napi, int budget) if (q_vector->rx.ring) clean_complete &= igb_clean_rx_irq(q_vector, budget); +#ifndef HAVE_NETDEV_NAPI_LIST + /* if netdev is disabled we need to stop polling */ + if (!netif_running(q_vector->adapter->netdev)) + clean_complete = true; + +#endif /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -5679,6 +6282,7 @@ static int igb_poll(struct napi_struct *napi, int budget) return 0; } +#ifdef HAVE_HW_TIME_STAMP /** * igb_systim_to_hwtstamp - convert system time value to hw timestamp * @adapter: board private structure @@ -5702,7 +6306,15 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, regval <<= IGB_82580_TSYNC_SHIFT; ns = timecounter_cyc2time(&adapter->clock, regval); - timecompare_update(&adapter->compare, ns); + + /* + * force a timecompare_update here (even if less than a second + * has passed) in order to prevent the case when ptpd or other + * software jumps the clock offset. otherwise there is a small + * window when the timestamp would be based on previous skew + * and invalid results would be pushed to the network stack. 
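+ * for example, if ptpd steps the system clock between two timestamped packets, an offset derived from the stale skew would produce a bogus system timestamp for the new packet.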
+ */ + timecompare_update(&adapter->compare, 0); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(ns); shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); @@ -5727,20 +6339,21 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, /* if skb does not support hw timestamp or TX stamp not valid exit */ if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) || - !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) + !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) return; - regval = rd32(E1000_TXSTMPL); - regval |= (u64)rd32(E1000_TXSTMPH) << 32; + regval = E1000_READ_REG(hw, E1000_TXSTMPL); + regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); skb_tstamp_tx(buffer_info->skb, &shhwtstamps); } +#endif /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info - * returns true if ring is completely cleaned + * returns TRUE if ring is completely cleaned **/ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) { @@ -5780,23 +6393,26 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; +#ifdef HAVE_HW_TIME_STAMP /* retrieve hardware timestamp */ igb_tx_hwtstamp(q_vector, tx_buffer); +#endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); - tx_buffer->skb = NULL; /* unmap skb header data */ dma_unmap_single(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); /* clear last DMA location and unmap remaining buffers */ while (tx_desc != eop_desc) { - tx_buffer->dma = 0; - tx_buffer++; tx_desc++; i++; @@ -5807,17 +6423,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } /* unmap any remaining paged data */ - if (tx_buffer->dma) { + if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); } } - /* clear last DMA location */ - tx_buffer->dma = 0; - /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -5829,12 +6443,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } } +#ifdef CONFIG_BQL + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); +#endif /* CONFIG_BQL */ + i += tx_ring->count; tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->tx_syncp); tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.packets += total_packets; - u64_stats_update_end(&tx_ring->tx_syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; @@ -5848,8 +6465,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); if (eop_desc && time_after(jiffies, tx_buffer->time_stamp + - (adapter->tx_timeout_factor * HZ)) && - !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + (adapter->tx_timeout_factor * HZ)) + && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ dev_err(tx_ring->dev, @@ -5865,7 +6483,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) " jiffies <%lx>\n" " desc.status 
<%x>\n", tx_ring->queue_index, - rd32(E1000_TDH(tx_ring->reg_idx)), + E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)), readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, @@ -5873,8 +6491,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) eop_desc, jiffies, eop_desc->wb.status); - netif_stop_subqueue(tx_ring->netdev, - tx_ring->queue_index); + if (netif_is_multiqueue(netdev_ring(tx_ring))) + netif_stop_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + else + netif_stop_queue(netdev_ring(tx_ring)); /* we are about to reset, no point in enabling stuff */ return true; @@ -5882,27 +6503,56 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } if (unlikely(total_packets && - netif_carrier_ok(tx_ring->netdev) && + netif_carrier_ok(netdev_ring(tx_ring)) && igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->queue_index) && - !(test_bit(__IGB_DOWN, &adapter->state))) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp); - tx_ring->tx_stats.restart_queue++; - u64_stats_update_end(&tx_ring->tx_syncp); + if (netif_is_multiqueue(netdev_ring(tx_ring))) { + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + tx_ring->tx_stats.restart_queue++; + } + } else { + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_queue(netdev_ring(tx_ring)); + tx_ring->tx_stats.restart_queue++; + } } } return !!budget; } +#ifdef HAVE_VLAN_RX_REGISTER +/** + * igb_receive_skb - helper function to handle rx indications + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void igb_receive_skb(struct igb_q_vector *q_vector, + struct sk_buff *skb) +{ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + + if (IGB_CB(skb)->vid) { + if (*vlgrp) { + vlan_gro_receive(&q_vector->napi, *vlgrp, + IGB_CB(skb)->vid, skb); + } else { + dev_kfree_skb_any(skb); + } + } else { + napi_gro_receive(&q_vector->napi, skb); + } +} + +#endif /* HAVE_VLAN_RX_REGISTER */ static inline void igb_rx_checksum(struct igb_ring *ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -5914,12 +6564,16 @@ static inline void igb_rx_checksum(struct igb_ring *ring, return; /* Rx checksum disabled via ethtool */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) +#ifdef HAVE_NDO_SET_FEATURES + if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) +#else + if (!test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags)) +#endif return; /* TCP/UDP checksum error bit is set */ if (igb_test_staterr(rx_desc, - E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { /* * work around errata with sctp packets where the TCPE aka @@ -5927,104 +6581,559 @@ static inline void igb_rx_checksum(struct igb_ring *ring, * packets, (aka let the stack check the crc32c) */ if (!((skb->len == 60) && - test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { - u64_stats_update_begin(&ring->rx_syncp); + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) ring->rx_stats.csum_err++; - u64_stats_update_end(&ring->rx_syncp); + + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid 
checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +#ifdef NETIF_F_RXHASH +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (netdev_ring(ring)->features & NETIF_F_RXHASH) + skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +} + +#endif +#ifdef HAVE_HW_TIME_STAMP +static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP | + E1000_RXDADV_STAT_TS)) + return; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a skb_shared_tx that we + * can turn into a skb_shared_hwtstamps. + */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + u32 *stamp = (u32 *)skb->data; + regval = le32_to_cpu(*(stamp + 2)); + regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; + skb_pull(skb, IGB_TS_HDR_LEN); + } else { + if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = E1000_READ_REG(hw, E1000_RXSTMPL); + regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; + } + + igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} +#endif +static void igb_rx_vlan(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid = 0; + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags)) + vid = be16_to_cpu(rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); +#ifdef HAVE_VLAN_RX_REGISTER + IGB_CB(skb)->vid = vid; + } else { + IGB_CB(skb)->vid = 0; +#else + __vlan_hwaccel_put_tag(skb, vid); +#endif + } +} + +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) +{ + /* HW will not DMA in data larger than the given buffer, even if it + * parses the (NFS, of course) header to be larger. In that case, it + * fills the header buffer and spills the rest into the page. + */ + u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IGB_RX_HDR_LEN) + hlen = IGB_RX_HDR_LEN; + return hlen; +} + +#endif +#ifndef IGB_NO_LRO +/** + * igb_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the head's pointer to tail intact. 
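+ * (The head's pointer to the tail is cleared later by igb_close_active_frag_list() once the flow stops receiving.)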
+ **/ +static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = IGB_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->len; + + IGB_CB(tail)->head = NULL; + + return head; +} + +/** + * igb_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet add its stats to the main + * skb. That is done via igb_merge_active_tail. + **/ +static inline void igb_add_active_tail(struct sk_buff *head, struct sk_buff *tail) +{ + struct sk_buff *old_tail = IGB_CB(head)->tail; + + if (old_tail) { + igb_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + IGB_CB(tail)->head = head; + IGB_CB(head)->tail = tail; + + IGB_CB(head)->append_cnt++; +} + +/** + * igb_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function clears the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool igb_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = IGB_CB(head)->tail; + + if (!tail) + return false; + + igb_merge_active_tail(tail); + + IGB_CB(head)->tail = NULL; + + return true; +} + +/** + * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled + * @rx_ring: receive ring the packet arrived on + * @rx_desc: pointer to the rx descriptor + * @skb: pointer to the skb to be merged + * + **/ +static inline bool igb_can_lro(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* verify LRO is enabled */ + if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO)) + return false; + + /* verify hardware indicates this is IPv4/TCP */ + if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) || + !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4)))) + return false; + + /* verify the header is large enough for us to read IP/TCP fields */ + if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr))) + return false; + + /* verify there are no VLANs on packet */ + if (skb->protocol != __constant_htons(ETH_P_IP)) + return false; + + /* ensure we are version 4 with no options */ + if (*(u8 *)iph != 0x45) + return false; + + /* .. and the packet is not fragmented */ + if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + return false; + + /* .. and that next header is TCP */ + if (iph->protocol != IPPROTO_TCP) + return false; + + return true; +} + +static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb) +{ + return (struct igb_lrohdr *)skb->data; +} + +/** + * igb_lro_flush - Indicate packets to upper layer. + * + * Update IP and TCP header part of head skb if more than one + * skb is chained and indicate packets to upper layer. 
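+ * On flush, the IP total length and header checksum are recomputed to cover the merged data, and the TCP checksum is cleared because merged frames were already verified (CHECKSUM_UNNECESSARY) at receive time.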
+ **/ +static void igb_lro_flush(struct igb_q_vector *q_vector, + struct sk_buff *skb) +{ + struct igb_lro_list *lrolist = q_vector->lrolist; + + __skb_unlink(skb, &lrolist->active); + + if (IGB_CB(skb)->append_cnt) { + struct igb_lrohdr *lroh = igb_lro_hdr(skb); + + /* close any active lro contexts */ + igb_close_active_frag_list(skb); + + /* incorporate ip header and re-calculate checksum */ + lroh->iph.tot_len = ntohs(skb->len); + lroh->iph.check = 0; + + /* header length is 5 since we know no options exist */ + lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); + + /* clear TCP checksum to indicate we are an LRO frame */ + lroh->th.check = 0; + + /* incorporate latest timestamp into the tcp header */ + if (IGB_CB(skb)->tsecr) { + lroh->ts[2] = IGB_CB(skb)->tsecr; + lroh->ts[1] = htonl(IGB_CB(skb)->tsval); + } +#ifdef NETIF_F_TSO + + skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss; +#endif + } + +#ifdef HAVE_VLAN_RX_REGISTER + igb_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + lrolist->stats.flushed++; +} + +static void igb_lro_flush_all(struct igb_q_vector *q_vector) +{ + struct igb_lro_list *lrolist = q_vector->lrolist; + struct sk_buff *skb, *tmp; + + skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) + igb_lro_flush(q_vector, skb); +} + +/* + * igb_lro_header_ok - Main LRO function. + **/ +static void igb_lro_header_ok(struct sk_buff *skb) +{ + struct igb_lrohdr *lroh = igb_lro_hdr(skb); + u16 opt_bytes, data_len; + + IGB_CB(skb)->tail = NULL; + IGB_CB(skb)->tsecr = 0; + IGB_CB(skb)->append_cnt = 0; + IGB_CB(skb)->mss = 0; + + /* ensure that the checksum is valid */ + if (skb->ip_summed != CHECKSUM_UNNECESSARY) + return; + + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) + return; + + /* ensure no bits set besides ack or psh */ + if (lroh->th.fin || lroh->th.syn || lroh->th.rst || + lroh->th.urg || lroh->th.ece || lroh->th.cwr || + !lroh->th.ack) + return; + + /* store the total packet length */ + data_len = ntohs(lroh->iph.tot_len); + + /* remove any padding from the end of the skb */ + __pskb_trim(skb, data_len); + + /* remove header length from data length */ + data_len -= sizeof(struct igb_lrohdr); + + /* + * check for timestamps. 
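[Editor's note] The flush path above rebuilds the coalesced IP header and recomputes its checksum with ip_fast_csum((u8 *)lroh, 5), where 5 is the header length in 32-bit words. A portable sketch of the same RFC 1071 ones'-complement sum, assuming the check field has already been zeroed as the driver does:

#include <stdint.h>
#include <stddef.h>

/* Checksum over the 20-byte option-less IPv4 header; equivalent in
 * result to ip_fast_csum(hdr, 5) when hdr->check is pre-zeroed. */
static uint16_t ipv4_header_checksum(const uint8_t *hdr)
{
	uint32_t sum = 0;

	for (size_t i = 0; i < 20; i += 2)	/* 16-bit big-endian words */
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}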
Since the only option we handle are timestamps, + * we only have to handle the simple case of aligned timestamps + */ + opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); + if (opt_bytes != 0) { + if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || + !pskb_may_pull(skb, sizeof(struct igb_lrohdr) + + TCPOLEN_TSTAMP_ALIGNED) || + (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP)) || + (lroh->ts[2] == 0)) { + return; + } + + IGB_CB(skb)->tsval = ntohl(lroh->ts[1]); + IGB_CB(skb)->tsecr = lroh->ts[2]; + + data_len -= TCPOLEN_TSTAMP_ALIGNED; + } + + /* record data_len as mss for the packet */ + IGB_CB(skb)->mss = data_len; + IGB_CB(skb)->next_seq = ntohl(lroh->th.seq); +} + +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +static bool igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) +{ + struct sk_buff *tail; + struct skb_shared_info *tail_info; + struct skb_shared_info *new_skb_info; + u16 data_len; + + /* header must be empty to pull frags into current skb */ + if (skb_headlen(new_skb)) + return false; + + if (IGB_CB(lro_skb)->tail) + tail = IGB_CB(lro_skb)->tail; + else + tail = lro_skb; + + tail_info = skb_shinfo(tail); + new_skb_info = skb_shinfo(new_skb); + + /* make sure we have room in frags list */ + if (new_skb_info->nr_frags >= (MAX_SKB_FRAGS - tail_info->nr_frags)) + return false; + + /* bump append count */ + IGB_CB(lro_skb)->append_cnt++; + + /* copy frags into the last skb */ + memcpy(tail_info->frags + tail_info->nr_frags, + new_skb_info->frags, + new_skb_info->nr_frags * sizeof(skb_frag_t)); + + /* copy size data over */ + tail_info->nr_frags += new_skb_info->nr_frags; + data_len = IGB_CB(new_skb)->mss; + tail->len += data_len; + tail->data_len += data_len; + tail->truesize += data_len; + + /* wipe record of data from new_skb */ + new_skb_info->nr_frags = 0; + new_skb->len = new_skb->data_len = 0; + new_skb->truesize -= data_len; + new_skb->data = new_skb->head + NET_SKB_PAD + NET_IP_ALIGN; + skb_reset_tail_pointer(new_skb); + new_skb->protocol = 0; + new_skb->ip_summed = CHECKSUM_NONE; +#ifdef HAVE_VLAN_RX_REGISTER + IGB_CB(new_skb)->vid = 0; +#else + new_skb->vlan_tci = 0; +#endif + + return true; +} + +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ +/** + * igb_lro_queue - if able, queue skb into lro chain + * @q_vector: structure containing interrupt and ring information + * @new_skb: pointer to current skb being checked + * + * Checks whether the skb given is eligible for LRO and if that's + * fine chains it to the existing lro_skb based on flowid. If an LRO for + * the flow doesn't exist create one. 
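[Editor's note] The single ts[0] comparison above encodes the whole "aligned timestamp" requirement: the option area must be exactly NOP, NOP, TIMESTAMP, length 10. A self-contained reconstruction of the constant being matched, using the standard TCP option codes:

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_NOP        1
#define TCPOPT_TIMESTAMP  8
#define TCPOLEN_TIMESTAMP 10

int main(void)
{
	/* the one 32-bit option word the LRO parser accepts */
	uint32_t w = (uint32_t)TCPOPT_NOP << 24 | (uint32_t)TCPOPT_NOP << 16 |
		     (uint32_t)TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP;

	printf("expected option word: 0x%08x\n", w); /* 0x0101080a */
	return 0;
}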
+ **/ +static struct sk_buff *igb_lro_queue(struct igb_q_vector *q_vector, + struct sk_buff *new_skb) +{ + struct sk_buff *lro_skb; + struct igb_lro_list *lrolist = q_vector->lrolist; + struct igb_lrohdr *lroh = igb_lro_hdr(new_skb); + __be32 saddr = lroh->iph.saddr; + __be32 daddr = lroh->iph.daddr; + __be32 tcp_ports = *(__be32 *)&lroh->th; + u16 data_len; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid = IGB_CB(new_skb)->vid; +#else + u16 vid = new_skb->vlan_tci; +#endif + + igb_lro_header_ok(new_skb); + + /* + * we have a packet that might be eligible for LRO, + * so see if it matches anything we might expect + */ + skb_queue_walk(&lrolist->active, lro_skb) { + if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports || + igb_lro_hdr(lro_skb)->iph.saddr != saddr || + igb_lro_hdr(lro_skb)->iph.daddr != daddr) + continue; + +#ifdef HAVE_VLAN_RX_REGISTER + if (IGB_CB(lro_skb)->vid != vid) +#else + if (lro_skb->vlan_tci != vid) +#endif + continue; + + /* out of order packet */ + if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) { + igb_lro_flush(q_vector, lro_skb); + IGB_CB(new_skb)->mss = 0; + break; + } + + /* TCP timestamp options have changed */ + if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) { + igb_lro_flush(q_vector, lro_skb); + break; + } + + /* make sure timestamp values are increasing */ + if (IGB_CB(lro_skb)->tsecr && + IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) { + igb_lro_flush(q_vector, lro_skb); + IGB_CB(new_skb)->mss = 0; + break; + } + + data_len = IGB_CB(new_skb)->mss; + + /* + * malformed header, no tcp data, resultant packet would + * be too large, or new skb is larger than our current mss. + */ + if (data_len == 0 || + data_len > IGB_CB(lro_skb)->mss || + data_len > IGB_CB(lro_skb)->free) { + igb_lro_flush(q_vector, lro_skb); + break; + } + + /* ack sequence numbers or window size has changed */ + if (igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || + igb_lro_hdr(lro_skb)->th.window != lroh->th.window) { + igb_lro_flush(q_vector, lro_skb); + break; } - /* let the stack verify checksum errors */ - return; - } - /* It must be a TCP or UDP packet with a valid checksum */ - if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | - E1000_RXD_STAT_UDPCS)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - dev_dbg(ring->dev, "cksum success: bits %08X\n", - le32_to_cpu(rx_desc->wb.upper.status_error)); -} + /* Remove IP and TCP header*/ + skb_pull(new_skb, new_skb->len - data_len); -static inline void igb_rx_hash(struct igb_ring *ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - if (ring->netdev->features & NETIF_F_RXHASH) - skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); -} + /* update timestamp and timestamp echo response */ + IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval; + IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr; -static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - u64 regval; + /* update sequence and free space */ + IGB_CB(lro_skb)->next_seq += data_len; + IGB_CB(lro_skb)->free -= data_len; - if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP | - E1000_RXDADV_STAT_TS)) - return; +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /* if header is empty pull pages into current skb */ + if (igb_merge_frags(lro_skb, new_skb)) { + lrolist->stats.recycled++; + } else { +#endif + /* chain this new skb in frag_list */ + igb_add_active_tail(lro_skb, new_skb); + new_skb = NULL; 
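[Editor's note] At this point the new skb has just been chained with igb_add_active_tail(). A reduced model of that bookkeeping, with stand-in types (pkt, pkt_cb) in place of sk_buff and IGB_CB(); only the head/tail pointer discipline is preserved:

struct pkt;
struct pkt_cb { struct pkt *head, *tail; unsigned append_cnt; };
struct pkt { struct pkt_cb cb; unsigned len; struct pkt *frag_list, *next; };

/* fold the finished tail's byte count into the head, drop back-pointer */
static void merge_active_tail(struct pkt *tail)
{
	struct pkt *head = tail->cb.head;

	if (!head)
		return;
	head->len += tail->len;
	tail->cb.head = NULL;
}

/* chain a still-growing tail under head and remember it as active */
static void add_active_tail(struct pkt *head, struct pkt *tail)
{
	struct pkt *old = head->cb.tail;

	if (old) {			/* previous tail is final now */
		merge_active_tail(old);
		old->next = tail;
	} else {
		head->frag_list = tail;
	}
	tail->cb.head = head;
	head->cb.tail = tail;
	head->cb.append_cnt++;
}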
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + } +#endif - /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a shared tx_flags that we - * can turn into a skb_shared_hwtstamps. - */ - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - u32 *stamp = (u32 *)skb->data; - regval = le32_to_cpu(*(stamp + 2)); - regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; - skb_pull(skb, IGB_TS_HDR_LEN); - } else { - if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; + if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh) { + igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; + igb_lro_flush(q_vector, lro_skb); + } - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; + lrolist->stats.coal++; + return new_skb; } - igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); -} + if (IGB_CB(new_skb)->mss && !lroh->th.psh) { + /* if we are at capacity flush the tail */ + if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) { + lro_skb = skb_peek_tail(&lrolist->active); + if (lro_skb) + igb_lro_flush(q_vector, lro_skb); + } -static void igb_rx_vlan(struct igb_ring *ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { - u16 vid; - if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && - test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags)) - vid = be16_to_cpu(rx_desc->wb.upper.vlan); - else - vid = le16_to_cpu(rx_desc->wb.upper.vlan); + /* update sequence and free space */ + IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss; + IGB_CB(new_skb)->free = 65521 - new_skb->len; - __vlan_hwaccel_put_tag(skb, vid); + /* .. and insert at the front of the active list */ + __skb_queue_head(&lrolist->active, new_skb); + + lrolist->stats.coal++; + return NULL; } -} -static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) -{ - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. 
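[Editor's note] igb_get_hlen() above (kept in the packet-split build) pulls the split-header length out of the descriptor's hdr_info field. A sketch of the extraction; the mask and shift are believed to match the 82575 advanced descriptor format, and the 512-byte cap is illustrative:

#include <stdint.h>

#define RXDADV_HDRBUFLEN_MASK  0x7FE0u	/* bits 14:5 of hdr_info */
#define RXDADV_HDRBUFLEN_SHIFT 5
#define RX_HDR_LEN             512	/* size of the posted header buffer */

/* Mirrors igb_get_hlen(): the header length hardware wrote back,
 * clamped to the buffer the driver actually gave it. */
static uint16_t split_header_len(uint16_t hdr_info)
{
	uint16_t hlen = (hdr_info & RXDADV_HDRBUFLEN_MASK)
			>> RXDADV_HDRBUFLEN_SHIFT;

	return hlen > RX_HDR_LEN ? RX_HDR_LEN : hlen;
}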
- */ - u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > IGB_RX_HDR_LEN) - hlen = IGB_RX_HDR_LEN; - return hlen; + /* packet not handled by any of the above, pass it to the stack */ +#ifdef HAVE_VLAN_RX_REGISTER + igb_receive_skb(q_vector, new_skb); +#else + napi_gro_receive(&q_vector->napi, new_skb); +#endif + return NULL; } +#endif /* IGB_NO_LRO */ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; union e1000_adv_rx_desc *rx_desc; +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT const int current_node = numa_node_id(); +#endif unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); u16 i = rx_ring->next_to_clean; @@ -6053,10 +7162,18 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) */ rmb(); +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); + dma_unmap_single(rx_ring->dev, buffer_info->dma, + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + +#else if (!skb_is_nonlinear(skb)) { __skb_put(skb, igb_get_hlen(rx_desc)); dma_unmap_single(rx_ring->dev, buffer_info->dma, - IGB_RX_HDR_LEN, + IGB_RX_HDR_LEN, DMA_FROM_DEVICE); buffer_info->dma = 0; } @@ -6065,13 +7182,13 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) u16 length = le16_to_cpu(rx_desc->wb.upper.length); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); + buffer_info->page, + buffer_info->page_offset, + length); skb->len += length; skb->data_len += length; - skb->truesize += PAGE_SIZE / 2; + skb->truesize += length; if ((page_count(buffer_info->page) != 1) || (page_to_nid(buffer_info->page) != current_node)) @@ -6094,30 +7211,49 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) goto next_desc; } +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ if (igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { dev_kfree_skb_any(skb); goto next_desc; } +#ifdef HAVE_HW_TIME_STAMP igb_rx_hwtstamp(q_vector, rx_desc, skb); +#endif +#ifdef NETIF_F_RXHASH igb_rx_hash(rx_ring, rx_desc, skb); +#endif igb_rx_checksum(rx_ring, rx_desc, skb); igb_rx_vlan(rx_ring, rx_desc, skb); total_bytes += skb->len; total_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); - napi_gro_receive(&q_vector->napi, skb); +#ifndef IGB_NO_LRO + if (igb_can_lro(rx_ring, rx_desc, skb)) + buffer_info->skb = igb_lro_queue(q_vector, skb); + else +#endif +#ifdef HAVE_VLAN_RX_REGISTER + igb_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; + +#endif budget--; next_desc: + cleaned_count++; + if (!budget) break; - cleaned_count++; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { igb_alloc_rx_buffers(rx_ring, cleaned_count); @@ -6129,16 +7265,19 @@ next_desc: } rx_ring->next_to_clean = i; - u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; - u64_stats_update_end(&rx_ring->rx_syncp); q_vector->rx.total_packets += total_packets; q_vector->rx.total_bytes += total_bytes; if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); +#ifndef IGB_NO_LRO + if 
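[Editor's note] The refill comment above only makes sense given the descriptor layout: the same 16 bytes carry the address pair on the way down and the write-back record on the way back, so every refill must rewrite the addresses. A condensed, illustrative view of union e1000_adv_rx_desc (field grouping simplified from the real header):

#include <stdint.h>

union adv_rx_desc {
	struct {			/* driver -> hardware ("read") */
		uint64_t pkt_addr;	/* data buffer (page half)     */
		uint64_t hdr_addr;	/* header buffer (skb head)    */
	} read;
	struct {			/* hardware -> driver ("wb")   */
		uint16_t pkt_info;	/* packet type: TCP/IPv4, ...  */
		uint16_t hdr_info;	/* split-header length, etc.   */
		uint32_t rss_hash;	/* or ip_id/csum, mode-based   */
		uint32_t status_error;	/* DD, EOP, TS, error bits     */
		uint16_t length;	/* data buffer byte count      */
		uint16_t vlan;		/* stripped VLAN tag           */
	} wb;
};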
(netdev_ring(rx_ring)->features & NETIF_F_LRO) + igb_lro_flush_all(q_vector); + +#endif /* IGB_NO_LRO */ return !!budget; } @@ -6152,8 +7291,13 @@ static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, return true; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), + rx_ring->rx_buffer_len); +#else + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), IGB_RX_HDR_LEN); +#endif bi->skb = skb; if (!skb) { rx_ring->rx_stats.alloc_failed++; @@ -6161,11 +7305,16 @@ static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, } /* initialize skb for ring */ - skb_record_rx_queue(skb, rx_ring->queue_index); + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); } +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buffer_len, DMA_FROM_DEVICE); +#else dma = dma_map_single(rx_ring->dev, skb->data, IGB_RX_HDR_LEN, DMA_FROM_DEVICE); +#endif if (dma_mapping_error(rx_ring->dev, dma)) { rx_ring->rx_stats.alloc_failed++; @@ -6176,6 +7325,7 @@ static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, return true; } +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, struct igb_rx_buffer *bi) { @@ -6187,7 +7337,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; if (!page) { - page = netdev_alloc_page(rx_ring->netdev); + page = alloc_page(GFP_ATOMIC | __GFP_COLD); bi->page = page; if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; @@ -6209,6 +7359,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; } +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ /** * igb_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure @@ -6229,6 +7380,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); if (!igb_alloc_mapped_page(rx_ring, bi)) @@ -6236,6 +7390,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ rx_desc++; bi++; i++; @@ -6263,6 +7418,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) } } +#ifdef SIOCGMIIPHY /** * igb_mii_ioctl - * @netdev: @@ -6282,17 +7438,21 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) data->phy_id = adapter->hw.phy.addr; break; case SIOCGMIIREG: - if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, - &data->val_out)) + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) return -EIO; break; case SIOCSMIIREG: default: return -EOPNOTSUPP; } - return 0; + return E1000_SUCCESS; } +#endif +#ifdef HAVE_HW_TIME_STAMP /** * igb_hwtstamp_ioctl - control hardware time stamping * @netdev: @@ -6401,6 +7561,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, return 0; } +#ifdef IGB_PER_PKT_TIMESTAMP /* * Per-packet timestamping only works if all packets are * timestamped, so enable timestamping in all packets as @@ -6410,30 +7571,31 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; } +#endif /* enable/disable TX */ - regval = rd32(E1000_TSYNCTXCTL); + regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; regval |= tsync_tx_ctl; - wr32(E1000_TSYNCTXCTL, regval); + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); /* enable/disable RX */ - regval = rd32(E1000_TSYNCRXCTL); + regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); regval |= tsync_rx_ctl; - wr32(E1000_TSYNCRXCTL, regval); + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); /* define which PTP packets are time stamped */ - wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); /* define ethertype filter for timestamped packets */ if (is_l2) - wr32(E1000_ETQF(3), + E1000_WRITE_REG(hw, E1000_ETQF(3), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - wr32(E1000_ETQF(3), 0); + E1000_WRITE_REG(hw, E1000_ETQF(3), 0); #define PTP_PORT 319 /* L4 Queue Filter[3]: filter by destination port and protocol */ @@ -6444,30 +7606,31 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, | E1000_FTQF_MASK); /* mask all inputs */ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - wr32(E1000_IMIR(3), htons(PTP_PORT)); - wr32(E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT)); + E1000_WRITE_REG(hw, E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); if (hw->mac.type == e1000_82576) { /* enable source port check */ - wr32(E1000_SPQF(3), htons(PTP_PORT)); + E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT)); ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; } - wr32(E1000_FTQF(3), ftqf); + E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); } else { - wr32(E1000_FTQF(3), E1000_FTQF_MASK); + E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); } - wrfl(); + E1000_WRITE_FLUSH(hw); adapter->hwtstamp_config = config; /* clear 
TX/RX time stamp registers, just to be sure */ - regval = rd32(E1000_TXSTMPH); - regval = rd32(E1000_RXSTMPH); + regval = E1000_READ_REG(hw, E1000_TXSTMPH); + regval = E1000_READ_REG(hw, E1000_RXSTMPH); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } +#endif /** * igb_ioctl - * @netdev: @@ -6477,133 +7640,236 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { +#ifdef SIOCGMIIPHY case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); +#endif +#ifdef HAVE_HW_TIME_STAMP case SIOCSHWTSTAMP: return igb_hwtstamp_ioctl(netdev, ifr, cmd); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif default: return -EOPNOTSUPP; } } -s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; - cap_offset = adapter->pdev->pcie_cap; + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_read_config_word(adapter->pdev, cap_offset + reg, value); - return 0; + return E1000_SUCCESS; } -s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; - cap_offset = adapter->pdev->pcie_cap; + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_write_config_word(adapter->pdev, cap_offset + reg, *value); - return 0; + return E1000_SUCCESS; } -static void igb_vlan_mode(struct net_device *netdev, u32 features) +#ifdef HAVE_VLAN_RX_REGISTER +static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp) +#else +void igb_vlan_mode(struct net_device *netdev, u32 features) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl; + int i; +#ifdef HAVE_VLAN_RX_REGISTER + bool enable = !!vlgrp; + + igb_irq_disable(adapter); + + adapter->vlgrp = vlgrp; + + if (!test_bit(__IGB_DOWN, &adapter->state)) + igb_irq_enable(adapter); +#else bool enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif if (enable) { /* enable VLAN tag insert/strip */ - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Disable CFI check */ - rctl = rd32(E1000_RCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); rctl &= ~E1000_RCTL_CFIEN; - wr32(E1000_RCTL, rctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); } else { /* disable VLAN tag insert/strip */ - ctrl = rd32(E1000_CTRL); + ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl &= ~E1000_CTRL_VME; - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + +#ifndef CONFIG_IGB_VMDQ_NETDEV + for (i = 0; i < adapter->vmdq_pools; i++) { + igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count + i, + enable); + } + +#else + igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count, + enable); + + for (i = 1; i < adapter->vmdq_pools; i++) { +#ifdef HAVE_VLAN_RX_REGISTER + struct igb_vmdq_adapter *vadapter; + vadapter = netdev_priv(adapter->vmdq_netdev[i-1]); + enable = !!vadapter->vlgrp; +#else + struct net_device *vnetdev; + vnetdev = adapter->vmdq_netdev[i-1]; + enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX); +#endif + 
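[Editor's note] The two wrappers above now call pci_find_capability() instead of trusting a cached offset. A sketch of what that lookup does, given a snapshot of config space; find_capability() and the 256-byte buffer are illustrative stand-ins:

#include <stdint.h>

#define PCI_CAPABILITY_LIST 0x34	/* head of the capability chain */
#define PCI_CAP_ID_EXP      0x10	/* PCI Express capability ID    */

/* Walk the chained capability list until the wanted ID appears.
 * 'cfg' is a 256-byte copy of the device's config space. */
static uint8_t find_capability(const uint8_t *cfg, uint8_t want)
{
	uint8_t pos = cfg[PCI_CAPABILITY_LIST];

	while (pos >= 0x40) {		/* capabilities live above 0x40 */
		if (cfg[pos] == want)	/* byte 0: capability ID        */
			return pos;
		pos = cfg[pos + 1];	/* byte 1: next pointer         */
	}
	return 0;			/* not found */
}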
igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count + i, + enable); } +#endif igb_rlpml_set(adapter); } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#else static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; /* attempt to add filter to vlvf array */ - igb_vlvf_set(adapter, vid, true, pf_id); + igb_vlvf_set(adapter, vid, TRUE, pf_id); /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(adapter, vid, TRUE); +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * There is no need to update netdev for vlan 0 (DCB), since it + * wouldn't has v_netdev. + */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif +#ifndef HAVE_VLAN_RX_REGISTER set_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#else static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; s32 err; +#ifdef HAVE_VLAN_RX_REGISTER + igb_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IGB_DOWN, &adapter->state)) + igb_irq_enable(adapter); + +#endif /* HAVE_VLAN_RX_REGISTER */ /* remove vlan from VLVF table array */ - err = igb_vlvf_set(adapter, vid, false, pf_id); + err = igb_vlvf_set(adapter, vid, FALSE, pf_id); /* if vid was not present in VLVF just remove it from table */ if (err) - igb_vfta_set(hw, vid, false); + igb_vfta_set(adapter, vid, FALSE); +#ifndef HAVE_VLAN_RX_REGISTER clear_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif } static void igb_restore_vlan(struct igb_adapter *adapter) { +#ifdef HAVE_VLAN_RX_REGISTER + igb_vlan_mode(adapter->netdev, adapter->vlgrp); + + if (adapter->vlgrp) { + u16 vid; + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + igb_vlan_rx_add_vid(adapter->netdev, vid); + } + } +#else u16 vid; igb_vlan_mode(adapter->netdev, adapter->netdev->features); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) igb_vlan_rx_add_vid(adapter->netdev, vid); +#endif } -int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) { struct pci_dev *pdev = adapter->pdev; struct e1000_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - /* Fiber NIC's only allow 1000 Gbps Full duplex */ - if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) - goto err_inval; + /* Fiber NIC's only allow 1000 gbps Full duplex */ + if ((adapter->hw.phy.media_type == 
e1000_media_type_internal_serdes ) && + spddplx != (SPEED_1000 + DUPLEX_FULL)) { + dev_err(pci_dev_to_dev(pdev), + "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; + } - switch (spd + dplx) { + switch (spddplx) { case SPEED_10 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_10_HALF; break; @@ -6622,13 +7888,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - goto err_inval; + dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; } return 0; - -err_inval: - dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; } static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, @@ -6656,7 +7919,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, return retval; #endif - status = rd32(E1000_STATUS); + status = E1000_READ_REG(hw, E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6666,27 +7929,25 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { - rctl = rd32(E1000_RCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); rctl |= E1000_RCTL_MPE; - wr32(E1000_RCTL, rctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); } - ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 + ctrl = E1000_READ_REG(hw, E1000_CTRL); /* phy power management enable */ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; - wr32(E1000_CTRL, ctrl); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Allow time for pending master requests to run */ - igb_disable_pcie_master(hw); + e1000_disable_pcie_master(hw); - wr32(E1000_WUC, E1000_WUC_PME_EN); - wr32(E1000_WUFC, wufc); + E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN); + E1000_WRITE_REG(hw, E1000_WUFC, wufc); } else { - wr32(E1000_WUC, 0); - wr32(E1000_WUFC, 0); + E1000_WRITE_REG(hw, E1000_WUC, 0); + E1000_WRITE_REG(hw, E1000_WUFC, 0); } *enable_wake = wufc || adapter->en_mng_pt; @@ -6705,7 +7966,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } #ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS static int igb_suspend(struct device *dev) { int retval; @@ -6725,7 +7986,6 @@ static int igb_suspend(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP */ static int igb_resume(struct device *dev) { @@ -6741,7 +8001,7 @@ static int igb_resume(struct device *dev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "igb: Cannot enable PCI device from suspend\n"); return err; } @@ -6750,8 +8010,23 @@ static int igb_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); +#ifdef CONFIG_PM_RUNTIME + if (!rtnl_is_locked()) { + /* + * shut up ASSERT_RTNL() warning in + * netif_set_real_num_tx/rx_queues. + */ + rtnl_lock(); + err = igb_init_interrupt_scheme(adapter); + rtnl_unlock(); + } else { + err = igb_init_interrupt_scheme(adapter); + } + if (err) { +#else if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); +#endif /* CONFIG_PM_RUNTIME */ + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -6761,7 +8036,7 @@ static int igb_resume(struct device *dev) * driver. 
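[Editor's note] The switch (spddplx) above works because, with the standard ethtool encodings (SPEED_* values of 10/100/1000, DUPLEX_HALF 0, DUPLEX_FULL 1), every supported speed+duplex sum is unique, so one u16 can carry both fields. A quick enumeration:

#include <stdio.h>

#define DUPLEX_HALF 0
#define DUPLEX_FULL 1

int main(void)
{
	const int speeds[] = { 10, 100, 1000 };
	const int duplexes[] = { DUPLEX_HALF, DUPLEX_FULL };

	/* prints 10, 11, 100, 101, 1000, 1001 - all distinct */
	for (int s = 0; s < 3; s++)
		for (int d = 0; d < 2; d++)
			printf("speed %4d + duplex %d -> key %d\n",
			       speeds[s], duplexes[d],
			       speeds[s] + duplexes[d]);
	return 0;
}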
*/ igb_get_hw_control(adapter); - wr32(E1000_WUS, ~0); + E1000_WRITE_REG(hw, E1000_WUS, ~0); if (netdev->flags & IFF_UP) { err = __igb_open(netdev, true); @@ -6770,6 +8045,7 @@ static int igb_resume(struct device *dev) } netif_device_attach(netdev); + return 0; } @@ -6811,12 +8087,38 @@ static int igb_runtime_resume(struct device *dev) return igb_resume(dev); } #endif /* CONFIG_PM_RUNTIME */ -#endif +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM */ -static void igb_shutdown(struct pci_dev *pdev) +#ifdef USE_REBOOT_NOTIFIER +/* only want to do this for 2.4 kernels? */ +static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, + void *p) { + struct pci_dev *pdev = NULL; bool wake; + switch (event) { + case SYS_DOWN: + case SYS_HALT: + case SYS_POWER_OFF: + while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { + if (pci_dev_driver(pdev) == &igb_driver) { + __igb_shutdown(pdev, &wake, 0); + if (event == SYS_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } + } + } + } + return NOTIFY_DONE; +} +#else +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + __igb_shutdown(pdev, &wake, 0); if (system_state == SYSTEM_POWER_OFF) { @@ -6824,6 +8126,7 @@ static void igb_shutdown(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D3hot); } } +#endif /* USE_REBOOT_NOTIFIER */ #ifdef CONFIG_NET_POLL_CONTROLLER /* @@ -6841,7 +8144,7 @@ static void igb_netpoll(struct net_device *netdev) for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; if (adapter->msix_entries) - wr32(E1000_EIMC, q_vector->eims_value); + E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); else igb_irq_disable(adapter); napi_schedule(&q_vector->napi); @@ -6849,6 +8152,8 @@ static void igb_netpoll(struct net_device *netdev) } #endif /* CONFIG_NET_POLL_CONTROLLER */ +#ifdef HAVE_PCI_ERS +#define E1000_DEV_ID_82576_VF 0x10CA /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -6863,6 +8168,83 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PCI_IOV + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA)) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); + pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); + pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); + pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); + + req_id = dw1 >> 16; + /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + + vf = (req_id & 0x7F) >> 1; + dev_err(pci_dev_to_dev(pdev), + "VF %d has caused a PCIe error\n", vf); + dev_err(pci_dev_to_dev(pdev), + "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, + E1000_DEV_ID_82576_VF, NULL); + while (vfdev) { + 
if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, + E1000_DEV_ID_82576_VF, vfdev); + } + /* + * There's a slim chance the VF could have been hot plugged, + * so if it is no longer present we don't need to issue the + * VFLR. Just clean up the AER in that case. + */ + if (vfdev) { + dev_err(pci_dev_to_dev(pdev), + "Issuing VFLR to VF %d\n", vf); + pci_write_config_dword(vfdev, 0xA8, 0x00008000); + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. + */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + +skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) @@ -6889,10 +8271,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pci_ers_result_t result; - int err; if (pci_enable_device_mem(pdev)) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { @@ -6903,17 +8284,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - igb_reset(adapter); - wr32(E1000_WUS, ~0); + schedule_work(&adapter->reset_task); + E1000_WRITE_REG(hw, E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " - "failed 0x%0x\n", err); - /* non-fatal, continue */ - } + pci_cleanup_aer_uncorrect_error_status(pdev); return result; } @@ -6931,9 +8307,15 @@ static void igb_io_resume(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + if (adapter->vferr_refcount) { + dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + if (netif_running(netdev)) { if (igb_up(adapter)) { - dev_err(&pdev->dev, "igb_up failed after reset\n"); + dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n"); return; } } @@ -6945,48 +8327,60 @@ static void igb_io_resume(struct pci_dev *pdev) igb_get_hw_control(adapter); } -static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, - u8 qsel) +#endif /* HAVE_PCI_ERS */ + +int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) { - u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; + int i; - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - - /* Indicate to hardware the Address is Valid. 
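[Editor's note] The requester-ID parsing above is specific to the 82576 dual-port SR-IOV layout: bit 7 flags a VF, bit 0 selects the PF/function, and bits 6:1 index the VF. A standalone decode with a hypothetical example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t req_id = 0x00A5;	/* hypothetical AER header value */

	if (req_id & 0x0080) {		/* bit 7: VF-originated request */
		unsigned pf_func = req_id & 0x01;
		unsigned vf = (req_id & 0x7F) >> 1;

		printf("VF %u on function %u\n", vf, pf_func);
	} else {
		printf("requester is a physical function\n");
	}
	return 0;
}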
*/ - rar_high |= E1000_RAH_AV; - - if (hw->mac.type == e1000_82575) - rar_high |= E1000_RAH_POOL_1 * qsel; - else - rar_high |= E1000_RAH_POOL_1 << qsel; + if (is_zero_ether_addr(addr)) + return 0; - wr32(E1000_RAL(index), rar_low); - wrfl(); - wr32(E1000_RAH(index), rar_high); - wrfl(); + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + continue; + adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED | + IGB_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].queue = queue; + igb_sync_mac_table(adapter); + return 0; + } + return -ENOMEM; } +int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue) +{ + /* search table for addr, if found, set to 0 and sync */ + int i; + struct e1000_hw *hw = &adapter->hw; + if (is_zero_ether_addr(addr)) + return 0; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (!compare_ether_addr(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].queue == queue) { + adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + igb_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; +} static int igb_set_vf_mac(struct igb_adapter *adapter, int vf, unsigned char *mac_addr) { - struct e1000_hw *hw = &adapter->hw; - /* VF MAC addresses start at end of receive addresses and moves - * torwards the first, as a result a collision should not be possible */ - int rar_entry = hw->mac.rar_entry_count - (vf + 1); - + igb_del_mac_filter(adapter, adapter->vf_data[vf].vf_mac_addresses, vf); memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); + igb_add_mac_filter(adapter, mac_addr, vf); return 0; } +#ifdef IFLA_VF_MAX static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -6995,7 +8389,7 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); + " change effective.\n"); if (test_bit(__IGB_DOWN, &adapter->state)) { dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," " but the PF device is not up.\n"); @@ -7018,7 +8412,7 @@ static int igb_link_mbps(int internal_link_speed) } static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, - int link_speed) + int link_speed) { int rf_dec, rf_int; u32 bcnrc_val; @@ -7031,14 +8425,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, bcnrc_val = E1000_RTTBCNRC_RS_ENA; bcnrc_val |= ((rf_int<vf_rate_link_speed == 0) || - (adapter->hw.mac.type != e1000_82576)) + /* VF TX rate limit was not set */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) return; actual_link_speed = igb_link_mbps(adapter->link_speed); @@ -7056,8 +8455,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter) reset_rate = true; adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, - "Link speed has been changed. VF Transmit " - "rate is disabled\n"); + "Link speed has been changed. 
VF Transmit rate is disabled\n"); } for (i = 0; i < adapter->vfs_allocated_count; i++) { @@ -7065,8 +8463,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter) adapter->vf_data[i].tx_rate = 0; igb_set_vf_rate_limit(&adapter->hw, i, - adapter->vf_data[i].tx_rate, - actual_link_speed); + adapter->vf_data[i].tx_rate, actual_link_speed); } } @@ -7075,14 +8472,14 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int actual_link_speed; - + if (hw->mac.type != e1000_82576) return -EOPNOTSUPP; actual_link_speed = igb_link_mbps(adapter->link_speed); if ((vf >= adapter->vfs_allocated_count) || - (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || - (tx_rate < 0) || (tx_rate > actual_link_speed)) + (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) || + (tx_rate < 0) || (tx_rate > actual_link_speed)) return -EINVAL; adapter->vf_rate_link_speed = actual_link_speed; @@ -7105,7 +8502,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, ivi->qos = adapter->vf_data[vf].pf_qos; return 0; } - +#endif static void igb_vmm_control(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -7118,28 +8515,66 @@ static void igb_vmm_control(struct igb_adapter *adapter) return; case e1000_82576: /* notify HW that the MAC is adding vlan tags */ - reg = rd32(E1000_DTXCTL); - reg |= E1000_DTXCTL_VLAN_ADDED; - wr32(E1000_DTXCTL, reg); + reg = E1000_READ_REG(hw, E1000_DTXCTL); + reg |= (E1000_DTXCTL_VLAN_ADDED | + E1000_DTXCTL_SPOOF_INT); + E1000_WRITE_REG(hw, E1000_DTXCTL, reg); case e1000_82580: /* enable replication vlan tag stripping */ - reg = rd32(E1000_RPLOLR); + reg = E1000_READ_REG(hw, E1000_RPLOLR); reg |= E1000_RPLOLR_STRVLAN; - wr32(E1000_RPLOLR, reg); + E1000_WRITE_REG(hw, E1000_RPLOLR, reg); case e1000_i350: /* none of the above registers are supported by i350 */ break; } - if (adapter->vfs_allocated_count) { - igb_vmdq_set_loopback_pf(hw, true); - igb_vmdq_set_replication_pf(hw, true); - igb_vmdq_set_anti_spoofing_pf(hw, true, - adapter->vfs_allocated_count); - } else { - igb_vmdq_set_loopback_pf(hw, false); - igb_vmdq_set_replication_pf(hw, false); - } + /* Enable Malicious Driver Detection */ + if ((hw->mac.type == e1000_i350) && (adapter->vfs_allocated_count) && + (adapter->mdd)) + igb_enable_mdd(adapter); + + /* enable replication and loopback support */ + e1000_vmdq_set_loopback_pf(hw, adapter->vfs_allocated_count || + adapter->vmdq_pools); + + e1000_vmdq_set_anti_spoofing_pf(hw, adapter->vfs_allocated_count || + adapter->vmdq_pools, + adapter->vfs_allocated_count); + e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count || + adapter->vmdq_pools); +} + +static void igb_init_fw(struct igb_adapter *adapter) +{ + struct e1000_fw_drv_info fw_cmd; + struct e1000_hw *hw = &adapter->hw; + int i; + u16 mask; + + mask = E1000_SWFW_PHY0_SM; + + if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) { + for (i = 0; i <= FW_MAX_RETRIES; i++) { + E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI); + fw_cmd.hdr.cmd = FW_CMD_DRV_INFO; + fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED; + fw_cmd.port_num = hw->bus.func; + fw_cmd.drv_version = FW_FAMILY_DRV_VER; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = e1000_calculate_checksum((u8 *)&fw_cmd, + (FW_HDR_LEN + + fw_cmd.hdr.buf_len)); + e1000_host_interface_command(hw, (u8*)&fw_cmd, + sizeof(fw_cmd)); + if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_STATUS_SUCCESS) + 
break; + } + } else + dev_warn(pci_dev_to_dev(adapter->pdev), + "Unable to get semaphore, firmware init failed.\n"); + hw->mac.ops.release_swfw_sync(hw, mask); } static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) @@ -7149,11 +8584,11 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) u16 hwm; if (hw->mac.type > e1000_82580) { - if (adapter->flags & IGB_FLAG_DMAC) { + if (adapter->dmac != IGB_DMAC_DISABLE) { u32 reg; - /* force threshold to 0. */ - wr32(E1000_DMCTXTH, 0); + /* force threshold to 0. */ + E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); /* * DMA Coalescing high water mark needs to be greater @@ -7163,20 +8598,20 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) hwm = 64 * pba - adapter->max_frame_size / 16; if (hwm < 64 * (pba - 6)) hwm = 64 * (pba - 6); - reg = rd32(E1000_FCRTC); + reg = E1000_READ_REG(hw, E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) & E1000_FCRTC_RTH_COAL_MASK); - wr32(E1000_FCRTC, reg); + E1000_WRITE_REG(hw, E1000_FCRTC, reg); - /* + /* * Set the DMA Coalescing Rx threshold to PBA - 2 * max - * frame size, capping it at PBA - 10KB. - */ + * frame size, capping it at PBA - 10KB. + */ dmac_thr = pba - adapter->max_frame_size / 512; if (dmac_thr < pba - 10) dmac_thr = pba - 10; - reg = rd32(E1000_DMACR); + reg = E1000_READ_REG(hw, E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); @@ -7184,39 +8619,36 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); - /* watchdog timer= +-1000 usec in 32usec intervals */ - reg |= (1000 >> 5); - wr32(E1000_DMACR, reg); + /* watchdog timer= msec values in 32usec intervals */ + reg |= ((adapter->dmac) >> 5); + E1000_WRITE_REG(hw, E1000_DMACR, reg); + + /* no lower threshold to disable coalescing(smart fifb)-UTRESH=0*/ + E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); /* - * no lower threshold to disable - * coalescing(smart fifb)-UTRESH=0 + * This sets the time to wait before requesting transition to + * low power state to number of usecs needed to receive 1 512 + * byte frame at gigabit line rate */ - wr32(E1000_DMCRTRH, 0); - reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); - wr32(E1000_DMCTLX, reg); + E1000_WRITE_REG(hw, E1000_DMCTLX, reg); - /* - * free space in tx packet buffer to wake from - * DMA coal - */ - wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + /* free space in tx packet buffer to wake from DMA coal */ + E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); - /* - * make low power state decision controlled - * by DMA coal - */ - reg = rd32(E1000_PCIEMISC); + /* make low power state decision controlled by DMA coal */ + reg = E1000_READ_REG(hw, E1000_PCIEMISC); reg &= ~E1000_PCIEMISC_LX_DECISION; - wr32(E1000_PCIEMISC, reg); + E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); } /* endif adapter->dmac is not disabled */ } else if (hw->mac.type == e1000_82580) { - u32 reg = rd32(E1000_PCIEMISC); - wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); - wr32(E1000_DMACR, 0); + u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); + E1000_WRITE_REG(hw, E1000_PCIEMISC, + reg & ~E1000_PCIEMISC_LX_DECISION); + E1000_WRITE_REG(hw, E1000_DMACR, 0); } } diff --git a/drivers/net/igb/igb_param.c b/drivers/net/igb/igb_param.c new file mode 100644 index 000000000000..8cf6d6c18bb7 --- 
/dev/null +++ b/drivers/net/igb/igb_param.c @@ -0,0 +1,835 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include + +#include "igb.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define IGB_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 +#define MAX_NUM_LIST_OPTS 15 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when igb_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labeled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
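[Editor's note] The comment below refers to GCC's ranged designated initializers, which IGB_PARAM_INIT uses to mark every per-board slot as "unset" so the driver can distinguish "user passed 0" from "user passed nothing". A compilable illustration:

#include <stdio.h>

#define MAX_NIC      32
#define OPTION_UNSET (-1)

/* fills slots 0..MAX_NIC with the sentinel in one initializer */
static int InterruptThrottleRate[MAX_NIC + 1] = {
	[0 ... MAX_NIC] = OPTION_UNSET
};

int main(void)
{
	printf("board 7 default: %d\n", InterruptThrottleRate[7]); /* -1 */
	return 0;
}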
+ */ + +#define IGB_PARAM(X, desc) \ + static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define IGB_PARAM(X, desc) \ + static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +IGB_PARAM(InterruptThrottleRate, + "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +/* #define MIN_ITR 120 */ +#define MIN_ITR 0 +/* IntMode (Interrupt Mode) + * + * Valid Range: 0 - 2 + * + * Default Value: 2 (MSI-X) + */ +IGB_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); +#define MAX_INTMODE IGB_INT_MODE_MSIX +#define MIN_INTMODE IGB_INT_MODE_LEGACY + +IGB_PARAM(Node, "set the starting node to allocate memory on, default -1"); + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535), default 0=off"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + +/* LLIPush (Low Latency Interrupt on TCP Push flag) + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off"); + +#define DEFAULT_LLIPUSH 0 +#define MAX_LLIPUSH 1 +#define MIN_LLIPUSH 0 + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500), default 0=off"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* RSS (Enable RSS multiqueue receive) + * + * Valid Range: 0 - 8 + * + * Default Value: 1 + */ +IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1=number of cpus"); + +#define DEFAULT_RSS 1 +#define MAX_RSS 8 +#define MIN_RSS 0 + +/* VMDQ (Enable VMDq multiqueue receive) + * + * Valid Range: 0 - 8 + * + * Default Value: 0 + */ +IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0"); + +#define DEFAULT_VMDQ 0 +#define MAX_VMDQ MAX_RSS +#define MIN_VMDQ 0 + +/* max_vfs (Enable SR-IOV VF devices) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 + */ +IGB_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0"); + +#define DEFAULT_SRIOV 0 +#define MAX_SRIOV 7 +#define MIN_SRIOV 0 + +/* MDD (Enable Malicious Driver Detection) + * + * Only available when SR-IOV is enabled - max_vfs is greater than 0 + * + * Valid Range: 0, 1 + * + * Default Value: 1 + */ +IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. " + "Only available when max_vfs is greater than 0"); + + +/* QueuePairs (Enable TX/RX queue pairs for interrupt handling) + * + * Valid Range: 0 - 1 + * + * Default Value: 1 + */ +IGB_PARAM(QueuePairs, "Enable TX/RX queue pairs for interrupt handling (0,1), default 1=on"); + +#define DEFAULT_QUEUE_PAIRS 1 +#define MAX_QUEUE_PAIRS 1 +#define MIN_QUEUE_PAIRS 0 + +/* Enable/disable EEE (a.k.a. 
IEEE802.3az)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+ IGB_PARAM(EEE, "Enable/disable EEE on parts that support the feature");
+
+/* Enable/disable DMA Coalescing
+ *
+ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
+ * 9000, 10000(msec), 250(usec), 500(usec)
+ *
+ * Default Value: 0
+ */
+ IGB_PARAM(DMAC, "Disable or set latency for DMA Coalescing (0=off, 1000-10000(msec), 250, 500(usec))");
+
+#ifndef IGB_NO_LRO
+/* Enable/disable Large Receive Offload
+ *
+ * Valid Values: 0(off), 1(on)
+ *
+ * Default Value: 0
+ */
+ IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
+
+#endif
+struct igb_opt_list {
+	int i;
+	char *str;
+};
+struct igb_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct igb_opt_list *p;
+		} l;
+	} arg;
+};
+
+static int __devinit igb_validate_option(unsigned int *value,
+					 struct igb_option *opt,
+					 struct igb_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+				"%s set to %d\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct igb_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
+		opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/**
+ * igb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
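[Editor's note] A reduced model of igb_validate_option() for the common range_option case; validate_range() and the printf() reporting stand in for the real helper and its DPRINTK() plumbing:

#include <stdio.h>

struct range_opt { const char *name; int def, min, max; };

/* unset (-1) or out-of-range values fall back to the default */
static int validate_range(int value, const struct range_opt *o)
{
	if (value == -1)			/* OPTION_UNSET */
		return o->def;
	if (value >= o->min && value <= o->max)
		return value;
	printf("Invalid %s value (%d), using default %d\n",
	       o->name, value, o->def);
	return o->def;
}

int main(void)
{
	struct range_opt itr = { "InterruptThrottleRate", 3, 0, 100000 };

	printf("%d %d %d\n",
	       validate_range(-1, &itr),	/* 3: unset -> default   */
	       validate_range(8000, &itr),	/* 8000: in range, kept  */
	       validate_range(200000, &itr));	/* 3: rejected, default  */
	return 0;
}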
+ **/ + +void __devinit igb_check_options(struct igb_adapter *adapter) +{ + int bd = adapter->bd_number; + struct e1000_hw *hw = &adapter->hw; + + if (bd >= IGB_MAX_NIC) { + DPRINTK(PROBE, NOTICE, + "Warning: no configuration for board #%d\n", bd); + DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); +#ifndef module_param_array + bd = IGB_MAX_NIC; +#endif + } + + { /* Interrupt Throttling Rate */ + struct igb_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + unsigned int itr = InterruptThrottleRate[bd]; + + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + if(hw->mac.type >= e1000_i350) + adapter->dmac = IGB_DMAC_DISABLE; + adapter->rx_itr_setting = itr; + break; + case 1: + DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", + opt.name); + adapter->rx_itr_setting = itr; + break; + case 3: + DPRINTK(PROBE, INFO, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->rx_itr_setting = itr; + break; + default: + igb_validate_option(&itr, &opt, adapter); + /* Save the setting, because the dynamic bits + * change itr. In case of invalid user value, + * default to conservative mode, else need to + * clear the lower two bits because they are + * used as control */ + if (itr == 3) { + adapter->rx_itr_setting = itr; + } else { + adapter->rx_itr_setting = 1000000000 / + (itr * 256); + adapter->rx_itr_setting &= ~3; + } + break; + } +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + } +#endif + adapter->tx_itr_setting = adapter->rx_itr_setting; + } + { /* Interrupt Mode */ + struct igb_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "defaulting to 2 (MSI-X)", + .def = IGB_INT_MODE_MSIX, + .arg = { .r = { .min = MIN_INTMODE, + .max = MAX_INTMODE } } + }; + +#ifdef module_param_array + if (num_IntMode > bd) { +#endif + unsigned int int_mode = IntMode[bd]; + igb_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; +#ifdef module_param_array + } else { + adapter->int_mode = opt.def; + } +#endif + } + { /* Low Latency Interrupt TCP Port */ + struct igb_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef module_param_array + if (num_LLIPort > bd) { +#endif + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + igb_validate_option(&adapter->lli_port, &opt, + adapter); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + struct igb_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + igb_validate_option(&adapter->lli_size, &opt, + adapter); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size 
= opt.def; + } +#endif + } + { /* Low Latency Interrupt on TCP Push flag */ + struct igb_option opt = { + .type = enable_option, + .name = "Low Latency Interrupt on TCP Push flag", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + +#ifdef module_param_array + if (num_LLIPush > bd) { +#endif + unsigned int lli_push = LLIPush[bd]; + igb_validate_option(&lli_push, &opt, adapter); + adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0; +#ifdef module_param_array + } else { + adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0; + } +#endif + } + { /* SRIOV - Enable SR-IOV VF devices */ + struct igb_option opt = { + .type = range_option, + .name = "max_vfs - SR-IOV VF devices", + .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV), + .def = DEFAULT_SRIOV, + .arg = { .r = { .min = MIN_SRIOV, + .max = MAX_SRIOV } } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + adapter->vfs_allocated_count = max_vfs[bd]; + igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter); + +#ifdef module_param_array + } else { + adapter->vfs_allocated_count = opt.def; + } +#endif + if (adapter->vfs_allocated_count) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82580: + adapter->vfs_allocated_count = 0; + DPRINTK(PROBE, INFO, "SR-IOV option max_vfs not supported.\n"); + default: + break; + } + } + } + { /* VMDQ - Enable VMDq multiqueue receive */ + struct igb_option opt = { + .type = range_option, + .name = "VMDQ - VMDq multiqueue queue count", + .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ), + .def = DEFAULT_VMDQ, + .arg = { .r = { .min = MIN_VMDQ, + .max = (MAX_VMDQ - adapter->vfs_allocated_count) } } + }; +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]); + if (adapter->vfs_allocated_count && !adapter->vmdq_pools) { + DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n"); + adapter->vmdq_pools = 1; + } + igb_validate_option(&adapter->vmdq_pools, &opt, adapter); + +#ifdef module_param_array + } else { + if (!adapter->vfs_allocated_count) + adapter->vmdq_pools = (opt.def == 1 ? 
0 : opt.def);
+ else
+ adapter->vmdq_pools = 1;
+ }
+#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
+ DPRINTK(PROBE, INFO, "VMDq not supported on this part.\n");
+ adapter->vmdq_pools = 0;
+ }
+#endif
+ }
+ { /* RSS - Enable RSS multiqueue receives */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "RSS - RSS multiqueue receive count",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RSS),
+ .def = DEFAULT_RSS,
+ .arg = { .r = { .min = MIN_RSS,
+ .max = MAX_RSS } }
+ };
+
+ if (adapter->vmdq_pools) {
+ switch (hw->mac.type) {
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ case e1000_82576:
+ opt.arg.r.max = 2;
+ break;
+ case e1000_82575:
+ if (adapter->vmdq_pools == 2)
+ opt.arg.r.max = 3;
+ if (adapter->vmdq_pools <= 2)
+ break;
+#endif
+ default:
+ opt.arg.r.max = 1;
+ break;
+ }
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ opt.arg.r.max = 4;
+ break;
+ default:
+ break;
+ }
+
+#ifdef module_param_array
+ if (num_RSS > bd) {
+#endif
+ adapter->rss_queues = RSS[bd];
+ switch (adapter->rss_queues) {
+ case 1:
+ break;
+ default:
+ igb_validate_option(&adapter->rss_queues, &opt, adapter);
+ if (adapter->rss_queues)
+ break;
+ /* fall through */
+ case 0:
+ adapter->rss_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->rss_queues = opt.def;
+ }
+#endif
+ }
+ { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_QueuePairs > bd) {
+#endif
+ unsigned int qp = QueuePairs[bd];
+ /*
+ * We must enable queue pairs if the number of queues
+ * exceeds the number of available interrupts. We are
+ * limited to 10, or 3 per unallocated vf.
+ */
+ if ((adapter->rss_queues > 4) ||
+ (adapter->vmdq_pools > 4) ||
+ ((adapter->rss_queues > 1) &&
+ ((adapter->vmdq_pools > 3) ||
+ (adapter->vfs_allocated_count > 6)))) {
+ if (qp == OPTION_DISABLED) {
+ qp = OPTION_ENABLED;
+ DPRINTK(PROBE, INFO,
+ "Number of queues exceeds available interrupts, %s\n",
+ opt.err);
+ }
+ }
+ igb_validate_option(&qp, &opt, adapter);
+ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
+ }
+#endif
+ }
+ { /* EEE - Enable EEE for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "EEE Support",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_EEE > bd) {
+#endif
+ unsigned int eee = EEE[bd];
+ igb_validate_option(&eee, &opt, adapter);
+ adapter->flags |= eee ? IGB_FLAG_EEE : 0;
+ if (eee)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
+ if (adapter->flags & IGB_FLAG_EEE)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+ }
+#endif
+ }
+ }
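+ /*
+ * Note on the DMAC option below: the list maps the DMAC module
+ * parameter to a DMA coalescing watchdog interval, from disabled
+ * through 250 usec (IGB_DMAC_MIN) up to 10000 usec (IGB_DMAC_MAX).
+ * Coalescing is forced off whenever interrupt throttling is
+ * disabled, both here and in the InterruptThrottleRate=0 case above.
+ */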
+ { /* DMAC - Enable DMA Coalescing for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_opt_list list[] = {
+ { IGB_DMAC_DISABLE, "DMAC Disable"},
+ { IGB_DMAC_MIN, "DMAC 250 usec"},
+ { IGB_DMAC_500, "DMAC 500 usec"},
+ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
+ { IGB_DMAC_2000, "DMAC 2000 usec"},
+ { IGB_DMAC_3000, "DMAC 3000 usec"},
+ { IGB_DMAC_4000, "DMAC 4000 usec"},
+ { IGB_DMAC_5000, "DMAC 5000 usec"},
+ { IGB_DMAC_6000, "DMAC 6000 usec"},
+ { IGB_DMAC_7000, "DMAC 7000 usec"},
+ { IGB_DMAC_8000, "DMAC 8000 usec"},
+ { IGB_DMAC_9000, "DMAC 9000 usec"},
+ { IGB_DMAC_MAX, "DMAC 10000 usec"}
+ };
+ struct igb_option opt = {
+ .type = list_option,
+ .name = "DMA Coalescing",
+ .err = "using default of " __MODULE_STRING(IGB_DMAC_DISABLE),
+ .def = IGB_DMAC_DISABLE,
+ .arg = { .l = { .nr = 13,
+ .p = list
+ }
+ }
+ };
+#ifdef module_param_array
+ if (num_DMAC > bd) {
+#endif
+ unsigned int dmac = DMAC[bd];
+ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
+ dmac = IGB_DMAC_DISABLE;
+ igb_validate_option(&dmac, &opt, adapter);
+ switch (dmac) {
+ case IGB_DMAC_DISABLE:
+ case IGB_DMAC_MIN:
+ case IGB_DMAC_500:
+ case IGB_DMAC_EN_DEFAULT:
+ case IGB_DMAC_2000:
+ case IGB_DMAC_3000:
+ case IGB_DMAC_4000:
+ case IGB_DMAC_5000:
+ case IGB_DMAC_6000:
+ case IGB_DMAC_7000:
+ case IGB_DMAC_8000:
+ case IGB_DMAC_9000:
+ case IGB_DMAC_MAX:
+ /* all listed values are valid watchdog settings */
+ adapter->dmac = dmac;
+ break;
+ default:
+ adapter->dmac = opt.def;
+ DPRINTK(PROBE, INFO,
+ "Invalid DMAC setting, resetting DMAC to %d\n",
+ opt.def);
+ }
+#ifdef module_param_array
+ } else
+ adapter->dmac = opt.def;
+#endif
+ }
+ }
+#ifndef IGB_NO_LRO
+ { /* LRO - Enable Large Receive Offload */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "LRO - Large Receive Offload",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+ struct net_device *netdev = adapter->netdev;
+#ifdef module_param_array
+ if (num_LRO > bd) {
+#endif
+ unsigned int lro = LRO[bd];
+ igb_validate_option(&lro, &opt, adapter);
+ netdev->features |= lro ?
NETIF_F_LRO : 0; +#ifdef module_param_array + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } +#endif + } +#endif /* IGB_NO_LRO */ + { /* Node assignment */ + static struct igb_option opt = { + .type = range_option, + .name = "Node to start on", + .err = "defaulting to -1", +#ifdef HAVE_EARLY_VMALLOC_NODE + .def = 0, +#else + .def = -1, +#endif + .arg = { .r = { .min = 0, + .max = (MAX_NUMNODES - 1)}} + }; + int node_param = opt.def; + + /* if the default was zero then we need to set the + * default value to an online node, which is not + * necessarily zero, and the constant initializer + * above can't take first_online_node */ + if (node_param == 0) + /* must set opt.def for validate */ + opt.def = node_param = first_online_node; + +#ifdef module_param_array + if (num_Node > bd) { +#endif + node_param = Node[bd]; + igb_validate_option((uint *)&node_param, &opt, adapter); + + if (node_param != OPTION_UNSET) { + DPRINTK(PROBE, INFO, "node set to %d\n", node_param); + } +#ifdef module_param_array + } +#endif + + /* check sanity of the value */ + if (node_param != -1 && !node_online(node_param)) { + DPRINTK(PROBE, INFO, + "ignoring node set to invalid value %d\n", + node_param); + node_param = opt.def; + } + + adapter->node = node_param; + } + { /* MDD - Enable Malicious Driver Detection. Only available when + SR-IOV is enabled. */ + struct igb_option opt = { + .type = enable_option, + .name = "Malicious Driver Detection", + .err = "defaulting to 1", + .def = OPTION_ENABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED } } + }; + +#ifdef module_param_array + if (num_MDD > bd) { +#endif + adapter->mdd = MDD[bd]; + igb_validate_option((uint *)&adapter->mdd, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->mdd = opt.def; + } +#endif + } +} + diff --git a/drivers/net/igb/igb_procfs.c b/drivers/net/igb/igb_procfs.c new file mode 100644 index 000000000000..c6e45178624e --- /dev/null +++ b/drivers/net/igb/igb_procfs.c @@ -0,0 +1,951 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#ifdef IGB_PROCFS
+#ifndef IGB_SYSFS
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+static struct proc_dir_entry *igb_top_dir = NULL;
+
+static struct net_device_stats *procfs_get_stats(struct net_device *netdev)
+{
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct igb_adapter *adapter;
+#endif
+ if (netdev == NULL)
+ return NULL;
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+bool igb_thermal_present(struct igb_adapter *adapter)
+{
+ s32 status;
+ struct e1000_hw *hw;
+
+ if (adapter == NULL)
+ return false;
+ hw = &adapter->hw;
+
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
+ status = e1000_set_i2c_bb(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+ }
+
+ status = hw->mac.ops.init_thermal_sensor_thresh(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static int igb_fwbanner(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ return snprintf(page, count, "%d.%d-%d\n",
+ (adapter->fw_version & 0xF000) >> 12,
+ (adapter->fw_version & 0x0FF0) >> 4,
+ adapter->fw_version & 0x000F);
+}
+
+static int igb_numeports(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ int ports;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ ports = 4;
+
+ return snprintf(page, count, "%d\n", ports);
+}
+
+static int igb_porttype(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ return snprintf(page, count, "%d\n",
+ test_bit(__IGB_DOWN, &adapter->state));
+}
+
+static int igb_portspeed(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ int speed = 0;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ switch (adapter->link_speed) {
+ case E1000_STATUS_SPEED_10:
+ speed = 10;
+ break;
+ case E1000_STATUS_SPEED_100:
+ speed = 100;
+ break;
+ case E1000_STATUS_SPEED_1000:
+ speed = 1000;
+ break;
+ }
+ return snprintf(page, count, "%d\n", speed);
+}
+
+static int igb_wqlflag(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ return snprintf(page, count, "%d\n", adapter->wol);
+}
+
+static int igb_xflowctl(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+
if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int igb_rxdrops(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int igb_rxerrors(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int igb_rxupacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPR)); +} + +static int igb_rxmpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_MPRC)); +} + +static int igb_rxbpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_BPRC)); +} + +static int igb_txupacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPT)); +} + +static int igb_txmpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_MPTC)); +} + +static int igb_txbpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = 
&adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_BPTC)); + +} + +static int igb_txerrors(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int igb_txdrops(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int igb_rxframes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int igb_rxbytes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int igb_txframes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int igb_txbytes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int igb_linkstat(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int bitmask = 0; + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + if (test_bit(__IGB_DOWN, &adapter->state)) + bitmask |= 1; + + if (igb_has_link(adapter)) + bitmask |= 2; + return 
snprintf(page, count, "0x%X\n", bitmask); +} + +static int igb_funcid(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "0x%lX\n", netdev->base_addr); +} + +static int igb_funcvers(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%s\n", igb_driver_version); +} + +static int igb_macburn(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%X%X%X%X%X%X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int igb_macadmn(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%X%X%X%X%X%X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int igb_maclla1(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + u16 eeprom_buff[6]; + int first_word = 0x37; + int word_count = 6; + int rc; + + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = e1000_read_nvm(hw, first_word, word_count, + eeprom_buff); + if (rc != E1000_SUCCESS) + return 0; + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + } + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); +} + +static int igb_mtusize(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int igb_featflag(char *page, char 
**start, off_t off, int count, + int *eof, void *data) +{ + int bitmask = 0; +#ifndef HAVE_NDO_SET_FEATURES + struct igb_ring *ring; +#endif + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + +#ifndef HAVE_NDO_SET_FEATURES + /* igb_get_rx_csum(netdev) doesn't compile so hard code */ + ring = adapter->rx_ring[0]; + bitmask = test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + return snprintf(page, count, "%d\n", bitmask); +#else + if (netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +#endif +} + +static int igb_lsominct(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int igb_prommode(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int igb_txdscqsz(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int igb_rxdscqsz(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int igb_rxqavg(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int index; + int totaldiff = 0; + u16 ntc; + u16 ntu; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + totaldiff += (ntc - ntu); + else + totaldiff += (adapter->rx_ring[index]->count + - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", totaldiff/adapter->num_rx_queues); +} + +static int igb_txqavg(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int index; + int totaldiff = 0; + u16 ntc; + u16 ntu; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + totaldiff += 
(ntc - ntu); + else + totaldiff += (adapter->tx_ring[index]->count + - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + totaldiff/adapter->num_tx_queues); +} + +static int igb_iovotype(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + return snprintf(page, count, "2\n"); +} + +static int igb_funcnbr(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->vfs_allocated_count); +} + +static int igb_therm_location(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->location); +} + +static int igb_therm_maxopthresh(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->max_op_thresh); +} + +static int igb_therm_cautionthresh(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->caution_thresh); +} + +static int igb_therm_temp(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + s32 status; + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = e1000_get_thermal_sensor_data(therm_data->hw); + if (status != E1000_SUCCESS) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + +struct igb_proc_type{ + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct igb_proc_type igb_proc_entries[] = { + {"fwbanner", &igb_fwbanner}, + {"numeports", &igb_numeports}, + {"porttype", &igb_porttype}, + {"portspeed", &igb_portspeed}, + {"wqlflag", &igb_wqlflag}, + {"xflowctl", &igb_xflowctl}, + {"rxdrops", &igb_rxdrops}, + {"rxerrors", &igb_rxerrors}, + {"rxupacks", &igb_rxupacks}, + {"rxmpacks", &igb_rxmpacks}, + {"rxbpacks", &igb_rxbpacks}, + {"txdrops", &igb_txdrops}, + {"txerrors", &igb_txerrors}, + {"txupacks", &igb_txupacks}, + {"txmpacks", &igb_txmpacks}, + {"txbpacks", &igb_txbpacks}, + {"rxframes", &igb_rxframes}, + {"rxbytes", &igb_rxbytes}, + {"txframes", &igb_txframes}, + {"txbytes", &igb_txbytes}, + {"linkstat", &igb_linkstat}, + {"funcid", &igb_funcid}, + {"funcvers", &igb_funcvers}, + {"macburn", &igb_macburn}, + {"macadmn", &igb_macadmn}, + {"maclla1", &igb_maclla1}, + {"mtusize", &igb_mtusize}, + {"featflag", &igb_featflag}, + {"lsominct", &igb_lsominct}, + {"prommode", &igb_prommode}, + {"txdscqsz", &igb_txdscqsz}, + {"rxdscqsz", &igb_rxdscqsz}, + {"txqavg", &igb_txqavg}, + {"rxqavg", &igb_rxqavg}, + 
{"iovotype", &igb_iovotype}, + {"funcnbr", &igb_funcnbr}, + {"", NULL} +}; + +struct igb_proc_type igb_internal_entries[] = { + {"location", &igb_therm_location}, + {"temp", &igb_therm_temp}, + {"cautionthresh", &igb_therm_cautionthresh}, + {"maxopthresh", &igb_therm_maxopthresh}, + {"", NULL} +}; + +void igb_del_proc_entries(struct igb_adapter *adapter) +{ + int index, i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (igb_top_dir == NULL) + return; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (igb_internal_entries[index].read == NULL) + break; + + remove_proc_entry(igb_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (igb_proc_entries[index].read == NULL) + break; + remove_proc_entry(igb_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), igb_top_dir); +} + +/* called from igb_main.c */ +void igb_procfs_exit(struct igb_adapter *adapter) +{ + igb_del_proc_entries(adapter); +} + +int igb_procfs_topdir_init(void) +{ + igb_top_dir = proc_mkdir("driver/igb", NULL); + if (igb_top_dir == NULL) + return (-ENOMEM); + + return 0; +} + +void igb_procfs_topdir_exit(void) +{ +// remove_proc_entry("driver", proc_root_driver); + remove_proc_entry("driver/igb", NULL); +} + +/* called from igb_main.c */ +int igb_procfs_init(struct igb_adapter *adapter) +{ + int rc = 0; + int i; + int index; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + for (i = 0; i < E1000_MAX_SENSORS; i++) + adapter->therm_dir[i] = NULL; + + if ( igb_top_dir == NULL ) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (igb_proc_entries[index].read == NULL) { + break; + } + if (!(create_proc_read_entry(igb_proc_entries[index].name, + 0444, + adapter->info_dir, + igb_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (igb_thermal_present(adapter) == false) + goto exit; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location== 0) + continue; + + snprintf(buf, sizeof(buf), "sensor_%d", i); + adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir[i] == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (igb_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + adapter->therm_data[i].hw = &adapter->hw; + adapter->therm_data[i].sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor[i]; + + if (!(create_proc_read_entry( + igb_internal_entries[index].name, + 0444, + adapter->therm_dir[i], + igb_internal_entries[index].read, + &adapter->therm_data[i]))) { + rc = -ENOMEM; + goto fail; + } + } + } + goto exit; + +fail: + igb_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !IGB_SYSFS */ +#endif /* IGB_PROCFS */ diff --git 
a/drivers/net/igb/igb_regtest.h b/drivers/net/igb/igb_regtest.h
new file mode 100644
index 000000000000..8b886e3807e6
--- /dev/null
+++ b/drivers/net/igb/igb_regtest.h
@@ -0,0 +1,221 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool register test data */
+struct igb_reg_test {
+ u16 reg;
+ u16 reg_offset;
+ u16 array_len;
+ u16 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x100 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
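+
+/*
+ * How the ethtool self-test consumes an entry (see igb_ethtool.c): a
+ * PATTERN_TEST writes test patterns through 'write', masks the value
+ * read back with 'mask' and expects a match; a SET_READ_TEST writes
+ * 'write & mask' once and verifies the masked read-back; WRITE_NO_TEST
+ * just writes 'write' as a setup step. For example,
+ * { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }
+ * exercises RDT(0)..RDT(3) at a 0x100-byte stride and checks only the
+ * low 16 bits of each register.
+ */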
+
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ /* VET is readonly on i350 */
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* RDH is read-only for 82580, only test RDT.
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + + diff --git a/drivers/net/igb/igb_sysfs.c b/drivers/net/igb/igb_sysfs.c new file mode 100644 index 000000000000..e148a1f34e4b --- /dev/null +++ b/drivers/net/igb/igb_sysfs.c @@ -0,0 +1,1054 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+#ifdef IGB_SYSFS
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+static struct net_device_stats *sysfs_get_stats(struct net_device *netdev)
+{
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct igb_adapter *adapter;
+#endif
+ if (netdev == NULL)
+ return NULL;
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+struct net_device *igb_get_netdev(struct kobject *kobj)
+{
+ struct net_device *netdev;
+ struct device *device_info_kobj;
+
+ if (kobj == NULL)
+ return NULL;
+
+ device_info_kobj = container_of(kobj->parent, struct device, kobj);
+ if (device_info_kobj == NULL)
+ return NULL;
+
+ netdev = container_of(device_info_kobj, struct net_device, dev);
+ return netdev;
+}
+
+struct igb_adapter *igb_get_adapter(struct kobject *kobj)
+{
+ struct igb_adapter *adapter;
+ struct net_device *netdev = igb_get_netdev(kobj);
+ if (netdev == NULL)
+ return NULL;
+ adapter = netdev_priv(netdev);
+ return adapter;
+}
+
+bool igb_thermal_present(struct kobject *kobj)
+{
+ s32 status;
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+
+ if (adapter == NULL)
+ return false;
+
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
+ status = e1000_set_i2c_bb(&(adapter->hw));
+ if (status != E1000_SUCCESS)
+ return false;
+ }
+
+ status = e1000_init_thermal_sensor_thresh(&(adapter->hw));
+ if (status != E1000_SUCCESS)
+ return false;
+
+ return true;
+}
+
+/*
+ * Convert the directory name to the sensor offset.
+ *
+ * Note: We know the name will be in the form of 'sensor_n' where 'n' is
+ * 0 to E1000_MAX_SENSORS - 1, and E1000_MAX_SENSORS < 10.
+ */
+static int igb_name_to_idx(const char *c)
+{
+ /* find first digit */
+ while (*c < '0' || *c > '9') {
+ if (*c == '\n')
+ return -1;
+ c++;
+ }
+
+ return ((int)(*c - '0'));
+}
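+
+/*
+ * Example: igb_name_to_idx("sensor_3") returns 3. The scan assumes the
+ * string contains a digit or a newline before its end; the fixed
+ * sensor_n names used for these entries are expected to satisfy that.
+ */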
+
+/*
+ * We are a statistics entry; we do not take in data. This should be the
+ * same for all attributes.
+ */
+static ssize_t igb_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ return -1;
+}
+
+static ssize_t igb_fwbanner(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ u16 nvm_ver;
+
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+ nvm_ver = adapter->fw_version;
+
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", nvm_ver);
+}
+
+static ssize_t igb_numeports(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct e1000_hw *hw;
+ int ports = 0;
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+ /* CMW: taking the original check out and assigning ports by mac
+ * type for now. Want to have the daemon handle this some other
+ * way due to the variability of the 1GB parts.
+ */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ ports = 2;
+ break;
+ case e1000_82576:
+ ports = 2;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ ports = 4;
+ break;
+ default:
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ports);
+}
+
+static ssize_t igb_porttype(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ test_bit(__IGB_DOWN, &adapter->state));
+}
+
+static ssize_t igb_portspeed(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ int speed = 0;
+
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ switch (adapter->link_speed) {
+ case E1000_STATUS_SPEED_10:
+ speed = 10;
+ break;
+ case E1000_STATUS_SPEED_100:
+ speed = 100;
+ break;
+ case E1000_STATUS_SPEED_1000:
+ speed = 1000;
+ break;
+ }
+ return snprintf(buf, PAGE_SIZE, "%d\n", speed);
+}
+
+static ssize_t igb_wqlflag(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", adapter->wol);
+}
+
+static ssize_t igb_xflowctl(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hw->fc.current_mode);
+}
+
+static ssize_t igb_rxdrops(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct net_device_stats *net_stats;
+ struct net_device *netdev = igb_get_netdev(kobj);
+ if (netdev == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+ net_stats = sysfs_get_stats(netdev);
+ if (net_stats == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n",
+ net_stats->rx_dropped);
+}
+
+static ssize_t igb_rxerrors(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct net_device_stats *net_stats;
+ struct net_device *netdev = igb_get_netdev(kobj);
+ if (netdev == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+ net_stats = sysfs_get_stats(netdev);
+ if (net_stats == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+ return snprintf(buf, PAGE_SIZE, "%lu\n", net_stats->rx_errors);
+}
+
+static ssize_t igb_rxupacks(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_TPR));
+}
+
+static ssize_t igb_rxmpacks(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = igb_get_adapter(kobj);
+ if (adapter == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+ return
snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_MPRC)); +} + +static ssize_t igb_rxbpacks(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = igb_get_adapter(kobj); + if (adapter == NULL) + return snprintf(buf, PAGE_SIZE, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(buf, PAGE_SIZE, "error: no hw data\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_BPRC)); +} + +static ssize_t igb_txupacks(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = igb_get_adapter(kobj); + if (adapter == NULL) + return snprintf(buf, PAGE_SIZE, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(buf, PAGE_SIZE, "error: no hw data\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_TPT)); +} + +static ssize_t igb_txmpacks(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = igb_get_adapter(kobj); + if (adapter == NULL) + return snprintf(buf, PAGE_SIZE, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(buf, PAGE_SIZE, "error: no hw data\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_MPTC)); +} + +static ssize_t igb_txbpacks(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = igb_get_adapter(kobj); + if (adapter == NULL) + return snprintf(buf, PAGE_SIZE, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(buf, PAGE_SIZE, "error: no hw data\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_BPTC)); + +} + +static ssize_t igb_txerrors(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct net_device_stats *net_stats; + struct net_device *netdev = igb_get_netdev(kobj); + if (netdev == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net device\n"); + + net_stats = sysfs_get_stats(netdev); + if (net_stats == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net stats\n"); + + return snprintf(buf, PAGE_SIZE, "%lu\n", + net_stats->tx_errors); +} + +static ssize_t igb_txdrops(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct net_device_stats *net_stats; + struct net_device *netdev = igb_get_netdev(kobj); + if (netdev == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net device\n"); + + net_stats = sysfs_get_stats(netdev); + if (net_stats == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net stats\n"); + return snprintf(buf, PAGE_SIZE, "%lu\n", + net_stats->tx_dropped); +} + +static ssize_t igb_rxframes(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct net_device_stats *net_stats; + struct net_device *netdev = igb_get_netdev(kobj); + if (netdev == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net device\n"); + + net_stats = sysfs_get_stats(netdev); + if (net_stats == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net stats\n"); + + return snprintf(buf, PAGE_SIZE, "%lu\n", + net_stats->rx_packets); +} + +static ssize_t igb_rxbytes(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct net_device_stats *net_stats; + struct net_device *netdev = igb_get_netdev(kobj); + if (netdev == NULL) + return snprintf(buf, PAGE_SIZE, "error: no net device\n"); + + net_stats = sysfs_get_stats(netdev); + if (net_stats == 
+static ssize_t igb_rxbytes(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct net_device_stats *net_stats;
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	net_stats = sysfs_get_stats(netdev);
+	if (net_stats == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			net_stats->rx_bytes);
+}
+
+static ssize_t igb_txframes(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct net_device_stats *net_stats;
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	net_stats = sysfs_get_stats(netdev);
+	if (net_stats == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			net_stats->tx_packets);
+}
+
+static ssize_t igb_txbytes(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct net_device_stats *net_stats;
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	net_stats = sysfs_get_stats(netdev);
+	if (net_stats == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			net_stats->tx_bytes);
+}
+
+static ssize_t igb_linkstat(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	bool link_up = false;
+	int bitmask = 0;
+	struct e1000_hw *hw;
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+	if (test_bit(__IGB_DOWN, &adapter->state))
+		bitmask |= 1;
+
+	if (hw->mac.ops.check_for_link) {
+		/* check_for_link() refreshes the link state; read it back */
+		hw->mac.ops.check_for_link(hw);
+		link_up = !!(E1000_READ_REG(hw, E1000_STATUS) &
+			     E1000_STATUS_LU);
+	} else {
+		/* always assume link is up, if no check link function */
+		link_up = true;
+	}
+	if (link_up)
+		bitmask |= 2;
+	return snprintf(buf, PAGE_SIZE, "0x%X\n", bitmask);
+}
+
+static ssize_t igb_funcid(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	return snprintf(buf, PAGE_SIZE, "0x%lX\n", netdev->base_addr);
+}
+
+static ssize_t igb_funcvers(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", igb_driver_version);
+}
+
+static ssize_t igb_macburn(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct e1000_hw *hw;
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+	return snprintf(buf, PAGE_SIZE, "0x%02X%02X%02X%02X%02X%02X\n",
+			(unsigned int)hw->mac.perm_addr[0],
+			(unsigned int)hw->mac.perm_addr[1],
+			(unsigned int)hw->mac.perm_addr[2],
+			(unsigned int)hw->mac.perm_addr[3],
+			(unsigned int)hw->mac.perm_addr[4],
+			(unsigned int)hw->mac.perm_addr[5]);
+}
+
+static ssize_t igb_macadmn(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	struct e1000_hw *hw;
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+	return snprintf(buf, PAGE_SIZE, "0x%02X%02X%02X%02X%02X%02X\n",
+			(unsigned int)hw->mac.addr[0],
+			(unsigned int)hw->mac.addr[1],
+			(unsigned int)hw->mac.addr[2],
+			(unsigned int)hw->mac.addr[3],
+			(unsigned int)hw->mac.addr[4],
+			(unsigned int)hw->mac.addr[5]);
+}
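[Editorial aside: the two MAC handlers above print each address byte with "%02X". With a bare "%X" a byte below 0x10 loses its leading zero, so two different addresses can render identically. A standalone demonstration with made-up addresses:]

	#include <stdio.h>

	int main(void)
	{
		unsigned char a[6] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB };
		unsigned char b[6] = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0x0B };

		/* With "%X" both addresses print as "0x123456789AB" */
		printf("0x%X%X%X%X%X%X\n", a[0], a[1], a[2], a[3], a[4], a[5]);
		printf("0x%X%X%X%X%X%X\n", b[0], b[1], b[2], b[3], b[4], b[5]);
		/* "%02X" keeps the byte boundaries distinct */
		printf("0x%02X%02X%02X%02X%02X%02X\n",
		       a[0], a[1], a[2], a[3], a[4], a[5]);
		printf("0x%02X%02X%02X%02X%02X%02X\n",
		       b[0], b[1], b[2], b[3], b[4], b[5]);
		return 0;
	}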
+static ssize_t igb_maclla1(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct e1000_hw *hw;
+	u16 eeprom_buff[6];
+	int first_word = 0x37;
+	int word_count = 6;
+	int rc;
+
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
+
+	rc = e1000_read_nvm(hw, first_word, word_count,
+			    eeprom_buff);
+	if (rc != E1000_SUCCESS)
+		return 0;
+
+	switch (hw->bus.func) {
+	case 0:
+		return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
+				eeprom_buff[0], eeprom_buff[1],
+				eeprom_buff[2]);
+	case 1:
+		return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
+				eeprom_buff[3], eeprom_buff[4],
+				eeprom_buff[5]);
+	}
+	return snprintf(buf, PAGE_SIZE, "unexpected port %d\n", hw->bus.func);
+}
+
+static ssize_t igb_mtusize(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", netdev->mtu);
+}
+
+static ssize_t igb_featflag(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+#ifdef HAVE_NDO_SET_FEATURES
+	int bitmask = 0;
+#endif
+	struct net_device *netdev = igb_get_netdev(kobj);
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+#ifndef HAVE_NDO_SET_FEATURES
+	/* igb_get_rx_csum(netdev) doesn't compile so hard code */
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			test_bit(IGB_RING_FLAG_RX_CSUM,
+				 &adapter->rx_ring[0]->flags));
+#else
+	if (netdev->features & NETIF_F_RXCSUM)
+		bitmask |= 1;
+	return snprintf(buf, PAGE_SIZE, "%d\n", bitmask);
+#endif
+}
+
+static ssize_t igb_lsominct(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", 1);
+}
+
+static ssize_t igb_prommode(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct net_device *netdev = igb_get_netdev(kobj);
+	if (netdev == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no net device\n");
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			netdev->flags & IFF_PROMISC);
+}
+
+static ssize_t igb_txdscqsz(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", adapter->tx_ring[0]->count);
+}
+
+static ssize_t igb_rxdscqsz(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", adapter->rx_ring[0]->count);
+}
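[Editorial aside: the igb_rxqavg()/igb_txqavg() handlers that follow average, over all queues, how many descriptors sit between each ring's next_to_clean and next_to_use cursors; the ring is circular, so the subtraction has to wrap. The arithmetic in isolation, as an illustrative sketch rather than driver code:]

	/* Occupancy of a circular ring of `count` descriptors, given the
	 * next-to-clean (ntc) and next-to-use (ntu) cursors. */
	static unsigned int ring_occupancy(u16 ntc, u16 ntu, u16 count)
	{
		if (ntc >= ntu)
			return ntc - ntu;
		/* ntu has wrapped past ntc: add the ring size back in */
		return count - ntu + ntc;
	}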
+static ssize_t igb_rxqavg(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	int index;
+	int diff = 0;
+	u16 ntc;
+	u16 ntu;
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	if (adapter->num_rx_queues <= 0)
+		return snprintf(buf, PAGE_SIZE,
+				"can't calculate, number of queues %d\n",
+				adapter->num_rx_queues);
+
+	for (index = 0; index < adapter->num_rx_queues; index++) {
+		ntc = adapter->rx_ring[index]->next_to_clean;
+		ntu = adapter->rx_ring[index]->next_to_use;
+
+		if (ntc >= ntu)
+			diff += (ntc - ntu);
+		else
+			diff += (adapter->rx_ring[index]->count - ntu + ntc);
+	}
+	return snprintf(buf, PAGE_SIZE, "%d\n", diff / adapter->num_rx_queues);
+}
+
+static ssize_t igb_txqavg(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	int index;
+	int diff = 0;
+	u16 ntc;
+	u16 ntu;
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	if (adapter->num_tx_queues <= 0)
+		return snprintf(buf, PAGE_SIZE,
+				"can't calculate, number of queues %d\n",
+				adapter->num_tx_queues);
+
+	for (index = 0; index < adapter->num_tx_queues; index++) {
+		ntc = adapter->tx_ring[index]->next_to_clean;
+		ntu = adapter->tx_ring[index]->next_to_use;
+
+		if (ntc >= ntu)
+			diff += (ntc - ntu);
+		else
+			diff += (adapter->tx_ring[index]->count - ntu + ntc);
+	}
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			diff / adapter->num_tx_queues);
+}
+
+static ssize_t igb_iovotype(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "2\n");
+}
+
+static ssize_t igb_funcnbr(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj);
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", adapter->vfs_allocated_count);
+}
+
+s32 igb_sysfs_get_thermal_data(struct kobject *kobj, char *buf)
+{
+	s32 status;
+	struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
+
+	if (adapter == NULL) {
+		snprintf(buf, PAGE_SIZE, "error: missing adapter\n");
+		return 0;
+	}
+
+	status = e1000_get_thermal_sensor_data_generic(&(adapter->hw));
+	if (status != E1000_SUCCESS)
+		snprintf(buf, PAGE_SIZE, "error: status %d returned\n", status);
+
+	return status;
+}
+
+static ssize_t igb_sysfs_location(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
+	int idx;
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	idx = igb_name_to_idx(kobj->name);
+	if (idx == -1)
+		return snprintf(buf, PAGE_SIZE,
+				"error: invalid sensor name %s\n", kobj->name);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		adapter->hw.mac.thermal_sensor_data.sensor[idx].location);
+}
+
+static ssize_t igb_sysfs_temp(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
+	int idx;
+
+	s32 status = igb_sysfs_get_thermal_data(kobj, buf);
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	if (status != E1000_SUCCESS)
+		return snprintf(buf, PAGE_SIZE, "error: status %d returned\n",
+				status);
+
+	idx = igb_name_to_idx(kobj->name);
+	if (idx == -1)
+		return snprintf(buf, PAGE_SIZE,
+				"error: invalid sensor name %s\n", kobj->name);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			adapter->hw.mac.thermal_sensor_data.sensor[idx].temp);
+}
+
+static ssize_t igb_sysfs_maxopthresh(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
+	int idx;
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	idx = igb_name_to_idx(kobj->name);
+	if (idx == -1)
+		return snprintf(buf, PAGE_SIZE,
+				"error: invalid sensor name %s\n", kobj->name);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		adapter->hw.mac.thermal_sensor_data.sensor[idx].max_op_thresh);
+}
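[Editorial aside: igb_name_to_idx(), used by the sensor handlers above and below, is defined elsewhere in this file and is not part of this hunk. Given the "sensor_%d" directory names that igb_sysfs_init() creates later, a plausible — purely hypothetical — implementation would be:]

	/* Hypothetical sketch; the real helper lives elsewhere in this file. */
	static int igb_name_to_idx_sketch(const char *name)
	{
		int idx;

		if (sscanf(name, "sensor_%d", &idx) != 1 ||
		    idx < 0 || idx >= E1000_MAX_SENSORS)
			return -1;
		return idx;
	}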
+static ssize_t igb_sysfs_cautionthresh(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       char *buf)
+{
+	struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
+	int idx;
+
+	if (adapter == NULL)
+		return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
+
+	idx = igb_name_to_idx(kobj->name);
+	if (idx == -1)
+		return snprintf(buf, PAGE_SIZE,
+				"error: invalid sensor name %s\n", kobj->name);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		adapter->hw.mac.thermal_sensor_data.sensor[idx].caution_thresh);
+}
+
+/* Initialize the attributes */
+static struct kobj_attribute igb_sysfs_location_attr =
+	__ATTR(location, 0444, igb_sysfs_location, igb_store);
+static struct kobj_attribute igb_sysfs_temp_attr =
+	__ATTR(temp, 0444, igb_sysfs_temp, igb_store);
+static struct kobj_attribute igb_sysfs_cautionthresh_attr =
+	__ATTR(cautionthresh, 0444, igb_sysfs_cautionthresh, igb_store);
+static struct kobj_attribute igb_sysfs_maxopthresh_attr =
+	__ATTR(maxopthresh, 0444, igb_sysfs_maxopthresh, igb_store);
+
+static struct kobj_attribute igb_sysfs_fwbanner_attr =
+	__ATTR(fwbanner, 0444, igb_fwbanner, igb_store);
+static struct kobj_attribute igb_sysfs_numeports_attr =
+	__ATTR(numeports, 0444, igb_numeports, igb_store);
+static struct kobj_attribute igb_sysfs_porttype_attr =
+	__ATTR(porttype, 0444, igb_porttype, igb_store);
+static struct kobj_attribute igb_sysfs_portspeed_attr =
+	__ATTR(portspeed, 0444, igb_portspeed, igb_store);
+static struct kobj_attribute igb_sysfs_wqlflag_attr =
+	__ATTR(wqlflag, 0444, igb_wqlflag, igb_store);
+static struct kobj_attribute igb_sysfs_xflowctl_attr =
+	__ATTR(xflowctl, 0444, igb_xflowctl, igb_store);
+static struct kobj_attribute igb_sysfs_rxdrops_attr =
+	__ATTR(rxdrops, 0444, igb_rxdrops, igb_store);
+static struct kobj_attribute igb_sysfs_rxerrors_attr =
+	__ATTR(rxerrors, 0444, igb_rxerrors, igb_store);
+static struct kobj_attribute igb_sysfs_rxupacks_attr =
+	__ATTR(rxupacks, 0444, igb_rxupacks, igb_store);
+static struct kobj_attribute igb_sysfs_rxmpacks_attr =
+	__ATTR(rxmpacks, 0444, igb_rxmpacks, igb_store);
+static struct kobj_attribute igb_sysfs_rxbpacks_attr =
+	__ATTR(rxbpacks, 0444, igb_rxbpacks, igb_store);
+static struct kobj_attribute igb_sysfs_txupacks_attr =
+	__ATTR(txupacks, 0444, igb_txupacks, igb_store);
+static struct kobj_attribute igb_sysfs_txmpacks_attr =
+	__ATTR(txmpacks, 0444, igb_txmpacks, igb_store);
+static struct kobj_attribute igb_sysfs_txbpacks_attr =
+	__ATTR(txbpacks, 0444, igb_txbpacks, igb_store);
+static struct kobj_attribute igb_sysfs_txerrors_attr =
+	__ATTR(txerrors, 0444, igb_txerrors, igb_store);
+static struct kobj_attribute igb_sysfs_txdrops_attr =
+	__ATTR(txdrops, 0444, igb_txdrops, igb_store);
+static struct kobj_attribute igb_sysfs_rxframes_attr =
+	__ATTR(rxframes, 0444, igb_rxframes, igb_store);
+static struct kobj_attribute igb_sysfs_rxbytes_attr =
+	__ATTR(rxbytes, 0444, igb_rxbytes, igb_store);
+static struct kobj_attribute igb_sysfs_txframes_attr =
+	__ATTR(txframes, 0444, igb_txframes, igb_store);
+static struct kobj_attribute igb_sysfs_txbytes_attr =
+	__ATTR(txbytes, 0444, igb_txbytes, igb_store);
+static struct kobj_attribute igb_sysfs_linkstat_attr =
+	__ATTR(linkstat, 0444, igb_linkstat, igb_store);
+static struct kobj_attribute igb_sysfs_funcid_attr =
+	__ATTR(funcid, 0444, igb_funcid, igb_store);
+static struct
kobj_attribute igb_sysfs_funvers_attr = + __ATTR(funcvers, 0444, igb_funcvers, igb_store); +static struct kobj_attribute igb_sysfs_macburn_attr = + __ATTR(macburn, 0444, igb_macburn, igb_store); +static struct kobj_attribute igb_sysfs_macadmn_attr = + __ATTR(macadmn, 0444, igb_macadmn, igb_store); +static struct kobj_attribute igb_sysfs_maclla1_attr = + __ATTR(maclla1, 0444, igb_maclla1, igb_store); +static struct kobj_attribute igb_sysfs_mtusize_attr = + __ATTR(mtusize, 0444, igb_mtusize, igb_store); +static struct kobj_attribute igb_sysfs_featflag_attr = + __ATTR(featflag, 0444, igb_featflag, igb_store); +static struct kobj_attribute igb_sysfs_lsominct_attr = + __ATTR(lsominct, 0444, igb_lsominct, igb_store); +static struct kobj_attribute igb_sysfs_prommode_attr = + __ATTR(prommode, 0444, igb_prommode, igb_store); +static struct kobj_attribute igb_sysfs_txdscqsz_attr = + __ATTR(txdscqsz, 0444, igb_txdscqsz, igb_store); +static struct kobj_attribute igb_sysfs_rxdscqsz_attr = + __ATTR(rxdscqsz, 0444, igb_rxdscqsz, igb_store); +static struct kobj_attribute igb_sysfs_txqavg_attr = + __ATTR(txqavg, 0444, igb_txqavg, igb_store); +static struct kobj_attribute igb_sysfs_rxqavg_attr = + __ATTR(rxqavg, 0444, igb_rxqavg, igb_store); +static struct kobj_attribute igb_sysfs_iovotype_attr = + __ATTR(iovotype, 0444, igb_iovotype, igb_store); +static struct kobj_attribute igb_sysfs_funcnbr_attr = + __ATTR(funcnbr, 0444, igb_funcnbr, igb_store); + +/* Add the attributes into an array, to be added to a group */ +static struct attribute *therm_attrs[] = { + &igb_sysfs_location_attr.attr, + &igb_sysfs_temp_attr.attr, + &igb_sysfs_cautionthresh_attr.attr, + &igb_sysfs_maxopthresh_attr.attr, + NULL +}; + +static struct attribute *attrs[] = { + &igb_sysfs_fwbanner_attr.attr, + &igb_sysfs_numeports_attr.attr, + &igb_sysfs_porttype_attr.attr, + &igb_sysfs_portspeed_attr.attr, + &igb_sysfs_wqlflag_attr.attr, + &igb_sysfs_xflowctl_attr.attr, + &igb_sysfs_rxdrops_attr.attr, + &igb_sysfs_rxerrors_attr.attr, + &igb_sysfs_rxupacks_attr.attr, + &igb_sysfs_rxmpacks_attr.attr, + &igb_sysfs_rxbpacks_attr.attr, + &igb_sysfs_txdrops_attr.attr, + &igb_sysfs_txerrors_attr.attr, + &igb_sysfs_txupacks_attr.attr, + &igb_sysfs_txmpacks_attr.attr, + &igb_sysfs_txbpacks_attr.attr, + &igb_sysfs_rxframes_attr.attr, + &igb_sysfs_rxbytes_attr.attr, + &igb_sysfs_txframes_attr.attr, + &igb_sysfs_txbytes_attr.attr, + &igb_sysfs_linkstat_attr.attr, + &igb_sysfs_funcid_attr.attr, + &igb_sysfs_funvers_attr.attr, + &igb_sysfs_macburn_attr.attr, + &igb_sysfs_macadmn_attr.attr, + &igb_sysfs_maclla1_attr.attr, + &igb_sysfs_mtusize_attr.attr, + &igb_sysfs_featflag_attr.attr, + &igb_sysfs_lsominct_attr.attr, + &igb_sysfs_prommode_attr.attr, + &igb_sysfs_txdscqsz_attr.attr, + &igb_sysfs_rxdscqsz_attr.attr, + &igb_sysfs_txqavg_attr.attr, + &igb_sysfs_rxqavg_attr.attr, + &igb_sysfs_iovotype_attr.attr, + &igb_sysfs_funcnbr_attr.attr, + NULL +}; + +/* add attributes to a group */ +static struct attribute_group therm_attr_group = { + .attrs = therm_attrs, +}; + +/* add attributes to a group */ +static struct attribute_group attr_group = { + .attrs = attrs, +}; + +void igb_del_adapter(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + if (adapter->therm_kobj[i] == NULL) + continue; + sysfs_remove_group(adapter->therm_kobj[i], &therm_attr_group); + kobject_put(adapter->therm_kobj[i]); + } + if (adapter->info_kobj != NULL) { + sysfs_remove_group(adapter->info_kobj, &attr_group); + kobject_put(adapter->info_kobj); + } 
+}
+
+/* cleanup goes here */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+	igb_del_adapter(adapter);
+}
+
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+	struct net_device *netdev;
+	int rc = 0;
+	int i;
+	char buf[16];
+
+	if (adapter == NULL)
+		return -1;
+	netdev = adapter->netdev;
+	if (netdev == NULL)
+		return -1;
+
+	adapter->info_kobj = NULL;
+	for (i = 0; i < E1000_MAX_SENSORS; i++)
+		adapter->therm_kobj[i] = NULL;
+
+	/* create stats kobj and attribute listings in kobj */
+	adapter->info_kobj = kobject_create_and_add("info",
+						    &(netdev->dev.kobj));
+	if (adapter->info_kobj == NULL)
+		goto del_adapter;
+	if (sysfs_create_group(adapter->info_kobj, &attr_group))
+		goto del_adapter;
+
+	/* Don't create thermal subkobjs if no data present */
+	if (igb_thermal_present(adapter->info_kobj) != true)
+		goto exit;
+
+	for (i = 0; i < E1000_MAX_SENSORS; i++) {
+		/*
+		 * Likewise only create individual kobjs that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* directory named after sensor offset */
+		snprintf(buf, sizeof(buf), "sensor_%d", i);
+		adapter->therm_kobj[i] =
+			kobject_create_and_add(buf, adapter->info_kobj);
+		if (adapter->therm_kobj[i] == NULL)
+			goto del_adapter;
+		if (sysfs_create_group(adapter->therm_kobj[i],
+				       &therm_attr_group))
+			goto del_adapter;
+	}
+
+	goto exit;
+
+del_adapter:
+	igb_del_adapter(adapter);
+	rc = -1;
+exit:
+	return rc;
+}
+
+#endif /* IGB_SYSFS */
diff --git a/drivers/net/igb/igb_vmdq.c b/drivers/net/igb/igb_vmdq.c
new file mode 100644
index 000000000000..5a1db162f2bd
--- /dev/null
+++ b/drivers/net/igb/igb_vmdq.c
@@ -0,0 +1,437 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include + +#include "igb.h" +#include "igb_vmdq.h" +#include + +#ifdef CONFIG_IGB_VMDQ_NETDEV +int igb_vmdq_open(struct net_device *dev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct net_device *main_netdev = adapter->netdev; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + if (test_bit(__IGB_DOWN, &adapter->state)) { + DPRINTK(DRV, WARNING, + "Open %s before opening this device.\n", + main_netdev->name); + return -EAGAIN; + } + netif_carrier_off(dev); + vadapter->tx_ring->vmdq_netdev = dev; + vadapter->rx_ring->vmdq_netdev = dev; + if (is_valid_ether_addr(dev->dev_addr)) { + igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); + igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); + } + netif_carrier_on(dev); + return 0; +} + +int igb_vmdq_close(struct net_device *dev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + netif_carrier_off(dev); + igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); + + vadapter->tx_ring->vmdq_netdev = NULL; + vadapter->rx_ring->vmdq_netdev = NULL; + return 0; +} + +netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + + return igb_xmit_frame_ring(skb, vadapter->tx_ring); +} + +struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct e1000_hw *hw = &adapter->hw; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + vadapter->net_stats.rx_packets += + E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue)); + E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0); + vadapter->net_stats.tx_packets += + E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue)); + E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0); + vadapter->net_stats.rx_bytes += + E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue)); + E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0); + vadapter->net_stats.tx_bytes += + E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue)); + E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0); + vadapter->net_stats.multicast += + E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue)); + E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0); + /* only return the current stats */ + return &vadapter->net_stats; +} + +/** + * igb_write_vm_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+static int igb_write_vm_addr_list(struct net_device *netdev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	int count = 0;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+		struct netdev_hw_addr *ha;
+#else
+		struct dev_mc_list *ha;
+#endif
+		netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+			igb_del_mac_filter(adapter, ha->addr, hw_queue);
+			igb_add_mac_filter(adapter, ha->addr, hw_queue);
+#else
+			igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
+			igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
+#endif
+			count++;
+		}
+	}
+	return count;
+}
+
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
+void igb_vmdq_set_rx_mode(struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr, rctl;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
+
+	/* clear the affected bits */
+	vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
+		   E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
+
+	if (dev->flags & IFF_PROMISC) {
+		vmolr |= E1000_VMOLR_UPE;
+		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl |= E1000_RCTL_UPE;
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	} else {
+		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl &= ~E1000_RCTL_UPE;
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+		if (dev->flags & IFF_ALLMULTI) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/*
+			 * Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			if (igb_write_mc_addr_list(adapter->netdev) != 0)
+				vmolr |= E1000_VMOLR_ROMPE;
+		}
+#ifdef HAVE_SET_RX_MODE
+		/*
+		 * Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		if (igb_write_vm_addr_list(dev) < 0)
+			vmolr |= E1000_VMOLR_UPE;
+#endif
+	}
+	E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
+
+	return;
+}
+
+int igb_vmdq_set_mac(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+}
+
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+
+	if (adapter->netdev->mtu < new_mtu) {
+		DPRINTK(PROBE, INFO,
+			"Set MTU on %s to >= %d before changing MTU on %s\n",
+			adapter->netdev->name, new_mtu, dev->name);
+		return -EINVAL;
+	}
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+void igb_vmdq_tx_timeout(struct net_device *dev)
+{
+	return;
+}
+
+void
igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct e1000_hw *hw = &adapter->hw; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + vadapter->vlgrp = grp; + + igb_enable_vlan_tags(adapter); + E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0); + + return; +} +void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; +#ifndef HAVE_NETDEV_VLAN_FEATURES + struct net_device *v_netdev; +#endif + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ + igb_vlvf_set(adapter, vid, TRUE, hw_queue); + +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + */ + v_netdev = vlan_group_get_device(vadapter->vlgrp, vid); + v_netdev->features |= adapter->netdev->features; + vlan_group_set_device(vadapter->vlgrp, vid, v_netdev); +#endif + + return; +} +void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + vlan_group_set_device(vadapter->vlgrp, vid, NULL); + /* remove vlan from VLVF table array */ + igb_vlvf_set(adapter, vid, FALSE, hw_queue); + + + return; +} + +static int igb_vmdq_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct e1000_hw *hw = &adapter->hw; + u32 status; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + } + + ecmd->transceiver = XCVR_INTERNAL; + + status = E1000_READ_REG(hw, E1000_STATUS); + + if (status & E1000_STATUS_LU) { + + if ((status & E1000_STATUS_SPEED_1000) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = hw->mac.autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + + +static u32 igb_vmdq_get_msglevel(struct net_device *netdev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + return adapter->msg_enable; +} + +static void igb_vmdq_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct net_device *main_netdev = adapter->netdev; + + strncpy(drvinfo->driver, igb_driver_name, 32); + strncpy(drvinfo->version, igb_driver_version, 32); + + strncpy(drvinfo->fw_version, "N/A", 4); + snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name, + vadapter->rx_ring->queue_index); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; +} + +static void igb_vmdq_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + + struct igb_ring *tx_ring = vadapter->tx_ring; + struct igb_ring *rx_ring = vadapter->rx_ring; + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +static u32 igb_vmdq_get_rx_csum(struct net_device *netdev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + + return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); +} + + +static struct ethtool_ops igb_vmdq_ethtool_ops = { + .get_settings = igb_vmdq_get_settings, + .get_drvinfo = igb_vmdq_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = igb_vmdq_get_ringparam, + .get_rx_csum = igb_vmdq_get_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_msglevel = igb_vmdq_get_msglevel, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, +#endif +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif +}; + +void igb_vmdq_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops); +} + + +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + diff --git a/drivers/net/igb/igb_vmdq.h b/drivers/net/igb/igb_vmdq.h new file mode 100644 index 000000000000..2e6930571f40 --- /dev/null +++ b/drivers/net/igb/igb_vmdq.h @@ -0,0 +1,46 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IGB_VMDQ_H_ +#define _IGB_VMDQ_H_ + +#ifdef CONFIG_IGB_VMDQ_NETDEV +int igb_vmdq_open(struct net_device *dev); +int igb_vmdq_close(struct net_device *dev); +netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev); +struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev); +void igb_vmdq_set_rx_mode(struct net_device *dev); +int igb_vmdq_set_mac(struct net_device *dev, void *addr); +int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu); +void igb_vmdq_tx_timeout(struct net_device *dev); +void igb_vmdq_vlan_rx_register(struct net_device *dev, + struct vlan_group *grp); +void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); +void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); +void igb_vmdq_set_ethtool_ops(struct net_device *netdev); +#endif /* CONFIG_IGB_VMDQ_NETDEV */ +#endif /* _IGB_VMDQ_H_ */ diff --git a/drivers/net/igb/kcompat.c b/drivers/net/igb/kcompat.c new file mode 100644 index 000000000000..cde69073abdc --- /dev/null +++ b/drivers/net/igb/kcompat.c @@ -0,0 +1,1132 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "igb.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? 
large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & 
_kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + continue; + + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void 
_kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
+	 */
+	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+		memset(skb->data + skb->len, 0, pad);
+		return 0;
+	}
+
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
+	if (likely(skb_cloned(skb) || ntail > 0)) {
+		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+			goto free_skb;
+	}
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_is_nonlinear(skb) &&
+	    !__pskb_pull_tail(skb, skb->data_len))
+		goto free_skb;
+
+#endif
+	memset(skb->data + skb->len, 0, pad);
+	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+	/* no ->dev for 2.4 kernels */
+	WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap_offset) {
+		if (!pci_read_config_word(pdev,
+					  pcie_cap_offset + PCIE_LINK_STATUS,
+					  &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+	}
+	pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+	if (adapter->config_space == NULL)
+#else
+	WARN_ON(adapter->config_space != NULL);
+#endif
+		adapter->config_space = kmalloc(size, GFP_KERNEL);
+	if (!adapter->config_space) {
+		printk(KERN_ERR "Out of memory in pci_save_state\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < (size / 4); i++)
+		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+	return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset;
+	u16 pcie_link_status;
+
+	if (adapter->config_space != NULL) {
+		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+		if (pcie_cap_offset &&
+		    !pci_read_config_word(pdev,
+					  pcie_cap_offset + PCIE_LINK_STATUS,
+					  &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+
+		pci_config_space_ich8lan();
+		for (i = 0; i < (size / 4); i++)
+			pci_write_config_dword(pdev, i * 4,
+					       adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+		kfree(adapter->config_space);
+		adapter->config_space = NULL;
+#endif
+	}
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+
+	if (adapter->config_space != NULL)
+		kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+	if (netdev->reg_state == NETREG_UNINITIALIZED) {
+		kfree((char *)netdev - netdev->padded);
+	} else {
+		BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+		netdev->reg_state = NETREG_RELEASED;
+		class_device_put(&netdev->class_dev);
+	}
+#else
+	kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+	void *p;
+
+	p = kzalloc(len, gfp);
+	if (p)
+		memcpy(p, src, len);
+	return p;
+}
+#endif /* <= 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+				   int groupsize, unsigned char *linebuf,
+				   size_t linebuflen, bool ascii)
+{
+	const u8 *ptr = buf;
+	u8 ch;
+	int j, lx = 0;
+	int ascii_column;
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize =
16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __WARN_printf */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} + +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += size; +} +#endif /* < 2.6.28 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (unlikely(txq > dev->num_tx_queues)) + ; + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if ( txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + 
qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* HAVE_TX_MQ */ +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include <linux/jhash.h> +static u32 _kc_simple_tx_hashrnd; +static u32 _kc_simple_tx_hashrnd_initialized; + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (unlikely(!_kc_simple_tx_hashrnd_initialized)) { + get_random_bytes(&_kc_simple_tx_hashrnd, 4); + _kc_simple_tx_hashrnd_initialized = 1; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_simple_tx_hashrnd); + + return (u16) (((u64) hash * num_tx_queues) >> 32); +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* < 2.6.38 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ diff --git a/drivers/net/igb/kcompat.h b/drivers/net/igb/kcompat.h new file mode 100644 index 000000000000..39ed122140b5 --- /dev/null +++ b/drivers/net/igb/kcompat.h @@ -0,0 +1,3042 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include <linux/version.h> +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct igb_adapter +#define adapter_q_vector igb_q_vector +#define NAPI + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define 
NETIF_F_LRO (1 << 15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE (1 << 27) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... */ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef num_online_cpus +#define num_online_cpus() smp_num_cpus +#endif + +#ifndef cpu_online +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) +#endif + +#ifndef _LINUX_RANDOM_H +#include <linux/random.h> +#endif + +#ifndef DECLARE_BITMAP +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#if !defined(IXGBE_DCA) && !defined(IGB_DCA) +#define dca_get_tag(b) 0 +#define dca_add_requester(a) -1 +#define dca_remove_requester(b) do { } while(0) +#define DCA_PROVIDER_ADD 0x0001 +#define DCA_PROVIDER_REMOVE 0x0002 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifndef NETIF_F_HW_VLAN_TX +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.g. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +/* SuSE version macro is the same as Linux kernel version */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#ifndef SLE_VERSION_CODE +#ifdef CONFIG_SUSE_KERNEL +/* SLES11 GA is 2.6.27 based */ +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#else +#define SLE_VERSION_CODE 0 +#endif +#else /* CONFIG_SUSE_KERNEL */ +#define SLE_VERSION_CODE 0 +#endif /* CONFIG_SUSE_KERNEL */ +#endif /* SLE_VERSION_CODE */ + +#ifdef __KLOCWORK__ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif +#endif /* __KLOCWORK__ */ + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +extern void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +extern int _kc_is_valid_ether_addr(u8 *addr); +#endif + 
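
The blocks above show the backport pattern kcompat uses throughout: when a kernel predates a symbol, the modern name is #define'd to a _kc_-prefixed replacement that is declared extern here and implemented in kcompat.c, so shared driver code can call the modern API unconditionally. A minimal sketch of what the kcompat.c side of one such backport looks like; the function body below is an illustration of the technique under that assumption, not a quote from this patch:

/* Illustrative _kc_ backport body: a MAC address is valid when it is
 * neither a multicast address nor all zeroes. */
#include <linux/string.h>
#include <linux/types.h>

int _kc_is_valid_ether_addr(u8 *addr)
{
	const u8 zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	/* bit 0 of the first octet marks multicast/broadcast frames */
	return !(addr[0] & 0x01) && memcmp(addr, zero_addr, 6);
}

Because the header only maps is_valid_ether_addr to the _kc_ name when the kernel lacks it, newer kernels keep their native implementation and the backport compiles out.
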
+/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +extern void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? 
_x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) +extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +extern int snprintf(char * buf, size_t size, const char *fmt, ...); +extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* 
pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#ifndef IGB_NO_LRO +#define IGB_NO_LRO +#endif +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + 
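
The netif_poll_disable()/netif_poll_enable() and netif_tx_disable() fallbacks above give pre-2.4.23 kernels the same quiesce primitives later kernels provide natively. A sketch of the intended call order in a device-down path; example_down() is a hypothetical helper, not code from this patch:

#include <linux/netdevice.h>

/* Hypothetical teardown path built on the shims above. */
static void example_down(struct net_device *netdev)
{
	/* waits for any in-flight poll to finish, then keeps RX polling
	 * claimed (the backport spins on __LINK_STATE_RX_SCHED) */
	netif_poll_disable(netdev);

	/* stop TX under xmit_lock so this cannot race with a
	 * concurrent hard_start_xmit() */
	netif_tx_disable(netdev);

	netif_carrier_off(netdev);
	/* ... now safe to reclaim the TX and RX rings ... */
}
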
+/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} + +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +static inline void _kc_synchronize_irq(void) +{ + synchronize_irq(); +} +#undef synchronize_irq +#define synchronize_irq(X) _kc_synchronize_irq() + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define 
put_cpu() do { } while(0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +extern unsigned long _kc_find_next_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#endif /* 2.6.0 => 2.5.28 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define random_ether_addr _kc_random_ether_addr +static inline void _kc_random_ether_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +extern void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include <linux/reboot.h> +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. 
*/ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +extern char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +extern void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \ + dma_sync_single_for_cpu(dev, dma_handle, size, dir) +#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ + dma_sync_single_for_device(dev, dma_handle, size, dir) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) 
DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \ + (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0)))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#ifdef IXGBE_FCOE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif /* IXGBE_FCOE */ +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned 
long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +extern int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +extern void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +extern void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + 
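
The request_irq() wrapper above bridges the 2.6.19 interrupt-handler signature change in one place: the driver registers a modern two-argument handler, and on older kernels _kc_request_irq() casts it to the legacy three-argument type, whose trailing struct pt_regs pointer the handler simply never reads. A sketch of a caller, assuming hypothetical example_intr() and example_request() helpers:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Modern-signature ISR; the compat macro above routes it through
 * _kc_request_irq() on kernels older than 2.6.19. */
static irqreturn_t example_intr(int irq, void *data)
{
	struct net_device *netdev = data;

	if (!netif_running(netdev))
		return IRQ_NONE;	/* device is down, not our interrupt */

	/* ... read the interrupt cause, schedule NAPI, etc. ... */
	return IRQ_HANDLED;
}

static int example_request(struct pci_dev *pdev, struct net_device *netdev)
{
	/* identical call on every kernel; the cast happens in the shim */
	return request_irq(pdev->irq, example_intr, IRQF_SHARED,
			   netdev->name, netdev);
}
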
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include <linux/ctype.h> +extern void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +extern int __kc_adapter_clean(struct net_device *, int *); +extern struct net_device *napi_to_poll_dev(struct napi_struct *napi); +#define netif_napi_add(_netdev, _napi, _poll, _weight) \ + do { \ + struct napi_struct *__napi = (_napi); \ + struct net_device *poll_dev = napi_to_poll_dev(__napi); \ + poll_dev->poll = &(__kc_adapter_clean); \ + poll_dev->priv = (_napi); \ + poll_dev->weight = (_weight); \ + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \ + set_bit(__LINK_STATE_START, &poll_dev->state);\ + dev_hold(poll_dev); \ + __napi->poll = &(_poll); \ + __napi->weight = (_weight); \ + __napi->dev = (_netdev); \ + } while (0) +#define netif_napi_del(_napi) \ + do { \ + struct net_device *poll_dev = napi_to_poll_dev(_napi); \ + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \ + dev_put(poll_dev); \ + memset(poll_dev, 0, sizeof(struct net_device));\ + } while (0) +#define napi_schedule_prep(_napi) \ + (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi))) +#define napi_schedule(_napi) \ + do { \ + if (napi_schedule_prep(_napi)) \ + __netif_rx_schedule(napi_to_poll_dev(_napi)); \ + } while (0) +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +#ifndef NETIF_F_GRO +#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi)) +#else +#define napi_complete(_napi) \ + do { \ + napi_gro_flush(_napi); \ + netif_rx_complete(napi_to_poll_dev(_napi)); \ + } while (0) +#endif /* NETIF_F_GRO */ +#else /* NAPI */ +#define netif_napi_add(_netdev, _napi, _poll, _weight) \ + do { \ + struct napi_struct *__napi = _napi; \ + _netdev->poll = &(_poll); \ + _netdev->weight = (_weight); \ + __napi->poll = &(_poll); \ + __napi->weight = (_weight); \ + __napi->dev = (_netdev); \ + } while (0) +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? 
DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#include <linux/pm_qos_params.h> +#else /* >= 3.2.0 */ +#include <linux/pm_qos.h> +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include <linux/latency.h> +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + + +#ifndef IGB_PROCFS +#define IGB_PROCFS +#endif /* IGB_PROCFS */ + +#else /* < 2.6.25 */ + + +#ifndef IGB_SYSFS +#define IGB_SYSFS +#endif /* IGB_SYSFS */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ?
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#include <linux/pci-aspm.h> +#define HAVE_NETDEV_VLAN_FEATURES +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifdef HAVE_TX_MQ +extern void _kc_netif_tx_stop_all_queues(struct net_device *); +extern void _kc_netif_tx_wake_all_queues(struct net_device *); +extern void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef
NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +extern void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#else /* < 2.6.27 */ +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); +#endif +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#ifdef IXGBE_FCOE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif /* IXGBE_FCOE */ +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) 
\ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +#else +#define HAVE_ASPM_QUIRKS +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#else +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU (1 << 26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include <linux/pm_runtime.h> +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef
HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#define HAVE_ETHTOOL_SFP_DISPLAY_PORT +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct adapter_struct *kc_adapter = netdev_priv(netdev);\ + struct pci_dev *pdev = kc_adapter->pdev; \ + printk("%s %s: " format, level, pci_name(pdev), \ + ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct adapter_struct *kc_adapter = netdev_priv(netdev);\ + struct pci_dev *pdev = kc_adapter->pdev; \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) 
\ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#define netif_set_real_num_tx_queues(_netdev, _count) \ + do { \ + (_netdev)->egress_subqueue_count = _count; \ + } while (0) +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else +#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0) +#endif /* HAVE_TX_MQ */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#else /* < 2.6.35 */ +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +extern u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 
+#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +#else /* < 2.6.36 */ +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN (1 << 7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN (1 << 8) +#endif /* ETH_FLAG_RXVLAN */ + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#ifndef vlan_get_protocol +static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb) +#endif +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP (1 << 0) +#define SKBTX_IN_PROGRESS (1 << 2) +#define SKB_SHARED_TX_IS_UNION +#endif +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#ifdef CONFIG_DCB +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define 
DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +#else /* < 2.6.38 */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} 
+#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +//typedef u32 netdev_features_t; +#else /* ! < 3.3.0 */ +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#define NUMTCS_RETURNS_U8 + + +#endif /* < 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) +#else +#define HAVE_FDB_OPS +#endif /* < 3.5.0 */ +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/igb/kcompat_ethtool.c b/drivers/net/igb/kcompat_ethtool.c new file mode 100644 index 000000000000..01c8c9c5ac06 --- /dev/null +++ b/drivers/net/igb/kcompat_ethtool.c @@ -0,0 +1,1172 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * net/core/ethtool.c - Ethtool ioctl handler + * Copyright (c) 2003 Matthew Wilcox + * + * This file is where we call all the ethtool_ops commands to get + * the information ethtool needs. We fall back to calling do_ioctl() + * for drivers which haven't been converted to ethtool_ops yet. + * + * It's GPL, stupid. + * + * Modification by sfeldma@pobox.com to work as backward compat + * solution for pre-ethtool_ops kernels. + * - copied struct ethtool_ops from ethtool.h + * - defined SET_ETHTOOL_OPS + * - put in some #ifndef NETIF_F_xxx wrappers + * - changes refs to dev->ethtool_ops to ethtool_ops + * - changed dev_ethtool to ethtool_ioctl + * - remove EXPORT_SYMBOL()s + * - added _kc_ prefix in built-in ethtool_op_xxx ops. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kcompat.h" + +#undef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#undef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#undef SPEED_10000 +#define SPEED_10000 10000 + +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void 
(*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +/* + * Some useful ethtool_ops methods that are device independent. If we find that + * all drivers want to do the same thing here, we can turn these into dev_() + * function calls. + */ + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ? 1 : 0; +} + +#undef ethtool_op_get_tx_csum +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) +{ +#ifdef NETIF_F_IP_CSUM + return (dev->features & NETIF_F_IP_CSUM) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tx_csum +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_IP_CSUM + if (data) +#ifdef NETIF_F_IPV6_CSUM + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#else + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#endif +#endif + + return 0; +} + +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} + +#undef ethtool_op_get_tso +#define ethtool_op_get_tso _kc_ethtool_op_get_tso +u32 _kc_ethtool_op_get_tso(struct net_device *dev) +{ +#ifdef NETIF_F_TSO + return (dev->features & NETIF_F_TSO) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tso +#define ethtool_op_set_tso _kc_ethtool_op_set_tso +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_TSO + if (data) + dev->features |= NETIF_F_TSO; + else + dev->features &= ~NETIF_F_TSO; +#endif + + return 0; +} + +/* Handlers for each ethtool command */ + +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if 
(copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(®s, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return -ENOMEM; + + ops->get_regs(dev, ®s, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, ®s, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if (!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if (!ethtool_ops->set_wol) + return -EOPNOTSUPP; + + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device 
*dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) 
+{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +} + +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + 
+	if (!ethtool_ops->phys_id)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&id, useraddr, sizeof(id)))
+		return -EFAULT;
+
+	return ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+	struct ethtool_stats stats;
+	struct ethtool_ops *ops = ethtool_ops;
+	u64 *data;
+	int ret;
+
+	if (!ops->get_ethtool_stats || !ops->get_stats_count)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&stats, useraddr, sizeof(stats)))
+		return -EFAULT;
+
+	stats.n_stats = ops->get_stats_count(dev);
+	data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+	if (!data)
+		return -ENOMEM;
+
+	ops->get_ethtool_stats(dev, &stats, data);
+
+	ret = -EFAULT;
+	if (copy_to_user(useraddr, &stats, sizeof(stats)))
+		goto out;
+	useraddr += sizeof(stats);
+	if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+		goto out;
+	ret = 0;
+
+out:
+	kfree(data);
+	return ret;
+}
+
+/* The main entry point in this file. Called from net/core/dev.c */
+
+#define ETHTOOL_OPS_COMPAT
+int ethtool_ioctl(struct ifreq *ifr)
+{
+	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+	void *useraddr = (void *) ifr->ifr_data;
+	u32 ethcmd;
+
+	/*
+	 * XXX: This can be pushed down into the ethtool_* handlers that
+	 * need it. Keep existing behavior for the moment.
+	 */
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!dev || !netif_device_present(dev))
+		return -ENODEV;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+		return -EFAULT;
+
+	switch (ethcmd) {
+	case ETHTOOL_GSET:
+		return ethtool_get_settings(dev, useraddr);
+	case ETHTOOL_SSET:
+		return ethtool_set_settings(dev, useraddr);
+	case ETHTOOL_GDRVINFO:
+		return ethtool_get_drvinfo(dev, useraddr);
+	case ETHTOOL_GREGS:
+		return ethtool_get_regs(dev, useraddr);
+	case ETHTOOL_GWOL:
+		return ethtool_get_wol(dev, useraddr);
+	case ETHTOOL_SWOL:
+		return ethtool_set_wol(dev, useraddr);
+	case ETHTOOL_GMSGLVL:
+		return ethtool_get_msglevel(dev, useraddr);
+	case ETHTOOL_SMSGLVL:
+		return ethtool_set_msglevel(dev, useraddr);
+	case ETHTOOL_NWAY_RST:
+		return ethtool_nway_reset(dev);
+	case ETHTOOL_GLINK:
+		return ethtool_get_link(dev, useraddr);
+	case ETHTOOL_GEEPROM:
+		return ethtool_get_eeprom(dev, useraddr);
+	case ETHTOOL_SEEPROM:
+		return ethtool_set_eeprom(dev, useraddr);
+	case ETHTOOL_GCOALESCE:
+		return ethtool_get_coalesce(dev, useraddr);
+	case ETHTOOL_SCOALESCE:
+		return ethtool_set_coalesce(dev, useraddr);
+	case ETHTOOL_GRINGPARAM:
+		return ethtool_get_ringparam(dev, useraddr);
+	case ETHTOOL_SRINGPARAM:
+		return ethtool_set_ringparam(dev, useraddr);
+	case ETHTOOL_GPAUSEPARAM:
+		return ethtool_get_pauseparam(dev, useraddr);
+	case ETHTOOL_SPAUSEPARAM:
+		return ethtool_set_pauseparam(dev, useraddr);
+	case ETHTOOL_GRXCSUM:
+		return ethtool_get_rx_csum(dev, useraddr);
+	case ETHTOOL_SRXCSUM:
+		return ethtool_set_rx_csum(dev, useraddr);
+	case ETHTOOL_GTXCSUM:
+		return ethtool_get_tx_csum(dev, useraddr);
+	case ETHTOOL_STXCSUM:
+		return ethtool_set_tx_csum(dev, useraddr);
+	case ETHTOOL_GSG:
+		return ethtool_get_sg(dev, useraddr);
+	case ETHTOOL_SSG:
+		return ethtool_set_sg(dev, useraddr);
+	case ETHTOOL_GTSO:
+		return ethtool_get_tso(dev, useraddr);
+	case ETHTOOL_STSO:
+		return ethtool_set_tso(dev, useraddr);
+	case ETHTOOL_TEST:
+		return ethtool_self_test(dev, useraddr);
+	case ETHTOOL_GSTRINGS:
+		return ethtool_get_strings(dev, useraddr);
+	case ETHTOOL_PHYS_ID:
+		return ethtool_phys_id(dev, useraddr);
+	case ETHTOOL_GSTATS:
+		return ethtool_get_stats(dev, useraddr);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+#define mii_if_info _kc_mii_if_info
+struct _kc_mii_if_info {
+	int phy_id;
+	int advertising;
+	int phy_id_mask;
+	int reg_num_mask;
+
+	unsigned int full_duplex : 1;	/* is full duplex? */
+	unsigned int force_media : 1;	/* is autoneg. disabled? */
+
+	struct net_device *dev;
+	int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+	void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+				struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+				struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+				 struct mii_ioctl_data *mii_data, int cmd,
+				 unsigned int *duplex_changed);
+#endif /* > 2.4.6 */
+
+
+struct _kc_pci_dev_ext {
+	struct pci_dev *dev;
+	void *pci_drvdata;
+	struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+	struct net_device *dev;
+	unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+	struct net_device *dev = mii->dev;
+	u32 advert, bmcr, lpa, nego;
+
+	ecmd->supported =
+	    (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+	     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+	     SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+	/* only supports twisted-pair */
+	ecmd->port = PORT_MII;
+
+	/* only supports internal transceiver */
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	/* this isn't fully supported at higher layers */
+	ecmd->phy_address = mii->phy_id;
+
+	ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+	advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+	if (advert & ADVERTISE_10HALF)
+		ecmd->advertising |= ADVERTISED_10baseT_Half;
+	if (advert & ADVERTISE_10FULL)
+		ecmd->advertising |= ADVERTISED_10baseT_Full;
+	if (advert & ADVERTISE_100HALF)
+		ecmd->advertising |= ADVERTISED_100baseT_Half;
+	if (advert & ADVERTISE_100FULL)
+		ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+	bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+	lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+	if (bmcr & BMCR_ANENABLE) {
+		ecmd->advertising |= ADVERTISED_Autoneg;
+		ecmd->autoneg = AUTONEG_ENABLE;
+
+		nego = mii_nway_result(advert & lpa);
+		if (nego == LPA_100FULL || nego == LPA_100HALF)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
+		if (nego == LPA_100FULL || nego == LPA_10FULL) {
+			ecmd->duplex = DUPLEX_FULL;
+			mii->full_duplex = 1;
+		} else {
+			ecmd->duplex = DUPLEX_HALF;
+			mii->full_duplex = 0;
+		}
+	} else {
+		ecmd->autoneg = AUTONEG_DISABLE;
+
+		ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+		ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+	}
+
+	/* ignore maxtxpkt, maxrxpkt for now */
+
+	return 0;
+}
+
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+	struct net_device *dev = mii->dev;
+
+	if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+		return -EINVAL;
+	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+		return -EINVAL;
+	if (ecmd->port != PORT_MII)
+		return -EINVAL;
+	if (ecmd->transceiver != XCVR_INTERNAL)
+		return -EINVAL;
+	if (ecmd->phy_address != mii->phy_id)
+		return -EINVAL;
+	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	/* ignore supported, maxtxpkt, maxrxpkt */
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		u32 bmcr, advert, tmp;
+
+		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+					  ADVERTISED_10baseT_Full |
+					  ADVERTISED_100baseT_Half |
+					  ADVERTISED_100baseT_Full)) == 0)
+			return -EINVAL;
+
+		/* advertise only what has been requested */
+		advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+		tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+		if (ecmd->advertising & ADVERTISED_10baseT_Half)
+			tmp |= ADVERTISE_10HALF;
+		if (ecmd->advertising & ADVERTISED_10baseT_Full)
+			tmp |= ADVERTISE_10FULL;
+		if (ecmd->advertising & ADVERTISED_100baseT_Half)
+			tmp |= ADVERTISE_100HALF;
+		if (ecmd->advertising & ADVERTISED_100baseT_Full)
+			tmp |= ADVERTISE_100FULL;
+		if (advert != tmp) {
+			mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+			mii->advertising = tmp;
+		}
+
+		/* turn on autonegotiation, and force a renegotiate */
+		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+		mii->force_media = 0;
+	} else {
+		u32 bmcr, tmp;
+
+		/* turn off auto negotiation, set speed and duplexity */
+		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+		tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+		if (ecmd->speed == SPEED_100)
+			tmp |= BMCR_SPEED100;
+		if (ecmd->duplex == DUPLEX_FULL) {
+			tmp |= BMCR_FULLDPLX;
+			mii->full_duplex = 1;
+		} else
+			mii->full_duplex = 0;
+		if (bmcr != tmp)
+			mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+		mii->force_media = 1;
+	}
+	return 0;
+}
+
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+	/* first, a dummy read, needed to latch some MII phys */
+	mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+	if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+		return 1;
+	return 0;
+}
+
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+	int bmcr;
+	int r = -EINVAL;
+
+	/* if autoneg is off, it's an error */
+	bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+	if (bmcr & BMCR_ANENABLE) {
+		bmcr |= BMCR_ANRESTART;
+		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+		r = 0;
+	}
+
+	return r;
+}
+
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+	int cur_link = mii_link_ok(mii);
+	int prev_link = netif_carrier_ok(mii->dev);
+
+	if (cur_link && !prev_link)
+		netif_carrier_on(mii->dev);
+	else if (prev_link && !cur_link)
+		netif_carrier_off(mii->dev);
+}
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+			  struct mii_ioctl_data *mii_data, int cmd,
+			  unsigned int *duplex_chg_out)
+{
+	int rc = 0;
+	unsigned int duplex_changed = 0;
+
+	if (duplex_chg_out)
+		*duplex_chg_out = 0;
+
+	mii_data->phy_id &= mii_if->phy_id_mask;
+	mii_data->reg_num &= mii_if->reg_num_mask;
+
+	switch(cmd) {
+	case SIOCDEVPRIVATE:	/* binary compat, remove in 2.5 */
+	case SIOCGMIIPHY:
+		mii_data->phy_id = mii_if->phy_id;
+		/* fall through */
+
+	case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+	case SIOCGMIIREG:
+		mii_data->val_out =
+			mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+					  mii_data->reg_num);
+		break;
+
+	case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+	case SIOCSMIIREG: {
+		u16 val = mii_data->val_in;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (mii_data->phy_id == mii_if->phy_id) {
+			switch(mii_data->reg_num) {
+			case MII_BMCR: {
+				unsigned int new_duplex = 0;
+				if (val & (BMCR_RESET|BMCR_ANENABLE))
+					mii_if->force_media = 0;
+				else
+					mii_if->force_media = 1;
+				if (mii_if->force_media &&
+				    (val & BMCR_FULLDPLX))
+					new_duplex = 1;
+				if (mii_if->full_duplex != new_duplex) {
+					duplex_changed = 1;
+					mii_if->full_duplex = new_duplex;
+				}
+				break;
+			}
+			case MII_ADVERTISE:
+				mii_if->advertising = val;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+
+		mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+				   mii_data->reg_num, val);
+		break;
+	}
+
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+
+	if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+		*duplex_chg_out = 1;
+
+	return rc;
+}
+#endif /* > 2.4.6 */
+
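
For reference, the compat MII helpers above are consumed exactly like the
in-kernel mii library: a driver fills in a struct mii_if_info with its own
MDIO accessors and then delegates SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
handling to generic_mii_ioctl(), which the #defines above redirect to
_kc_generic_mii_ioctl() on kernels newer than 2.4.6. A minimal sketch
follows; it is not part of this patch, the mydrv_* names and the adapter
struct are invented for illustration, and if_mii()/netdev_priv() are
assumed to be available from linux/mii.h and linux/netdevice.h (or from
the kcompat layer on older kernels):

/* Hypothetical usage sketch -- NOT part of this patch. */
#include <linux/mii.h>
#include <linux/netdevice.h>

struct mydrv_adapter {			/* invented private struct */
	struct net_device *netdev;
	struct mii_if_info mii;
};

/* A real driver would access its MDIO registers here; stubbed for
 * the sketch. */
static int mydrv_mdio_read(struct net_device *dev, int phy_id, int location)
{
	return 0;
}

static void mydrv_mdio_write(struct net_device *dev, int phy_id,
			     int location, int val)
{
}

static void mydrv_mii_init(struct mydrv_adapter *adapter)
{
	adapter->mii.dev = adapter->netdev;
	adapter->mii.phy_id = 1;		/* PHY address on the bus */
	adapter->mii.phy_id_mask = 0x1f;	/* 5-bit PHY address */
	adapter->mii.reg_num_mask = 0x1f;	/* 5-bit register number */
	adapter->mii.mdio_read = mydrv_mdio_read;
	adapter->mii.mdio_write = mydrv_mdio_write;
}

/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handler, e.g. for ndo_do_ioctl */
static int mydrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mydrv_adapter *adapter = netdev_priv(dev);
	unsigned int duplex_changed;
	int rc;

	rc = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd,
			       &duplex_changed);
	if (!rc && duplex_changed)
		mii_check_link(&adapter->mii);	/* resync carrier state */
	return rc;
}

Because the #define redirection keeps the upstream names, driver source
written against the modern mii API compiles unchanged on old kernels;
that is the point of this kcompat shim.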