**/
 static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 {
-#define IXGBE_MAX_SECRX_POLL 30
-       int i;
-       int secrxreg;
-
        /*
         * Workaround for 82599 silicon errata when enabling the Rx datapath.
         * If traffic is incoming before we enable the Rx unit, it could hang
         * the Rx DMA unit.  Therefore, make sure the security engine is
         * completely disabled prior to enabling the Rx unit.
         */
-       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-       for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
-               secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
-               if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
-                       break;
-               else
-                       /* Use interrupt-safe sleep just in case */
-                       udelay(10);
-       }
-
-       /* For informational purposes only */
-       if (i >= IXGBE_MAX_SECRX_POLL)
-               hw_dbg(hw, "Rx unit being enabled before security "
-                      "path fully disabled.  Continuing with init.\n");
+       hw->mac.ops.disable_rx_buff(hw);
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-       IXGBE_WRITE_FLUSH(hw);
+
+       hw->mac.ops.enable_rx_buff(hw);
 
        return 0;
 }
        .get_media_type         = &ixgbe_get_media_type_82599,
        .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
+       .disable_rx_buff        = &ixgbe_disable_rx_buff_generic,
+       .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
        .get_device_caps        = &ixgbe_get_device_caps_generic,
 
        ixgbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ *  ixgbe_disable_rx_buff_generic - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally
+ *  empty the Rx security block.
+ **/
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+       int i;
+       int secrxreg;
+
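+       /* Set RX_DIS so the security block stops accepting Rx traffic */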
+       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
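+       /* Wait for the hardware to report that the Rx security path is empty */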
+       for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+               secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+               if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+                       break;
+               else
+                       /* Use interrupt-safe sleep just in case */
+                       udelay(10);
+       }
+
+       /* For informational purposes only */
+       if (i >= IXGBE_MAX_SECRX_POLL)
+               hw_dbg(hw, "Rx unit being enabled before security "
+                      "path fully disabled.  Continuing with init.\n");
+
+       return 0;
+}
+
+/**
+ *  ixgbe_enable_rx_buff_generic - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path
+ **/
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+       int secrxreg;
+
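+       /* Clear RX_DIS so Rx traffic flows through the security block again */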
+       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
 /**
  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
  *  @hw: pointer to hardware structure
 
                                      struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
+
        ixgbe_configure_pb(adapter);
 #ifdef CONFIG_IXGBE_DCB
        ixgbe_configure_dcb(adapter);
                ixgbe_configure_fcoe(adapter);
 
 #endif /* IXGBE_FCOE */
+
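+       /*
+        * Disable the Rx data path while the flow director filters are
+        * programmed; it is re-enabled once setup is complete.
+        */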
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               hw->mac.ops.disable_rx_buff(hw);
+               break;
+       default:
+               break;
+       }
+
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                ixgbe_init_fdir_signature_82599(&adapter->hw,
                                                adapter->fdir_pballoc);
                ixgbe_fdir_filter_restore(adapter);
        }
 
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               hw->mac.ops.enable_rx_buff(hw);
+               break;
+       default:
+               break;
+       }
+
        ixgbe_configure_virtualization(adapter);
 
        ixgbe_configure_tx(adapter);
 
        s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
        s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
        s32 (*setup_sfp)(struct ixgbe_hw *);
+       s32 (*disable_rx_buff)(struct ixgbe_hw *);
+       s32 (*enable_rx_buff)(struct ixgbe_hw *);
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
        s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
        void (*release_swfw_sync)(struct ixgbe_hw *, u16);
 
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
        .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
        .release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
+       .disable_rx_buff        = &ixgbe_disable_rx_buff_generic,
+       .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {