}
  }
  
-static int
-vmxnet3_set_flags(struct net_device *netdev, u32 data)
+int vmxnet3_set_features(struct net_device *netdev, u32 features)
  {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-      u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
-      u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
       unsigned long flags;
+      u32 changed = features ^ netdev->features;

-      if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
-              return -EINVAL;
-
-      if (lro_requested ^ lro_present) {
-              /* toggle the LRO feature*/
-              netdev->features ^= NETIF_F_LRO;
-
-              /* Update private LRO flag */
-              adapter->lro = lro_requested;
+      if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) {
+              if (features & NETIF_F_RXCSUM)
+                      adapter->shared->devRead.misc.uptFeatures |=
+                      UPT1_F_RXCSUM;
+              else
+                      adapter->shared->devRead.misc.uptFeatures &=
+                      ~UPT1_F_RXCSUM;
  
-              if (lro_requested)
+              /* update hardware LRO capability accordingly */
+              if (features & NETIF_F_LRO)
                        adapter->shared->devRead.misc.uptFeatures |=
                                                        UPT1_F_LRO;
                else
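
For reference, the new callback only takes effect once it is hooked into the driver's net_device_ops and the toggleable bits are advertised through hw_features. A minimal sketch of that wiring, assuming vmxnet3's usual names (the real helper's signature and feature list may differ from what is shown):

/* Sketch only, not part of the patch: hook the callback up and let
 * ethtool -K toggle the bits listed in hw_features.
 */
static const struct net_device_ops vmxnet3_netdev_ops = {
	/* ... existing callbacks ... */
	.ndo_set_features = vmxnet3_set_features,
};

static void vmxnet3_declare_features(struct net_device *netdev)
{
	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM |
			      NETIF_F_RXCSUM | NETIF_F_LRO;
	netdev->features = netdev->hw_features;
}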
 
  static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
  {
        struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        int timeout = 200; /* ms */
        int i, j;
+      bool drain_txq;

-      ath9k_ps_wakeup(sc);
       mutex_lock(&sc->mutex);
-
        cancel_delayed_work_sync(&sc->tx_complete_work);
  
+       if (sc->sc_flags & SC_OP_INVALID) {
+               ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+               mutex_unlock(&sc->mutex);
+               return;
+       }
+ 
        if (drop)
                timeout = 1;
  
 
        features = netdev_fix_features(dev, features);
  
        if (dev->features == features)
-              return;
+              return 0;
  
-       netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+       netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
                dev->features, features);
  
        if (dev->netdev_ops->ndo_set_features)
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
  
-       /* Avoid warning from netdev_fix_features() for GSO without SG */
-       if (!(dev->wanted_features & NETIF_F_SG)) {
-               dev->wanted_features &= ~NETIF_F_GSO;
-               dev->features &= ~NETIF_F_GSO;
-       }
- 
+      /* Turn on no cache copy if HW is doing checksum */
+      dev->hw_features |= NETIF_F_NOCACHE_COPY;
+      if ((dev->features & NETIF_F_ALL_CSUM) &&
+          !(dev->features & NETIF_F_NO_CSUM)) {
+              dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+              dev->features |= NETIF_F_NOCACHE_COPY;
+      }
+
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
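
The block added above boils down to a simple predicate: a device that does its own checksumming (any NETIF_F_ALL_CSUM bit) and is not a NETIF_F_NO_CSUM pseudo-device gets NETIF_F_NOCACHE_COPY by default. Factored out purely for illustration (this helper is hypothetical, not part of the patch):

/* Hypothetical helper, equivalent to the condition added above */
static bool netdev_wants_nocache_copy(const struct net_device *dev)
{
	return (dev->features & NETIF_F_ALL_CSUM) &&
	       !(dev->features & NETIF_F_NO_CSUM);
}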
 
        .open    = ip_vs_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release_private,
+       .release = seq_release_net,
  };
  
-#endif
-
-#ifdef CONFIG_PROC_FS
  static int ip_vs_stats_show(struct seq_file *seq, void *v)
  {
        struct net *net = seq_file_single_net(seq);
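
seq_release_net() is the counterpart of seq_open_net(): it drops the reference on the struct net taken at open time, which seq_release_private() would leak. A sketch of what the matching open side is assumed to look like (helper and struct names assumed, not quoted from this patch):

static int ip_vs_info_open(struct inode *inode, struct file *file)
{
	/* seq_open_net() pins the inode's netns; pairing it with
	 * seq_release_net() in .release drops that reference again.
	 */
	return seq_open_net(inode, file, &ip_vs_info_seq_ops,
			    sizeof(struct ip_vs_iter));
}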