for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
 +                                                         RESOURCE_LIMITS,
                                                           vf + 1);
-                       if (!status)
+                       if (!status) {
                                cap_flags = res.if_cap_flags;
+                               /* Prevent VFs from enabling VLAN promiscuous
+                                * mode
+                                */
+                               cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+                       }
                }
  
                status = be_if_create(adapter, &vf_cfg->if_handle,
        netdev->ethtool_ops = &be_ethtool_ops;
  }
  
 -static void be_unmap_pci_bars(struct be_adapter *adapter)
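 +/* Detach the netdev, stop traffic and release adapter resources */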
 +static void be_cleanup(struct be_adapter *adapter)
  {
 -      if (adapter->csr)
 -              pci_iounmap(adapter->pdev, adapter->csr);
 -      if (adapter->db)
 -              pci_iounmap(adapter->pdev, adapter->db);
 -}
 +      struct net_device *netdev = adapter->netdev;
  
 -static int db_bar(struct be_adapter *adapter)
 -{
 -      if (lancer_chip(adapter) || !be_physfn(adapter))
 -              return 0;
 -      else
 -              return 4;
 -}
 +      rtnl_lock();
 +      netif_device_detach(netdev);
 +      if (netif_running(netdev))
 +              be_close(netdev);
 +      rtnl_unlock();
  
 -static int be_roce_map_pci_bars(struct be_adapter *adapter)
 -{
 -      if (skyhawk_chip(adapter)) {
 -              adapter->roce_db.size = 4096;
 -              adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
 -                                                            db_bar(adapter));
 -              adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
 -                                                             db_bar(adapter));
 -      }
 -      return 0;
 +      be_clear(adapter);
  }
  
 -static int be_map_pci_bars(struct be_adapter *adapter)
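 +/* Re-initialize the adapter and re-open the interface if it was running */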
 +static int be_resume(struct be_adapter *adapter)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 -      u8 __iomem *addr;
 +      struct net_device *netdev = adapter->netdev;
 +      int status;
  
 -      if (BEx_chip(adapter) && be_physfn(adapter)) {
 -              adapter->csr = pci_iomap(pdev, 2, 0);
 +      status = be_setup(adapter);
 +      if (status)
 +              return status;
 +
 +      if (netif_running(netdev)) {
 +              status = be_open(netdev);
 +              if (status)
 +                      return status;
 +      }
 +
 +      netif_device_attach(netdev);
 +
 +      return 0;
 +}
 +
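 +/* Try to recover the adapter via be_resume() and log the outcome */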
 +static int be_err_recover(struct be_adapter *adapter)
 +{
 +      struct device *dev = &adapter->pdev->dev;
 +      int status;
 +
 +      status = be_resume(adapter);
 +      if (status)
 +              goto err;
 +
 +      dev_info(dev, "Adapter recovery successful\n");
 +      return 0;
 +err:
 +      if (be_physfn(adapter))
 +              dev_err(dev, "Adapter recovery failed\n");
 +      else
 +              dev_err(dev, "Re-trying adapter recovery\n");
 +
 +      return status;
 +}
 +
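 +/* Delayed work: check for HW errors and, on Lancer, attempt recovery */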
 +static void be_err_detection_task(struct work_struct *work)
 +{
 +      struct be_adapter *adapter =
 +                              container_of(work, struct be_adapter,
 +                                           be_err_detection_work.work);
 +      int status = 0;
 +
 +      be_detect_error(adapter);
 +
 +      if (adapter->hw_error) {
 +              be_cleanup(adapter);
 +
 +              /* As of now error recovery support is in Lancer only */
 +              if (lancer_chip(adapter))
 +                      status = be_err_recover(adapter);
 +      }
 +
 +      /* Always attempt recovery on VFs */
 +      if (!status || be_virtfn(adapter))
 +              be_schedule_err_detection(adapter);
 +}
 +
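 +/* Report an unqualified SFP+ module and clear the pending event flag */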
 +static void be_log_sfp_info(struct be_adapter *adapter)
 +{
 +      int status;
 +
 +      status = be_cmd_query_sfp_info(adapter);
 +      if (!status) {
 +              dev_err(&adapter->pdev->dev,
 +                      "Unqualified SFP+ detected on %c from %s part no: %s",
 +                      adapter->port_name, adapter->phy.vendor_name,
 +                      adapter->phy.vendor_pn);
 +      }
 +      adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
 +}
 +
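 +/* Periodic housekeeping: stats refresh, die temperature, RX replenish,
 + * EQ-delay update
 + */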
 +static void be_worker(struct work_struct *work)
 +{
 +      struct be_adapter *adapter =
 +              container_of(work, struct be_adapter, work.work);
 +      struct be_rx_obj *rxo;
 +      int i;
 +
 +      /* when interrupts are not yet enabled, just reap any pending
 +       * mcc completions
 +       */
 +      if (!netif_running(adapter->netdev)) {
 +              local_bh_disable();
 +              be_process_mcc(adapter);
 +              local_bh_enable();
 +              goto reschedule;
 +      }
 +
 +      if (!adapter->stats_cmd_sent) {
 +              if (lancer_chip(adapter))
 +                      lancer_cmd_get_pport_stats(adapter,
 +                                                 &adapter->stats_cmd);
 +              else
 +                      be_cmd_get_stats(adapter, &adapter->stats_cmd);
 +      }
 +
 +      if (be_physfn(adapter) &&
 +          MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
 +              be_cmd_get_die_temperature(adapter);
 +
 +      for_all_rx_queues(adapter, rxo, i) {
 +              /* Replenish RX-queues starved due to memory
 +               * allocation failures.
 +               */
 +              if (rxo->rx_post_starved)
 +                      be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
 +      }
 +
 +      be_eqd_update(adapter);
 +
 +      if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
 +              be_log_sfp_info(adapter);
 +
 +reschedule:
 +      adapter->work_counter++;
 +      schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 +}
 +
 +static void be_unmap_pci_bars(struct be_adapter *adapter)
 +{
 +      if (adapter->csr)
 +              pci_iounmap(adapter->pdev, adapter->csr);
 +      if (adapter->db)
 +              pci_iounmap(adapter->pdev, adapter->db);
 +}
 +
 +static int db_bar(struct be_adapter *adapter)
 +{
 +      if (lancer_chip(adapter) || !be_physfn(adapter))
 +              return 0;
 +      else
 +              return 4;
 +}
 +
 +static int be_roce_map_pci_bars(struct be_adapter *adapter)
 +{
 +      if (skyhawk_chip(adapter)) {
 +              adapter->roce_db.size = 4096;
 +              adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
 +                                                            db_bar(adapter));
 +              adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
 +                                                             db_bar(adapter));
 +      }
 +      return 0;
 +}
 +
 +static int be_map_pci_bars(struct be_adapter *adapter)
 +{
 +      struct pci_dev *pdev = adapter->pdev;
 +      u8 __iomem *addr;
 +      u32 sli_intf;
 +
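 +      /* Read the SLI family and VF/PF identity from the SLI_INTF register */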
 +      pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
 +      adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
 +                              SLI_INTF_FAMILY_SHIFT;
 +      adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
 +
 +      if (BEx_chip(adapter) && be_physfn(adapter)) {
 +              adapter->csr = pci_iomap(pdev, 2, 0);
                if (!adapter->csr)
                        return -ENOMEM;
        }
 
        mutex_unlock(&inet_diag_table_mutex);
  }
  
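 +/* Fill the common id/address/port fields of an inet_diag_msg from a socket */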
 +static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
 +{
 +      r->idiag_family = sk->sk_family;
 +
 +      r->id.idiag_sport = htons(sk->sk_num);
 +      r->id.idiag_dport = sk->sk_dport;
 +      r->id.idiag_if = sk->sk_bound_dev_if;
 +      sock_diag_save_cookie(sk, r->id.idiag_cookie);
 +
 +#if IS_ENABLED(CONFIG_IPV6)
 +      if (sk->sk_family == AF_INET6) {
 +              *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
 +              *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
 +      } else
 +#endif
 +      {
 +      memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
 +      memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
 +
 +      r->id.idiag_src[0] = sk->sk_rcv_saddr;
 +      r->id.idiag_dst[0] = sk->sk_daddr;
 +      }
 +}
 +
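 +/* Estimated netlink attribute space needed for a single socket reply */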
 +static size_t inet_sk_attr_size(void)
 +{
 +      return    nla_total_size(sizeof(struct tcp_info))
 +              + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
 +              + nla_total_size(1) /* INET_DIAG_TOS */
 +              + nla_total_size(1) /* INET_DIAG_TCLASS */
 +              + nla_total_size(sizeof(struct inet_diag_meminfo))
 +              + nla_total_size(sizeof(struct inet_diag_msg))
 +              + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
 +              + nla_total_size(TCP_CA_NAME_MAX)
 +              + nla_total_size(sizeof(struct tcpvegas_info))
 +              + 64;
 +}
 +
  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 -                            struct sk_buff *skb, struct inet_diag_req_v2 *req,
 -                            struct user_namespace *user_ns,
 -                            u32 portid, u32 seq, u16 nlmsg_flags,
 -                            const struct nlmsghdr *unlh)
 +                    struct sk_buff *skb, const struct inet_diag_req_v2 *req,
 +                    struct user_namespace *user_ns,
 +                    u32 portid, u32 seq, u16 nlmsg_flags,
 +                    const struct nlmsghdr *unlh)
  {
        const struct inet_sock *inet = inet_sk(sk);
 +      const struct inet_diag_handler *handler;
 +      int ext = req->idiag_ext;
        struct inet_diag_msg *r;
        struct nlmsghdr  *nlh;
        struct nlattr *attr;