        bp->mii_bus->read = &macb_mdio_read;
        bp->mii_bus->write = &macb_mdio_write;
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 -              bp->pdev->name, bp->pdev->id);
 +               bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->dev->dev;
+       bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
  
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
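For context, these assignments sit in macb_mii_init(), whose bus setup follows the stock mdiobus pattern; a condensed sketch (label names approximate, error paths abbreviated, not the driver's verbatim code):

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus)
		return -ENOMEM;

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	/* the bus hangs off the platform device, hence &bp->pdev->dev */
	bp->mii_bus->parent = &bp->pdev->dev;

	if (mdiobus_register(bp->mii_bus))
		goto err_out_free_mdiobus;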
 
        mlx5e_redirect_rqts(priv);
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
 +#ifdef CONFIG_RFS_ACCEL
 +      priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
 +#endif
  
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
  
        return 0;
  
                goto err_tc_cleanup;
        }
  
 -      if (mlx5e_vxlan_allowed(mdev))
 +      if (mlx5e_vxlan_allowed(mdev)) {
 +              rtnl_lock();
                vxlan_get_rx_port(netdev);
 +              rtnl_unlock();
 +      }
  
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
  
        return priv;
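Three threads run through the mlx5e hunks above: the netdev is pointed at the core's IRQ-affinity reverse map (priv->mdev->rmap) so accelerated RFS can steer flows, deferred work moves from the system workqueue onto the driver-private priv->wq, and vxlan_get_rx_port() is now called with the RTNL held. A reverse map of that kind is normally built with the cpu_rmap helpers; a minimal sketch of the usual setup (function and parameter names hypothetical, not mlx5's exact code):

	#include <linux/netdevice.h>
	#include <linux/cpu_rmap.h>

	/* Hypothetical: register each RX IRQ so RFS can follow its affinity */
	static int example_setup_rx_cpu_rmap(struct net_device *netdev,
					     const int *irqs, int nvec)
	{
	#ifdef CONFIG_RFS_ACCEL
		int i, err;

		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < nvec; i++) {
			err = irq_cpu_rmap_add(netdev->rx_cpu_rmap, irqs[i]);
			if (err) {
				free_irq_cpu_rmap(netdev->rx_cpu_rmap);
				netdev->rx_cpu_rmap = NULL;
				return err;
			}
		}
	#endif
		return 0;
	}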
  
 
        if (sram2_len)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
  
 +      /* Make room for MEM segments */
 +      for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
 +              if (fw_dbg_mem[i])
 +                      file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
 +                              le32_to_cpu(fw_dbg_mem[i]->len);
 +      }
 +
        /* Make room for fw's virtual image pages, if it exists */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block)
                file_len += mvm->num_of_paging_blk *
                        (sizeof(*dump_data) +
                         sizeof(struct iwl_fw_error_dump_paging) +
 
        {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
  
  /* 9000 Series */
 +      {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
 -      {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
 -      {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
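The ID-table churn above is pure data: each IWL_PCI_DEVICE(dev, subdev, cfg) row matches one Intel (device, subdevice) pair and carries a pointer to its config struct as driver_data. Reconstructed from its use here (treat the field details as approximate), the macro is essentially a struct pci_device_id initializer:

	#define IWL_PCI_DEVICE(dev, subdev, cfg) \
		.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
		.driver_data = (kernel_ulong_t)&(cfg)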
 
        }
  
        return 0;
+ error:
+       verbose("cannot pass map_type %d into func %d\n",
+               map->map_type, func_id);
+       return -EINVAL;
  }
  
 +static int check_raw_mode(const struct bpf_func_proto *fn)
 +{
 +      int count = 0;
 +
 +      if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
 +              count++;
 +      if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
 +              count++;
 +      if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
 +              count++;
 +      if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
 +              count++;
 +      if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
 +              count++;
 +
 +      return count > 1 ? -EINVAL : 0;
 +}
 +
  static int check_call(struct verifier_env *env, int func_id)
  {
        struct verifier_state *state = &env->cur_state;
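check_raw_mode() enforces that a helper prototype names at most one ARG_PTR_TO_RAW_STACK argument: a raw-stack pointer may point at uninitialized stack memory, so the verifier needs exactly one unambiguous size argument to pair with it. A prototype that passes the check looks roughly like this (helper name hypothetical, field style as in this era's bpf_func_proto definitions):

	static u64 example_fill_buf(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

	static const struct bpf_func_proto example_fill_buf_proto = {
		.func		= example_fill_buf,	/* hypothetical helper */
		.gpl_only	= false,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_PTR_TO_RAW_STACK,	/* buffer may be uninitialized */
		.arg2_type	= ARG_CONST_STACK_SIZE,	/* verifier-known size of arg1 */
	};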
 
                                                     const struct sock *sk2,
                                                     bool match_wildcard))
  {
+       struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
        struct sock *sk2;
 -      struct hlist_nulls_node *node;
        kuid_t uid = sock_i_uid(sk);
  
 -      sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
 +      sk_for_each_rcu(sk2, &ilb->head) {
                if (sk2 != sk &&
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
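The iterator swap above is mechanical: listener hash chains moved from an hlist_nulls to a plain RCU hlist, so the extra node cursor disappears. Paraphrasing the include/net/sock.h definitions, the two macros differ only in the underlying list flavor:

	#define sk_for_each_rcu(__sk, list) \
		hlist_for_each_entry_rcu(__sk, list, sk_node)

	#define sk_nulls_for_each_rcu(__sk, node, list) \
		hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)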
 
        }
  
        /* Push Tunnel header. */
 -      skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
 -      if (IS_ERR(skb)) {
 -              skb = NULL;
 +      if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
                goto err_free_rt;
 -      }
  
        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-       gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
 -      build_header(skb, tunnel_hlen, flags, proto,
 -                   tunnel_id_to_key(tun_info->key.tun_id), 0);
++      gre_build_header(skb, tunnel_hlen, flags, proto,
 +                       tunnel_id_to_key(tun_info->key.tun_id), 0);
  
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;