Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds.  Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call was removed from this function.
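A sketch of the resolved helper (matching the drivers/net/phy/phy.c
hunk further down): interrupts are disabled and then cleared, and any
error is simply returned to the caller now that the phy_error() call
is gone:

    static int phy_disable_interrupts(struct phy_device *phydev)
    {
            int err;

            /* Disable PHY interrupts */
            err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
            if (err)
                    return err;

            /* Clear the interrupt */
            return phy_clear_interrupt(phydev);
    }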
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member 'rt_mtu_locked', which needs to be copied
over here.
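In the resolved xfrm4_fill_dst() (see the net/ipv4/xfrm4_policy.c hunk
below) the new bit is copied alongside rt_pmtu while the rt_table_id
copy is dropped, roughly:

    xdst->u.rt.rt_pmtu = rt->rt_pmtu;
    xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;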
The mlxsw driver conflict consisted of 'net-next' separating the span
code and definitions into separate files, whilst a 'net' bug fix made
some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the
RDMA tree's merge commit was used as a guide here, and their notes
follow:
====================
    Because bug fixes found by the syzkaller bot were taken into the for-rc
    branch after development for the 4.17 merge window had already started
    landing in the for-next branch, there were fairly non-trivial merge
    conflicts that needed to be resolved between the for-rc branch and the
    for-next branch.  This merge resolves those conflicts and provides a
    unified base upon which ongoing development for 4.17 can be based.
    Conflicts:
            drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
            (IB/mlx5: Fix cleanup order on unload) added to for-rc and
            commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
            added as part of the devel cycle both needed to modify the
            init/de-init functions used by mlx5.  To support the new
            representors, the new functions added by the cleanup patch
            needed to be made non-static, and the init/de-init list
            added by the representors patch needed to be modified to
            match the init/de-init list changes made by the cleanup
            patch.
    Updates:
            drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
            prototypes added by representors patch to reflect new function
            names as changed by cleanup patch
            drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
            stage list to match new order from cleanup patch
====================
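Concretely, as the hunks below show, the stage handlers introduced by
the for-rc cleanup are exported via mlx5_ib.h (made non-static) so the
representor profiles in ib_rep.c can reference them, and both profiles
pick up the new UMR stages around IB registration, e.g.:

    STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                 NULL,
                 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
    STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                 mlx5_ib_stage_ib_reg_init,
                 mlx5_ib_stage_ib_reg_cleanup),
    STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
                 mlx5_ib_stage_post_ib_reg_umr_init,
                 NULL),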
Signed-off-by: David S. Miller <davem@davemloft.net>
--- /dev/null
-       STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-                    mlx5_ib_stage_umr_res_init,
-                    mlx5_ib_stage_umr_res_cleanup),
 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 +/*
 + * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 + */
 +
 +#include "ib_rep.h"
 +
 +static const struct mlx5_ib_profile rep_profile = {
 +      STAGE_CREATE(MLX5_IB_STAGE_INIT,
 +                   mlx5_ib_stage_init_init,
 +                   mlx5_ib_stage_init_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
 +                   mlx5_ib_stage_rep_flow_db_init,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 +                   mlx5_ib_stage_caps_init,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
 +                   mlx5_ib_stage_rep_non_default_cb,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 +                   mlx5_ib_stage_rep_roce_init,
 +                   mlx5_ib_stage_rep_roce_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
 +                   mlx5_ib_stage_dev_res_init,
 +                   mlx5_ib_stage_dev_res_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
 +                   mlx5_ib_stage_counters_init,
 +                   mlx5_ib_stage_counters_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 +                   mlx5_ib_stage_bfrag_init,
 +                   mlx5_ib_stage_bfrag_cleanup),
++      STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
++                   NULL,
++                   mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 +                   mlx5_ib_stage_ib_reg_init,
 +                   mlx5_ib_stage_ib_reg_cleanup),
++      STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
++                   mlx5_ib_stage_post_ib_reg_umr_init,
++                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 +                   mlx5_ib_stage_class_attr_init,
 +                   NULL),
 +};
 +
 +static int
 +mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 +{
 +      return 0;
 +}
 +
 +static void
 +mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 +{
 +      rep->rep_if[REP_IB].priv = NULL;
 +}
 +
 +static int
 +mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 +{
 +      struct mlx5_ib_dev *ibdev;
 +
 +      ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
 +      if (!ibdev)
 +              return -ENOMEM;
 +
 +      ibdev->rep = rep;
 +      ibdev->mdev = dev;
 +      ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
 +                             MLX5_CAP_GEN(dev, num_vhca_ports));
 +      if (!__mlx5_ib_add(ibdev, &rep_profile))
 +              return -EINVAL;
 +
 +      rep->rep_if[REP_IB].priv = ibdev;
 +
 +      return 0;
 +}
 +
 +static void
 +mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 +{
 +      struct mlx5_ib_dev *dev;
 +
 +      if (!rep->rep_if[REP_IB].priv)
 +              return;
 +
 +      dev = mlx5_ib_rep_to_dev(rep);
 +      __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 +      rep->rep_if[REP_IB].priv = NULL;
 +}
 +
 +static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 +{
 +      return mlx5_ib_rep_to_dev(rep);
 +}
 +
 +static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
 +{
 +      struct mlx5_eswitch *esw   = dev->mdev->priv.eswitch;
 +      int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
 +      int vport;
 +
 +      for (vport = 1; vport < total_vfs; vport++) {
 +              struct mlx5_eswitch_rep_if rep_if = {};
 +
 +              rep_if.load = mlx5_ib_vport_rep_load;
 +              rep_if.unload = mlx5_ib_vport_rep_unload;
 +              rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
 +              mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
 +      }
 +}
 +
 +static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
 +{
 +      struct mlx5_eswitch *esw   = dev->mdev->priv.eswitch;
 +      int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
 +      int vport;
 +
 +      for (vport = 1; vport < total_vfs; vport++)
 +              mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
 +}
 +
 +void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
 +{
 +      struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
 +      struct mlx5_eswitch_rep_if rep_if = {};
 +
 +      rep_if.load = mlx5_ib_nic_rep_load;
 +      rep_if.unload = mlx5_ib_nic_rep_unload;
 +      rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
 +      rep_if.priv = dev;
 +
 +      mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
 +
 +      mlx5_ib_rep_register_vf_vports(dev);
 +}
 +
 +void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
 +{
 +      struct mlx5_eswitch *esw   = dev->mdev->priv.eswitch;
 +
 +      mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
 +      mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
 +}
 +
 +u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 +{
 +      return mlx5_eswitch_mode(esw);
 +}
 +
 +struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
 +                                        int vport_index)
 +{
 +      return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
 +}
 +
 +struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 +                                        int vport_index)
 +{
 +      return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
 +}
 +
 +struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
 +{
 +      return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
 +}
 +
 +struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
 +{
 +      return mlx5_eswitch_vport_rep(esw, vport);
 +}
 +
 +int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 +                            struct mlx5_ib_sq *sq)
 +{
 +      struct mlx5_flow_handle *flow_rule;
 +      struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
 +
 +      if (!dev->rep)
 +              return 0;
 +
 +      flow_rule =
 +              mlx5_eswitch_add_send_to_vport_rule(esw,
 +                                                  dev->rep->vport,
 +                                                  sq->base.mqp.qpn);
 +      if (IS_ERR(flow_rule))
 +              return PTR_ERR(flow_rule);
 +      sq->flow_rule = flow_rule;
 +
 +      return 0;
 +}
 
        return ib_register_device(&dev->ib_dev, NULL);
  }
  
- void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 -static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
++void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
  {
-       ib_unregister_device(&dev->ib_dev);
+       destroy_umrc_res(dev);
  }
  
- int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
 -static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
++void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
  {
-       return create_umr_res(dev);
+       ib_unregister_device(&dev->ib_dev);
  }
  
- void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
 -static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
++int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
  {
-       destroy_umrc_res(dev);
+       return create_umr_res(dev);
  }
  
  static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
                     NULL),
  };
  
-       STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-                    mlx5_ib_stage_umr_res_init,
-                    mlx5_ib_stage_umr_res_cleanup),
 +static const struct mlx5_ib_profile nic_rep_profile = {
 +      STAGE_CREATE(MLX5_IB_STAGE_INIT,
 +                   mlx5_ib_stage_init_init,
 +                   mlx5_ib_stage_init_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
 +                   mlx5_ib_stage_flow_db_init,
 +                   mlx5_ib_stage_flow_db_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 +                   mlx5_ib_stage_caps_init,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
 +                   mlx5_ib_stage_rep_non_default_cb,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 +                   mlx5_ib_stage_rep_roce_init,
 +                   mlx5_ib_stage_rep_roce_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
 +                   mlx5_ib_stage_dev_res_init,
 +                   mlx5_ib_stage_dev_res_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
 +                   mlx5_ib_stage_counters_init,
 +                   mlx5_ib_stage_counters_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_UAR,
 +                   mlx5_ib_stage_uar_init,
 +                   mlx5_ib_stage_uar_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 +                   mlx5_ib_stage_bfrag_init,
 +                   mlx5_ib_stage_bfrag_cleanup),
++      STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
++                   NULL,
++                   mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 +      STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 +                   mlx5_ib_stage_ib_reg_init,
 +                   mlx5_ib_stage_ib_reg_cleanup),
++      STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
++                   mlx5_ib_stage_post_ib_reg_umr_init,
++                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 +                   mlx5_ib_stage_class_attr_init,
 +                   NULL),
 +      STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
 +                   mlx5_ib_stage_rep_reg_init,
 +                   mlx5_ib_stage_rep_reg_cleanup),
 +};
 +
  static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
  {
        struct mlx5_ib_multiport_info *mpi;
 
        MLX5_IB_STAGE_CONG_DEBUGFS,
        MLX5_IB_STAGE_UAR,
        MLX5_IB_STAGE_BFREG,
+       MLX5_IB_STAGE_PRE_IB_REG_UMR,
        MLX5_IB_STAGE_IB_REG,
-       MLX5_IB_STAGE_UMR_RESOURCES,
+       MLX5_IB_STAGE_POST_IB_REG_UMR,
        MLX5_IB_STAGE_DELAY_DROP,
        MLX5_IB_STAGE_CLASS_ATTR,
 +      MLX5_IB_STAGE_REP_REG,
        MLX5_IB_STAGE_MAX,
  };
  
  
  #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
  
- int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
- void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
 +/* Needed for rep profile */
 +int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
++void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
 +void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
++int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
 +int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
 +void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 +                    const struct mlx5_ib_profile *profile,
 +                    int stage);
 +void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 +                  const struct mlx5_ib_profile *profile);
 +
  int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
                          u8 port, struct ifla_vf_info *info);
  int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
 
--- /dev/null
 +/*
 + * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h
 + * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 + *
 + * Redistribution and use in source and binary forms, with or without
 + * modification, are permitted provided that the following conditions are met:
 + *
 + * 1. Redistributions of source code must retain the above copyright
 + *    notice, this list of conditions and the following disclaimer.
 + * 2. Redistributions in binary form must reproduce the above copyright
 + *    notice, this list of conditions and the following disclaimer in the
 + *    documentation and/or other materials provided with the distribution.
 + * 3. Neither the names of the copyright holders nor the names of its
 + *    contributors may be used to endorse or promote products derived from
 + *    this software without specific prior written permission.
 + *
 + * Alternatively, this software may be distributed under the terms of the
 + * GNU General Public License ("GPL") version 2 as published by the Free
 + * Software Foundation.
 + *
 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 + * POSSIBILITY OF SUCH DAMAGE.
 + */
 +
 +#ifndef _MLXSW_SPECTRUM_SPAN_H
 +#define _MLXSW_SPECTRUM_SPAN_H
 +
 +#include <linux/types.h>
 +#include <linux/if_ether.h>
 +
 +#include "spectrum_router.h"
 +
 +struct mlxsw_sp;
 +struct mlxsw_sp_port;
 +
 +enum mlxsw_sp_span_type {
 +      MLXSW_SP_SPAN_EGRESS,
 +      MLXSW_SP_SPAN_INGRESS
 +};
 +
 +struct mlxsw_sp_span_inspected_port {
 +      struct list_head list;
 +      enum mlxsw_sp_span_type type;
 +      u8 local_port;
++
++      /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
++      bool bound;
 +};
 +
 +struct mlxsw_sp_span_parms {
 +      struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
 +      unsigned int ttl;
 +      unsigned char dmac[ETH_ALEN];
 +      unsigned char smac[ETH_ALEN];
 +      union mlxsw_sp_l3addr daddr;
 +      union mlxsw_sp_l3addr saddr;
 +};
 +
 +struct mlxsw_sp_span_entry_ops;
 +
 +struct mlxsw_sp_span_entry {
 +      const struct net_device *to_dev;
 +      const struct mlxsw_sp_span_entry_ops *ops;
 +      struct mlxsw_sp_span_parms parms;
 +      struct list_head bound_ports_list;
 +      int ref_count;
 +      int id;
 +};
 +
 +struct mlxsw_sp_span_entry_ops {
 +      bool (*can_handle)(const struct net_device *to_dev);
 +      int (*parms)(const struct net_device *to_dev,
 +                   struct mlxsw_sp_span_parms *sparmsp);
 +      int (*configure)(struct mlxsw_sp_span_entry *span_entry,
 +                       struct mlxsw_sp_span_parms sparms);
 +      void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
 +};
 +
 +int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
 +void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
 +void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
 +
 +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 +                           const struct net_device *to_dev,
 +                           enum mlxsw_sp_span_type type,
 +                           bool bind, int *p_span_id);
 +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
 +                            enum mlxsw_sp_span_type type, bool bind);
 +struct mlxsw_sp_span_entry *
 +mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
 +                               const struct net_device *to_dev);
 +
 +void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
 +                                  struct mlxsw_sp_span_entry *span_entry);
 +
 +int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
 +
 +#endif
 
  {
        struct rndis_device *rndis_dev = net_dev->extension;
  
-       /* Don't try and setup sub channels if about to halt */
-       cancel_work_sync(&net_dev->subchan_work);
- 
        /* Halt and release the rndis device */
 -      rndis_filter_halt_device(rndis_dev);
 +      rndis_filter_halt_device(net_dev, rndis_dev);
  
        net_dev->extension = NULL;
  
 
        phy_trigger_machine(phydev, false);
  }
  
 -              goto phy_err;
+ /**
+  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+  * @phydev: target phy_device struct
+  */
+ static int phy_disable_interrupts(struct phy_device *phydev)
+ {
+       int err;
+ 
+       /* Disable PHY interrupts */
+       err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+       if (err)
 -      err = phy_clear_interrupt(phydev);
 -      if (err)
 -              goto phy_err;
 -
 -      return 0;
 -
 -phy_err:
 -      phy_error(phydev);
 -
 -      return err;
++              return err;
+ 
+       /* Clear the interrupt */
++      return phy_clear_interrupt(phydev);
+ }
+ 
+ /**
+  * phy_change - Called by the phy_interrupt to handle PHY changes
+  * @phydev: phy_device struct that interrupted
+  */
+ static irqreturn_t phy_change(struct phy_device *phydev)
+ {
+       if (phy_interrupt_is_valid(phydev)) {
+               if (phydev->drv->did_interrupt &&
+                   !phydev->drv->did_interrupt(phydev))
+                       return IRQ_NONE;
+ 
+               if (phydev->state == PHY_HALTED)
+                       if (phy_disable_interrupts(phydev))
+                               goto phy_err;
+       }
+ 
+       mutex_lock(&phydev->lock);
+       if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+               phydev->state = PHY_CHANGELINK;
+       mutex_unlock(&phydev->lock);
+ 
+       /* reschedule state queue work to run as soon as possible */
+       phy_trigger_machine(phydev, true);
+ 
+       if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+               goto phy_err;
+       return IRQ_HANDLED;
+ 
+ phy_err:
+       phy_error(phydev);
+       return IRQ_NONE;
+ }
+ 
+ /**
+  * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+  * @work: work_struct that describes the work to be done
+  */
+ void phy_change_work(struct work_struct *work)
+ {
+       struct phy_device *phydev =
+               container_of(work, struct phy_device, phy_queue);
+ 
+       phy_change(phydev);
+ }
+ 
  /**
   * phy_interrupt - PHY interrupt handler
   * @irq: interrupt line
 
   * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
   *    TDLS links.
   *
 + * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
 + *    mgd_prepare_tx() callback to be called before transmission of a
 + *    deauthentication frame in case the association was completed but no
 + *    beacon was heard. This is required in multi-channel scenarios, where the
 + *    virtual interface might not be given air time for the transmission of
 + *    the frame, as it is not synced with the AP/P2P GO yet, and thus the
 + *    deauthentication frame might not be transmitted.
++ *
+  * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+  *    support QoS NDP for AP probing - that's most likely a driver bug.
   *
   * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
   */
        IEEE80211_HW_REPORTS_LOW_ACK,
        IEEE80211_HW_SUPPORTS_TX_FRAG,
        IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
 +      IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
+       IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
  
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
 
        __be32                  rt_gateway;
  
        /* Miscellaneous cached information */
-       u32                     rt_pmtu;
+       u32                     rt_mtu_locked:1,
+                               rt_pmtu:31;
  
 -      u32                     rt_table_id;
 -
        struct list_head        rt_uncached;
        struct uncached_list    *rt_uncached_list;
  };
 
                rt->rt_is_input = 0;
                rt->rt_iif = 0;
                rt->rt_pmtu = 0;
+               rt->rt_mtu_locked = 0;
                rt->rt_gateway = 0;
                rt->rt_uses_gateway = 0;
 -              rt->rt_table_id = 0;
                INIT_LIST_HEAD(&rt->rt_uncached);
  
                rt->dst.output = ip_output;
 
        xdst->u.rt.rt_gateway = rt->rt_gateway;
        xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
        xdst->u.rt.rt_pmtu = rt->rt_pmtu;
 -      xdst->u.rt.rt_table_id = rt->rt_table_id;
+       xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
        INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
+       rt_add_uncached_list(&xdst->u.rt);
  
        return 0;
  }
 
         */
        if (csk->sk_user_data) {
                write_unlock_bh(&csk->sk_callback_lock);
 +              strp_stop(&psock->strp);
                strp_done(&psock->strp);
                kmem_cache_free(kcm_psockp, psock);
-               return -EALREADY;
+               err = -EALREADY;
+               goto out;
        }
  
        psock->save_data_ready = csk->sk_data_ready;
 
        FLAG(REPORTS_LOW_ACK),
        FLAG(SUPPORTS_TX_FRAG),
        FLAG(SUPPORTS_TDLS_BUFFER_STA),
 +      FLAG(DEAUTH_NEED_MGD_TX_PREP),
+       FLAG(DOESNT_SUPPORT_QOS_NDP),
  #undef FLAG
  };