static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
        .sync_table     = bnx2x_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 
 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
        .set_port       = bnxt_udp_tunnel_set_port,
        .unset_port     = bnxt_udp_tunnel_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 }, bnxt_udp_tunnels_p7 = {
        .set_port       = bnxt_udp_tunnel_set_port,
        .unset_port     = bnxt_udp_tunnel_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 
 static const struct udp_tunnel_nic_info be_udp_tunnels = {
        .set_port       = be_vxlan_set_port,
        .unset_port     = be_vxlan_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
        },
 
 
        pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
        pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
-       pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
        pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
        pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
        pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
 
 
        pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
        pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
-       pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
        pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
        if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
                pf->hw.udp_tunnel_nic.tables[0].n_entries =
 
 
 static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
        .sync_table     = mlx4_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
        },
 
 
        priv->nic_info.set_port = mlx5e_vxlan_set_port;
        priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
-       priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                               UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+       priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
        priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
        /* Don't count the space hard-coded to the IANA port */
        priv->nic_info.tables[0].n_entries =
 
 
 static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
        .sync_table     = nfp_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                {
                        .n_entries      = NFP_NET_N_VXLAN_PORTS,
 
 
 static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
        .sync_table     = qede_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
        },
 }, qede_udp_tunnels_vxlan = {
        .sync_table     = qede_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
        },
 }, qede_udp_tunnels_geneve = {
        .sync_table     = qede_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
        },
 
 
 static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
        .sync_table     = qlcnic_udp_tunnel_sync,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
        },
 
 static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
        .set_port       = efx_ef10_udp_tnl_set_port,
        .unset_port     = efx_ef10_udp_tnl_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                {
                        .n_entries = 16,
 
        struct net_device *dev = file->private_data;
        struct netdevsim *ns = netdev_priv(dev);
 
-       rtnl_lock();
        if (dev->reg_state == NETREG_REGISTERED) {
                memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
                udp_tunnel_nic_reset_ntf(dev);
        }
-       rtnl_unlock();
 
        return count;
 }
                info->sync_table = NULL;
        }
 
-       if (ns->udp_ports.sleep)
-               info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
        if (nsim_dev->udp_ports.open_only)
                info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
        if (nsim_dev->udp_ports.ipv4_only)
 
 void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
 void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
 
-static inline void udp_tunnel_get_rx_info(struct net_device *dev)
-{
-       ASSERT_RTNL();
-       if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
-               return;
-       call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
-}
-
-static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
-{
-       ASSERT_RTNL();
-       if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
-               return;
-       call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
-}
-
 /* Transmit the skb using UDP encapsulation. */
 void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                         __be32 src, __be32 dst, __u8 tos, __u8 ttl,
 #define UDP_TUNNEL_NIC_MAX_TABLES      4
 
 enum udp_tunnel_nic_info_flags {
-       /* Device callbacks may sleep */
-       UDP_TUNNEL_NIC_INFO_MAY_SLEEP   = BIT(0),
        /* Device only supports offloads when it's open, all ports
         * will be removed before close and re-added after open.
         */
-       UDP_TUNNEL_NIC_INFO_OPEN_ONLY   = BIT(1),
+       UDP_TUNNEL_NIC_INFO_OPEN_ONLY   = BIT(0),
        /* Device supports only IPv4 tunnels */
-       UDP_TUNNEL_NIC_INFO_IPV4_ONLY   = BIT(2),
+       UDP_TUNNEL_NIC_INFO_IPV4_ONLY   = BIT(1),
        /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
         * This port must not be counted towards n_entries of any table.
         * Driver will not receive any callback associated with port 4789.
         */
-       UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN   = BIT(3),
+       UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN   = BIT(2),
 };
 
 struct udp_tunnel_nic;
        size_t (*dump_size)(struct net_device *dev, unsigned int table);
        int (*dump_write)(struct net_device *dev, unsigned int table,
                          struct sk_buff *skb);
+       void (*assert_locked)(struct net_device *dev);
+       void (*lock)(struct net_device *dev);
+       void (*unlock)(struct net_device *dev);
 };
 
 #ifdef CONFIG_INET
 udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
                             unsigned int idx, u8 priv)
 {
-       if (udp_tunnel_nic_ops)
+       if (udp_tunnel_nic_ops) {
+               udp_tunnel_nic_ops->lock(dev);
                udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+               udp_tunnel_nic_ops->unlock(dev);
+       }
+}
+
+/* Assert (via lockdep) that @dev's udp_tunnel_nic lock is held.
+ * No-op when the udp_tunnel_nic core has not registered its ops.
+ */
+static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->assert_locked(dev);
+}
+
+/* Take @dev's udp_tunnel_nic lock; no-op when ops are unregistered */
+static inline void udp_tunnel_nic_lock(struct net_device *dev)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->lock(dev);
+}
+
+/* Release @dev's udp_tunnel_nic lock; no-op when ops are unregistered */
+static inline void udp_tunnel_nic_unlock(struct net_device *dev)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->unlock(dev);
 }
 
 static inline void
 static inline size_t
 udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
 {
+       size_t ret;
+
        if (!udp_tunnel_nic_ops)
                return 0;
-       return udp_tunnel_nic_ops->dump_size(dev, table);
+
+       /* dump ops must now run under the device's udp_tunnel_nic lock */
+       udp_tunnel_nic_ops->lock(dev);
+       ret = udp_tunnel_nic_ops->dump_size(dev, table);
+       udp_tunnel_nic_ops->unlock(dev);
+
+       return ret;
 }
 
 static inline int
 udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
                          struct sk_buff *skb)
 {
+       int ret;
+
        if (!udp_tunnel_nic_ops)
                return 0;
-       return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+
+       /* dump ops must now run under the device's udp_tunnel_nic lock */
+       udp_tunnel_nic_ops->lock(dev);
+       ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
+       udp_tunnel_nic_ops->unlock(dev);
+
+       return ret;
+}
+
+/* Fire NETDEV_UDP_TUNNEL_PUSH_INFO so tunnel drivers (re)announce their
+ * ports to @dev. Caller must hold RTNL and @dev's udp_tunnel_nic lock
+ * (both are asserted below).
+ */
+static inline void udp_tunnel_get_rx_info(struct net_device *dev)
+{
+       ASSERT_RTNL();
+       if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+               return;
+       udp_tunnel_nic_assert_locked(dev);
+       call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
 }
+
+/* Fire NETDEV_UDP_TUNNEL_DROP_INFO so offloaded ports are withdrawn
+ * from @dev. Caller must hold RTNL and @dev's udp_tunnel_nic lock
+ * (both are asserted below).
+ */
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+       ASSERT_RTNL();
+       if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+               return;
+       udp_tunnel_nic_assert_locked(dev);
+       call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
+
 #endif
 
                         * *before* calling udp_tunnel_get_rx_info,
                         * but *after* calling udp_tunnel_drop_rx_info.
                         */
+                       udp_tunnel_nic_lock(dev);
                        if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
                                dev->features = features;
                                udp_tunnel_get_rx_info(dev);
                        } else {
                                udp_tunnel_drop_rx_info(dev);
                        }
+                       udp_tunnel_nic_unlock(dev);
                }
 
                if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
 
        struct udp_tunnel_info ti;
        struct net_device *dev;
 
+       ASSERT_RTNL();
+
        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
+               udp_tunnel_nic_lock(dev);
                udp_tunnel_nic_add_port(dev, &ti);
+               udp_tunnel_nic_unlock(dev);
        }
-       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);
 
        struct udp_tunnel_info ti;
        struct net_device *dev;
 
+       ASSERT_RTNL();
+
        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
+               udp_tunnel_nic_lock(dev);
                udp_tunnel_nic_del_port(dev, &ti);
+               udp_tunnel_nic_unlock(dev);
        }
-       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);
 
 
  * struct udp_tunnel_nic - UDP tunnel port offload state
  * @work:      async work for talking to hardware from process context
  * @dev:       netdev pointer
+ * @lock:      protects all fields
  * @need_sync: at least one port start changed
  * @need_replay: space was freed, we need a replay of all ports
  * @work_pending: @work is currently scheduled
 
        struct net_device *dev;
 
+       struct mutex lock;
+
        u8 need_sync:1;
        u8 need_replay:1;
        u8 work_pending:1;
 static void
 udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
 {
-       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
-       bool may_sleep;
-
        if (!utn->need_sync)
                return;
 
-       /* Drivers which sleep in the callback need to update from
-        * the workqueue, if we come from the tunnel driver's notification.
-        */
-       may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
-       if (!may_sleep)
-               __udp_tunnel_nic_device_sync(dev, utn);
-       if (may_sleep || utn->need_replay) {
-               queue_work(udp_tunnel_nic_workqueue, &utn->work);
-               utn->work_pending = 1;
-       }
+       /* With MAY_SLEEP gone every driver callback may sleep, so the
+        * sync is now always deferred to the workqueue instead of being
+        * run inline for "atomic" drivers.
+        */
+       queue_work(udp_tunnel_nic_workqueue, &utn->work);
+       utn->work_pending = 1;
 }
 
 static bool
        struct udp_tunnel_nic *utn;
        unsigned int i, j;
 
-       ASSERT_RTNL();
-
        utn = dev->udp_tunnel_nic;
        if (!utn)
                return;
 
+       mutex_lock(&utn->lock);
+
        utn->need_sync = false;
        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++) {
 
                        entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
                                          UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
-                       /* We don't release rtnl across ops */
+                       /* We don't release utn lock across ops */
                        WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
                        if (!entry->use_cnt)
                                continue;
                }
 
        __udp_tunnel_nic_device_sync(dev, utn);
+
+       mutex_unlock(&utn->lock);
 }
 
 static size_t
        return -EMSGSIZE;
 }
 
+/* lockdep-check that @dev's utn mutex is held; no-op without utn state */
+static void __udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       if (utn)
+               lockdep_assert_held(&utn->lock);
+}
+
+/* Take @dev's utn mutex; no-op when the device has no utn state */
+static void __udp_tunnel_nic_lock(struct net_device *dev)
+{
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       if (utn)
+               mutex_lock(&utn->lock);
+}
+
+/* Release @dev's utn mutex; no-op when the device has no utn state */
+static void __udp_tunnel_nic_unlock(struct net_device *dev)
+{
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       if (utn)
+               mutex_unlock(&utn->lock);
+}
+
 static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
        .get_port       = __udp_tunnel_nic_get_port,
        .set_port_priv  = __udp_tunnel_nic_set_port_priv,
        .reset_ntf      = __udp_tunnel_nic_reset_ntf,
        .dump_size      = __udp_tunnel_nic_dump_size,
        .dump_write     = __udp_tunnel_nic_dump_write,
+       /* expose the per-device lock so inline helpers in the header
+        * can take/assert it without seeing struct udp_tunnel_nic
+        */
+       .assert_locked  = __udp_tunnel_nic_assert_locked,
+       .lock           = __udp_tunnel_nic_lock,
+       .unlock         = __udp_tunnel_nic_unlock,
 };
 
 static void
                container_of(work, struct udp_tunnel_nic, work);
 
        rtnl_lock();
+       mutex_lock(&utn->lock);
+
        utn->work_pending = 0;
        __udp_tunnel_nic_device_sync(utn->dev, utn);
 
        if (utn->need_replay)
                udp_tunnel_nic_replay(utn->dev, utn);
+
+       mutex_unlock(&utn->lock);
        rtnl_unlock();
 }
 
                return NULL;
        utn->n_tables = n_tables;
        INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
+       mutex_init(&utn->lock);
 
        for (i = 0; i < n_tables; i++) {
                utn->entries[i] = kcalloc(info->tables[i].n_entries,
        dev_hold(dev);
        dev->udp_tunnel_nic = utn;
 
-       if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+       if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) {
+               udp_tunnel_nic_lock(dev);
                udp_tunnel_get_rx_info(dev);
+               udp_tunnel_nic_unlock(dev);
+       }
 
        return 0;
 }
 {
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
 
+       udp_tunnel_nic_lock(dev);
+
        /* For a shared table remove this dev from the list of sharing devices
         * and if there are other devices just detach.
         */
                list_for_each_entry(node, &info->shared->devices, list)
                        if (node->dev == dev)
                                break;
-               if (list_entry_is_head(node, &info->shared->devices, list))
+               if (list_entry_is_head(node, &info->shared->devices, list)) {
+                       udp_tunnel_nic_unlock(dev);
                        return;
+               }
 
                list_del(&node->list);
                kfree(node);
                if (first) {
                        udp_tunnel_drop_rx_info(dev);
                        utn->dev = first->dev;
+                       udp_tunnel_nic_unlock(dev);
                        goto release_dev;
                }
 
         * from the work which we will boot immediately.
         */
        udp_tunnel_nic_flush(dev, utn);
+       udp_tunnel_nic_unlock(dev);
 
        /* Wait for the work to be done using the state, netdev core will
         * retry unregister until we give up our reference on this device.
                return NOTIFY_DONE;
 
        if (event == NETDEV_UP) {
+               udp_tunnel_nic_lock(dev);
                WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
                udp_tunnel_get_rx_info(dev);
+               udp_tunnel_nic_unlock(dev);
                return NOTIFY_OK;
        }
        if (event == NETDEV_GOING_DOWN) {
+               udp_tunnel_nic_lock(dev);
                udp_tunnel_nic_flush(dev, utn);
+               udp_tunnel_nic_unlock(dev);
                return NOTIFY_OK;
        }