For the drivers that use the shaper API, switch to the mode where
the core stack holds the netdev lock. This affects two drivers:

* iavf - already grabs the netdev lock in ndo_open/ndo_stop, so mostly
  remove those calls
* netdevsim - switch to the _locked APIs to avoid a deadlock
The iavf_close diff is a bit confusing; the existing code looks like this:
  iavf_close() {
      netdev_lock()
      ..
      netdev_unlock()
      wait_event_timeout(down_waitqueue)
  }
I change it to the following:
  netdev_lock()
  iavf_close() {
      ..
      netdev_unlock()
      wait_event_timeout(down_waitqueue)
      netdev_lock() // reusing this lock call
  }
  netdev_unlock()
Since I'm reusing the existing netdev_lock call, it looks like I only
add a netdev_unlock.
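
From the core's perspective, ndo_stop is now entered with the instance
lock held; simplified from the __dev_close_many() hunk below:

  netdev_lock_ops(dev)   // takes netdev_lock() only for converted drivers
  ops->ndo_stop(dev)     // iavf_close() must return with the lock held
  netdev_unlock_ops(dev)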
Cc: Saeed Mahameed <saeed@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250305163732.2766420-2-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
- netdev_unlock(netdev);
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
return -EBUSY;
- }
usleep_range(500, 1000);
}
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return err;
}
u64 aq_to_restore;
int status;
- netdev_lock(netdev);
+ netdev_assert_locked(netdev);
+
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
}
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
mutex_unlock(&adapter->crit_lock);
for (i = 0; i < dev->num_rx_queues; i++) {
rq = ns->rq[i];
- netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
+ netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i]->napi);
+ __netif_napi_del_locked(&ns->rq[i]->napi);
return err;
}
struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
- napi_enable(&rq->napi);
+ napi_enable_locked(&rq->napi);
}
}
struct netdevsim *ns = netdev_priv(dev);
int err;
+ netdev_assert_locked(dev);
+
err = nsim_init_napi(ns);
if (err)
return err;
for (i = 0; i < dev->num_rx_queues; i++) {
struct nsim_rq *rq = ns->rq[i];
- napi_disable(&rq->napi);
- __netif_napi_del(&rq->napi);
+ napi_disable_locked(&rq->napi);
+ __netif_napi_del_locked(&rq->napi);
}
synchronize_net();
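
The netdevsim changes follow the same rule: nsim_open()/nsim_stop() now
run with the instance lock already held by the core, and the plain NAPI
helpers (netif_napi_add_config(), napi_enable(), napi_disable(),
__netif_napi_del()) take netdev_lock() internally, so calling them here
would self-deadlock. The _locked variants assert that the lock is held
instead of taking it:

  /* process context, instance lock not held: */
  napi_enable(&rq->napi);          /* takes netdev_lock() internally */

  /* inside ndo_open/ndo_stop of a converted driver, lock already held: */
  napi_enable_locked(&rq->napi);   /* asserts the lock is held */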
struct netdevsim *ns = netdev_priv(dev);
struct netdevsim *peer;
+ netdev_assert_locked(dev);
+
netif_carrier_off(dev);
peer = rtnl_dereference(ns->peer);
if (peer)
netdev_assert_locked(dev);
}
+static inline bool netdev_need_ops_lock(struct net_device *dev)
+{
+ bool ret = false;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ ret |= !!dev->netdev_ops->net_shaper_ops;
+#endif
+
+ return ret;
+}
+
+static inline void netdev_lock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_lock(dev);
+}
+
+static inline void netdev_unlock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+}
+
void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
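
The new helpers make the instance locking opt-in: netdev_need_ops_lock()
returns true only for drivers that publish net_shaper_ops, so all other
drivers keep the existing (unlocked) ndo calling convention. Further
conditions can later be OR-ed into the same check; for example
(illustrative only, not part of this patch):

  ret |= !!dev->queue_mgmt_ops;   /* hypothetical future condition */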
if (ret)
return ret;
+ netdev_lock_ops(dev);
+
set_bit(__LINK_STATE_START, &dev->state);
if (ops->ndo_validate_addr)
add_device_randomness(dev->dev_addr, dev->addr_len);
}
+ netdev_unlock_ops(dev);
+
return ret;
}
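
An abridged view of the resulting __dev_open(), to show what the lock
now covers (simplified; notifier and netpoll handling omitted):

  netdev_lock_ops(dev);

  set_bit(__LINK_STATE_START, &dev->state);
  if (ops->ndo_validate_addr)
      ret = ops->ndo_validate_addr(dev);
  if (!ret && ops->ndo_open)
      ret = ops->ndo_open(dev);
  ..
  netdev_unlock_ops(dev);

  return ret;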
* We allow it to be called even after a DETACH hot-plug
* event.
*/
+
+ /* TODO: move the lock up before clearing __LINK_STATE_START.
+ * Generates spurious lockdep warning.
+ */
+ netdev_lock_ops(dev);
+
if (ops->ndo_stop)
ops->ndo_stop(dev);
netif_set_up(dev, false);
netpoll_poll_enable(dev);
+
+ netdev_unlock_ops(dev);
}
}
else
dev->flags &= ~IFF_UP;
- netdev_lock(dev);
+ if (!netdev_need_ops_lock(dev))
+ netdev_lock(dev);
dev->up = value;
- netdev_unlock(dev);
+ if (!netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
}
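
The conditional in netif_set_up() exists because, for converted drivers,
both of its callers (__dev_open() and __dev_close_many() above) now
invoke it with the instance lock already held; taking netdev_lock()
again here would deadlock. Unconverted drivers still rely on
netif_set_up() to take the lock itself, so that dev->up is always
written under the lock.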
static inline void netif_set_gso_max_size(struct net_device *dev,