A few variables linked to the Path-Managers are confusingly named, and
clarifying them would help current and future developers.
One of them is 'subflows', which in fact represents the number of extra
subflows: all the additional subflows created after the initial one, and
not the total number of subflows.
While at it, add an additional name for the corresponding field in
MPTCP INFO: mptcpi_extra_subflows. To avoid breaking the current uAPI,
the new name is added as a 'define' pointing to the former name. This
will then also help userspace developers.
No functional changes intended.
Reviewed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20250925-net-next-mptcp-c-flag-laminar-v1-5-ad126cc47c6b@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
struct mptcp_info {
__u8 mptcpi_subflows;
+ #define mptcpi_extra_subflows mptcpi_subflows
__u8 mptcpi_add_addr_signal;
__u8 mptcpi_add_addr_accepted;
__u8 mptcpi_subflows_max;
if (mptcp_pm_is_userspace(msk)) {
if (mptcp_userspace_pm_active(msk)) {
spin_lock_bh(&pm->lock);
- pm->subflows++;
+ pm->extra_subflows++;
spin_unlock_bh(&pm->lock);
return true;
}
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
- subflows_max, READ_ONCE(pm->accept_subflow));
+ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk,
+ pm->extra_subflows, subflows_max,
+ READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
if (!READ_ONCE(pm->accept_subflow))
spin_lock_bh(&pm->lock);
if (READ_ONCE(pm->accept_subflow)) {
- ret = pm->subflows < subflows_max;
- if (ret && ++pm->subflows == subflows_max)
+ ret = pm->extra_subflows < subflows_max;
+ if (ret && ++pm->extra_subflows == subflows_max)
WRITE_ONCE(pm->accept_subflow, false);
}
spin_unlock_bh(&pm->lock);
if (mptcp_pm_is_userspace(msk)) {
if (update_subflows) {
spin_lock_bh(&pm->lock);
- pm->subflows--;
+ pm->extra_subflows--;
spin_unlock_bh(&pm->lock);
}
return;
if (!mptcp_pm_addr_families_match(sk, local, &remote))
return 0;
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
*addrs = remote;
return 1;
/* forbid creating multiple address towards this id */
__set_bit(addrs[i].id, unavail_id);
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
i++;
- if (msk->pm.subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= subflows_max)
break;
}
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
msk->pm.local_addr_used, local_addr_max,
msk->pm.add_addr_signaled, add_addr_signal_max,
- msk->pm.subflows, subflows_max);
+ msk->pm.extra_subflows, subflows_max);
/* check first for announce */
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
subflow:
/* check if should create a new subflow */
while (msk->pm.local_addr_used < local_addr_max &&
- msk->pm.subflows < subflows_max) {
+ msk->pm.extra_subflows < subflows_max) {
struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
bool fullmesh;
int i, nr;
if (is_id0)
local->addr.id = 0;
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
i++;
- if (msk->pm.subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= subflows_max)
break;
}
rcu_read_unlock();
continue;
msk->pm.local_addr_used++;
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
i++;
- if (msk->pm.subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= subflows_max)
break;
}
if (!mptcp_pm_addr_families_match(sk, &local->addr, remote))
return 0;
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
return 1;
}
if (remote.id)
msk->pm.add_addr_accepted++;
if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
+ msk->pm.extra_subflows >= subflows_max)
WRITE_ONCE(msk->pm.accept_addr, false);
}
}
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
+ if (msk->pm.extra_subflows == mptcp_pm_get_subflows_max(msk) ||
(find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
WRITE_ONCE(msk->pm.work_pending, false);
if (err)
mptcp_userspace_pm_delete_local_addr(msk, &entry);
else
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
spin_unlock_bh(&msk->pm.lock);
create_err:
u8 add_addr_accepted;
u8 local_addr_used;
u8 pm_type;
- u8 subflows;
+ u8 extra_subflows;
u8 status;
);
/* called under PM lock */
static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
{
- if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
+ if (--msk->pm.extra_subflows < mptcp_pm_get_subflows_max(msk))
WRITE_ONCE(msk->pm.accept_subflow, true);
}
return READ_ONCE(msk->pm.remote_deny_join_id0) &&
msk->pm.local_addr_used == 0 &&
mptcp_pm_get_add_addr_accept_max(msk) == 0 &&
- msk->pm.subflows < mptcp_pm_get_subflows_max(msk);
+ msk->pm.extra_subflows < mptcp_pm_get_subflows_max(msk);
}
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
memset(info, 0, sizeof(*info));
- info->mptcpi_subflows = READ_ONCE(msk->pm.subflows);
+ info->mptcpi_extra_subflows = READ_ONCE(msk->pm.extra_subflows);
info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled);
info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);
info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used);
info->mptcpi_bytes_sent = msk->bytes_sent;
info->mptcpi_bytes_received = msk->bytes_received;
info->mptcpi_bytes_retrans = msk->bytes_retrans;
- info->mptcpi_subflows_total = info->mptcpi_subflows +
+ info->mptcpi_subflows_total = info->mptcpi_extra_subflows +
__mptcp_has_initial_subflow(msk);
now = tcp_jiffies32;
info->mptcpi_last_data_sent = jiffies_to_msecs(now - msk->last_data_sent);
return 1;
msk = bpf_core_cast(sk, struct mptcp_sock);
- if (msk->pm.subflows != 1) {
+ if (msk->pm.extra_subflows != 1) {
ctx->retval = -1;
return 1;
}