bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- unsigned int subflows_max;
+ unsigned int limit_extra_subflows;
int ret = 0;
if (mptcp_pm_is_userspace(msk)) {
return false;
}
- subflows_max = mptcp_pm_get_subflows_max(msk);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk,
- pm->extra_subflows, subflows_max,
+ pm->extra_subflows, limit_extra_subflows,
READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
spin_lock_bh(&pm->lock);
if (READ_ONCE(pm->accept_subflow)) {
- ret = pm->extra_subflows < subflows_max;
- if (ret && ++pm->extra_subflows == subflows_max)
+ ret = pm->extra_subflows < limit_extra_subflows;
+ if (ret && ++pm->extra_subflows == limit_extra_subflows)
WRITE_ONCE(pm->accept_subflow, false);
}
spin_unlock_bh(&pm->lock);
WRITE_ONCE(pm->pm_type, pm_type);
if (pm_type == MPTCP_PM_TYPE_KERNEL) {
- bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);
+ bool subflows_allowed = !!mptcp_pm_get_limit_extra_subflows(msk);
/* pm->work_pending must only be set to 'true' when
* pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
unsigned int add_addr_signal_max;
unsigned int add_addr_accept_max;
unsigned int local_addr_max;
- unsigned int subflows_max;
+ unsigned int limit_extra_subflows;
unsigned int next_id;
DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
-unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
+/* Return the per-netns limit on extra (non-initial) subflows for this msk;
+ * reads the pernet value locklessly, paired with WRITE_ONCE() in the
+ * netlink limits setter.
+ */
+unsigned int mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- return READ_ONCE(pernet->subflows_max);
+ return READ_ONCE(pernet->limit_extra_subflows);
}
-EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
+EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_extra_subflows);
unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
{
DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
struct sock *sk = (struct sock *)msk, *ssk;
struct mptcp_subflow_context *subflow;
- unsigned int subflows_max;
+ unsigned int limit_extra_subflows;
int i = 0;
- subflows_max = mptcp_pm_get_subflows_max(msk);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
/* Forbid creation of new subflows matching existing ones, possibly
* already created by incoming ADD_ADDR
msk->pm.extra_subflows++;
i++;
- if (msk->pm.extra_subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
break;
}
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
+ unsigned int limit_extra_subflows;
unsigned int add_addr_signal_max;
bool signal_and_subflow = false;
unsigned int local_addr_max;
struct pm_nl_pernet *pernet;
struct mptcp_pm_local local;
- unsigned int subflows_max;
pernet = pm_nl_get_pernet(sock_net(sk));
add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
local_addr_max = mptcp_pm_get_local_addr_max(msk);
- subflows_max = mptcp_pm_get_subflows_max(msk);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
/* do lazy endpoint usage accounting for the MPC subflows */
if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
msk->pm.local_addr_used, local_addr_max,
msk->pm.add_addr_signaled, add_addr_signal_max,
- msk->pm.extra_subflows, subflows_max);
+ msk->pm.extra_subflows, limit_extra_subflows);
/* check first for announce */
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
subflow:
/* check if should create a new subflow */
while (msk->pm.local_addr_used < local_addr_max &&
- msk->pm.extra_subflows < subflows_max) {
+ msk->pm.extra_subflows < limit_extra_subflows) {
struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
bool fullmesh;
int i, nr;
bool c_flag_case)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- unsigned int subflows_max = mptcp_pm_get_subflows_max(msk);
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *entry;
+ unsigned int limit_extra_subflows;
struct mptcp_addr_info mpc_addr;
struct mptcp_pm_local *local;
int i = 0;
mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
rcu_read_lock();
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
msk->pm.extra_subflows++;
i++;
- if (msk->pm.extra_subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
break;
}
rcu_read_unlock();
{
unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk);
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- unsigned int subflows_max = mptcp_pm_get_subflows_max(msk);
struct sock *sk = (struct sock *)msk;
+ unsigned int limit_extra_subflows;
struct mptcp_addr_info mpc_addr;
struct mptcp_pm_local *local;
int i = 0;
mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
while (msk->pm.local_addr_used < local_addr_max) {
local = &locals[i];
msk->pm.extra_subflows++;
i++;
- if (msk->pm.extra_subflows >= subflows_max)
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
break;
}
{
struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX];
struct sock *sk = (struct sock *)msk;
+ unsigned int limit_extra_subflows;
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
- unsigned int subflows_max;
bool sf_created = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
- subflows_max = mptcp_pm_get_subflows_max(msk);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
pr_debug("accepted %d:%d remote family %d\n",
msk->pm.add_addr_accepted, add_addr_accept_max,
if (remote.id)
msk->pm.add_addr_accepted++;
if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.extra_subflows >= subflows_max)
+ msk->pm.extra_subflows >= limit_extra_subflows)
WRITE_ONCE(msk->pm.accept_addr, false);
}
}
if (ret)
goto unlock;
- subflows = pernet->subflows_max;
+ subflows = pernet->limit_extra_subflows;
ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
if (ret)
goto unlock;
WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
- WRITE_ONCE(pernet->subflows_max, subflows);
+ WRITE_ONCE(pernet->limit_extra_subflows, subflows);
unlock:
spin_unlock_bh(&pernet->lock);
goto fail;
if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
- READ_ONCE(pernet->subflows_max)))
+ READ_ONCE(pernet->limit_extra_subflows)))
goto fail;
genlmsg_end(msg, reply);
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- if (msk->pm.extra_subflows == mptcp_pm_get_subflows_max(msk) ||
+ if (msk->pm.extra_subflows == mptcp_pm_get_limit_extra_subflows(msk) ||
(find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
WRITE_ONCE(msk->pm.work_pending, false);
INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
/* Cit. 2 subflows ought to be enough for anybody. */
- pernet->subflows_max = 2;
+ pernet->limit_extra_subflows = 2;
pernet->next_id = 1;
pernet->stale_loss_cnt = 4;
spin_lock_init(&pernet->lock);