 #include <linux/tcp.h>
 #include <linux/types.h>
 
+struct seq_file;
+
 /* MPTCP sk_buff extension data */
 struct mptcp_ext {
        u64             data_ack;
 
 bool mptcp_sk_is_subflow(const struct sock *sk);
 
+void mptcp_seq_show(struct seq_file *seq);
 #else
 
 static inline bool mptcp_sk_is_subflow(const struct sock *sk)
 {
        return false;
 }
 
+static inline void mptcp_seq_show(struct seq_file *seq) { }
 #endif /* CONFIG_MPTCP */
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 
 #if IS_ENABLED(CONFIG_TLS)
        DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
 #endif
+#ifdef CONFIG_MPTCP
+       DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
+#endif
 };
 
 #endif
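For reference, DEFINE_SNMP_STAT() in include/net/snmp.h declares a plain percpu pointer, so the netns_mib member added above is roughly equivalent to the line below; the pointer stays NULL until the first MPTCP socket in the namespace allocates it via mptcp_mib_alloc() further down.

	/* approximate expansion of DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics) */
	struct mptcp_mib __percpu *mptcp_statistics;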
 
        free_percpu(net->mib.net_statistics);
        free_percpu(net->mib.ip_statistics);
        free_percpu(net->mib.tcp_statistics);
+#ifdef CONFIG_MPTCP
+       /* allocated on demand, see mptcp_init_sock() */
+       free_percpu(net->mib.mptcp_statistics);
+#endif
 }
 
 static __net_initdata struct pernet_operations ipv4_mib_ops = {
 
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
+#include <net/mptcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <linux/bottom_half.h>
                                             offsetof(struct ipstats_mib, syncp)));
 
        seq_putc(seq, '\n');
+       mptcp_seq_show(seq);
        return 0;
 }
 
 
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_MPTCP) += mptcp.o
 
-mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o
+mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o mib.o
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/seq_file.h>
+#include <net/ip.h>
+#include <net/mptcp.h>
+#include <net/snmp.h>
+#include <net/net_namespace.h>
+
+#include "mib.h"
+
+static const struct snmp_mib mptcp_snmp_list[] = {
+       SNMP_MIB_ITEM("MPCapableSYNRX", MPTCP_MIB_MPCAPABLEPASSIVE),
+       SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
+       SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
+       SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
+       SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+       SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+       SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+       SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+       SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
+       SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
+       SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+       SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
+       SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+       SNMP_MIB_SENTINEL
+};
+
+/* mptcp_mib_alloc - allocate percpu mib counters
+ *
+ * These are allocated when the first mptcp socket is created so
+ * we do not waste percpu memory if mptcp isn't in use.
+ */
+bool mptcp_mib_alloc(struct net *net)
+{
+       struct mptcp_mib __percpu *mib = alloc_percpu(struct mptcp_mib);
+
+       if (!mib)
+               return false;
+
+       if (cmpxchg(&net->mib.mptcp_statistics, NULL, mib))
+               free_percpu(mib);
+
+       return true;
+}
+
+void mptcp_seq_show(struct seq_file *seq)
+{
+       struct net *net = seq->private;
+       int i;
+
+       seq_puts(seq, "MPTcpExt:");
+       for (i = 0; mptcp_snmp_list[i].name; i++)
+               seq_printf(seq, " %s", mptcp_snmp_list[i].name);
+
+       seq_puts(seq, "\nMPTcpExt:");
+
+       if (!net->mib.mptcp_statistics) {
+               for (i = 0; mptcp_snmp_list[i].name; i++)
+                       seq_puts(seq, " 0");
+
+               seq_putc(seq, '\n');
+               return;
+       }
+
+       for (i = 0; mptcp_snmp_list[i].name; i++)
+               seq_printf(seq, " %lu",
+                          snmp_fold_field(net->mib.mptcp_statistics,
+                                          mptcp_snmp_list[i].entry));
+       seq_putc(seq, '\n');
+}
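The two passes above print the counter names first and the values second, mirroring the TcpExt/IpExt layout of /proc/net/netstat; when the percpu block was never allocated, the value line is all zeros. The appended output should look roughly like this (middle columns elided):

	MPTcpExt: MPCapableSYNRX MPCapableACKRX MPCapableFallbackACK ... InfiniteMapRx
	MPTcpExt: 0 0 0 ... 0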
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+enum linux_mptcp_mib_field {
+       MPTCP_MIB_NUM = 0,
+       MPTCP_MIB_MPCAPABLEPASSIVE,     /* Received SYN with MP_CAPABLE */
+       MPTCP_MIB_MPCAPABLEPASSIVEACK,  /* Received third ACK with MP_CAPABLE */
+       MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
+       MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
+       MPTCP_MIB_RETRANSSEGS,          /* Segments retransmitted at the MPTCP-level */
+       MPTCP_MIB_JOINNOTOKEN,          /* Received MP_JOIN but the token was not found */
+       MPTCP_MIB_JOINSYNRX,            /* Received a SYN + MP_JOIN */
+       MPTCP_MIB_JOINSYNACKRX,         /* Received a SYN/ACK + MP_JOIN */
+       MPTCP_MIB_JOINSYNACKMAC,        /* HMAC was wrong on SYN/ACK + MP_JOIN */
+       MPTCP_MIB_JOINACKRX,            /* Received an ACK + MP_JOIN */
+       MPTCP_MIB_JOINACKMAC,           /* HMAC was wrong on ACK + MP_JOIN */
+       MPTCP_MIB_DSSNOMATCH,           /* Received a new mapping that did not match the previous one */
+       MPTCP_MIB_INFINITEMAPRX,        /* Received an infinite mapping */
+       __MPTCP_MIB_MAX
+};
+
+#define LINUX_MIB_MPTCP_MAX    __MPTCP_MIB_MAX
+struct mptcp_mib {
+       unsigned long mibs[LINUX_MIB_MPTCP_MAX];
+};
+
+static inline void MPTCP_INC_STATS(struct net *net,
+                                  enum linux_mptcp_mib_field field)
+{
+       if (likely(net->mib.mptcp_statistics))
+               SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+}
+
+static inline void __MPTCP_INC_STATS(struct net *net,
+                                    enum linux_mptcp_mib_field field)
+{
+       if (likely(net->mib.mptcp_statistics))
+               __SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+}
+
+bool mptcp_mib_alloc(struct net *net);
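The paired increment helpers follow the usual SNMP convention: the double-underscore variant maps to the non-preemption-safe percpu op and may only run where reentrancy is already excluded, which is why the mptcp_accept() hunk below calls __MPTCP_INC_STATS() between bh_unlock_sock() and local_bh_enable(). A simplified sketch of the underlying net/snmp.h macros for unsigned long counters (not their full definitions):

	/* SNMP_INC_STATS(mib, field)   ~ this_cpu_inc(mib->mibs[field])   */
	/* __SNMP_INC_STATS(mib, field) ~ __this_cpu_inc(mib->mibs[field]) */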
 
 #endif
 #include <net/mptcp.h>
 #include "protocol.h"
+#include "mib.h"
 
 #define MPTCP_SAME_STATE TCP_MAX_STATES
 
                if (ret < 0)
                        break;
 
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
                copied += ret;
                dfrag->data_len -= ret;
                dfrag->offset += ret;
 
 static int mptcp_init_sock(struct sock *sk)
 {
-       int ret = __mptcp_init_sock(sk);
+       struct net *net = sock_net(sk);
+       int ret;
 
+       if (!mptcp_is_enabled(net))
+               return -ENOPROTOOPT;
+
+       if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
+               return -ENOMEM;
+
+       ret = __mptcp_init_sock(sk);
        if (ret)
                return ret;
 
        sk_sockets_allocated_inc(sk);
        sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
 
-       if (!mptcp_is_enabled(sock_net(sk)))
-               return -ENOPROTOOPT;
-
        return 0;
 }
 
                list_add(&subflow->node, &msk->conn_list);
 
                bh_unlock_sock(new_mptcp_sock);
+
+               __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
                local_bh_enable();
+       } else {
+               MPTCP_INC_STATS(sock_net(sk),
+                               MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
        }
 
        return newsk;
        u64 ack_seq;
 
        subflow = mptcp_subflow_ctx(ssk);
-
-       if (!subflow->mp_capable)
-               return;
-
        sk = subflow->conn;
        msk = mptcp_sk(sk);
 
+       if (!subflow->mp_capable) {
+               MPTCP_INC_STATS(sock_net(sk),
+                               MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+               return;
+       }
+
        pr_debug("msk=%p, token=%u", sk, subflow->token);
 
        mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
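End to end, the reworked mptcp_init_sock() is observable from userspace: the first MPTCP socket in a namespace triggers the lazy MIB allocation, and a disabled MPTCP sysctl now fails socket creation early with ENOPROTOOPT. A minimal sketch, assuming the installed headers do not yet define IPPROTO_MPTCP (protocol number 262):

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/socket.h>

	#ifndef IPPROTO_MPTCP
	#define IPPROTO_MPTCP 262
	#endif

	int main(void)
	{
		/* first MPTCP socket in this netns: mptcp_init_sock()
		 * allocates percpu mptcp_statistics on demand
		 * (or fails with ENOMEM)
		 */
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

		if (fd < 0) {
			/* ENOPROTOOPT when MPTCP is disabled in this netns */
			fprintf(stderr, "mptcp socket: %s\n", strerror(errno));
			return 1;
		}
		close(fd);
		return 0;
	}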
 
 #endif
 #include <net/mptcp.h>
 #include "protocol.h"
+#include "mib.h"
+
+static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
+                                 enum linux_mptcp_mib_field field)
+{
+       MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
+}
 
 static int subflow_rebuild_header(struct sock *sk)
 {
 
        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
-               pr_debug("subflow_req=%p, token=%u - not found\n",
-                        subflow_req, subflow_req->token);
+               SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return false;
        }
 
                return;
 #endif
 
-       if (rx_opt.mptcp.mp_capable && rx_opt.mptcp.mp_join)
-               return;
+       if (rx_opt.mptcp.mp_capable) {
+               SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+
+               if (rx_opt.mptcp.mp_join)
+                       return;
+       } else if (rx_opt.mptcp.mp_join) {
+               SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+       }
 
        if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
                int err;
                         subflow, subflow->thmac,
                         subflow->remote_nonce);
                if (!subflow_thmac_valid(subflow)) {
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKMAC);
                        subflow->mp_join = 0;
                        goto do_reset;
                }
                        goto do_reset;
 
                subflow->conn_finished = 1;
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
        } else {
 do_reset:
                tcp_send_active_reset(sk, GFP_ATOMIC);
                opt_rx.mptcp.mp_join = 0;
                mptcp_get_options(skb, &opt_rx);
                if (!opt_rx.mptcp.mp_join ||
-                   !subflow_hmac_valid(req, &opt_rx))
+                   !subflow_hmac_valid(req, &opt_rx)) {
+                       SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        return NULL;
+               }
        }
 
 create_child:
                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto close_child;
+
+                       SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                }
        }
 
        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_err("Infinite mapping not handled");
+               MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                return MAPPING_INVALID;
        }
 
                /* If this skb data are fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
-               if (skb_is_fully_mapped(ssk, skb))
+               if (skb_is_fully_mapped(ssk, skb)) {
+                       MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
+               }
 
                /* will validate the next map after consuming the current one */
                return MAPPING_OK;