enum {
        BRIDGE_XSTATS_UNSPEC,
        BRIDGE_XSTATS_VLAN,
+       BRIDGE_XSTATS_MCAST,    /* carries a struct br_mcast_stats payload */
+       BRIDGE_XSTATS_PAD,      /* zero-length pad for 64-bit alignment */
        __BRIDGE_XSTATS_MAX
 };
 #define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
 
+/* Direction index for the two-element counter arrays below. */
+enum {
+       BR_MCAST_DIR_RX,        /* packets received by the bridge */
+       BR_MCAST_DIR_TX,        /* packets transmitted by the bridge */
+       BR_MCAST_DIR_SIZE       /* array size only, not a valid direction */
+};
+
+/* IGMP/MLD statistics exported to userspace via BRIDGE_XSTATS_MCAST */
+struct br_mcast_stats {
+       /* two-element arrays are indexed by BR_MCAST_DIR_RX/BR_MCAST_DIR_TX */
+       __u64 igmp_queries[BR_MCAST_DIR_SIZE];
+       __u64 igmp_leaves[BR_MCAST_DIR_SIZE];
+       __u64 igmp_v1reports[BR_MCAST_DIR_SIZE];
+       __u64 igmp_v2reports[BR_MCAST_DIR_SIZE];
+       __u64 igmp_v3reports[BR_MCAST_DIR_SIZE];
+       __u64 igmp_parse_errors;        /* counted on the RX parse path only */
+
+       __u64 mld_queries[BR_MCAST_DIR_SIZE];
+       __u64 mld_leaves[BR_MCAST_DIR_SIZE];
+       __u64 mld_v1reports[BR_MCAST_DIR_SIZE];
+       __u64 mld_v2reports[BR_MCAST_DIR_SIZE];
+       __u64 mld_parse_errors;         /* counted on the RX parse path only */
+
+       /* NOTE(review): nothing in this diff increments these two counters,
+        * so they always read as zero - presumably filled in by a follow-up
+        * change; confirm before relying on them.
+        */
+       __u64 mcast_bytes[BR_MCAST_DIR_SIZE];
+       __u64 mcast_packets[BR_MCAST_DIR_SIZE];
+};
 #endif /* _UAPI_LINUX_IF_BRIDGE_H */
 
        IFLA_BR_VLAN_DEFAULT_PVID,
        IFLA_BR_PAD,
        IFLA_BR_VLAN_STATS_ENABLED,
+       IFLA_BR_MCAST_STATS_ENABLED,
        __IFLA_BR_MAX,
 };
 
 
                return -ENOMEM;
 
        err = br_vlan_init(br);
-       if (err)
+       if (err) {
                free_percpu(br->stats);
+               return err;
+       }
+
+       err = br_multicast_init_stats(br);
+       if (err) {
+               free_percpu(br->stats);
+               br_vlan_flush(br);
+       }
        br_set_lockdep_class(dev);
 
        return err;
 
                                           struct sk_buff *skb),
                     bool unicast)
 {
-       struct net_bridge_port *p;
+       u8 igmp_type = br_multicast_igmp_type(skb);
+       __be16 proto = skb->protocol;
        struct net_bridge_port *prev;
+       struct net_bridge_port *p;
 
        prev = NULL;
 
                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;
+               if (prev == p)
+                       br_multicast_count(p->br, p, proto, igmp_type,
+                                          BR_MCAST_DIR_TX);
        }
 
        if (!prev)
                                        struct sk_buff *skb))
 {
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+       u8 igmp_type = br_multicast_igmp_type(skb);
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
+       __be16 proto = skb->protocol;
+
        struct hlist_node *rp;
 
        rp = rcu_dereference(hlist_first_rcu(&br->router_list));
                prev = maybe_deliver(prev, port, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;
+               if (prev == port)
+                       br_multicast_count(port->br, port, proto, igmp_type,
+                                          BR_MCAST_DIR_TX);
 
                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
 
 static struct net_bridge_port *new_nbp(struct net_bridge *br,
                                       struct net_device *dev)
 {
-       int index;
        struct net_bridge_port *p;
+       int index, err;
 
        index = find_portno(br);
        if (index < 0)
        br_init_port(p);
        br_set_state(p, BR_STATE_DISABLED);
        br_stp_port_timer_init(p);
-       br_multicast_add_port(p);
+       err = br_multicast_add_port(p);
+       if (err) {
+               dev_put(dev);
+               kfree(p);
+               p = ERR_PTR(err);
+       }
 
        return p;
 }
 
        skb = br_handle_vlan(br, vg, skb);
        if (!skb)
                return NET_RX_DROP;
+       /* update the multicast stats if the packet is IGMP/MLD */
+       br_multicast_count(br, NULL, skb->protocol, br_multicast_igmp_type(skb),
+                          BR_MCAST_DIR_TX);
 
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                       dev_net(indev), NULL, skb, indev, NULL,
 
 }
 
 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
-                                                   __be32 group)
+                                                   __be32 group,
+                                                   u8 *igmp_type)
 {
        struct sk_buff *skb;
        struct igmphdr *ih;
 
        skb_set_transport_header(skb, skb->len);
        ih = igmp_hdr(skb);
+       *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
        ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
        ih->code = (group ? br->multicast_last_member_interval :
                            br->multicast_query_response_interval) /
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
-                                                   const struct in6_addr *group)
+                                                   const struct in6_addr *grp,
+                                                   u8 *igmp_type)
 {
        struct sk_buff *skb;
        struct ipv6hdr *ip6h;
        skb_set_transport_header(skb, skb->len);
        mldq = (struct mld_msg *) icmp6_hdr(skb);
 
-       interval = ipv6_addr_any(group) ?
+       interval = ipv6_addr_any(grp) ?
                        br->multicast_query_response_interval :
                        br->multicast_last_member_interval;
 
+       *igmp_type = ICMPV6_MGM_QUERY;
        mldq->mld_type = ICMPV6_MGM_QUERY;
        mldq->mld_code = 0;
        mldq->mld_cksum = 0;
        mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
        mldq->mld_reserved = 0;
-       mldq->mld_mca = *group;
+       mldq->mld_mca = *grp;
 
        /* checksum */
        mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 #endif
 
 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
-                                               struct br_ip *addr)
+                                               struct br_ip *addr,
+                                               u8 *igmp_type)
 {
        switch (addr->proto) {
        case htons(ETH_P_IP):
-               return br_ip4_multicast_alloc_query(br, addr->u.ip4);
+               return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
 #if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
-               return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
+               return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
+                                                   igmp_type);
 #endif
        }
        return NULL;
                                      struct br_ip *ip)
 {
        struct sk_buff *skb;
+       u8 igmp_type;
 
-       skb = br_multicast_alloc_query(br, ip);
+       skb = br_multicast_alloc_query(br, ip, &igmp_type);
        if (!skb)
                return;
 
        if (port) {
                skb->dev = port->dev;
+               br_multicast_count(br, port, skb->protocol, igmp_type,
+                                  BR_MCAST_DIR_TX);
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                        dev_net(port->dev), NULL, skb, NULL, skb->dev,
                        br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
+               br_multicast_count(br, port, skb->protocol, igmp_type,
+                                  BR_MCAST_DIR_RX);
                netif_rx(skb);
        }
 }
 }
 #endif
 
-void br_multicast_add_port(struct net_bridge_port *port)
+int br_multicast_add_port(struct net_bridge_port *port)
 {
        port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
 
        setup_timer(&port->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, (unsigned long)port);
 #endif
+       port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
+       if (!port->mcast_stats)
+               return -ENOMEM;
+
+       return 0;
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)
                br_multicast_del_pg(br, pg);
        spin_unlock_bh(&br->multicast_lock);
        del_timer_sync(&port->multicast_router_timer);
+       free_percpu(port->mcast_stats);
 }
 
 static void br_multicast_enable(struct bridge_mcast_own_query *query)
 }
 #endif
 
+/* Account one IGMP/MLD packet that failed header parsing.  Bumps the
+ * per-CPU igmp_parse_errors (ETH_P_IP) or mld_parse_errors (ETH_P_IPV6)
+ * counter of port @p, or of the bridge itself when @p is NULL.  No-op
+ * unless the admin enabled accounting (br->multicast_stats_enabled).
+ */
+static void br_multicast_err_count(const struct net_bridge *br,
+                                  const struct net_bridge_port *p,
+                                  __be16 proto)
+{
+       struct bridge_mcast_stats __percpu *stats;
+       struct bridge_mcast_stats *pstats;
+
+       if (!br->multicast_stats_enabled)
+               return;
+
+       /* stats are allocated at port/bridge init; a NULL here is a bug */
+       if (p)
+               stats = p->mcast_stats;
+       else
+               stats = br->mcast_stats;
+       if (WARN_ON(!stats))
+               return;
+
+       pstats = this_cpu_ptr(stats);
+
+       u64_stats_update_begin(&pstats->syncp);
+       switch (proto) {
+       case htons(ETH_P_IP):
+               pstats->mstats.igmp_parse_errors++;
+               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       case htons(ETH_P_IPV6):
+               pstats->mstats.mld_parse_errors++;
+               break;
+#endif
+       }
+       u64_stats_update_end(&pstats->syncp);
+}
+
 static int br_multicast_ipv4_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb,
                        BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
        } else if (err < 0) {
+               br_multicast_err_count(br, port, skb->protocol);
                return err;
        }
 
-       BR_INPUT_SKB_CB(skb)->igmp = 1;
        ih = igmp_hdr(skb);
+       BR_INPUT_SKB_CB(skb)->igmp = ih->type;
 
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_REPORT:
        if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
+       br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
+                          BR_MCAST_DIR_RX);
+
        return err;
 }
 
                        BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
        } else if (err < 0) {
+               br_multicast_err_count(br, port, skb->protocol);
                return err;
        }
 
-       BR_INPUT_SKB_CB(skb)->igmp = 1;
        mld = (struct mld_msg *)skb_transport_header(skb);
+       BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
 
        switch (mld->mld_type) {
        case ICMPV6_MGM_REPORT:
        if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
+       br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
+                          BR_MCAST_DIR_RX);
+
        return err;
 }
 #endif
+/* Entry point from the bridge input path: clear the per-skb IGMP state,
+ * then dispatch to the IGMP (IPv4) or MLD (IPv6) snooping handler based
+ * on skb->protocol.  Frames of any other protocol pass through with 0.
+ */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
                     struct sk_buff *skb, u16 vid)
 {
+       int ret = 0;
+
        BR_INPUT_SKB_CB(skb)->igmp = 0;
        BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
 
 
        switch (skb->protocol) {
        case htons(ETH_P_IP):
-               return br_multicast_ipv4_rcv(br, port, skb, vid);
+               ret = br_multicast_ipv4_rcv(br, port, skb, vid);
+               break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
-               return br_multicast_ipv6_rcv(br, port, skb, vid);
+               ret = br_multicast_ipv6_rcv(br, port, skb, vid);
+               break;
#endif
        }
 
-       return 0;
+       return ret;
 }
 
 static void br_multicast_query_expired(struct net_bridge *br,
 
 out:
        spin_unlock_bh(&br->multicast_lock);
+
+       free_percpu(br->mcast_stats);
 }
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
        return ret;
 }
 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
+
+/* Map an IGMP/MLD message @type onto its per-CPU counter and increment it
+ * for direction @dir (BR_MCAST_DIR_RX or BR_MCAST_DIR_TX).  Types without
+ * a dedicated counter are silently ignored.  The u64_stats seqcount makes
+ * the update safe against readers snapshotting 64-bit counters.
+ */
+static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
+                              __be16 proto, u8 type, u8 dir)
+{
+       struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
+
+       u64_stats_update_begin(&pstats->syncp);
+       switch (proto) {
+       case htons(ETH_P_IP):
+               switch (type) {
+               case IGMP_HOST_MEMBERSHIP_REPORT:
+                       pstats->mstats.igmp_v1reports[dir]++;
+                       break;
+               case IGMPV2_HOST_MEMBERSHIP_REPORT:
+                       pstats->mstats.igmp_v2reports[dir]++;
+                       break;
+               case IGMPV3_HOST_MEMBERSHIP_REPORT:
+                       pstats->mstats.igmp_v3reports[dir]++;
+                       break;
+               case IGMP_HOST_MEMBERSHIP_QUERY:
+                       pstats->mstats.igmp_queries[dir]++;
+                       break;
+               case IGMP_HOST_LEAVE_MESSAGE:
+                       pstats->mstats.igmp_leaves[dir]++;
+                       break;
+               }
+               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       case htons(ETH_P_IPV6):
+               switch (type) {
+               case ICMPV6_MGM_REPORT:
+                       pstats->mstats.mld_v1reports[dir]++;
+                       break;
+               case ICMPV6_MLD2_REPORT:
+                       pstats->mstats.mld_v2reports[dir]++;
+                       break;
+               case ICMPV6_MGM_QUERY:
+                       pstats->mstats.mld_queries[dir]++;
+                       break;
+               case ICMPV6_MGM_REDUCTION:
+                       pstats->mstats.mld_leaves[dir]++;
+                       break;
+               }
+               break;
+#endif /* CONFIG_IPV6 */
+       }
+       u64_stats_update_end(&pstats->syncp);
+}
+
+/* Public counting entry point: update the IGMP/MLD stats of port @p, or
+ * of the bridge itself when @p is NULL.  @type == 0 means "not an
+ * IGMP/MLD frame" and is skipped, as is everything while accounting is
+ * administratively disabled.
+ */
+void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
+                       __be16 proto, u8 type, u8 dir)
+{
+       struct bridge_mcast_stats __percpu *stats;
+
+       /* if multicast_disabled is true then igmp type can't be set */
+       if (!type || !br->multicast_stats_enabled)
+               return;
+
+       /* stats are allocated at port/bridge init; a NULL here is a bug */
+       if (p)
+               stats = p->mcast_stats;
+       else
+               stats = br->mcast_stats;
+       if (WARN_ON(!stats))
+               return;
+
+       br_mcast_stats_add(stats, proto, type, dir);
+}
+
+/* Allocate the bridge-level per-CPU IGMP/MLD statistics.  Returns 0 on
+ * success or -ENOMEM; the memory is released with free_percpu() on
+ * bridge teardown.
+ */
+int br_multicast_init_stats(struct net_bridge *br)
+{
+       br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
+       if (!br->mcast_stats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Fold both directional counters of @src into @dst. */
+static void mcast_stats_add_dir(u64 *dst, u64 *src)
+{
+       dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
+       dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
+}
+
+/* Aggregate the per-CPU IGMP/MLD counters of port @p (or of the bridge
+ * itself when @p is NULL) into @dest for export to userspace.  @dest is
+ * zeroed first so it is well-defined even on the WARN_ON early return.
+ * Each per-CPU snapshot is taken under the u64_stats seqcount so 64-bit
+ * counters are read consistently on 32-bit hosts.
+ */
+void br_multicast_get_stats(const struct net_bridge *br,
+                           const struct net_bridge_port *p,
+                           struct br_mcast_stats *dest)
+{
+       struct bridge_mcast_stats __percpu *stats;
+       struct br_mcast_stats tdst;
+       int i;
+
+       memset(dest, 0, sizeof(*dest));
+       if (p)
+               stats = p->mcast_stats;
+       else
+               stats = br->mcast_stats;
+       if (WARN_ON(!stats))
+               return;
+
+       memset(&tdst, 0, sizeof(tdst));
+       for_each_possible_cpu(i) {
+               struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
+               struct br_mcast_stats temp;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+               mcast_stats_add_dir(tdst.igmp_queries, temp.igmp_queries);
+               mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
+               mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
+               mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
+               mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
+               tdst.igmp_parse_errors += temp.igmp_parse_errors;
+
+               mcast_stats_add_dir(tdst.mld_queries, temp.mld_queries);
+               mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
+               mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
+               mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
+               tdst.mld_parse_errors += temp.mld_parse_errors;
+
+               /* these were dropped during aggregation before: fold in the
+                * byte/packet totals too so the exported struct does not
+                * silently report them as zero
+                */
+               mcast_stats_add_dir(tdst.mcast_bytes, temp.mcast_bytes);
+               mcast_stats_add_dir(tdst.mcast_packets, temp.mcast_packets);
+       }
+       memcpy(dest, &tdst, sizeof(*dest));
+}
 
        [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
        [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
        [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
+       [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 
                br->multicast_startup_query_interval = clock_t_to_jiffies(val);
        }
+
+       if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
+               __u8 mcast_stats;
+
+               mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
+               br->multicast_stats_enabled = !!mcast_stats;
+       }
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (data[IFLA_BR_NF_CALL_IPTABLES]) {
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
+              nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_STATS_ENABLED */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
            nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
                       br->multicast_query_use_ifaddr) ||
            nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+           nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
+                      br->multicast_stats_enabled) ||
            nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
                        br->hash_elasticity) ||
            nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
        int numvls = 0;
 
        vg = br_vlan_group(br);
-       if (!vg)
-               return 0;
-
-       /* we need to count all, even placeholder entries */
-       list_for_each_entry(v, &vg->vlan_list, vlist)
-               numvls++;
+       if (vg) {
+               /* we need to count all, even placeholder entries */
+               list_for_each_entry(v, &vg->vlan_list, vlist)
+                       numvls++;
+       }
 
-       /* account for the vlans and the link xstats type nest attribute */
        return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
+              nla_total_size(sizeof(struct br_mcast_stats)) +
               nla_total_size(0);
 }
 
 static size_t brport_get_linkxstats_size(const struct net_device *dev)
 {
-       return nla_total_size(0);
+       return nla_total_size(sizeof(struct br_mcast_stats)) +
+              nla_total_size(0);
 }
 
 static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
                                  int *prividx)
 {
        struct net_bridge *br = netdev_priv(dev);
+       struct nlattr *nla __maybe_unused;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *v;
        struct nlattr *nest;
        int vl_idx = 0;
 
-       vg = br_vlan_group(br);
-       if (!vg)
-               goto out;
        nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
        if (!nest)
                return -EMSGSIZE;
-       list_for_each_entry(v, &vg->vlan_list, vlist) {
-               struct bridge_vlan_xstats vxi;
-               struct br_vlan_stats stats;
 
-               if (++vl_idx < *prividx)
-                       continue;
-               memset(&vxi, 0, sizeof(vxi));
-               vxi.vid = v->vid;
-               br_vlan_get_stats(v, &stats);
-               vxi.rx_bytes = stats.rx_bytes;
-               vxi.rx_packets = stats.rx_packets;
-               vxi.tx_bytes = stats.tx_bytes;
-               vxi.tx_packets = stats.tx_packets;
-
-               if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
+       vg = br_vlan_group(br);
+       if (vg) {
+               list_for_each_entry(v, &vg->vlan_list, vlist) {
+                       struct bridge_vlan_xstats vxi;
+                       struct br_vlan_stats stats;
+
+                       if (++vl_idx < *prividx)
+                               continue;
+                       memset(&vxi, 0, sizeof(vxi));
+                       vxi.vid = v->vid;
+                       br_vlan_get_stats(v, &stats);
+                       vxi.rx_bytes = stats.rx_bytes;
+                       vxi.rx_packets = stats.rx_packets;
+                       vxi.tx_bytes = stats.tx_bytes;
+                       vxi.tx_packets = stats.tx_packets;
+
+                       if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
+                               goto nla_put_failure;
+               }
+       }
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+       if (++vl_idx >= *prividx) {
+               nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
+                                       sizeof(struct br_mcast_stats),
+                                       BRIDGE_XSTATS_PAD);
+               if (!nla)
                        goto nla_put_failure;
+               br_multicast_get_stats(br, NULL, nla_data(nla));
        }
+#endif
        nla_nest_end(skb, nest);
        *prividx = 0;
-out:
+
        return 0;
 
 nla_put_failure:
                                  const struct net_device *dev,
                                  int *prividx)
 {
+       struct net_bridge_port *p = br_port_get_rtnl(dev);
+       struct nlattr *nla __maybe_unused;
        struct nlattr *nest;
 
+       if (!p)
+               return 0;
+
        nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
        if (!nest)
                return -EMSGSIZE;
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+       nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
+                               sizeof(struct br_mcast_stats),
+                               BRIDGE_XSTATS_PAD);
+       if (!nla) {
+               nla_nest_end(skb, nest);
+               return -EMSGSIZE;
+       }
+       br_multicast_get_stats(p->br, p, nla_data(nla));
+#endif
        nla_nest_end(skb, nest);
 
        return 0;
 
        struct br_ip addr;
        struct net_bridge_port __rcu    *port;
 };
+
+/* IGMP/MLD statistics, kept per-CPU; syncp is the u64_stats seqcount
+ * that lets readers take a consistent snapshot of the 64-bit counters.
+ */
+struct bridge_mcast_stats {
+       struct br_mcast_stats mstats;
+       struct u64_stats_sync syncp;
+};
 #endif
 
 struct br_vlan_stats {
        struct bridge_mcast_own_query   ip6_own_query;
 #endif /* IS_ENABLED(CONFIG_IPV6) */
        unsigned char                   multicast_router;
+       struct bridge_mcast_stats       __percpu *mcast_stats;
        struct timer_list               multicast_router_timer;
        struct hlist_head               mglist;
        struct hlist_node               rlist;
        u8                              multicast_querier:1;
        u8                              multicast_query_use_ifaddr:1;
        u8                              has_ipv6_addr:1;
+       u8                              multicast_stats_enabled:1;
 
        u32                             hash_elasticity;
        u32                             hash_max;
        struct bridge_mcast_other_query ip4_other_query;
        struct bridge_mcast_own_query   ip4_own_query;
        struct bridge_mcast_querier     ip4_querier;
+       struct bridge_mcast_stats       __percpu *mcast_stats;
 #if IS_ENABLED(CONFIG_IPV6)
        struct bridge_mcast_other_query ip6_other_query;
        struct bridge_mcast_own_query   ip6_own_query;
                     struct sk_buff *skb, u16 vid);
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb, u16 vid);
-void br_multicast_add_port(struct net_bridge_port *port);
+int br_multicast_add_port(struct net_bridge_port *port);
 void br_multicast_del_port(struct net_bridge_port *port);
 void br_multicast_enable_port(struct net_bridge_port *port);
 void br_multicast_disable_port(struct net_bridge_port *port);
                   struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type);
+void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
+                       __be16 proto, u8 type, u8 dir);
+int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_get_stats(const struct net_bridge *br,
+                           const struct net_bridge_port *p,
+                           struct br_mcast_stats *dest);
 
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
                return false;
        }
 }
+
+/* Return the IGMP/MLD message type cached in the skb control block by
+ * br_multicast_rcv(), or 0 if the frame was not recognized as IGMP/MLD.
+ */
+static inline int br_multicast_igmp_type(const struct sk_buff *skb)
+{
+       return BR_INPUT_SKB_CB(skb)->igmp;
+}
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
                                   struct net_bridge_port *port,
        return NULL;
 }
 
-static inline void br_multicast_add_port(struct net_bridge_port *port)
+/* Snooping compiled out: no per-port stats to allocate, always succeed. */
+static inline int br_multicast_add_port(struct net_bridge_port *port)
 {
+       return 0;
 }
 
 static inline void br_multicast_del_port(struct net_bridge_port *port)
 static inline void br_mdb_uninit(void)
 {
 }
+
+
+/* Stubs for builds without CONFIG_BRIDGE_IGMP_SNOOPING: counting is a
+ * no-op, stats init trivially succeeds, and no frame has an IGMP type.
+ */
+static inline void br_multicast_count(struct net_bridge *br,
+                                     const struct net_bridge_port *p,
+                                     __be16 proto, u8 type, u8 dir)
+{
+}
+
+static inline int br_multicast_init_stats(struct net_bridge *br)
+{
+       return 0;
+}
+
+static inline int br_multicast_igmp_type(const struct sk_buff *skb)
+{
+       return 0;
+}
 #endif
 
 /* br_vlan.c */
 
        return store_bridge_parm(d, buf, len, set_startup_query_interval);
 }
 static DEVICE_ATTR_RW(multicast_startup_query_interval);
+
+/* sysfs: show the current multicast_stats_enabled toggle (0 or 1). */
+static ssize_t multicast_stats_enabled_show(struct device *d,
+                                           struct device_attribute *attr,
+                                           char *buf)
+{
+       struct net_bridge *br = to_bridge(d);
+
+       return sprintf(buf, "%u\n", br->multicast_stats_enabled);
+}
+
+/* Normalize any non-zero value to 1; cannot fail. */
+static int set_stats_enabled(struct net_bridge *br, unsigned long val)
+{
+       br->multicast_stats_enabled = !!val;
+       return 0;
+}
+
+/* sysfs: parse and apply a new value via the common bridge param helper. */
+static ssize_t multicast_stats_enabled_store(struct device *d,
+                                            struct device_attribute *attr,
+                                            const char *buf,
+                                            size_t len)
+{
+       return store_bridge_parm(d, buf, len, set_stats_enabled);
+}
+static DEVICE_ATTR_RW(multicast_stats_enabled);
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 static ssize_t nf_call_iptables_show(
        &dev_attr_multicast_query_interval.attr,
        &dev_attr_multicast_query_response_interval.attr,
        &dev_attr_multicast_startup_query_interval.attr,
+       &dev_attr_multicast_stats_enabled.attr,
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        &dev_attr_nf_call_iptables.attr,