return true;
 }
 
+static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
+                                             bool csum_reqd)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       struct csum_pseudo_header header;
+       u32 offset, seq, delta;
+       __wsum csum;
+       int len;
+
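+       /* the connection-level csum was not negotiated: nothing to check */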
+       if (!csum_reqd)
+               return MAPPING_OK;
+
+       /* mapping already validated on previous traversal */
+       if (subflow->map_csum_len == subflow->map_data_len)
+               return MAPPING_OK;
+
+       /* traverse the receive queue, ensuring it contains a full
+        * DSS mapping and accumulating the related csum.
+        * Preserve the accumulated csum across multiple calls, to
+        * compute the csum only once
+        */
+       delta = subflow->map_data_len - subflow->map_csum_len;
+       for (;;) {
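+               /* 'seq' is the subflow-level sequence number of the first
+                * byte not yet accumulated in the csum; 'offset' locates
+                * that byte inside the current skb
+                */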
+               seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
+               offset = seq - TCP_SKB_CB(skb)->seq;
+
+               /* if the current skb has not been fully accounted for yet,
+                * csum its contents up to the amount covered by the
+                * current DSS
+                */
+               if (offset < skb->len) {
+                       len = min(skb->len - offset, delta);
+                       csum = skb_checksum(skb, offset, len, 0);
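+                       /* fold the chunk into the running csum at byte
+                        * offset 'map_csum_len'; csum_block_add() handles
+                        * the swap needed when the offset is odd
+                        */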
+                       subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
+                                                               subflow->map_csum_len);
+
+                       delta -= len;
+                       subflow->map_csum_len += len;
+               }
+               if (delta == 0)
+                       break;
+
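+               /* the mapping spans past this skb: keep accumulating from
+                * the next skb in the receive queue, if any
+                */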
+               if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
+                       /* if this subflow is closed, the partial mapping
+                        * will never be completed; flush the pending skbs,
+                        * so that subflow_sched_work_if_closed() can kick in
+                        */
+                       if (unlikely(ssk->sk_state == TCP_CLOSE))
+                               while ((skb = skb_peek(&ssk->sk_receive_queue)))
+                                       sk_eat_skb(ssk, skb);
+
+                       /* not enough data to validate the csum */
+                       return MAPPING_EMPTY;
+               }
+
+               /* the DSS mapping for the next skbs will be validated
+                * later, when a get_mapping_status() call processes them
+                */
+               skb = skb->next;
+       }
+
+       /* note that 'map_data_len' accounts only for the carried data and
+        * does not include the eventual seq increment due to the DATA_FIN,
+        * while the pseudo-header (cf. RFC 8684 Section 3.3.1) requires
+        * the original DSS data length, DATA_FIN included
+        */
+       header.data_seq = cpu_to_be64(subflow->map_seq);
+       header.subflow_seq = htonl(subflow->map_subflow_seq);
+       header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
+       header.csum = 0;
+
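+       /* the csum carried by the DSS option has already been folded into
+        * 'map_data_csum', so with header.csum zeroed a valid mapping makes
+        * the final sum fold to zero
+        */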
+       csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
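+
+       /* MP_JOIN subflows can not fall back to plain TCP, so a csum
+        * failure there is fatal; on the initial subflow a dummy mapping
+        * lets the connection fall back instead
+        */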
+       if (unlikely(csum_fold(csum)))
+               return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
+
+       return MAPPING_OK;
+}
+
 static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
-                   subflow->map_data_len == data_len) {
+                   subflow->map_data_len == data_len &&
+                   subflow->map_csum_reqd == mpext->csum_reqd) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
-                       return MAPPING_OK;
+                       goto validate_csum;
                }
 
                /* If this skb data are fully covered by the current mapping,
                }
 
                /* will validate the next map after consuming the current one */
-               return MAPPING_OK;
+               goto validate_csum;
        }
 
        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
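+       /* keep track of the DATA_FIN flag: the csum pseudo-header needs
+        * the original DSS data_len, which accounts for it
+        */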
+       subflow->map_data_fin = mpext->data_fin;
        subflow->mpc_map = mpext->mpc_map;
-       pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
+       subflow->map_csum_reqd = mpext->csum_reqd;
+       subflow->map_csum_len = 0;
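+       /* seed the accumulator with the csum carried by the DSS option,
+        * so validate_data_csum() only needs to check that the final sum
+        * folds to zero
+        */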
+       subflow->map_data_csum = csum_unfold(mpext->csum);
+
+       /* Cf. RFC 8684 Section 3.3: the csum presence must be consistent
+        * with what was negotiated at connection time
+        */
+       if (unlikely(subflow->map_csum_reqd != csum_reqd))
+               return MAPPING_INVALID;
+
+       pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
                 subflow->map_seq, subflow->map_subflow_seq,
-                subflow->map_data_len);
+                subflow->map_data_len, subflow->map_csum_reqd,
+                subflow->map_data_csum);
 
 validate_seq:
        /* we revalidate valid mapping on new skb, because we must ensure
                return MAPPING_INVALID;
 
        skb_ext_del(skb, SKB_EXT_MPTCP);
-       return MAPPING_OK;
+
+validate_csum:
+       return validate_data_csum(ssk, skb, csum_reqd);
 }
 
 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,