 static int rmnet_map_egress_handler(struct sk_buff *skb,
                                     struct rmnet_port *port, u8 mux_id,
                                     struct net_device *orig_dev)
 {
-       int required_headroom, additional_header_len;
+       int required_headroom, additional_header_len, csum_type = 0;
        struct rmnet_map_header *map_header;
 
        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);
 
        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
                additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
-               required_headroom += additional_header_len;
+               csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+       } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+               additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+               csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
        }
 
-       if (skb_headroom(skb) < required_headroom) {
-               if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
-                       return -ENOMEM;
-       }
+       required_headroom += additional_header_len;
+
+       if (skb_cow_head(skb, required_headroom) < 0)
+               return -ENOMEM;
 
-       if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
-               rmnet_map_checksum_uplink_packet(skb, orig_dev);
+       if (csum_type)
+               rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+                                                csum_type);
 
-       map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
+       map_header = rmnet_map_add_map_header(skb, additional_header_len,
+                                             port, 0);
        if (!map_header)
                return -ENOMEM;
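
For reference: csum_type carries the egress data-format flag itself rather
than a separate enum, so a non-zero value doubles as the "offload requested"
test above. A sketch of the relevant flag bits, assuming the upstream
include/uapi/linux/if_link.h layout:

        /* rmnet data-format flags (sketch; see include/uapi/linux/if_link.h).
         * RMNET_FLAGS_EGRESS_MAP_CKSUMV5 is the bit this change starts honoring.
         */
        #define RMNET_FLAGS_INGRESS_DEAGGREGATION       (1U << 0)
        #define RMNET_FLAGS_INGRESS_MAP_COMMANDS        (1U << 1)
        #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4         (1U << 2)
        #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4          (1U << 3)
        #define RMNET_FLAGS_INGRESS_MAP_CKSUMV5         (1U << 4)
        #define RMNET_FLAGS_EGRESS_MAP_CKSUMV5          (1U << 5)

Note also the switch from pskb_expand_head() to skb_cow_head(): besides
expanding headroom, skb_cow_head() unshares a cloned header before the
header bytes are written below.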
 
 
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
                                      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
-                                                 int hdrlen, int pad);
+                                                 int hdrlen,
+                                                 struct rmnet_port *port,
+                                                 int pad);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
-                                     struct net_device *orig_dev);
+                                     struct rmnet_port *port,
+                                     struct net_device *orig_dev,
+                                     int csum_type);
 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
 
 #endif /* _RMNET_MAP_H_ */
 
 }
 #endif
 
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+                                               struct rmnet_port *port,
+                                               struct net_device *orig_dev)
+{
+       struct rmnet_priv *priv = netdev_priv(orig_dev);
+       struct rmnet_map_v5_csum_header *ul_header;
+
+       ul_header = skb_push(skb, sizeof(*ul_header));
+       memset(ul_header, 0, sizeof(*ul_header));
+       ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+                                               MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               void *iph = ip_hdr(skb);
+               __sum16 *check;
+               void *trans;
+               u8 proto;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+                       proto = ((struct iphdr *)iph)->protocol;
+                       trans = iph + ip_len;
+               } else if (IS_ENABLED(CONFIG_IPV6) &&
+                          skb->protocol == htons(ETH_P_IPV6)) {
+                       u16 ip_len = sizeof(struct ipv6hdr);
+
+                       proto = ((struct ipv6hdr *)iph)->nexthdr;
+                       trans = iph + ip_len;
+               } else {
+                       priv->stats.csum_err_invalid_ip_version++;
+                       goto sw_csum;
+               }
+
+               check = rmnet_map_get_csum_field(proto, trans);
+               if (check) {
+                       skb->ip_summed = CHECKSUM_NONE;
+                       /* Ask for checksum offloading */
+                       ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+                       priv->stats.csum_hw++;
+                       return;
+               }
+       }
+
+sw_csum:
+       priv->stats.csum_sw++;
+}
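
The ul_header built above is the 4-byte MAPv5 checksum header that sits
between the MAP header and the IP packet. A sketch of the structure and
field masks it relies on, assuming the include/linux/if_rmnet.h definitions:

        /* MAPv5 checksum-offload header (sketch; see include/linux/if_rmnet.h) */
        struct rmnet_map_v5_csum_header {
                u8 header_info;         /* next-header flag and header type */
                u8 csum_info;           /* checksum valid/requested bit */
                __be16 reserved;
        } __aligned(1);

        #define MAPV5_HDRINFO_NXT_HDR_FLAG      BIT(0)
        #define MAPV5_HDRINFO_HDR_TYPE_FMASK    GENMASK(7, 1)
        #define MAPV5_CSUMINFO_VALID_FLAG       BIT(7)

On the uplink the VALID bit acts as a request: the packet leaves the stack
with CHECKSUM_NONE and the hardware fills in the transport checksum.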
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
  */
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
-                                                 int hdrlen, int pad)
+                                                 int hdrlen,
+                                                 struct rmnet_port *port,
+                                                 int pad)
 {
        struct rmnet_map_header *map_header;
        u32 padding, map_datalen;
 
        map_datalen = skb->len - hdrlen;
        map_header = (struct rmnet_map_header *)
                        skb_push(skb, sizeof(struct rmnet_map_header));
        memset(map_header, 0, sizeof(struct rmnet_map_header));
 
+       /* Set next_hdr bit for csum offload packets */
+       if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+               map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
        if (pad == RMNET_MAP_NO_PAD_BYTES) {
                map_header->pkt_len = htons(map_datalen);
                return map_header;
        }
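
Setting MAP_NEXT_HEADER_FLAG advertises that a MAPv5 header immediately
follows the MAP header; it shares the flags byte with the pad length. A
sketch of the MAP header layout, again assuming the include/linux/if_rmnet.h
definitions:

        /* MAPv1 header (sketch; see include/linux/if_rmnet.h) */
        struct rmnet_map_header {
                u8 flags;               /* PAD_LEN, NEXT_HEADER, CMD */
                u8 mux_id;
                __be16 pkt_len;         /* length of packet, including pad */
        } __aligned(1);

        #define MAP_PAD_LEN_MASK        GENMASK(5, 0)
        #define MAP_NEXT_HEADER_FLAG    BIT(6)
        #define MAP_CMD_FLAG            BIT(7)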
 
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
-                                     struct net_device *orig_dev)
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+                                               struct net_device *orig_dev)
 {
        struct rmnet_priv *priv = netdev_priv(orig_dev);
        struct rmnet_map_ul_csum_header *ul_header;
 
                if (skb->protocol == htons(ETH_P_IP)) {
                        rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+                       priv->stats.csum_hw++;
                        return;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
 #if IS_ENABLED(CONFIG_IPV6)
                        rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+                       priv->stats.csum_hw++;
                        return;
 #else
                        priv->stats.csum_err_invalid_ip_version++;
                        goto sw_csum;
 #endif
                }
        }
 
 sw_csum:
        priv->stats.csum_sw++;
 }
 
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+                                     struct rmnet_port *port,
+                                     struct net_device *orig_dev,
+                                     int csum_type)
+{
+       switch (csum_type) {
+       case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+               rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+               break;
+       case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+               rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+               break;
+       default:
+               break;
+       }
+}
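
Taken together, for a CHECKSUM_PARTIAL TCP/IPv4 skb on a port with
RMNET_FLAGS_EGRESS_MAP_CKSUMV5 set, the egress path now emits roughly:

        [ rmnet_map_header:         4 bytes, NEXT_HEADER flag set ]
        [ rmnet_map_v5_csum_header: 4 bytes, CSUM_OFFLOAD type, VALID set ]
        [ IP packet, transport checksum left for the hardware to fill ]

The MAPv4 path instead places its rmnet_map_ul_csum_header in the same
position and leaves the next-header flag clear.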
+
 /* Process a MAPv5 packet header */
 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
                                      u16 len)