// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (C) 2021 Corigine, Inc. */
 
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_ct.h>
+
 #include "conntrack.h"
 #include "../nfp_port.h"
 
        int i;
 
        flow_action_for_each(i, act, &flow->rule->action) {
-               if (act->id == FLOW_ACTION_CT && !act->ct.action)
-                       return true;
+               if (act->id == FLOW_ACTION_CT) {
+                       /* The pre_ct rule may only contain the plain ct
+                        * action or the ct nat action; no other ct actions
+                        * such as ct commit are allowed.
+                        */
+                       return !act->ct.action ||
+                              act->ct.action == TCA_CT_ACT_NAT;
+               }
        }
+
        return false;
 }
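+/* A hypothetical pre_ct rule of this shape (illustrative only, not part
+ * of this patch):
+ *
+ *   tc filter add dev eth0 ingress chain 0 proto ip flower \
+ *     ct_state -trk action ct nat pipe action goto chain 1
+ *
+ * Here act->ct.action carries only TCA_CT_ACT_NAT, so the flow is
+ * accepted as a pre_ct flow; a rule with ct commit would be rejected.
+ */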
 
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
+       struct flow_action_entry *act;
+       bool exist_ct_clear = false;
        struct flow_match_ct ct;
+       int i;
+
+       /* The post ct entry must not contain any ct action except ct clear. */
+       flow_action_for_each(i, act, &flow->rule->action) {
+               if (act->id == FLOW_ACTION_CT) {
+                       /* Ignore the ct clear action. */
+                       if (act->ct.action == TCA_CT_ACT_CLEAR) {
+                               exist_ct_clear = true;
+                               continue;
+                       }
+
+                       return false;
+               }
+       }
 
        if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
                flow_rule_match_ct(rule, &ct);
                if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
                        return true;
+       } else {
+               /* When doing NAT with ct, the post ct entry ignores the ct
+                * status and matches on the NATed fields (sip/dip) instead.
+                * In this case the flow chain index is non-zero and the rule
+                * contains a ct clear action.
+                */
+               if (flow->common.chain_index && exist_ct_clear)
+                       return true;
        }
+
        return false;
 }
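+/* A hypothetical post_ct rule for the NAT case (illustrative only):
+ *
+ *   tc filter add dev eth0 ingress chain 1 proto ip flower \
+ *     dst_ip 2.2.2.2 action ct clear pipe \
+ *     action mirred egress redirect dev eth1
+ *
+ * There is no ct_state match, but the chain index is non-zero and the
+ * rule carries a ct clear action, so it is treated as a post_ct flow.
+ */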
 
        return buf;
 }
 
+/* Note entry1 and entry2 are not swappable. The IP and tport merge
+ * checks are only skipped for pre_ct and post_ct entries when the
+ * pre_ct entry performs NAT.
+ */
+static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
+                                          struct nfp_fl_ct_flow_entry *entry2)
+{
+       /* Only a pre_ct entry can carry the NFP_FL_ACTION_DO_NAT flag. */
+       if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
+           entry2->type == CT_TYPE_POST_CT)
+               return false;
+
+       return true;
+}
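+/* E.g. (illustrative): if the pre_ct entry NATs dst 1.1.1.1 to 2.2.2.2
+ * while the post_ct entry matches dst_ip 2.2.2.2, the raw IP matches of
+ * the two entries intentionally differ, so the overlap check between
+ * them can be skipped here and is done against the nft entry instead.
+ */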
+
 /* Note entry1 and entry2 are not swappable, entry1 should be
  * the former flow whose mangle action need be taken into account
  * if existed, and entry2 should be the latter flow whose action
                        goto check_failed;
        }
 
-       if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+       /* If the pre ct entry performs NAT, the NATed IP is carried in the
+        * nft entry and is merge checked when the nft and post ct entries
+        * are merged, so skip the IP merge check here.
+        */
+       if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
+           nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
                struct flow_match_ipv4_addrs match1, match2;
 
                flow_rule_match_ipv4_addrs(entry1->rule, &match1);
                        goto check_failed;
        }
 
-       if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+       /* If the pre ct entry performs NAT, the NATed IP is carried in the
+        * nft entry and is merge checked when the nft and post ct entries
+        * are merged, so skip the IP merge check here.
+        */
+       if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
+           nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
                struct flow_match_ipv6_addrs match1, match2;
 
                flow_rule_match_ipv6_addrs(entry1->rule, &match1);
                        goto check_failed;
        }
 
-       if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+       /* If the pre ct entry performs NAT, the NATed tport is carried in
+        * the nft entry and is merge checked when the nft and post ct
+        * entries are merged, so skip the tport merge check here.
+        */
+       if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
+           nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
                enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
                struct flow_match_ports match1, match2;
 
                        return -EINVAL;
 
                return 0;
+       } else {
+               /* A post_ct entry with a ct clear action does not match on
+                * the ct status when the nft entry performs NAT, i.e. has
+                * mangle actions.
+                */
+               if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
+                       return 0;
        }
 
        return -EINVAL;
        return key_size;
 }
 
+/* Set the csum flags according to the IP proto and the mangle action. */
+static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
+{
+       if (a_in->id != FLOW_ACTION_MANGLE)
+               return;
+
+       switch (a_in->mangle.htype) {
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+               *csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+               if (ip_proto == IPPROTO_TCP)
+                       *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+               else if (ip_proto == IPPROTO_UDP)
+                       *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+               *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+               *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+               break;
+       default:
+               break;
+       }
+}
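+/* Usage sketch for the helper above (values illustrative): for a TCP
+ * flow whose nft entry mangles the IPv4 header,
+ *
+ *   u32 csum = 0;
+ *
+ *   nfp_fl_get_csum_flag(a_in, IPPROTO_TCP, &csum);
+ *
+ * leaves csum == (TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP).
+ */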
+
 static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
                                        struct nfp_flower_priv *priv,
                                        struct net_device *netdev,
                                        struct nfp_fl_payload *flow_pay)
 {
+       enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
        struct flow_action_entry *a_in;
        int i, j, num_actions, id;
        struct flow_rule *a_rule;
                      rules[CT_TYPE_NFT]->action.num_entries +
                      rules[CT_TYPE_POST_CT]->action.num_entries;
 
-       a_rule = flow_rule_alloc(num_actions);
+       /* Add one extra action to make sure there is enough room to add a
+        * checksum action when doing NAT.
+        */
+       a_rule = flow_rule_alloc(num_actions + 1);
        if (!a_rule)
                return -ENOMEM;
 
        /* Actions need a BASIC dissector. */
        a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+       /* The post_ct entry is expected to have at least one action. */
+       if (rules[CT_TYPE_POST_CT]->action.num_entries != 0)
+               tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
 
        /* Copy actions */
        for (j = 0; j < _CT_TYPE_MAX; j++) {
+               u32 csum_updated = 0;
+               u8 ip_proto = 0;
+
                if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
                        struct flow_match_basic match;
 
                         * through the subflows and assign the proper subflow to a_rule
                         */
                        flow_rule_match_basic(rules[j], &match);
-                       if (match.mask->ip_proto)
+                       if (match.mask->ip_proto) {
                                a_rule->match = rules[j]->match;
+                               ip_proto = match.key->ip_proto;
+                       }
                }
 
                for (i = 0; i < rules[j]->action.num_entries; i++) {
                        case FLOW_ACTION_CT_METADATA:
                                continue;
                        default:
+                               /* The nft entry is generated by tc ct and its
+                                * mangle actions do not carry stats; inherit
+                                * the post ct entry stats to pass
+                                * flow_action_hw_stats_check.
+                                */
+                               if (j == CT_TYPE_NFT) {
+                                       if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+                                               a_in->hw_stats = tmp_stats;
+                                       nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
+                               }
                                memcpy(&a_rule->action.entries[offset++],
                                       a_in, sizeof(struct flow_action_entry));
                                break;
                        }
                }
+               /* When doing NAT the nft entry has mangle actions but no
+                * checksum action, while the hardware fixes up the IPv4 and
+                * TCP/UDP checksums automatically, so add a csum action to
+                * pass the csum action check.
+                */
+               if (csum_updated) {
+                       struct flow_action_entry *csum_action;
+
+                       csum_action = &a_rule->action.entries[offset++];
+                       csum_action->id = FLOW_ACTION_CSUM;
+                       csum_action->csum_flags = csum_updated;
+                       csum_action->hw_stats = tmp_stats;
+               }
        }
 
        /* Some actions would have been ignored, so update the num_entries field */
        return NULL;
 }
 
+static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
+{
+       if (mangle_action->id != FLOW_ACTION_MANGLE)
+               return;
+
+       switch (mangle_action->mangle.htype) {
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+               mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
+               mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+               return;
+
+       case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+       case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+               mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+               mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+               return;
+
+       default:
+               return;
+       }
+}
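+/* For example (hypothetical value): an nft IPv4 address mangle value of
+ * 0x0a000001 (10.0.0.1) on a little-endian host reads back as
+ * 0x0100000a after the translation, i.e. the 0a 00 00 01 byte layout
+ * that the tc mangle offload path delivers.
+ */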
+
+static void nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
+                                     struct nfp_fl_ct_flow_entry *entry)
+{
+       switch (act->id) {
+       case FLOW_ACTION_CT:
+               if (act->ct.action == TCA_CT_ACT_NAT)
+                       entry->flags |= NFP_FL_ACTION_DO_NAT;
+               break;
+
+       case FLOW_ACTION_MANGLE:
+               entry->flags |= NFP_FL_ACTION_DO_MANGLE;
+               break;
+
+       default:
+               break;
+       }
+}
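+/* Illustrative outcome of the helper above: a pre_ct `ct nat` action
+ * sets NFP_FL_ACTION_DO_NAT on its flow entry, while an nft mangle
+ * action sets NFP_FL_ACTION_DO_MANGLE; the ct merge checks earlier in
+ * this file key off these flags.
+ */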
+
 static struct
 nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
                                         struct net_device *netdev,
 
                new_act = &entry->rule->action.entries[i];
                memcpy(new_act, act, sizeof(struct flow_action_entry));
+               /* The nft entry mangle fields are in host byte order, so
+                * translate them to network byte order before use.
+                */
+               if (is_nft)
+                       nfp_nft_ct_translate_mangle_action(new_act);
+
+               nfp_nft_ct_set_flow_flag(new_act, entry);
                /* Entunnel is a special case, need to allocate and copy
                 * tunnel info.
                 */