static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
                                        struct nfp_flower_priv *priv,
                                        struct net_device *netdev,
-                                       struct nfp_fl_payload *flow_pay)
+                                       struct nfp_fl_payload *flow_pay,
+                                       int num_rules)
 {
        enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
        struct flow_action_entry *a_in;
-       int i, j, num_actions, id;
+       int i, j, id, num_actions = 0;
        struct flow_rule *a_rule;
        int err = 0, offset = 0;
 
-       num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
-                     rules[CT_TYPE_NFT]->action.num_entries +
-                     rules[CT_TYPE_POST_CT]->action.num_entries;
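+       /* rules[] holds pre_ct/nft rule pairs followed by a single trailing
+        * post_ct rule.
+        */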
+       for (i = 0; i < num_rules; i++)
+               num_actions += rules[i]->action.num_entries;
 
-       /* Add one action to make sure there is enough room to add an checksum action
-        * when do nat.
+       /* Reserve one extra action per pre_ct/nft pair (num_rules / 2) so there
+        * is enough room to add a checksum action when doing NAT.
         */
-       a_rule = flow_rule_alloc(num_actions + 1);
+       a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
        if (!a_rule)
                return -ENOMEM;
 
-       /* Actions need a BASIC dissector. */
-       a_rule->match = rules[CT_TYPE_PRE_CT]->match;
-       /* post_ct entry have one action at least. */
-       if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) {
-               tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
-       }
+       /* The post_ct entry, the last rule in the array, has at least one action. */
+       if (rules[num_rules - 1]->action.num_entries != 0)
+               tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;
+
+       /* Actions need a BASIC dissector; take it from the first pre_ct rule. */
+       a_rule->match = rules[0]->match;
 
        /* Copy actions */
-       for (j = 0; j < _CT_TYPE_MAX; j++) {
+       for (j = 0; j < num_rules; j++) {
                u32 csum_updated = 0;
                u8 ip_proto = 0;
 
-                               /* nft entry is generated by tc ct, which mangle action do not care
-                                * the stats, inherit the post entry stats to meet the
-                                * flow_action_hw_stats_check.
+                               /* The nft entry is generated by tc ct, whose mangle action does
+                                * not care about the stats; inherit the post_ct entry stats to
+                                * satisfy the flow_action_hw_stats_check.
+                                * nft entry flow rules are at odd array indices.
                                 */
-                               if (j == CT_TYPE_NFT) {
+                               if (j & 0x01) {
                                        if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
                                                a_in->hw_stats = tmp_stats;
                                        nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
        struct nfp_fl_payload *flow_pay;
 
        struct flow_rule *rules[_CT_TYPE_MAX];
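+       /* One rule for each CT type: pre_ct, nft and post_ct. */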
+       int num_rules = _CT_TYPE_MAX;
        u8 *key, *msk, *kdata, *mdata;
        struct nfp_port *port = NULL;
        struct net_device *netdev;
        memset(&key_map, 0, sizeof(key_map));
 
        /* Calculate the resultant key layer and size for offload */
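+       /* Each of the num_rules flow rules contributes key layers to the merged flow. */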
-       for (i = 0; i < _CT_TYPE_MAX; i++) {
+       for (i = 0; i < num_rules; i++) {
                err = nfp_flower_calculate_key_layers(priv->app,
                                                      m_entry->netdev,
                                                      &tmp_layer, rules[i],
         * that the layer is not present.
         */
        if (!qinq_sup) {
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        offset = key_map[FLOW_PAY_META_TCI];
                        key = kdata + offset;
                        msk = mdata + offset;
                offset = key_map[FLOW_PAY_MAC_MPLS];
                key = kdata + offset;
                msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
                                               (struct nfp_flower_mac_mpls *)msk,
                                               rules[i]);
                offset = key_map[FLOW_PAY_IPV4];
                key = kdata + offset;
                msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
                                                (struct nfp_flower_ipv4 *)msk,
                                                rules[i]);
                offset = key_map[FLOW_PAY_IPV6];
                key = kdata + offset;
                msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
                                                (struct nfp_flower_ipv6 *)msk,
                                                rules[i]);
                offset = key_map[FLOW_PAY_L4];
                key = kdata + offset;
                msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
                                                 (struct nfp_flower_tp_ports *)msk,
                                                 rules[i]);
                offset = key_map[FLOW_PAY_QINQ];
                key = kdata + offset;
                msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
+               for (i = 0; i < num_rules; i++) {
                        nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
                                                (struct nfp_flower_vlan *)msk,
                                                rules[i]);
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;
 
-                       for (i = 0; i < _CT_TYPE_MAX; i++) {
+                       for (i = 0; i < num_rules; i++) {
                                nfp_flower_compile_ipv6_gre_tun((void *)key,
                                                                (void *)msk, rules[i]);
                        }
                } else {
                        __be32 dst;
 
-                       for (i = 0; i < _CT_TYPE_MAX; i++) {
+                       for (i = 0; i < num_rules; i++) {
                                nfp_flower_compile_ipv4_gre_tun((void *)key,
                                                                (void *)msk, rules[i]);
                        }
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;
 
-                       for (i = 0; i < _CT_TYPE_MAX; i++) {
+                       for (i = 0; i < num_rules; i++) {
                                nfp_flower_compile_ipv6_udp_tun((void *)key,
                                                                (void *)msk, rules[i]);
                        }
                } else {
                        __be32 dst;
 
-                       for (i = 0; i < _CT_TYPE_MAX; i++) {
+                       for (i = 0; i < num_rules; i++) {
                                nfp_flower_compile_ipv4_udp_tun((void *)key,
                                                                (void *)msk, rules[i]);
                        }
                        offset = key_map[FLOW_PAY_GENEVE_OPT];
                        key = kdata + offset;
                        msk = mdata + offset;
-                       for (i = 0; i < _CT_TYPE_MAX; i++)
+                       for (i = 0; i < num_rules; i++)
                                nfp_flower_compile_geneve_opt(key, msk, rules[i]);
                }
        }
 
        /* Merge actions into flow_pay */
-       err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
+       err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
        if (err)
                goto ct_offload_err;