u8 *intf, u8 *ena);
  bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
  u32  rvu_cgx_get_fifolen(struct rvu *rvu);
+ void *rvu_first_cgx_pdata(struct rvu *rvu);
  
 +int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
 +                           int type);
 +bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
 +                         int index);
 +
  /* CPT APIs */
  int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
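As a usage sketch, the two new NPC helpers compose naturally: npc_get_nixlf_mcam_index() maps a (pcifunc, nixlf, rule type) triple to an MCAM index, and is_mcam_entry_enabled() tests that entry's state. The wrapper below is illustrative only; NIXLF_UCAST_ENTRY is assumed from rvu_npc.c and error handling is elided.

/* Sketch: is the RX unicast MCAM rule of a NIXLF currently active? */
static bool nixlf_ucast_rule_active(struct rvu *rvu, u16 pcifunc,
				    int nixlf, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int index;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_UCAST_ENTRY);
	return is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
}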
  
 
                if (match.mask->flags)
                        *match_level = MLX5_MATCH_L4;
        }
 +      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
 +              struct flow_match_icmp match;
  
 +              flow_rule_match_icmp(rule, &match);
 +              switch (ip_proto) {
 +              case IPPROTO_ICMP:
 +                      if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
 +                            MLX5_FLEX_PROTO_ICMP))
 +                              return -EOPNOTSUPP;
 +                      MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
 +                               match.mask->type);
 +                      MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
 +                               match.key->type);
 +                      MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
 +                               match.mask->code);
 +                      MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
 +                               match.key->code);
 +                      break;
 +              case IPPROTO_ICMPV6:
 +                      if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
 +                            MLX5_FLEX_PROTO_ICMPV6))
 +                              return -EOPNOTSUPP;
 +                      MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
 +                               match.mask->type);
 +                      MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
 +                               match.key->type);
 +                      MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
 +                               match.mask->code);
 +                      MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
 +                               match.key->code);
 +                      break;
 +              default:
 +                      NL_SET_ERR_MSG_MOD(extack,
 +                                         "Code and type matching only with ICMP and ICMPv6");
 +                      netdev_err(priv->netdev,
 +                                 "Code and type matching only with ICMP and ICMPv6\n");
 +                      return -EINVAL;
 +              }
 +              if (match.mask->code || match.mask->type) {
 +                      *match_level = MLX5_MATCH_L4;
 +                      spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
 +              }
 +      }
+       /* Currently supported only for MPLS over UDP */
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
+           !netif_is_bareudp(filter_dev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Matching on MPLS is supported only for MPLS over UDP");
+               netdev_err(priv->netdev,
+                          "Matching on MPLS is supported only for MPLS over UDP\n");
+               return -EOPNOTSUPP;
+       }
+ 
        return 0;
  }
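The capability gate above is the heart of the ICMP change: type/code live in the misc3 match section, which is only usable when the device's flex parser handles the corresponding protocol. A standalone sketch of that gate (the bit positions here are assumptions for illustration, not the mlx5 definitions):

/* Sketch: may ICMP type/code matching be offloaded for this ip_proto? */
#include <stdbool.h>
#include <stdint.h>
#include <netinet/in.h>

#define FLEX_PROTO_ICMP		(1u << 8)	/* assumed bit positions */
#define FLEX_PROTO_ICMPV6	(1u << 9)

static bool icmp_match_supported(uint32_t flex_parser_protocols, int ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_ICMP:
		return flex_parser_protocols & FLEX_PROTO_ICMP;
	case IPPROTO_ICMPV6:
		return flex_parser_protocols & FLEX_PROTO_ICMPV6;
	default:
		return false;	/* type/code matching is ICMP-only */
	}
}

A rule exercising this path would look roughly like "tc filter add dev eth0 ingress protocol ip flower ip_proto icmp type 8 code 0 action drop" (flower syntax from memory, unverified).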
  
 
  #include "priv.h"
  #include "sf.h"
  #include "mlx5_ifc_vhca_event.h"
 +#include "ecpf.h"
+ #include "vhca_event.h"
+ #include "mlx5_core.h"
  
  struct mlx5_sf_hw {
        u32 usr_sfnum;
 
  
  static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
  {
 -      int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
+       int ndescs;
        int err;
  
-       /* If TSO, need roundup(skb->len/mss) descs */
+       /* Each desc is mss long max, so a descriptor for each gso_seg */
        if (skb_is_gso(skb))
-               return (skb->len / skb_shinfo(skb)->gso_size) + 1;
+               ndescs = skb_shinfo(skb)->gso_segs;
+       else
+               ndescs = 1;
  
 -      if (skb_shinfo(skb)->nr_frags <= sg_elems)
 +      /* If non-TSO, just need 1 desc and nr_frags sg elems */
 +      if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
-               return 1;
+               return ndescs;
  
        /* Too many frags, so linearize */
        err = skb_linearize(skb);
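The descriptor-count change fixes real arithmetic: skb->len includes the headers, so the old len/mss + 1 estimate drifts from the true segment count, while gso_segs is exactly the number of segments the stack will produce. A standalone illustration (the numbers are made up):

/* Sketch: old vs. new TSO descriptor estimate. */
#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 54;	/* eth + ip + tcp, illustrative */
	unsigned int mss = 1448;
	unsigned int payload = 4 * mss;	/* exactly four segments */
	unsigned int skb_len = hdr_len + payload;
	unsigned int gso_segs = (payload + mss - 1) / mss;

	printf("old: %u descs\n", skb_len / mss + 1);	/* 5: overcounts */
	printf("new: %u descs\n", gso_segs);		/* 4 */
	return 0;
}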
 
        PSAMPLE_ATTR_GROUP_SEQ,
        PSAMPLE_ATTR_SAMPLE_RATE,
        PSAMPLE_ATTR_DATA,
-       PSAMPLE_ATTR_TUNNEL,
- 
-       /* commands attributes */
        PSAMPLE_ATTR_GROUP_REFCOUNT,
+       PSAMPLE_ATTR_TUNNEL,
  
 +      PSAMPLE_ATTR_PAD,
 +      PSAMPLE_ATTR_OUT_TC,            /* u16 */
 +      PSAMPLE_ATTR_OUT_TC_OCC,        /* u64, bytes */
 +      PSAMPLE_ATTR_LATENCY,           /* u64, nanoseconds */
 +      PSAMPLE_ATTR_TIMESTAMP,         /* u64, nanoseconds */
 +      PSAMPLE_ATTR_PROTO,             /* u16 */
 +
        __PSAMPLE_ATTR_MAX
  };
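Two things happen in this hunk: PSAMPLE_ATTR_TUNNEL moves back behind PSAMPLE_ATTR_GROUP_REFCOUNT, restoring the latter's original uAPI value, and a set of metadata attributes plus PSAMPLE_ATTR_PAD is appended. The pad attribute exists because 64-bit netlink attributes must be emitted with explicit padding; a hedged kernel-side sketch (nl_skb and the local values are illustrative):

/* Sketch: emitting the new psample metadata attributes. */
u16 out_tc = 3;		/* illustrative values */
u64 latency_ns = 12345;

if (nla_put_u16(nl_skb, PSAMPLE_ATTR_OUT_TC, out_tc))
	goto error;
/* u64 attrs take a pad attr so userspace reads them aligned */
if (nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_LATENCY, latency_ns,
		      PSAMPLE_ATTR_PAD))
	goto error;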
  
 
                if (mask_to_left)
                        *ptr_limit = MAX_BPF_STACK + off;
                else
-                       *ptr_limit = -off;
-               return 0;
+                       *ptr_limit = -off - 1;
+               return *ptr_limit >= max ? -ERANGE : 0;
 +      case PTR_TO_MAP_KEY:
 +              /* Currently, this code is not exercised as its only user
 +               * is the bpf_for_each_map_elem() helper, which requires
 +               * bpf_capable. The code has been tested manually for
 +               * future use.
 +               */
 +              if (mask_to_left) {
 +                      *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
 +              } else {
 +                      off = ptr_reg->smin_value + ptr_reg->off;
 +                      *ptr_limit = ptr_reg->map_ptr->key_size - off;
 +              }
 +              return 0;
        case PTR_TO_MAP_VALUE:
+               max = ptr_reg->map_ptr->value_size;
                if (mask_to_left) {
                        *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
                } else {
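The PTR_TO_STACK change is an off-by-one fix: ptr_limit is the largest value that may be speculatively added to the pointer, so for a pointer with -off valid bytes beneath the frame the limit must be -off - 1, and any computed limit reaching max is now rejected with -ERANGE instead of silently allowed. A standalone sketch of the arithmetic:

/* Sketch: stack-pointer alu limit before and after the fix. */
#include <stdio.h>

int main(void)
{
	int off = -16;	/* pointer at fp-16; valid bytes are fp-16..fp-1 */

	/* old: permits ptr + 16 == fp, one byte past the region */
	printf("old limit: %d\n", -off);
	/* new: ptr + 15 == fp-1 is the last valid byte */
	printf("new limit: %d\n", -off - 1);
	return 0;
}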
 
        } r[5];
        struct struct_in_struct s[10];
        int t[11];
+       struct struct_in_array (*u)[2];
+       struct_in_array_t *v;
  };
  
 +struct float_struct {
 +      float f;
 +      const double *d;
 +      volatile long double *ld;
 +};
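float_struct exercises BTF encoding of floating-point types (BTF_KIND_FLOAT). For context, a hedged sketch of registering such types through libbpf, assuming the btf__add_float() API that landed alongside this kind (sizes are for a typical x86-64 ABI):

/* Sketch: adding float types to a BTF object; error checks elided. */
struct btf *btf = btf__new_empty();

btf__add_float(btf, "float", 4);
btf__add_float(btf, "double", 8);
btf__add_float(btf, "long double", 16);
btf__free(btf);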
 +
  struct root_struct {
        enum e1 _1;
        enum e2 _2;