return 0;
 }
 
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+                struct ice_fv_word *fv, u8 *prof_id)
+{
+       struct ice_es *es = &hw->blk[blk].es;
+       u16 off, i;
+
+       for (i = 0; i < es->count; i++) {
+               off = i * es->fvw;
+
+               if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+                       continue;
+
+               *prof_id = i;
+               return 0;
+       }
+
+       return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+       switch (blk) {
+       case ICE_BLK_RSS:
+               *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+               break;
+       default:
+               return false;
+       }
+       return true;
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+       enum ice_status status;
+       u16 res_type;
+       u16 get_prof;
+
+       if (!ice_prof_id_rsrc_type(blk, &res_type))
+               return ICE_ERR_PARAM;
+
+       status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+       if (!status)
+               *prof_id = (u8)get_prof;
+
+       return status;
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block in which the profile resides
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+       if (prof_id >= hw->blk[blk].es.count)
+               return ICE_ERR_PARAM;
+
+       hw->blk[blk].es.ref_count[prof_id]++;
+
+       return 0;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+            struct ice_fv_word *fv)
+{
+       u16 off;
+
+       off = prof_id * hw->blk[blk].es.fvw;
+       if (!fv) {
+               memset(&hw->blk[blk].es.t[off], 0,
+                      hw->blk[blk].es.fvw * sizeof(*fv));
+               hw->blk[blk].es.written[prof_id] = false;
+       } else {
+               memcpy(&hw->blk[blk].es.t[off], fv,
+                      hw->blk[blk].es.fvw * sizeof(*fv));
+       }
+}
+
 /* Block / table section IDs */
 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
        /* SWITCH */
        ice_free_hw_tbls(hw);
        return ICE_ERR_NO_MEMORY;
 }
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated,
+ * it will not be written until the first call to ice_add_flow that specifies
+ * the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+            struct ice_fv_word *es)
+{
+       u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+       DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+       struct ice_prof_map *prof;
+       enum ice_status status;
+       u32 byte = 0;
+       u8 prof_id;
+
+       bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+
+       mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+       /* search for existing profile */
+       status = ice_find_prof_id(hw, blk, es, &prof_id);
+       if (status) {
+               /* allocate profile ID */
+               status = ice_alloc_prof_id(hw, blk, &prof_id);
+               if (status)
+                       goto err_ice_add_prof;
+
+               /* and write new es */
+               ice_write_es(hw, blk, prof_id, es);
+       }
+
+       ice_prof_inc_ref(hw, blk, prof_id);
+
+       /* add profile info */
+       prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
+       if (!prof) {
+               status = ICE_ERR_NO_MEMORY;
+               goto err_ice_add_prof;
+       }
+
+       prof->profile_cookie = id;
+       prof->prof_id = prof_id;
+       prof->ptg_cnt = 0;
+       prof->context = 0;
+
+       /* build list of ptgs */
+       while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+               u32 bit;
+
+               if (!ptypes[byte]) {
+                       bytes--;
+                       byte++;
+                       continue;
+               }
+
+               /* Examine 8 bits per byte */
+               for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
+                                BITS_PER_BYTE) {
+                       u16 ptype;
+                       u8 ptg;
+                       u8 m;
+
+                       ptype = byte * BITS_PER_BYTE + bit;
+
+                       /* The package should place all ptypes in a non-zero
+                        * PTG, so the following call should never fail.
+                        */
+                       if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+                               continue;
+
+                       /* If PTG is already added, skip and continue */
+                       if (test_bit(ptg, ptgs_used))
+                               continue;
+
+                       set_bit(ptg, ptgs_used);
+                       prof->ptg[prof->ptg_cnt] = ptg;
+
+                       if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+                               break;
+
+                       /* nothing left in byte, then exit */
+                       m = ~((1 << (bit + 1)) - 1);
+                       if (!(ptypes[byte] & m))
+                               break;
+               }
+
+               bytes--;
+               byte++;
+       }
+
+       list_add(&prof->list, &hw->blk[blk].es.prof_map);
+       status = 0;
+
+err_ice_add_prof:
+       mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+       return status;
+}
 
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
 };
 
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single IPv4 header
+ */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+       0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+       0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
+       0x0000000E, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+       0x00000000, 0x00000000, 0x77000000, 0x10002000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+       0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
+       0x00000770, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* UDP Packet types for non-tunneled packets or tunneled
+ * packets with inner UDP.
+ */
+static const u32 ice_ptypes_udp_il[] = {
+       0x81000000, 0x20204040, 0x04000010, 0x80810102,
+       0x00000040, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+       0x04000000, 0x80810102, 0x10000040, 0x02040408,
+       0x00000102, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+       0x08000000, 0x01020204, 0x20000081, 0x04080810,
+       0x00000204, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+       enum ice_block blk;
+       u16 entry_length; /* # of bytes formatted entry will require */
+       u8 es_cnt;
+       struct ice_flow_prof *prof;
+
+       /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
+        * This will give us the direction flags.
+        */
+       struct ice_fv_word es[ICE_MAX_FV_WORDS];
+       DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+#define ICE_FLOW_SEG_HDRS_L3_MASK      \
+       (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+#define ICE_FLOW_SEG_HDRS_L4_MASK      \
+       (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+       u8 i;
+
+       for (i = 0; i < segs_cnt; i++) {
+               /* Multiple L3 headers */
+               if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+                   !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+                       return ICE_ERR_PARAM;
+
+               /* Multiple L4 headers */
+               if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+                   !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+                       return ICE_ERR_PARAM;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers being present in packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+       struct ice_flow_prof *prof;
+       u8 i;
+
+       memset(params->ptypes, 0xff, sizeof(params->ptypes));
+
+       prof = params->prof;
+
+       for (i = 0; i < params->prof->segs_cnt; i++) {
+               const unsigned long *src;
+               u32 hdrs;
+
+               hdrs = prof->segs[i].hdrs;
+
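+               /* Segment 0 describes the outer/first header of the packet,
+                * while any later segment describes an inner/last header;
+                * select the Outer/First/Single or Innermost/Last ptype
+                * bitmap accordingly.
+                */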
+               if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+                       src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
+                               (const unsigned long *)ice_ptypes_ipv4_il;
+                       bitmap_and(params->ptypes, params->ptypes, src,
+                                  ICE_FLOW_PTYPE_MAX);
+               } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+                       src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
+                               (const unsigned long *)ice_ptypes_ipv6_il;
+                       bitmap_and(params->ptypes, params->ptypes, src,
+                                  ICE_FLOW_PTYPE_MAX);
+               }
+
+               if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+                       src = (const unsigned long *)ice_ptypes_udp_il;
+                       bitmap_and(params->ptypes, params->ptypes, src,
+                                  ICE_FLOW_PTYPE_MAX);
+               } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+                       bitmap_and(params->ptypes, params->ptypes,
+                                  (const unsigned long *)ice_ptypes_tcp_il,
+                                  ICE_FLOW_PTYPE_MAX);
+               } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+                       src = (const unsigned long *)ice_ptypes_sctp_il;
+                       bitmap_and(params->ptypes, params->ptypes, src,
+                                  ICE_FLOW_PTYPE_MAX);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+                   u8 seg, enum ice_flow_field fld)
+{
+       enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+       u8 fv_words = hw->blk[params->blk].es.fvw;
+       struct ice_flow_fld_info *flds;
+       u16 cnt, ese_bits, i;
+       u16 off;
+
+       flds = params->prof->segs[seg].fields;
+
+       switch (fld) {
+       case ICE_FLOW_FIELD_IDX_IPV4_SA:
+       case ICE_FLOW_FIELD_IDX_IPV4_DA:
+               prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+               break;
+       case ICE_FLOW_FIELD_IDX_IPV6_SA:
+       case ICE_FLOW_FIELD_IDX_IPV6_DA:
+               prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+               break;
+       case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+       case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+               prot_id = ICE_PROT_TCP_IL;
+               break;
+       case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+       case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+               prot_id = ICE_PROT_UDP_IL_OR_S;
+               break;
+       default:
+               return ICE_ERR_NOT_IMPL;
+       }
+
+       /* Each extraction sequence entry is a word in size, and extracts a
+        * word-aligned offset from a protocol header.
+        */
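+       /* Illustrative example: assuming ice_flds_info describes the IPv4
+        * source address at byte offset 12 (96 bits) with a 32-bit size, and
+        * assuming 2-byte extraction words (ese_bits = 16), the field starts
+        * at word-aligned byte offset (96 / 16) * 2 = 12 with a displacement
+        * of 96 % 16 = 0 bits, and consumes DIV_ROUND_UP(0 + 32, 16) = 2
+        * extraction sequence entries.
+        */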
+       ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
+
+       flds[fld].xtrct.prot_id = prot_id;
+       flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+               ICE_FLOW_FV_EXTRACT_SZ;
+       flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
+       flds[fld].xtrct.idx = params->es_cnt;
+
+       /* Adjust the next field-entry index after accommodating the number of
+        * entries this field consumes
+        */
+       cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
+                          ese_bits);
+
+       /* Fill in the extraction sequence entries needed for this field */
+       off = flds[fld].xtrct.off;
+       for (i = 0; i < cnt; i++) {
+               u8 idx;
+
+               /* Make sure the number of extraction sequence entries
+                * required does not exceed the block's capability
+                */
+               if (params->es_cnt >= fv_words)
+                       return ICE_ERR_MAX_LIMIT;
+
+               /* some blocks require a reversed field vector layout */
+               if (hw->blk[params->blk].es.reverse)
+                       idx = fv_words - params->es_cnt - 1;
+               else
+                       idx = params->es_cnt;
+
+               params->es[idx].prot_id = prot_id;
+               params->es[idx].off = off;
+               params->es_cnt++;
+
+               off += ICE_FLOW_FV_EXTRACT_SZ;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+                         struct ice_flow_prof_params *params)
+{
+       struct ice_flow_prof *prof = params->prof;
+       enum ice_status status = 0;
+       u8 i;
+
+       for (i = 0; i < prof->segs_cnt; i++) {
+               u8 j;
+
+               for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+                                ICE_FLOW_FIELD_IDX_MAX) {
+                       status = ice_flow_xtract_fld(hw, params, i,
+                                                    (enum ice_flow_field)j);
+                       if (status)
+                               return status;
+               }
+       }
+
+       return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+       enum ice_status status;
+
+       status = ice_flow_proc_seg_hdrs(params);
+       if (status)
+               return status;
+
+       status = ice_flow_create_xtrct_seq(hw, params);
+       if (status)
+               return status;
+
+       switch (params->blk) {
+       case ICE_BLK_RSS:
+               /* Only header information is provided for RSS configuration.
+                * No further processing is needed.
+                */
+               status = 0;
+               break;
+       default:
+               return ICE_ERR_NOT_IMPL;
+       }
+
+       return status;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+                      enum ice_flow_dir dir, u64 prof_id,
+                      struct ice_flow_seg_info *segs, u8 segs_cnt,
+                      struct ice_flow_prof **prof)
+{
+       struct ice_flow_prof_params params;
+       enum ice_status status;
+       u8 i;
+
+       if (!prof)
+               return ICE_ERR_BAD_PTR;
+
+       memset(&params, 0, sizeof(params));
+       params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
+                                  GFP_KERNEL);
+       if (!params.prof)
+               return ICE_ERR_NO_MEMORY;
+
+       /* initialize extraction sequence to all invalid (0xff) */
+       for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+               params.es[i].prot_id = ICE_PROT_INVALID;
+               params.es[i].off = ICE_FV_OFFSET_INVAL;
+       }
+
+       params.blk = blk;
+       params.prof->id = prof_id;
+       params.prof->dir = dir;
+       params.prof->segs_cnt = segs_cnt;
+
+       /* Make a copy of the segments that need to be persistent in the flow
+        * profile instance
+        */
+       for (i = 0; i < segs_cnt; i++)
+               memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+
+       status = ice_flow_proc_segs(hw, &params);
+       if (status) {
+               ice_debug(hw, ICE_DBG_FLOW,
+                         "Error processing a flow's packet segments\n");
+               goto out;
+       }
+
+       /* Add a HW profile for this flow profile */
+       status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+       if (status) {
+               ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+               goto out;
+       }
+
+       INIT_LIST_HEAD(&params.prof->entries);
+       mutex_init(&params.prof->entries_lock);
+       *prof = params.prof;
+
+out:
+       if (status)
+               devm_kfree(ice_hw_to_dev(hw), params.prof);
+
+       return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+                 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+       struct ice_flow_prof *prof = NULL;
+       enum ice_status status;
+
+       if (segs_cnt > ICE_FLOW_SEG_MAX)
+               return ICE_ERR_MAX_LIMIT;
+
+       if (!segs_cnt)
+               return ICE_ERR_PARAM;
+
+       if (!segs)
+               return ICE_ERR_BAD_PTR;
+
+       status = ice_flow_val_hdrs(segs, segs_cnt);
+       if (status)
+               return status;
+
+       mutex_lock(&hw->fl_profs_locks[blk]);
+
+       status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+                                       &prof);
+       if (!status)
+               list_add(&prof->l_entry, &hw->fl_profs[blk]);
+
+       mutex_unlock(&hw->fl_profs_locks[blk]);
+
+       return status;
+}
+
 /**
  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
  * @seg: packet segment the field being set belongs to
        return 0;
 }
 
+#define ICE_FLOW_PROF_HASH_S   0
+#define ICE_FLOW_PROF_HASH_M   (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S    32
+#define ICE_FLOW_PROF_HDR_M    (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S  63
+#define ICE_FLOW_PROF_ENCAP_M  (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
 #define ICE_RSS_OUTER_HEADERS  1
 
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:62] - Protocol header
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
+       (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+             (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+             ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
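+/* Illustrative example: for a single, non-tunneled segment,
+ * ICE_FLOW_GEN_PROFID(hashed_flds, segs[0].hdrs, 1) keeps the low 32 bits of
+ * the hashed fields, places the protocol header bitmap at bit 32, and leaves
+ * the encapsulation bit (63) clear because segs_cnt - 1 == 0.
+ */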
+
 /**
  * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
  * @addl_hdrs: protocol header fields
  * @segs_cnt: packet segment count
  * Assumption: lock has already been acquired for RSS list
  */
 static enum ice_status
-ice_add_rss_cfg_sync(u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt)
+ice_add_rss_cfg_sync(struct ice_hw *hw, u64 hashed_flds, u32 addl_hdrs,
+                    u8 segs_cnt)
 {
        struct ice_flow_seg_info *segs;
        enum ice_status status;
        /* Construct the packet segment info from the hashed fields */
        status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
                                           addl_hdrs);
-
+       if (status)
+               goto exit;
+
+       /* Create a new flow profile with generated profile and packet
+        * segment information.
+        */
+       status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX,
+                                  ICE_FLOW_GEN_PROFID(hashed_flds,
+                                                      segs[segs_cnt - 1].hdrs,
+                                                      segs_cnt),
+                                  segs, segs_cnt);
+
+exit:
        kfree(segs);
        return status;
 }
                return ICE_ERR_PARAM;
 
        mutex_lock(&hw->rss_locks);
-       status = ice_add_rss_cfg_sync(hashed_flds, addl_hdrs,
+       status = ice_add_rss_cfg_sync(hw, hashed_flds, addl_hdrs,
                                      ICE_RSS_OUTER_HEADERS);
        mutex_unlock(&hw->rss_locks);
 
        mutex_lock(&hw->rss_locks);
        list_for_each_entry(r, &hw->rss_list_head, l_entry) {
                if (test_bit(vsi_handle, r->vsis)) {
-                       status = ice_add_rss_cfg_sync(r->hashed_flds,
+                       status = ice_add_rss_cfg_sync(hw, r->hashed_flds,
                                                      r->packet_hdr,
                                                      ICE_RSS_OUTER_HEADERS);
                        if (status)