continue;
 
                for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
-                       u64 prof_id;
-
-                       prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+                       u64 prof_id = prof->prof_id[tun];
 
                        for (i = 0; i < prof->cnt; i++) {
                                if (prof->vsi_h[i] != vsi_idx)
                return;
 
        for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
-               u64 prof_id;
+               u64 prof_id = prof->prof_id[tun];
                int j;
 
-               prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
                for (j = 0; j < prof->cnt; j++) {
                        u16 vsi_num;
 
                for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
                        struct ice_flow_prof *hw_prof;
                        struct ice_fd_hw_prof *prof;
-                       u64 prof_id;
                        int j;
 
                        prof = hw->fdir_prof[flow];
-                       prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
-                       ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+                       ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
                                          prof->fdir_seg[tun], TNL_SEG_CNT(tun),
                                          &hw_prof);
                        for (j = 0; j < prof->cnt; j++) {
 
                                prio = ICE_FLOW_PRIO_NORMAL;
                                err = ice_flow_add_entry(hw, ICE_BLK_FD,
-                                                        prof_id,
+                                                        hw_prof->id,
                                                         prof->vsi_h[0],
                                                         prof->vsi_h[j],
                                                         prio, prof->fdir_seg,
                                                flow);
                                        continue;
                                }
+                               prof->prof_id[tun] = hw_prof->id;
                                prof->entry_h[j][tun] = entry_h;
                        }
                }
        u64 entry1_h = 0;
        u64 entry2_h = 0;
        bool del_last;
-       u64 prof_id;
        int err;
        int idx;
 
         * That is the final parameters are 1 header (segment), no
         * actions (NULL) and zero actions 0.
         */
-       prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
-       err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+       err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
                                TNL_SEG_CNT(tun), &prof);
        if (err)
                return err;
-       err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+       err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
                                 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, &entry1_h);
        if (err)
                goto err_prof;
-       err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+       err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, &entry2_h);
        if (err)
                goto err_entry;
 
        hw_prof->fdir_seg[tun] = seg;
+       hw_prof->prof_id[tun] = prof->id;
        hw_prof->entry_h[0][tun] = entry1_h;
        hw_prof->entry_h[1][tun] = entry2_h;
        hw_prof->vsi_h[0] = main_vsi->idx;
 
                entry1_h = 0;
                vsi_h = main_vsi->tc_map_vsi[idx]->idx;
-               err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+               err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
                                         main_vsi->idx, vsi_h,
                                         ICE_FLOW_PRIO_NORMAL, seg,
                                         &entry1_h);
 
                if (!hw_prof->entry_h[idx][tun])
                        continue;
-               ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+               ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
                ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
                hw_prof->entry_h[idx][tun] = 0;
                if (del_last)
                hw_prof->cnt = 0;
 err_entry:
        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
-                            ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+                            ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 err_prof:
-       ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+       ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
        dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
 
        return err;
 
                devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
                devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
                devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
+               devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
        }
 
        list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
 
        for (i = 0; i < ICE_BLK_COUNT; i++) {
                struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+               struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
                struct ice_prof_tcam *prof = &hw->blk[i].prof;
                struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
                struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
                memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
                memset(es->written, 0, es->count * sizeof(*es->written));
                memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
+
+               memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
        }
 }
 
        ice_init_all_prof_masks(hw);
        for (i = 0; i < ICE_BLK_COUNT; i++) {
                struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+               struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
                struct ice_prof_tcam *prof = &hw->blk[i].prof;
                struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
                struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
                                            sizeof(*es->mask_ena), GFP_KERNEL);
                if (!es->mask_ena)
                        goto err;
+
+               prof_id->count = blk_sizes[i].prof_id;
+               prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
+                                          sizeof(*prof_id->id), GFP_KERNEL);
+               if (!prof_id->id)
+                       goto err;
        }
        return 0;
 
 
        struct ice_mask masks[ICE_PROF_MASK_COUNT];
 };
 
+struct ice_prof_id {
+       unsigned long *id;
+       int count;
+};
+
 /* Tables per block */
 struct ice_blk_info {
        struct ice_xlt1 xlt1;
        struct ice_xlt2 xlt2;
+       struct ice_prof_id prof_id;
        struct ice_prof_tcam prof;
        struct ice_prof_redir prof_redir;
        struct ice_es es;
 
  * @hw: pointer to the HW struct
  * @blk: classification stage
  * @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
  * @segs: array of one or more packet segments that describe the flow
  * @segs_cnt: number of packet segments provided
  * @prof: stores the returned flow profile added
  */
 static int
 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
-                      enum ice_flow_dir dir, u64 prof_id,
+                      enum ice_flow_dir dir,
                       struct ice_flow_seg_info *segs, u8 segs_cnt,
                       struct ice_flow_prof **prof)
 {
        struct ice_flow_prof_params *params;
+       struct ice_prof_id *ids;
        int status;
+       u64 prof_id;
        u8 i;
 
        if (!prof)
                return -EINVAL;
 
+       ids = &hw->blk[blk].prof_id;
+       prof_id = find_first_zero_bit(ids->id, ids->count);
+       if (prof_id >= ids->count)
+               return -ENOSPC;
+
        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&params->prof->entries);
        mutex_init(&params->prof->entries_lock);
+       set_bit(prof_id, ids->id);
        *prof = params->prof;
 
 out:
        /* Remove all hardware profiles associated with this flow profile */
        status = ice_rem_prof(hw, blk, prof->id);
        if (!status) {
+               clear_bit(prof->id, hw->blk[blk].prof_id.id);
                list_del(&prof->l_entry);
                mutex_destroy(&prof->entries_lock);
                devm_kfree(ice_hw_to_dev(hw), prof);
  * @hw: pointer to the HW struct
  * @blk: classification stage
  * @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
  * @segs: array of one or more packet segments that describe the flow
  * @segs_cnt: number of packet segments provided
  * @prof: stores the returned flow profile added
  */
 int
 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
-                 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+                 struct ice_flow_seg_info *segs, u8 segs_cnt,
                  struct ice_flow_prof **prof)
 {
        int status;
 
        mutex_lock(&hw->fl_profs_locks[blk]);
 
-       status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
-                                       prof);
+       status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt, prof);
        if (!status)
                list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
 
        return 0;
 }
 
-#define ICE_FLOW_PROF_HASH_S   0
-#define ICE_FLOW_PROF_HASH_M   GENMASK_ULL(31, 0)
-#define ICE_FLOW_PROF_HDR_S    32
-#define ICE_FLOW_PROF_HDR_M    GENMASK_ULL(61, 32)
-#define ICE_FLOW_PROF_ENCAP_S  62
-#define ICE_FLOW_PROF_ENCAP_M  GENMASK_ULL(63, 62)
-
-/* Flow profile ID format:
- * [0:31] - Packet match fields
- * [32:61] - Protocol header
- * [62:63] - Encapsulation flag:
- *          0 if non-tunneled
- *          1 if tunneled
- *          2 for tunneled with outer ipv4
- *          3 for tunneled with outer ipv6
- */
-#define ICE_FLOW_GEN_PROFID(hash, hdr, encap)                                \
-       ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) |                        \
-              (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
-              (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) &                    \
-               ICE_FLOW_PROF_ENCAP_M)))
-
 /**
  * ice_add_rss_cfg_sync - add an RSS configuration
  * @hw: pointer to the hardware structure
                goto exit;
        }
 
-       /* Create a new flow profile with generated profile and packet
-        * segment information.
-        */
+       /* Create a new flow profile with packet segment information. */
        status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
-                                  ICE_FLOW_GEN_PROFID(cfg->hash_flds,
-                                                      segs[segs_cnt - 1].hdrs,
-                                                      cfg->hdr_type),
                                   segs, segs_cnt, &prof);
        if (status)
                goto exit;
 
 
 int
 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
-                 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+                 struct ice_flow_seg_info *segs, u8 segs_cnt,
                  struct ice_flow_prof **prof);
 int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
 int
 
 
                for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
                        enum ice_flow_priority prio;
-                       u64 prof_id;
 
                        /* add this VSI to FDir profile for this flow */
                        prio = ICE_FLOW_PRIO_NORMAL;
                        prof = hw->fdir_prof[flow];
-                       prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
-                       status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+                       status = ice_flow_add_entry(hw, ICE_BLK_FD,
+                                                   prof->prof_id[tun],
                                                    prof->vsi_h[0], vsi->idx,
                                                    prio, prof->fdir_seg[tun],
                                                    &entry_h);
 
        int cnt;
        u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
        u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+       u64 prof_id[ICE_FD_HW_SEG_MAX];
 };
 
 /* Common HW capabilities for SW use */
 
 #define to_fltr_conf_from_desc(p) \
        container_of(p, struct virtchnl_fdir_fltr_conf, input)
 
-#define ICE_FLOW_PROF_TYPE_S   0
-#define ICE_FLOW_PROF_TYPE_M   (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
-#define ICE_FLOW_PROF_VSI_S    32
-#define ICE_FLOW_PROF_VSI_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
-
-/* Flow profile ID format:
- * [0:31] - flow type, flow + tun_offs
- * [32:63] - VSI index
- */
-#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
-       ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
-             (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
-
 #define GTPU_TEID_OFFSET 4
 #define GTPU_EH_QFI_OFFSET 1
 #define GTPU_EH_QFI_MASK 0x3F
                return;
 
        vf_prof = fdir->fdir_prof[flow];
+       prof_id = vf_prof->prof_id[tun];
 
        vf_vsi = ice_get_vf_vsi(vf);
        if (!vf_vsi) {
        if (!fdir->prof_entry_cnt[flow][tun])
                return;
 
-       prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
-                                  flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
-
        for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
                if (vf_prof->entry_h[i][tun]) {
                        u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
        struct ice_hw *hw;
        u64 entry1_h = 0;
        u64 entry2_h = 0;
-       u64 prof_id;
        int ret;
 
        pf = vf->pf;
                ice_vc_fdir_rem_prof(vf, flow, tun);
        }
 
-       prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
-                                  tun ? ICE_FLTR_PTYPE_MAX : 0);
-
-       ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+       ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
                                tun + 1, &prof);
        if (ret) {
                dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
                goto err_exit;
        }
 
-       ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+       ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
                                 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, &entry1_h);
        if (ret) {
                goto err_prof;
        }
 
-       ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+       ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, &entry2_h);
        if (ret) {
        vf_prof->cnt++;
        fdir->prof_entry_cnt[flow][tun]++;
 
+       vf_prof->prof_id[tun] = prof->id;
+
        return 0;
 
 err_entry_1:
        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
-                            ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+                            ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 err_prof:
-       ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+       ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 err_exit:
        return ret;
 }