}
 
 /* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
 {
        int i, size = 0;
 
-       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
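+               /* count only the fields selected in this key composition */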
+               if (!(fields & dist_fields[i].id))
+                       continue;
                size += dist_fields[i].size;
+       }
 
        return size;
 }
        return 0;
 }
 
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported.
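+ *
+ * E.g. for fields == DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_L4DST, only the
+ * IP source and L4 destination port values are kept, compacted at the
+ * start of the key buffer; everything else is discarded.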
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+       int off = 0, new_off = 0;
+       int i, size;
+
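+       /* off walks the full-size key, new_off the pruned one */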
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               size = dist_fields[i].size;
+               if (dist_fields[i].id & fields) {
+                       /* source and destination can overlap once fields
+                        * have been pruned, so use memmove rather than memcpy
+                        */
+                       memmove(key_mem + new_off, key_mem + off, size);
+                       new_off += size;
+               }
+               off += size;
+       }
+}
+
 /* Set Rx distribution (hash or flow classification) key
  * flags is a combination of RXH_ bits
  */
                struct dpkg_extract *key =
                        &cls_cfg.extracts[cls_cfg.num_extracts];
 
-               /* For Rx hashing key we set only the selected fields.
-                * For Rx flow classification key we set all supported fields
+               /* For both Rx hashing and classification keys
+                * we set only the selected fields.
                 */
-               if (type == DPAA2_ETH_RX_DIST_HASH) {
-                       if (!(flags & dist_fields[i].id))
-                               continue;
+               if (!(flags & dist_fields[i].id))
+                       continue;
+               if (type == DPAA2_ETH_RX_DIST_HASH)
                        rx_hash_fields |= dist_fields[i].rxnfc_field;
-               }
 
                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
 }
 
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
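+/* Set Rx flow classification key on the given header fields */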
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
 {
        struct device *dev = priv->net_dev->dev.parent;
        int err;
                return -EOPNOTSUPP;
        }
 
-       if (!dpaa2_eth_fs_enabled(priv) || !dpaa2_eth_fs_mask_enabled(priv)) {
+       if (!dpaa2_eth_fs_enabled(priv)) {
                dev_dbg(dev, "Rx cls disabled in DPNI options\n");
                return -EOPNOTSUPP;
        }
                return -EOPNOTSUPP;
        }
 
-       err = dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+       /* If there is no support for masking in the classification table,
+        * we don't set a default key, as it will depend on the rules
+        * added by the user at runtime.
+        */
+       if (!dpaa2_eth_fs_mask_enabled(priv))
+               goto out;
+
+       err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
        if (err)
                return err;
 
+out:
        priv->rx_cls_enabled = 1;
 
        return 0;
        /* Configure the flow classification key; it includes all
         * supported header fields and cannot be modified at runtime
         */
-       err = dpaa2_eth_set_cls(priv);
+       err = dpaa2_eth_set_default_cls(priv);
        if (err && err != -EOPNOTSUPP)
                dev_err(dev, "Failed to configure Rx classification key\n");
 
 
 }
 
 static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
                *(__be16 *)(key + off) = eth_value->h_proto;
                *(__be16 *)(mask + off) = eth_mask->h_proto;
+               *fields |= DPAA2_ETH_DIST_ETHTYPE;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_source)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
                ether_addr_copy(key + off, eth_value->h_source);
                ether_addr_copy(mask + off, eth_mask->h_source);
+               *fields |= DPAA2_ETH_DIST_ETHSRC;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, eth_value->h_dest);
                ether_addr_copy(mask + off, eth_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
 
 static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                         struct ethtool_usrip4_spec *uip_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
        u32 tmp_value, tmp_mask;
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = uip_value->ip4src;
                *(__be32 *)(mask + off) = uip_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (uip_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = uip_value->ip4dst;
                *(__be32 *)(mask + off) = uip_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (uip_mask->proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
                *(u8 *)(key + off) = uip_value->proto;
                *(u8 *)(mask + off) = uip_mask->proto;
+               *fields |= DPAA2_ETH_DIST_IPPROTO;
        }
 
        if (uip_mask->l4_4_bytes) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = htons(tmp_value >> 16);
                *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+               *fields |= DPAA2_ETH_DIST_L4SRC;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
                *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        return 0;
 }
 
 static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                        struct ethtool_tcpip4_spec *l4_mask,
-                       void *key, void *mask, u8 l4_proto)
+                       void *key, void *mask, u8 l4_proto, u64 *fields)
 {
        int off;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = l4_value->ip4src;
                *(__be32 *)(mask + off) = l4_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (l4_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = l4_value->ip4dst;
                *(__be32 *)(mask + off) = l4_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (l4_mask->psrc) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = l4_value->psrc;
                *(__be16 *)(mask + off) = l4_mask->psrc;
+               *fields |= DPAA2_ETH_DIST_L4SRC;
        }
 
        if (l4_mask->pdst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = l4_value->pdst;
                *(__be16 *)(mask + off) = l4_mask->pdst;
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames with the specified L4 proto */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
        *(u8 *)(key + off) = l4_proto;
        *(u8 *)(mask + off) = 0xFF;
+       *fields |= DPAA2_ETH_DIST_IPPROTO;
 
        return 0;
 }
 
 static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                         struct ethtool_flow_ext *ext_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
                *(__be16 *)(key + off) = ext_value->vlan_tci;
                *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+               *fields |= DPAA2_ETH_DIST_VLAN;
        }
 
        return 0;
 
 static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                             struct ethtool_flow_ext *ext_mask,
-                            void *key, void *mask)
+                            void *key, void *mask, u64 *fields)
 {
        int off;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, ext_value->h_dest);
                ether_addr_copy(mask + off, ext_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
 }
 
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+                        u64 *fields)
 {
        int err;
 
        switch (fs->flow_type & 0xFF) {
        case ETHER_FLOW:
                err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
-                                   key, mask);
+                                   key, mask, fields);
                break;
        case IP_USER_FLOW:
                err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
-                                   &fs->m_u.usr_ip4_spec, key, mask);
+                                   &fs->m_u.usr_ip4_spec, key, mask, fields);
                break;
        case TCP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
-                                  key, mask, IPPROTO_TCP);
+                                  key, mask, IPPROTO_TCP, fields);
                break;
        case UDP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
-                                  key, mask, IPPROTO_UDP);
+                                  key, mask, IPPROTO_UDP, fields);
                break;
        case SCTP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
                                   &fs->m_u.sctp_ip4_spec, key, mask,
-                                  IPPROTO_SCTP);
+                                  IPPROTO_SCTP, fields);
                break;
        default:
                return -EOPNOTSUPP;
                return err;
 
        if (fs->flow_type & FLOW_EXT) {
-               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
                if (err)
                        return err;
        }
 
        if (fs->flow_type & FLOW_MAC_EXT) {
-               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+                                       fields);
                if (err)
                        return err;
        }
        struct dpni_rule_cfg rule_cfg = { 0 };
        struct dpni_fs_action_cfg fs_act = { 0 };
        dma_addr_t key_iova;
+       u64 fields = 0;
        void *key_buf;
        int err;
 
            fs->ring_cookie >= dpaa2_eth_queue_count(priv))
                return -EINVAL;
 
-       rule_cfg.key_size = dpaa2_eth_cls_key_size();
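+       /* build the rule on the full-size key first; it is trimmed down
+        * later if masking is not supported
+        */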
+       rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
 
        /* allocate twice the key size, for the actual key and for mask */
        key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
                return -ENOMEM;
 
        /* Fill the key and mask memory areas */
-       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
        if (err)
                goto free_mem;
 
+       if (!dpaa2_eth_fs_mask_enabled(priv)) {
+               /* Masking allows us to configure a maximal key during init and
+                * use it for all flow steering rules. Without it, we include
+                * in the key only the fields actually used, so we need to
+                * trim all other fields out of the final key buffer.
+                *
+                * Program the FS key if needed, or return error if previously
+                * set key can't be used for the current rule. User needs to
+                * delete existing rules in this case to allow for the new one.
+                */
+               if (!priv->rx_cls_fields) {
+                       err = dpaa2_eth_set_cls(net_dev, fields);
+                       if (err)
+                               goto free_mem;
+
+                       priv->rx_cls_fields = fields;
+               } else if (priv->rx_cls_fields != fields) {
+                       netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+                       err = -EOPNOTSUPP;
+                       goto free_mem;
+               }
+
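+               /* shrink the key to cover only the fields used by this rule */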
+               dpaa2_eth_cls_trim_rule(key_buf, fields);
+               rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+       }
+
        key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
        }
 
        rule_cfg.key_iova = key_iova;
-       rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+       if (dpaa2_eth_fs_mask_enabled(priv))
+               rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
 
        if (add) {
                if (fs->ring_cookie == RX_CLS_FLOW_DISC)
        return err;
 }
 
+/* Count the flow steering rules currently in use */
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+       int i, rules = 0;
+
+       for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+               if (priv->cls_rules[i].in_use)
+                       rules++;
+
+       return rules;
+}
+
 static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
                           int location)
                        return err;
 
                rule->in_use = 0;
+
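+               /* no more rules in use: allow a new FS key to be configured */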
+               if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+                       priv->rx_cls_fields = 0;
        }
 
        /* If no new entry to add, return here */
                break;
        case ETHTOOL_GRXCLSRLCNT:
                rxnfc->rule_cnt = 0;
-               for (i = 0; i < max_rules; i++)
-                       if (priv->cls_rules[i].in_use)
-                               rxnfc->rule_cnt++;
+               rxnfc->rule_cnt = num_rules(priv);
                rxnfc->data = max_rules;
                break;
        case ETHTOOL_GRXCLSRULE: