mlx5 driver support devlink port function attr mechanism to setup ipsec_crypto
 capability. (refer to Documentation/networking/devlink/devlink-port.rst)
 
+IPsec packet capability setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Users who want mlx5 PCI VFs to be able to perform IPsec packet offloading need
+to explicitly enable the VF ipsec_packet capability. Enabling the IPsec
+capability for VFs is supported starting with ConnectX-6 Dx devices and above.
+When a VF has the IPsec capability enabled, any IPsec offloading is blocked on
+the PF.
+
+mlx5 driver supports the devlink port function attr mechanism to set up the
+ipsec_packet capability. (refer to Documentation/networking/devlink/devlink-port.rst)
+
 SF state setup
 --------------
 
 
 #ifdef CONFIG_XFRM_OFFLOAD
        .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
        .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+       .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+       .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
 #endif /* CONFIG_XFRM_OFFLOAD */
 };
 
 
 
 enum esw_vport_ipsec_offload {
        MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+       /* Packet (full) offload — maps to the ipsec_full_offload HCA cap */
+       MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
 };
 
 int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
 
        if (!ipsec_enabled) {
                vport->info.ipsec_crypto_enabled = false;
+               vport->info.ipsec_packet_enabled = false;
                return 0;
        }
 
        hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
        vport->info.ipsec_crypto_enabled =
                MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+       vport->info.ipsec_packet_enabled =
+               MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
 free:
        kvfree(query_cap);
        return err;
        case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
                MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
                break;
+       case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+               MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+               break;
        default:
                ret = -EOPNOTSUPP;
                goto free;
                err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
                if (err)
                        return err;
-               err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+               err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
                if (err)
                        return err;
+
+               /* The generic ipsec_offload cap can be disabled only if both
+                * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+                */
+               if (!vport->info.ipsec_crypto_enabled &&
+                   !vport->info.ipsec_packet_enabled) {
+                       err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+                       if (err)
+                               return err;
+               }
        }
 
        switch (type) {
        case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
                vport->info.ipsec_crypto_enabled = enable;
                break;
+       case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+               vport->info.ipsec_packet_enabled = enable;
+               break;
        default:
                return -EINVAL;
        }
        return err;
 }
 
+/* Check whether the function behind @vport_num can do IPsec packet (full)
+ * offload: requires generic VF IPsec offload support plus the NIC RX decap
+ * flow-table capability on that function.
+ *
+ * Returns 0 if supported, -EOPNOTSUPP/-ENOMEM or a query error otherwise.
+ */
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+                                              u16 vport_num)
+{
+       int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       void *hca_cap, *query_cap;
+       int ret;
+
+       if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+               return -EOPNOTSUPP;
+
+       ret = esw_ipsec_offload_supported(dev, vport_num);
+       if (ret)
+               return ret;
+
+       query_cap = kvzalloc(query_sz, GFP_KERNEL);
+       if (!query_cap)
+               return -ENOMEM;
+
+       ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+       if (ret)
+               goto out;
+
+       /* Packet mode needs decap on the NIC RX flow tables of the function */
+       hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+       if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap))
+               ret = -EOPNOTSUPP;
+out:
+       kvfree(query_cap);
+       return ret;
+}
+
 int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                                         bool enable)
 {
        return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
                                               MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
 }
+
+/* Enable/disable IPsec packet (full) offload for @vport */
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                        bool enable)
+{
+       return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+                                              MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
 
        vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
 
        err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
-
 out_free:
        kfree(query_ctx);
        return err;
        /* Sync with current vport context */
        vport->enabled_events = enabled_events;
        vport->enabled = true;
-       if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
+       if (vport->vport != MLX5_VPORT_PF &&
+           (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
                esw->enabled_ipsec_vf_count++;
 
        /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
            MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
                mlx5_esw_vport_vhca_id_clear(esw, vport_num);
 
-       if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
+       if (vport->vport != MLX5_VPORT_PF &&
+           (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
                esw->enabled_ipsec_vf_count--;
 
        /* We don't assume VFs will cleanup after themselves.
 
        u8                      roce_enabled: 1;
        u8                      mig_enabled: 1;
        u8                      ipsec_crypto_enabled: 1;
+       u8                      ipsec_packet_enabled: 1;
 };
 
 /* Vport context events */
                                          struct netlink_ext_ack *extack);
 int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
                                          struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+                                         struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+                                         struct netlink_ext_ack *extack);
 #endif /* CONFIG_XFRM_OFFLOAD */
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
                                               u16 vport_num);
 int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                                         bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                        bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+                                              u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 
        mutex_unlock(&esw->state_lock);
        return err;
 }
+
+/* devlink port function attr get: report whether IPsec packet offload is
+ * enabled on the function backing @port. Fills @is_enabled on success.
+ */
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+                                         struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       int err = 0;
+
+       esw = mlx5_devlink_eswitch_get(port->devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
+       if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+               return -EOPNOTSUPP;
+       }
+
+       vport = mlx5_devlink_port_vport_get(port);
+
+       mutex_lock(&esw->state_lock);
+       if (!vport->enabled) {
+               err = -EOPNOTSUPP;
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+               goto unlock;
+       }
+
+       *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+/* devlink port function attr set: enable/disable IPsec packet offload on the
+ * function backing @port. Fails with -EBUSY while any IPsec offload context
+ * is active on the PF, and with -EOPNOTSUPP if the device/vport can't do
+ * packet mode or the vport is disabled.
+ */
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+                                         bool enable,
+                                         struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       u16 vport_num;
+       int err;
+
+       esw = mlx5_devlink_eswitch_get(port->devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
+       vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+       err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Device doesn't support IPsec packet mode");
+               return err;
+       }
+
+       vport = mlx5_devlink_port_vport_get(port);
+       mutex_lock(&esw->state_lock);
+       if (!vport->enabled) {
+               err = -EOPNOTSUPP;
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+               goto unlock;
+       }
+
+       if (vport->info.ipsec_packet_enabled == enable)
+               goto unlock;
+
+       /* Capability can't be toggled while IPsec offload is in use on the PF */
+       if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+               err = -EBUSY;
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Can't change IPsec capability while IPsec offload is in use");
+               goto unlock;
+       }
+
+       err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Failed to set IPsec packet mode");
+               goto unlock;
+       }
+
+       /* NOTE(review): esw_ipsec_vf_offload_set_bytype() appears to update
+        * this flag already — confirm and deduplicate if so.
+        */
+       vport->info.ipsec_packet_enabled = enable;
+       if (enable)
+               esw->enabled_ipsec_vf_count++;
+       else
+               esw->enabled_ipsec_vf_count--;
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
 #endif /* CONFIG_XFRM_OFFLOAD */