        udp_hdr(skb)->len = htons(payload_len);
 }
 
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
-                     struct mlx5e_txqsq *sq,
-                     struct net_device *dev,
-                     struct mlx5e_tx_wqe **wqe,
-                     u16 *pi)
+static inline bool mlx5e_accel_handle_tx(struct sk_buff *skb,
+                                        struct mlx5e_txqsq *sq,
+                                        struct net_device *dev,
+                                        struct mlx5e_tx_wqe **wqe,
+                                        u16 *pi)
 {
 #ifdef CONFIG_MLX5_EN_TLS
        if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
-               skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
-               if (unlikely(!skb))
-                       return NULL;
+               if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi)))
+                       return false;
        }
 #endif
 
 #ifdef CONFIG_MLX5_EN_IPSEC
        if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-               skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
-               if (unlikely(!skb))
-                       return NULL;
+               if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb)))
+                       return false;
        }
 #endif
 
        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                mlx5e_udp_gso_handle_tx_skb(skb);
 
-       return skb;
+       return true;
 }
 
 #endif /* __MLX5E_EN_ACCEL_H__ */
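
A minimal sketch of the contract behind the new bool return (illustrative only, not driver code; example_handle_tx_skb() and some_offload_error() are hypothetical names):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical failure predicate, standing in for the real offload checks. */
static bool some_offload_error(const struct sk_buff *skb)
{
        return skb->len == 0;
}

/* On failure the handler must consume the skb itself: the caller only sees
 * "false", returns NETDEV_TX_OK and never frees the skb. Returning "true"
 * leaves ownership with the caller, which goes on to mlx5e_sq_xmit().
 */
static bool example_handle_tx_skb(struct sk_buff *skb)
{
        if (some_offload_error(skb)) {
                dev_kfree_skb_any(skb); /* drop here, not in the caller */
                return false;
        }
        return true;
}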
 
                   ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-                                         struct mlx5e_tx_wqe *wqe,
-                                         struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+                              struct mlx5e_tx_wqe *wqe,
+                              struct sk_buff *skb)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;
 
        if (!xo)
-               return skb;
+               return true;
 
        sp = skb_sec_path(skb);
        if (unlikely(sp->len != 1)) {
        sa_entry->set_iv_op(skb, x, xo);
        mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-       return skb;
+       return true;
 
 drop:
        kfree_skb(skb);
-       return NULL;
+       return false;
 }
 
 static inline struct xfrm_state *
 
                            struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
                        struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-                                         struct mlx5e_tx_wqe *wqe,
-                                         struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+                              struct mlx5e_tx_wqe *wqe,
+                              struct sk_buff *skb);
 
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-                                        struct mlx5e_txqsq *sq,
-                                        struct sk_buff *skb,
-                                        struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+                             struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+                             u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           u32 *dma_fifo_cc);
 
        return MLX5E_KTLS_SYNC_FAIL;
 }
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-                                        struct mlx5e_txqsq *sq,
-                                        struct sk_buff *skb,
-                                        struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+                             struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+                             u16 *pi)
 {
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_sq_stats *stats = sq->stats;
        stats->tls_encrypted_bytes   += datalen;
 
 out:
-       return skb;
+       return true;
 
 err_out:
        dev_kfree_skb_any(skb);
-       return NULL;
+       return false;
 }
 
        nskb->queue_mapping = skb->queue_mapping;
 }
 
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
-                    struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                    struct mlx5e_tx_wqe **wqe,
-                    u16 *pi,
-                    struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+                                struct mlx5e_txqsq *sq, struct sk_buff *skb,
+                                struct mlx5e_tx_wqe **wqe, u16 *pi,
+                                struct mlx5e_tls *tls)
 {
        u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
        struct sync_info info;
                if (likely(payload <= -info.sync_len))
                        /* SKB payload doesn't require offload
                         */
-                       return skb;
+                       return true;
 
                atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
                goto err_out;
        mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
        *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
-       return skb;
+       return true;
 
 err_out:
        dev_kfree_skb_any(skb);
-       return NULL;
+       return false;
 }
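
A minimal sketch of why wqe and pi are passed by reference (illustrative only; prepare_resync_skb() is a hypothetical stand-in for the nskb construction above, while mlx5e_sq_xmit(), mlx5_wq_cyc_ctr2ix() and MLX5E_TX_FETCH_WQE() are the driver helpers seen in this hunk): a handler that posts a WQE of its own consumes the slot the caller fetched, so it must hand back a fresh producer index and WQE pointer before returning true.

/* Assumes the mlx5e driver-internal types and helpers from "en.h" / "en/txrx.h". */
static bool example_handle_ooo(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                               struct mlx5e_tx_wqe **wqe, u16 *pi)
{
        struct sk_buff *nskb = prepare_resync_skb(skb); /* hypothetical helper */

        if (unlikely(!nskb)) {
                dev_kfree_skb_any(skb);                 /* handler owns the drop */
                return false;
        }

        mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);       /* uses the fetched slot */
        *pi  = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);     /* refresh for the caller */
        *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
        return true;                                    /* caller now posts skb */
}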
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-                                       struct mlx5e_txqsq *sq,
-                                       struct sk_buff *skb,
-                                       struct mlx5e_tx_wqe **wqe,
-                                       u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+                            struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+                            u16 *pi)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_tls_offload_context_tx *context;
        int datalen;
        u32 skb_seq;
 
-       if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
-               skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
-               goto out;
-       }
+       if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+               return mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 
        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-               goto out;
+               return true;
 
        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        if (!datalen)
-               goto out;
+               return true;
 
        tls_ctx = tls_get_ctx(skb->sk);
        if (unlikely(tls_ctx->netdev != netdev))
-               goto out;
+               return true;
 
        skb_seq = ntohl(tcp_hdr(skb)->seq);
        context = mlx5e_get_tls_tx_context(tls_ctx);
        expected_seq = context->expected_seq;
 
-       if (unlikely(expected_seq != skb_seq)) {
-               skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
-               goto out;
-       }
+       if (unlikely(expected_seq != skb_seq))
+               return mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
 
        if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
                atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
                dev_kfree_skb_any(skb);
-               skb = NULL;
-               goto out;
+               return false;
        }
 
        context->expected_seq = skb_seq + datalen;
-out:
-       return skb;
+       return true;
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
 
 #include "en.h"
 #include "en/txrx.h"
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-                                       struct mlx5e_txqsq *sq,
-                                       struct sk_buff *skb,
-                                       struct mlx5e_tx_wqe **wqe,
-                                       u16 *pi);
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+                            struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+                            u16 *pi);
 
 void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
                             u32 *cqe_bcnt);
 
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 
        /* might send skbs and update wqe and pi */
-       skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
-       if (unlikely(!skb))
+       if (unlikely(!mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi)))
                return NETDEV_TX_OK;
 
        return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());