        ring->tx_max_pending = adapter->max_tx_ring_size;
        ring->rx_max_pending = adapter->max_rx_ring_size;
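+       /* When the device holds the TX queues in its own memory (LLQ
+        * placement), report the current push buffer length and the maximum
+        * length the device supports; otherwise report both values as 0.
+        */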
+       if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               bool large_llq_supported = adapter->large_llq_header_supported;
+
+               kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+               if (large_llq_supported)
+                       kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER;
+               else
+                       kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER;
+       } else {
+               kernel_ring->tx_push_buf_max_len = 0;
+               kernel_ring->tx_push_buf_len = 0;
+       }
+
        ring->tx_pending = adapter->tx_ring[0].ring_size;
        ring->rx_pending = adapter->rx_ring[0].ring_size;
 }
static int ena_set_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *ring,
                             struct kernel_ethtool_ringparam *kernel_ring,
                             struct netlink_ext_ack *extack)
 {
        struct ena_adapter *adapter = netdev_priv(netdev);
-       u32 new_tx_size, new_rx_size;
+       u32 new_tx_size, new_rx_size, new_tx_push_buf_len;
+       bool changed = false;
 
        new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
                        ENA_MIN_RING_SIZE : ring->tx_pending;
        new_tx_size = rounddown_pow_of_two(new_tx_size);

        new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
                        ENA_MIN_RING_SIZE : ring->rx_pending;
        new_rx_size = rounddown_pow_of_two(new_rx_size);
 
-       if (new_tx_size == adapter->requested_tx_ring_size &&
-           new_rx_size == adapter->requested_rx_ring_size)
+       changed |= new_tx_size != adapter->requested_tx_ring_size ||
+                  new_rx_size != adapter->requested_rx_ring_size;
+
+       /* This value is ignored if LLQ is not supported */
+       new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+
+       /* Validate that the push buffer is supported on the underlying device */
+       if (kernel_ring->tx_push_buf_len) {
+               enum ena_admin_placement_policy_type placement;
+
+               new_tx_push_buf_len = kernel_ring->tx_push_buf_len;
+
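+               /* TX push buffers can only be used when the TX queues are
+                * placed in device memory (LLQ mode), so reject the request
+                * when the device uses host-memory placement.
+                */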
+               placement = adapter->ena_dev->tx_mem_queue_type;
+               if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+                       return -EOPNOTSUPP;
+
+               if (new_tx_push_buf_len != ENA_LLQ_HEADER &&
+                   new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) {
+                       bool large_llq_sup = adapter->large_llq_header_supported;
+                       char large_llq_size_str[40];
+
+                       snprintf(large_llq_size_str, sizeof(large_llq_size_str),
+                                ", %lu", ENA_LLQ_LARGE_HEADER);
+
+                       NL_SET_ERR_MSG_FMT_MOD(extack,
+                                              "Supported tx push buf values: [%lu%s]",
+                                              ENA_LLQ_HEADER,
+                                              large_llq_sup ? large_llq_size_str : "");
+
+                       return -EINVAL;
+               }
+
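+               /* tx_max_header_size reflects the push buffer length that is
+                * currently configured, so any other value counts as a change.
+                */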
+               changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size;
+       }
+
+       if (!changed)
                return 0;
 
-       return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
+       return ena_update_queue_params(adapter, new_tx_size, new_rx_size,
+                                      new_tx_push_buf_len);
 }
 
 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
 static const struct ethtool_ops ena_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
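+       /* Advertise the TX push buffer length as a configurable ring parameter */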
+       .supported_ring_params  = ETHTOOL_RING_USE_TX_PUSH_BUF_LEN,
        .get_link_ksettings     = ena_get_link_ksettings,
        .get_drvinfo            = ena_get_drvinfo,
        .get_msglevel           = ena_get_msglevel,
 
        return 0;
 }
 
-int ena_update_queue_sizes(struct ena_adapter *adapter,
-                          u32 new_tx_size,
-                          u32 new_rx_size)
+int ena_update_queue_params(struct ena_adapter *adapter,
+                           u32 new_tx_size,
+                           u32 new_rx_size,
+                           u32 new_llq_header_len)
 {
-       bool dev_was_up;
+       bool dev_was_up, large_llq_changed = false;
+       int rc = 0;
 
        dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
        ena_close(adapter->netdev);
        adapter->requested_tx_ring_size = new_tx_size;
        adapter->requested_rx_ring_size = new_rx_size;
        ena_init_io_rings(adapter,
                          0,
                          adapter->xdp_num_queues +
                          adapter->num_io_queues);
-       return dev_was_up ? ena_up(adapter) : 0;
+
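+       /* The LLQ entry size changes only when the device uses LLQ placement
+        * and the requested header length differs from the current one.
+        */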
+       large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
+                           ENA_ADMIN_PLACEMENT_POLICY_DEV;
+       large_llq_changed &=
+               new_llq_header_len != adapter->ena_dev->tx_max_header_size;
+
+       /* A check that the configuration is valid is done by the caller */
+       if (large_llq_changed) {
+               adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
+
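+               /* Applying the new LLQ configuration requires a full device
+                * reset, which re-runs the initialization where the LLQ
+                * settings are negotiated with the device.
+                */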
+               ena_destroy_device(adapter, false);
+               rc = ena_restore_device(adapter);
+       }
+
+       return dev_was_up && !rc ? ena_up(adapter) : rc;
 }
 
 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)