 struct ena_admin_acq_get_stats_resp get_resp;
 };
 
-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                       struct ena_common_mem_addr *ena_addr,
                                       dma_addr_t addr)
 {
        return 0;
 }
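
Note (not from the patch): ena_com_mem_addr_set() packs a 64-bit dma_addr_t
into the split low/high address fields of struct ena_common_mem_addr; the
body is elided in this excerpt. A minimal standalone sketch of that packing,
with invented toy_* names and assumed field widths:

    #include <stdint.h>

    struct toy_mem_addr {
            uint32_t mem_addr_low;   /* lower 32 bits of the DMA address */
            uint16_t mem_addr_high;  /* upper bits, assumed to fit in 16 */
    };

    static void toy_mem_addr_set(struct toy_mem_addr *a, uint64_t addr)
    {
            a->mem_addr_low  = (uint32_t)(addr & 0xffffffffu);
            a->mem_addr_high = (uint16_t)(addr >> 32);
    }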
 
-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
                                     struct ena_comp_ctx *comp_ctx)
 {
        comp_ctx->occupied = false;
        atomic_dec(&queue->outstanding_cmds);
 }
 
-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
 {
        size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
        struct ena_comp_ctx *comp_ctx;
 
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c

 #include "ena_eth_com.h"
 
-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        struct ena_com_io_cq *io_cq)
 {
        struct ena_eth_io_rx_cdesc_base *cdesc;
        return cdesc;
 }
 
-static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
        u16 tail_masked;
        u32 offset;
        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
                                                     u8 *bounce_buffer)
 {
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        return 0;
 }
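
Note (not from the patch): on the LLQ ("low latency queue") path, descriptors
are staged in a host-side bounce buffer and then, as the name
ena_com_write_bounce_buffer_to_dev() suggests, pushed to device memory in one
burst rather than written across the bus one descriptor at a time. A rough
standalone sketch of that flow; every name below is invented for illustration:

    #include <string.h>
    #include <stdint.h>

    struct toy_llq {
            uint8_t bounce[128];        /* host-memory staging buffer  */
            size_t used;                /* descriptor bytes staged     */
            volatile uint8_t *dev_mem;  /* mapped device-memory window */
    };

    static void toy_llq_flush(struct toy_llq *llq)
    {
            /* one burst write of everything staged so far */
            memcpy((void *)llq->dev_mem, llq->bounce, llq->used);
            llq->used = 0;
    }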
 
-static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
                                                 u8 *header_src,
                                                 u16 header_len)
 {
        return 0;
 }
 
-static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
 {
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        u8 *bounce_buffer;
        void *sq_desc;
        return sq_desc;
 }
 
-static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
 {
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        return 0;
 }
 
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return get_sq_desc_llq(io_sq);
        return get_sq_desc_regular_queue(io_sq);
 }
 
-static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
 {
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        return 0;
 }
 
-static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
 {
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return ena_com_sq_update_llq_tail(io_sq);
        return 0;
 }
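
Note (not from the patch): ena_com_sq_update_tail() dispatches on the
placement policy just like get_sq_desc() above; the elided host-memory branch
presumably advances the tail and handles wrap-around. A toy sketch of the
usual phase-bit scheme for a power-of-two ring, assumed rather than taken
from the driver:

    #include <stdint.h>

    struct toy_sq {
            uint16_t tail;     /* producer index, increments forever      */
            uint16_t q_depth;  /* ring size, must be a power of two       */
            uint8_t phase;     /* toggles on wrap so consumers spot reuse */
    };

    static void toy_sq_update_tail(struct toy_sq *sq)
    {
            sq->tail++;
            /* flip the phase bit when the masked index wraps to zero */
            if ((sq->tail & (sq->q_depth - 1)) == 0)
                    sq->phase ^= 1;
    }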
 
-static inline struct ena_eth_io_rx_cdesc_base *
+static struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
 {
        idx &= (io_cq->q_depth - 1);
        return (struct ena_eth_io_rx_cdesc_base *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                idx * io_cq->cdesc_entry_size_in_bytes);
 }
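
Note (not from the patch): the idx &= (io_cq->q_depth - 1) masking above is
the standard power-of-two ring trick: when the depth is a power of two,
masking by depth - 1 is the same as taking the index modulo the depth, with
no divide on the hot path. A self-contained check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t depth = 1024;  /* any power of two */

            for (uint32_t idx = 0; idx < 4u * depth; idx++)
                    assert((idx & (depth - 1u)) == (idx % depth));
            return 0;
    }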
 
-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
                                           u16 *first_cdesc_idx)
 {
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 count = 0;
        return count;
 }
 
-static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                                                        struct ena_com_tx_ctx *ena_tx_ctx)
 {
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
        return ena_com_sq_update_tail(io_sq);
 }
 
-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                                        struct ena_eth_io_rx_cdesc_base *cdesc)
 {
        ena_rx_ctx->l3_proto = cdesc->status &
                ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c

                ena_free_tx_resources(adapter, i);
 }
 
-static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
 {
        if (likely(req_id < rx_ring->ring_size))
                return 0;
                ena_free_rx_resources(adapter, i);
 }
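
Note (not from the patch): validate_rx_req_id() bounds-checks an index that
came from the device before it is used to look up a host buffer, and likely()
marks the in-range case as the hot path. For reference, the kernel's
likely()/unlikely() annotations are thin wrappers around a compiler builtin:

    /* roughly what the kernel's likely()/unlikely() expand to */
    #define toy_likely(x)   __builtin_expect(!!(x), 1)
    #define toy_unlikely(x) __builtin_expect(!!(x), 0)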
 
-static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
+static int ena_alloc_rx_page(struct ena_ring *rx_ring,
                                    struct ena_rx_buffer *rx_info, gfp_t gfp)
 {
        struct ena_com_buf *ena_buf;
                ena_free_rx_bufs(adapter, i);
 }
 
-static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
                                    struct ena_tx_buffer *tx_info)
 {
        struct ena_com_buf *ena_buf;
  * @ena_rx_ctx: received packet context/metadata
  * @skb: skb currently being received and modified
  */
-static inline void ena_rx_checksum(struct ena_ring *rx_ring,
+static void ena_rx_checksum(struct ena_ring *rx_ring,
                                   struct ena_com_rx_ctx *ena_rx_ctx,
                                   struct sk_buff *skb)
 {
 }
 
-inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
+void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
                                       struct ena_ring *tx_ring)
 {
        /* We apply adaptive moderation on Rx path only.
         * Tx uses static interrupt moderation.
         */
        rx_ring->per_napi_bytes = 0;
 }
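
Note (not from the patch): per the comment above, adaptive interrupt
moderation runs on the Rx path only, and the per-NAPI packet/byte counters
are cleared once an adjustment has been made. A toy illustration of the idea;
the thresholds are invented, not the driver's:

    /* pick an interrupt delay from traffic seen in the last NAPI cycle */
    static unsigned int toy_pick_interval_usecs(unsigned int pkts)
    {
            if (pkts < 100)
                    return 0;    /* latency-sensitive: fire immediately */
            if (pkts < 1000)
                    return 64;   /* moderate load                       */
            return 256;          /* bulk transfer: batch aggressively   */
    }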
 
-static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
 {
        struct ena_eth_io_intr_reg intr_reg;
        ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
 }
 
-static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
                                             struct ena_ring *rx_ring)
 {
        int cpu = get_cpu();
        pci_release_selected_regions(pdev, release_bars);
 }
 
-static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
 {
        llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
        llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;