struct ena_com_admin_sq *sq = &admin_queue->sq;
        u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 
-       sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
-                                        &sq->dma_addr, GFP_KERNEL);
+       sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
 
        if (!sq->entries) {
                netdev_err(ena_dev->net_device, "Memory allocation failed\n");
        struct ena_com_admin_cq *cq = &admin_queue->cq;
        u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 
-       cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
-                                        &cq->dma_addr, GFP_KERNEL);
+       cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
 
        if (!cq->entries) {
                netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 
        ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
        size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-       aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
-                                          &aenq->dma_addr, GFP_KERNEL);
+       aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
 
        if (!aenq->entries) {
                netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 
        aenq_caps = 0;
        aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
-       aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
-                     << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
-                    ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+       aenq_caps |=
+               (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+               ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
        writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 
        if (unlikely(!aenq_handlers)) {
-               netdev_err(ena_dev->net_device,
-                          "AENQ handlers pointer is NULL\n");
+               netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
                return -EINVAL;
        }
 
        }
 
        if (unlikely(!admin_queue->comp_ctx)) {
-               netdev_err(admin_queue->ena_dev->net_device,
-                          "Completion context is NULL\n");
+               netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
                return NULL;
        }
 
        if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
-               netdev_err(admin_queue->ena_dev->net_device,
-                          "Completion context is occupied\n");
+               netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
                return NULL;
        }
 
        /* In case of queue FULL */
        cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
-               netdev_dbg(admin_queue->ena_dev->net_device,
-                          "Admin queue is full.\n");
+               netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
                admin_queue->stats.out_of_space++;
                return ERR_PTR(-ENOSPC);
        }
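
[Note on the queue-full path above: get_comp_ctxt() reports failure two different ways — NULL for an invalid or occupied context, ERR_PTR(-ENOSPC) when the queue is merely full — so callers must use the ERR_PTR helpers rather than a NULL check alone. A minimal caller sketch; demo_reserve_ctx() is hypothetical and not part of this patch:]

	#include <linux/err.h>

	/* Sketch: unpack an ERR_PTR-encoded return from get_comp_ctxt(). */
	static int demo_reserve_ctx(struct ena_com_admin_queue *aq)
	{
		struct ena_comp_ctx *ctx = get_comp_ctxt(aq, 0, true);

		if (IS_ERR(ctx))		/* e.g. -ENOSPC: queue full */
			return PTR_ERR(ctx);
		if (!ctx)			/* NULL: invalid/occupied context */
			return -EINVAL;
		return 0;
	}
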
        struct ena_comp_ctx *comp_ctx;
        u16 i;
 
-       admin_queue->comp_ctx =
-               devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+       admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
        if (unlikely(!admin_queue->comp_ctx)) {
                netdev_err(ena_dev->net_device, "Memory allocation failed\n");
                return -ENOMEM;
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->desc_addr.virt_addr =
-                       dma_alloc_coherent(ena_dev->dmadev, size,
-                                          &io_sq->desc_addr.phys_addr,
+                       dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
                                           GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->desc_addr.virt_addr) {
                        io_sq->desc_addr.virt_addr =
                                dma_alloc_coherent(ena_dev->dmadev, size,
-                                                  &io_sq->desc_addr.phys_addr,
-                                                  GFP_KERNEL);
+                                                  &io_sq->desc_addr.phys_addr, GFP_KERNEL);
                }
 
                if (!io_sq->desc_addr.virt_addr) {
-                       netdev_err(ena_dev->net_device,
-                                  "Memory allocation failed\n");
+                       netdev_err(ena_dev->net_device, "Memory allocation failed\n");
                        return -ENOMEM;
                }
        }
 
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
-               io_sq->bounce_buf_ctrl.base_buffer =
-                       devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+               io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->bounce_buf_ctrl.base_buffer)
                        io_sq->bounce_buf_ctrl.base_buffer =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 
                if (!io_sq->bounce_buf_ctrl.base_buffer) {
-                       netdev_err(ena_dev->net_device,
-                                  "Bounce buffer memory allocation failed\n");
+                       netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
                        return -ENOMEM;
                }
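
[Both allocation sites above — the descriptor ring and the bounce buffer — follow the same NUMA pattern: save the device's node, bind it to the queue's node so the allocator prefers local memory, restore it, and retry with no node preference if the local attempt failed. A condensed sketch of the pattern, assuming GFP_KERNEL context; the helper name is illustrative:]

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Sketch: prefer allocating coherent memory on @node, fall back
	 * to any node if that fails. The device node is always restored.
	 */
	static void *alloc_coherent_on_node(struct device *dev, size_t size,
					    dma_addr_t *dma, int node)
	{
		int prev_node = dev_to_node(dev);
		void *vaddr;

		set_dev_node(dev, node);
		vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		set_dev_node(dev, prev_node);

		if (!vaddr)	/* local node exhausted: take any node */
			vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

		return vaddr;
	}
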
 
        prev_node = dev_to_node(ena_dev->dmadev);
        set_dev_node(ena_dev->dmadev, ctx->numa_node);
        io_cq->cdesc_addr.virt_addr =
-               dma_alloc_coherent(ena_dev->dmadev, size,
-                                  &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+               dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
        set_dev_node(ena_dev->dmadev, prev_node);
        if (!io_cq->cdesc_addr.virt_addr) {
                io_cq->cdesc_addr.virt_addr =
-                       dma_alloc_coherent(ena_dev->dmadev, size,
-                                          &io_cq->cdesc_addr.phys_addr,
+                       dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
                                           GFP_KERNEL);
        }
 
                                        u8 comp_status)
 {
        if (unlikely(comp_status != 0))
-               netdev_err(admin_queue->ena_dev->net_device,
-                          "Admin command failed[%u]\n", comp_status);
+               netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+                          comp_status);
 
        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
        }
 
        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
-               netdev_err(admin_queue->ena_dev->net_device,
-                          "Command was aborted\n");
+               netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                admin_queue->stats.aborted_cmd++;
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                goto err;
        }
 
-       WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
-            comp_ctx->status);
+       WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
 
        ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
                                            sizeof(resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to set LLQ configurations: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
 
        return ret;
 }
                        llq_default_cfg->llq_header_location;
        } else {
                netdev_err(ena_dev->net_device,
-                          "Invalid header location control, supported: 0x%x\n",
-                          supported_feat);
+                          "Invalid header location control, supported: 0x%x\n", supported_feat);
                return -EINVAL;
        }
 
 
                        netdev_err(ena_dev->net_device,
                                   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
-                                  llq_default_cfg->llq_stride_ctrl,
-                                  supported_feat, llq_info->desc_stride_ctrl);
+                                  llq_default_cfg->llq_stride_ctrl, supported_feat,
+                                  llq_info->desc_stride_ctrl);
                }
        } else {
                llq_info->desc_stride_ctrl = 0;
                        llq_info->desc_list_entry_size = 256;
                } else {
                        netdev_err(ena_dev->net_device,
-                                  "Invalid entry_size_ctrl, supported: 0x%x\n",
-                                  supported_feat);
+                                  "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
                        return -EINVAL;
                }
 
 
                netdev_err(ena_dev->net_device,
                           "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
-                          llq_default_cfg->llq_num_decs_before_header,
-                          supported_feat, llq_info->descs_num_before_header);
+                          llq_default_cfg->llq_num_decs_before_header, supported_feat,
+                          llq_info->descs_num_before_header);
        }
        /* Check for accelerated queue supported */
        llq_accel_mode_get = llq_features->accel_mode.u.get;
 
        rc = ena_com_set_llq(ena_dev);
        if (rc)
-               netdev_err(ena_dev->net_device,
-                          "Cannot set LLQ configuration: %d\n", rc);
+               netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
 
        return rc;
 }
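
[The LLQ hunks above all apply one negotiation rule: take the driver default when the device's supported_feat bitmap advertises it, otherwise fall back to some supported value with a warning, and fail only when nothing usable remains. A generic sketch of that rule — the function and its lowest-bit fallback are illustrative, not the driver's exact preference order:]

	/* Sketch: prefer @wanted if advertised in @supported, else fall
	 * back to the lowest advertised bit; -EINVAL if none remain.
	 */
	static int negotiate_feat(u32 supported, u32 wanted, u32 *chosen)
	{
		if (supported & wanted) {
			*chosen = wanted;
			return 0;
		}
		if (!supported)
			return -EINVAL;
		*chosen = supported & -supported;	/* lowest set bit */
		return 0;
	}
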
        int ret;
 
        wait_for_completion_timeout(&comp_ctx->wait_event,
-                                   usecs_to_jiffies(
-                                           admin_queue->completion_timeout));
+                                   usecs_to_jiffies(admin_queue->completion_timeout));
 
        /* In case the command wasn't completed, find out the root cause.
         * There might be 2 kinds of errors
                if (comp_ctx->status == ENA_CMD_COMPLETED) {
                        netdev_err(admin_queue->ena_dev->net_device,
                                   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
-                                  comp_ctx->cmd_opcode,
-                                  admin_queue->auto_polling ? "ON" : "OFF");
+                                  comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
                        /* Check if fallback to polling is enabled */
                        if (admin_queue->auto_polling)
                                admin_queue->polling = true;
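
[Context for the hunk above: each admin command owns a struct completion (comp_ctx->wait_event). When wait_for_completion_timeout() expires but the status already reads ENA_CMD_COMPLETED, the device did answer and only the MSI-X interrupt was lost, which is why auto_polling can rescue the queue. A reduced sketch of the wait side, assuming those ena_com.h fields; demo_wait_cmd() is hypothetical:]

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	/* Sketch: wait for one admin command; report a lost interrupt. */
	static int demo_wait_cmd(struct ena_comp_ctx *ctx, u32 timeout_us,
				 bool *lost_irq)
	{
		unsigned long left;

		left = wait_for_completion_timeout(&ctx->wait_event,
						   usecs_to_jiffies(timeout_us));
		if (left)
			return 0;	/* completed within the timeout */

		/* Timed out, yet marked completed: the IRQ never fired. */
		*lost_irq = (ctx->status == ENA_CMD_COMPLETED);
		return -ETIME;
	}
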
        if (unlikely(i == timeout)) {
                netdev_err(ena_dev->net_device,
                           "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
-                          mmio_read->seq_num, offset, read_resp->req_id,
-                          read_resp->reg_off);
+                          mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
                ret = ENA_MMIO_READ_TIMEOUT;
                goto err;
        }
 
        if (read_resp->reg_off != offset) {
-               netdev_err(ena_dev->net_device,
-                          "Read failure: wrong offset provided\n");
+               netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
                ret = ENA_MMIO_READ_TIMEOUT;
        } else {
                ret = read_resp->reg_val;
                                            sizeof(destroy_resp));
 
        if (unlikely(ret && (ret != -ENODEV)))
-               netdev_err(ena_dev->net_device,
-                          "Failed to destroy io sq error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
 
        return ret;
 }
        if (io_cq->cdesc_addr.virt_addr) {
                size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 
-               dma_free_coherent(ena_dev->dmadev, size,
-                                 io_cq->cdesc_addr.virt_addr,
+               dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
                                  io_cq->cdesc_addr.phys_addr);
 
                io_cq->cdesc_addr.virt_addr = NULL;
        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;
 
-               dma_free_coherent(ena_dev->dmadev, size,
-                                 io_sq->desc_addr.virt_addr,
+               dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
                                  io_sq->desc_addr.phys_addr);
 
                io_sq->desc_addr.virt_addr = NULL;
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
-                       netdev_err(ena_dev->net_device,
-                                  "Reg read timeout occurred\n");
+                       netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
                        return -ETIME;
                }
 
        int ret;
 
        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
-               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
-                          feature_id);
+               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
                return -EOPNOTSUPP;
        }
 
 
        if (unlikely(ret))
                netdev_err(ena_dev->net_device,
-                          "Failed to submit get_feature command %d error: %d\n",
-                          feature_id, ret);
+                          "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
 
        return ret;
 }
 {
        struct ena_rss *rss = &ena_dev->rss;
 
-       if (!ena_com_check_supported_feature_id(ena_dev,
-                                               ENA_ADMIN_RSS_HASH_FUNCTION))
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
                return -EOPNOTSUPP;
 
-       rss->hash_key =
-               dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-                                  &rss->hash_key_dma_addr, GFP_KERNEL);
+       rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+                                          &rss->hash_key_dma_addr, GFP_KERNEL);
 
        if (unlikely(!rss->hash_key))
                return -ENOMEM;
        struct ena_rss *rss = &ena_dev->rss;
 
        if (rss->hash_key)
-               dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-                                 rss->hash_key, rss->hash_key_dma_addr);
+               dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+                                 rss->hash_key_dma_addr);
        rss->hash_key = NULL;
 }
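
[Worth noting in the RSS hunks above: both the allocation and the free size the buffer as sizeof(*rss->hash_key), so the pair cannot drift apart if the key structure ever changes. The same idiom in isolation — struct demo_key and the helpers are hypothetical:]

	#include <linux/dma-mapping.h>

	struct demo_key { u8 key[40]; u16 len; };	/* hypothetical payload */

	/* Sketch: size both calls from the pointee so they stay in sync. */
	static struct demo_key *demo_key_alloc(struct device *dev, dma_addr_t *dma)
	{
		struct demo_key *key;

		key = dma_alloc_coherent(dev, sizeof(*key), dma, GFP_KERNEL);
		return key;
	}

	static void demo_key_free(struct device *dev, struct demo_key *key,
				  dma_addr_t dma)
	{
		dma_free_coherent(dev, sizeof(*key), key, dma);
	}
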
 
 {
        struct ena_rss *rss = &ena_dev->rss;
 
-       rss->hash_ctrl =
-               dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-                                  &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+       rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+                                           &rss->hash_ctrl_dma_addr, GFP_KERNEL);
 
        if (unlikely(!rss->hash_ctrl))
                return -ENOMEM;
        struct ena_rss *rss = &ena_dev->rss;
 
        if (rss->hash_ctrl)
-               dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-                                 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+               dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+                                 rss->hash_ctrl_dma_addr);
        rss->hash_ctrl = NULL;
 }
 
        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);
 
-       rss->rss_ind_tbl =
-               dma_alloc_coherent(ena_dev->dmadev, tbl_size,
-                                  &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+       rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+                                             GFP_KERNEL);
        if (unlikely(!rss->rss_ind_tbl))
                goto mem_err1;
 
        tbl_size = (1ULL << log_size) * sizeof(u16);
-       rss->host_rss_ind_tbl =
-               devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+       rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
        if (unlikely(!rss->host_rss_ind_tbl))
                goto mem_err2;
 
        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);
 
-       dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
-                         rss->rss_ind_tbl_dma_addr);
+       dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
        rss->rss_ind_tbl = NULL;
 mem_err1:
        rss->tbl_log_size = 0;
                                           &create_cmd.sq_ba,
                                           io_sq->desc_addr.phys_addr);
                if (unlikely(ret)) {
-                       netdev_err(ena_dev->net_device,
-                                  "Memory address set failed\n");
+                       netdev_err(ena_dev->net_device, "Memory address set failed\n");
                        return ret;
                }
        }
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
-               netdev_err(ena_dev->net_device,
-                          "Failed to create IO SQ. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
                return ret;
        }
 
                        cmd_completion.llq_descriptors_offset);
        }
 
-       netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
-                  io_sq->idx, io_sq->q_depth);
+       netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
 
        return ret;
 }
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
-               netdev_err(ena_dev->net_device,
-                          "Failed to create IO CQ. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
                return ret;
        }
 
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.numa_node_register_offset);
 
-       netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
-                  io_cq->idx, io_cq->q_depth);
+       netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
 
        return ret;
 }
                            struct ena_com_io_cq **io_cq)
 {
        if (qid >= ENA_TOTAL_NUM_QUEUES) {
-               netdev_err(ena_dev->net_device,
-                          "Invalid queue number %d but the max is %d\n", qid,
+               netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
                           ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }
        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-               ena_delay_exponential_backoff_us(exp++,
-                                                ena_dev->ena_min_poll_delay_us);
+               ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
                spin_lock_irqsave(&admin_queue->q_lock, flags);
        }
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
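
[The drain loop above drops q_lock before every delay so the completion path can acquire it and retire outstanding commands, while the delay grows exponentially from ena_min_poll_delay_us. The patch only calls ena_delay_exponential_backoff_us(); a plausible shape for such a helper — the clamp values here are assumptions, not taken from the driver:]

	#include <linux/delay.h>
	#include <linux/minmax.h>

	/* Sketch: sleep min_delay_us << exp, clamped, with usleep_range()
	 * slack so the timer subsystem can batch wakeups.
	 */
	static void demo_backoff_us(u32 exp, u32 min_delay_us)
	{
		u32 delay_us;

		exp = min(exp, 16U);			/* bound the shift */
		delay_us = min_t(u32, min_delay_us << exp, 5000U /* 5 ms cap */);
		usleep_range(delay_us, 2 * delay_us);
	}
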
                                            sizeof(destroy_resp));
 
        if (unlikely(ret && (ret != -ENODEV)))
-               netdev_err(ena_dev->net_device,
-                          "Failed to destroy IO CQ. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
 
        return ret;
 }
                                            sizeof(resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to config AENQ ret: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
 
        return ret;
 }
        netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
 
        if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
-               netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
-                          width);
+               netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
                return -EINVAL;
        }
 
        ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                          ENA_REGS_CONTROLLER_VERSION_OFF);
 
-       if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
-                    (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+       if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
                netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
                return -ETIME;
        }
 
        dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
-                (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
-                        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+                (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-       dev_info(ena_dev->dmadev,
-                "ENA controller version: %d.%d.%d implementation version %d\n",
+       dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
                         ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
 
        size = ADMIN_SQ_SIZE(admin_queue->q_depth);
        if (sq->entries)
-               dma_free_coherent(ena_dev->dmadev, size, sq->entries,
-                                 sq->dma_addr);
+               dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
        sq->entries = NULL;
 
        size = ADMIN_CQ_SIZE(admin_queue->q_depth);
        if (cq->entries)
-               dma_free_coherent(ena_dev->dmadev, size, cq->entries,
-                                 cq->dma_addr);
+               dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
        cq->entries = NULL;
 
        size = ADMIN_AENQ_SIZE(aenq->q_depth);
        if (ena_dev->aenq.entries)
-               dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
-                                 aenq->dma_addr);
+               dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
        aenq->entries = NULL;
 }
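
[The teardown above recomputes each ADMIN_*_SIZE() from the same q_depth used at allocation time: the DMA API requires dma_free_coherent() to receive exactly the size, CPU address, and DMA handle that the matching dma_alloc_coherent() returned. The contract in isolation; demo_ring() is illustrative:]

	#include <linux/dma-mapping.h>

	/* Sketch: coherent alloc/free must agree on (size, vaddr, dma). */
	static int demo_ring(struct device *dev, u16 depth, size_t entry_size)
	{
		size_t size = depth * entry_size;
		dma_addr_t dma;
		void *ring;

		ring = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;
		/* ... use the ring ... */
		dma_free_coherent(dev, size, ring, dma);	/* same triple */
		return 0;
	}
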
 
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 
        spin_lock_init(&mmio_read->lock);
-       mmio_read->read_resp =
-               dma_alloc_coherent(ena_dev->dmadev,
-                                  sizeof(*mmio_read->read_resp),
-                                  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+       mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+                                                 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
        if (unlikely(!mmio_read->read_resp))
                goto err;
 
        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
 
-       dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
-                         mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+       dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+                         mmio_read->read_resp_dma_addr);
 
        mmio_read->read_resp = NULL;
 }
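
[The mmio_read hunks above set up the register read-response path: the driver allocates a coherent response buffer, writes the register offset plus a sequence number to the device, and polls until the device DMA-writes back a matching req_id (see the timeout hunk earlier in this patch). A schematic of the polling side, using the response fields from ena_admin_defs.h but an illustrative loop bound:]

	#include <linux/compiler.h>
	#include <linux/delay.h>

	/* Sketch: spin until the device echoes our sequence number, then
	 * sanity-check the echoed offset before trusting reg_val.
	 */
	static u32 demo_mmio_poll(struct ena_admin_ena_mmio_req_read_less_resp *resp,
				  u16 seq_num, u16 offset, u32 timeout)
	{
		u32 i;

		for (i = 0; i < timeout; i++) {
			if (READ_ONCE(resp->req_id) == seq_num)
				break;
			udelay(1);
		}
		if (i == timeout || resp->reg_off != offset)
			return ENA_MMIO_READ_TIMEOUT;
		return resp->reg_val;
	}
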
        }
 
        if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
-               netdev_err(ena_dev->net_device,
-                          "Device isn't ready, abort com init\n");
+               netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
                return -ENODEV;
        }
 
        int ret;
 
        if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
-               netdev_err(ena_dev->net_device,
-                          "Qid (%d) is bigger than max num of queues (%d)\n",
+               netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
                           ctx->qid, ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }
 
        if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                /* header length is limited to 8 bits */
-               io_sq->tx_max_header_size =
-                       min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+               io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
 
        ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
        if (ret)
        struct ena_com_io_cq *io_cq;
 
        if (qid >= ENA_TOTAL_NUM_QUEUES) {
-               netdev_err(ena_dev->net_device,
-                          "Qid (%d) is bigger than max num of queues (%d)\n",
+               netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
                           qid, ENA_TOTAL_NUM_QUEUES);
                return;
        }
                if (rc)
                        return rc;
 
-               if (get_resp.u.max_queue_ext.version !=
-                   ENA_FEATURE_MAX_QUEUE_EXT_VER)
+               if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
                        return -EINVAL;
 
                memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
 
        if (!rc)
-               memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
-                      sizeof(get_resp.u.hw_hints));
+               memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
        else if (rc == -EOPNOTSUPP)
-               memset(&get_feat_ctx->hw_hints, 0x0,
-                      sizeof(get_feat_ctx->hw_hints));
+               memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
        else
                return rc;
 
        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
        if (!rc)
-               memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
-                      sizeof(get_resp.u.llq));
+               memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
        else if (rc == -EOPNOTSUPP)
                memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
        else
        aenq_common = &aenq_e->aenq_common_desc;
 
        /* Go over all the events */
-       while ((READ_ONCE(aenq_common->flags) &
-               ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+       while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Make sure the phase bit (ownership) is as expected before
                 * reading the rest of the descriptor.
                 */
                timestamp = (u64)aenq_common->timestamp_low |
                        ((u64)aenq_common->timestamp_high << 32);
 
-               netdev_dbg(ena_dev->net_device,
-                          "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+               netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
                           aenq_common->group, aenq_common->syndrome, timestamp);
 
                /* Handle specific event */
 
        /* write the aenq doorbell after all AENQ descriptors were read */
        mb();
-       writel_relaxed((u32)aenq->head,
-                      ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+       writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
 }
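
[The AENQ loop above is a phase-bit ring: descriptor ownership is a flag that flips each time the ring wraps, READ_ONCE() plus dma_rmb() ensure the flag is observed before the descriptor body, and the head doorbell is written once after the whole batch (hence the mb() before writel_relaxed()). A stripped-down consumer with an illustrative descriptor layout — struct demo_aenq_desc is hypothetical:]

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct demo_aenq_desc {
		u8 flags;		/* bit 0 carries the phase */
		u8 payload[15];
	};

	/* Sketch: consume descriptors whose phase matches, flip on wrap. */
	static u16 demo_drain(struct demo_aenq_desc *ring, u16 depth,
			      u16 head, u8 *phase)
	{
		u16 idx = head % depth;

		while ((READ_ONCE(ring[idx].flags) & 1) == *phase) {
			dma_rmb();	/* own the entry before reading it */
			/* ... dispatch ring[idx] ... */
			head++;
			idx = head % depth;
			if (!idx)
				*phase ^= 1;	/* ring wrapped */
		}
		return head;	/* caller writes head to the doorbell */
	}
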
 
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
        stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 
-       if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
-                    (cap == ENA_MMIO_READ_TIMEOUT))) {
+       if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
                netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
                return -ETIME;
        }
 
        if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
-               netdev_err(ena_dev->net_device,
-                          "Device isn't ready, can't reset device\n");
+               netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
                return -EINVAL;
        }
 
        rc = wait_for_reset_state(ena_dev, timeout,
                                  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
        if (rc != 0) {
-               netdev_err(ena_dev->net_device,
-                          "Reset indication didn't turn on\n");
+               netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
                return rc;
        }
 
        writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
        rc = wait_for_reset_state(ena_dev, timeout, 0);
        if (rc != 0) {
-               netdev_err(ena_dev->net_device,
-                          "Reset indication didn't turn off\n");
+               netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
                return rc;
        }
 
                                             sizeof(*get_resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to get stats. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
 
        return ret;
 }
        int ret;
 
        if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
-               netdev_err(ena_dev->net_device,
-                          "Capability %d isn't supported\n",
+               netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
                           ENA_ADMIN_ENI_STATS);
                return -EOPNOTSUPP;
        }
        int ret;
 
        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
-               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
-                          ENA_ADMIN_MTU);
+               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return -EOPNOTSUPP;
        }
 
                                            sizeof(resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to set mtu %d. error: %d\n", mtu, ret);
+               netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
 
        return ret;
 }
        ret = ena_com_get_feature(ena_dev, &resp,
                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
        if (unlikely(ret)) {
-               netdev_err(ena_dev->net_device,
-                          "Failed to get offload capabilities %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
                return ret;
        }
 
        struct ena_admin_get_feat_resp get_resp;
        int ret;
 
-       if (!ena_com_check_supported_feature_id(ena_dev,
-                                               ENA_ADMIN_RSS_HASH_FUNCTION)) {
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
                netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
                           ENA_ADMIN_RSS_HASH_FUNCTION);
                return -EOPNOTSUPP;
                return ret;
 
        if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
-               netdev_err(ena_dev->net_device,
-                          "Func hash %d isn't supported by device, abort\n",
+               netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
                           rss->hash_func);
                return -EOPNOTSUPP;
        }
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret)) {
-               netdev_err(ena_dev->net_device,
-                          "Failed to set hash function %d. error: %d\n",
+               netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
                           rss->hash_func, ret);
                return -EINVAL;
        }
                return rc;
 
        if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
-               netdev_err(ena_dev->net_device,
-                          "Flow hash function %d isn't supported\n", func);
+               netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
                return -EOPNOTSUPP;
        }
 
        if ((func == ENA_ADMIN_TOEPLITZ) && key) {
                if (key_len != sizeof(hash_key->key)) {
                        netdev_err(ena_dev->net_device,
-                                  "key len (%u) doesn't equal the supported size (%zu)\n",
-                                  key_len, sizeof(hash_key->key));
+                                  "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+                                  sizeof(hash_key->key));
                        return -EINVAL;
                }
                memcpy(hash_key->key, key, key_len);
        struct ena_admin_set_feat_resp resp;
        int ret;
 
-       if (!ena_com_check_supported_feature_id(ena_dev,
-                                               ENA_ADMIN_RSS_HASH_INPUT)) {
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
                netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
                           ENA_ADMIN_RSS_HASH_INPUT);
                return -EOPNOTSUPP;
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to set hash input. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
 
        return ret;
 }
        int rc;
 
        if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
-               netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
-                          proto);
+               netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
                return -EINVAL;
        }
 
        struct ena_admin_set_feat_resp resp;
        int ret;
 
-       if (!ena_com_check_supported_feature_id(
-                   ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
                netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
                           ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
                return -EOPNOTSUPP;
                                            sizeof(resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to set indirect table. error: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
 
        return ret;
 }
 {
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
-       host_attr->host_info =
-               dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
-                                  &host_attr->host_info_dma_addr, GFP_KERNEL);
+       host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+                                                 &host_attr->host_info_dma_addr, GFP_KERNEL);
        if (unlikely(!host_attr->host_info))
                return -ENOMEM;
 
 
        if (host_attr->debug_area_virt_addr) {
                dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
-                                 host_attr->debug_area_virt_addr,
-                                 host_attr->debug_area_dma_addr);
+                                 host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
                host_attr->debug_area_virt_addr = NULL;
        }
 }
                                            sizeof(resp));
 
        if (unlikely(ret))
-               netdev_err(ena_dev->net_device,
-                          "Failed to set host attributes: %d\n", ret);
+               netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
 
        return ret;
 }
                                                          u32 *intr_moder_interval)
 {
        if (!intr_delay_resolution) {
-               netdev_err(ena_dev->net_device,
-                          "Illegal interrupt delay granularity value\n");
+               netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
                return -EFAULT;
        }
 
 
        if (rc) {
                if (rc == -EOPNOTSUPP) {
-                       netdev_dbg(ena_dev->net_device,
-                                  "Feature %d isn't supported\n",
+                       netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
                                   ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        netdev_err(ena_dev->net_device,
-                                  "Failed to get interrupt moderation admin cmd. rc: %d\n",
-                                  rc);
+                                  "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
                }
 
                /* no moderation supported, disable adaptive support */
                (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
 
        if (unlikely(ena_dev->tx_max_header_size == 0)) {
-               netdev_err(ena_dev->net_device,
-                          "The size of the LLQ entry is smaller than needed\n");
+               netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
                return -EINVAL;
        }