tail_masked = admin_queue->sq.tail & queue_size_mask;
 
        /* In case of queue FULL */
-       cnt = atomic_read(&admin_queue->outstanding_cmds);
+       cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
                pr_debug("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
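
For reference, the FULL check is the usual count-vs-depth ring test; the new (u16) cast only narrows the atomic_read() result to the width of cnt, presumably so the comparison against the u16 q_depth does not trip implicit-truncation warnings. A minimal standalone sketch of the same occupancy scheme (struct and names are hypothetical, not the driver's types):

/* Standalone sketch (not the driver code): a ring with power-of-two depth
 * tracks occupancy with a counter, so "full" is simply count >= depth and
 * the tail index is kept in range with a mask instead of a modulo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint16_t depth;	/* must be a power of two */
	uint16_t count;	/* outstanding entries */
	uint16_t tail;	/* masked on every advance */
};

static bool ring_push(struct ring *r)
{
	if (r->count >= r->depth)	/* queue FULL, mirror of the cnt check */
		return false;
	r->tail = (r->tail + 1) & (r->depth - 1);
	r->count++;
	return true;
}

int main(void)
{
	struct ring r = { .depth = 4 };

	for (int i = 0; i < 6; i++)
		printf("push %d: %s\n", i, ring_push(&r) ? "ok" : "full");
	return 0;
}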
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        struct ena_comp_ctx *comp_ctx;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
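
The flags = 0 initializers added throughout this patch do not change behavior: spin_lock_irqsave() is a macro that assigns to flags before anything reads it, so the initializer exists purely to quiet compilers and static checkers that cannot see through the macro. The same fix recurs in several functions below. A kernel-style sketch of the pattern (example_lock and example_data are hypothetical names):

/* spin_lock_irqsave() stores the saved interrupt state into `flags`, so
 * the variable is always written before use; `= 0` is only a defensive
 * initializer for tools that do not expand the macro.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_data;

static int example_read(void)
{
	unsigned long flags = 0;	/* defensive init, as in this patch */
	int val;

	spin_lock_irqsave(&example_lock, flags);
	val = example_data;
	spin_unlock_irqrestore(&example_lock, flags);
	return val;
}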
 
        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
-       io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+       io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
 
        /* Go over all the completions */
        while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
-                       ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+               ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
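
The loop above is the usual phase-bit validity check: the device flips the phase bit it writes into each completion entry every time it wraps the ring, so an entry is fresh exactly when its phase matches the consumer's expected phase, and the dma_rmb() keeps the payload reads from being hoisted above that check. A standalone model of the scheme (types and names are illustrative, not the ENA ABI):

/* Phase-bit completion polling: the consumer starts expecting phase 1
 * (initialize with .phase = 1) and flips its expectation on every
 * wrap-around, mirroring the producer.
 */
#include <stdbool.h>
#include <stdint.h>

#define Q_DEPTH 8
#define PHASE_MASK 0x1

struct cqe {
	uint8_t flags;		/* bit 0 carries the phase */
	uint32_t payload;
};

struct consumer {
	struct cqe ring[Q_DEPTH];
	uint16_t head;
	uint8_t phase;		/* expected phase, starts at 1 */
};

/* Returns true and fills *out when a fresh completion is available. */
static bool consume_one(struct consumer *c, uint32_t *out)
{
	struct cqe *e = &c->ring[c->head];

	if ((e->flags & PHASE_MASK) != c->phase)
		return false;	/* entry still belongs to the previous lap */

	/* In the driver a dma_rmb() sits here so the payload is not read
	 * before the phase bit has been validated. */
	*out = e->payload;

	if (++c->head == Q_DEPTH) {	/* wrap: expect the opposite phase */
		c->head = 0;
		c->phase ^= 1;
	}
	return true;
}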
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
 {
-       unsigned long flags, timeout;
+       unsigned long flags = 0;
+       unsigned long timeout;
        int ret;
 
        timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
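
The polling variant computes an absolute deadline once, up front, then compares against jiffies inside the loop, so the time budget does not stretch as the loop iterates. A minimal kernel-style sketch of that shape (the done() callback and helper name are hypothetical):

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static bool example_poll(bool (*done)(void), u32 timeout_us)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us);

	while (!done()) {
		if (time_after(jiffies, timeout))
			return false;	/* budget exhausted */
		usleep_range(100, 200);	/* back off between polls */
	}
	return true;
}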
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        int ret;
 
        wait_for_completion_timeout(&comp_ctx->wait_event,
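
The interrupt-driven variant instead sleeps on a struct completion that the IRQ path signals; wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, so the timeout case is the zero check. A sketch under those assumptions (names are illustrative):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int example_wait(struct completion *done, u32 timeout_us)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, usecs_to_jiffies(timeout_us));
	if (left == 0)
		return -ETIME;	/* the IRQ handler never completed us */
	return 0;	/* completed with `left` jiffies to spare */
}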
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret, i;
-       unsigned long flags;
+       unsigned long flags = 0;
        u32 timeout = mmio_read->reg_read_to;
 
        might_sleep();
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-       unsigned long flags;
+       unsigned long flags = 0;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-       unsigned long flags;
+       unsigned long flags = 0;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
        }
 
        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-               pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+               pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
                        get_resp.u.aenq.supported_groups, groups_flag);
                return -EOPNOTSUPP;
        }
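
The condition is the standard subset test on bitmasks: the request passes only if every asked bit is also set in the supported mask. The added 0x prefixes make the hex radix of both masks explicit in the log. A tiny standalone illustration:

/* A request is acceptable only if it is a subset of the supported set,
 * i.e. (supported & asked) == asked.
 */
#include <stdbool.h>
#include <stdint.h>

static bool groups_supported(uint32_t supported, uint32_t asked)
{
	return (supported & asked) == asked;
}
/* groups_supported(0x7, 0x5) -> true; groups_supported(0x7, 0x9) -> false */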
                                    sizeof(*mmio_read->read_resp),
                                    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
        if (unlikely(!mmio_read->read_resp))
-               return -ENOMEM;
+               goto err;
 
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
        mmio_read->readless_supported = true;
 
        return 0;
+
+err:
+       return -ENOMEM;
 }
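
Routing the failure through a label rather than returning inline buys nothing today, but it gives later error handling a place to land once the function acquires more resources. A sketch of how the pattern typically grows (the function, names, and second allocation are hypothetical):

/* With two resources the error path unwinds in reverse order of setup. */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_init(struct device *dev, void **cpu, dma_addr_t *dma,
			void **scratch)
{
	*cpu = dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
	if (unlikely(!*cpu))
		goto err;

	*scratch = kzalloc(64, GFP_KERNEL);
	if (unlikely(!*scratch))
		goto err_free_coherent;

	return 0;

err_free_coherent:
	dma_free_coherent(dev, PAGE_SIZE, *cpu, *dma);
err:
	return -ENOMEM;
}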
 
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq  = &dev->aenq;
+       unsigned long long timestamp;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;
                 */
                dma_rmb();
 
+               timestamp =
+                       (unsigned long long)aenq_common->timestamp_low |
+                       ((unsigned long long)aenq_common->timestamp_high << 32);
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
-                        aenq_common->group, aenq_common->syndrom,
-                        (u64)aenq_common->timestamp_low +
-                                ((u64)aenq_common->timestamp_high << 32));
+                        aenq_common->group, aenq_common->syndrom, timestamp);
 
                /* Handle specific event*/
                handler_cb = ena_com_get_specific_aenq_cb(dev,
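
Widening each half before combining is the important part of the timestamp change: shifting a 32-bit value left by 32 is undefined behaviour in C, so the cast on timestamp_high must happen before the shift. OR and the old addition are equivalent here because the two halves occupy disjoint bits, but OR states the intent better. A standalone illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t make_ts(uint32_t low, uint32_t high)
{
	/* widen first, then shift; the low word widens implicitly */
	return (uint64_t)low | ((uint64_t)high << 32);
}

int main(void)
{
	/* expect 0x0000000100000002 */
	printf("0x%016" PRIx64 "\n", make_ts(0x2, 0x1));
	return 0;
}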
        if (unlikely(!host_attr->host_info))
                return -ENOMEM;
 
-       host_attr->host_info->ena_spec_version =
-               ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+       host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+               ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
                (ENA_COMMON_SPEC_VERSION_MINOR));
 
        return 0;
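
The spec version is packed by shifting the major number into its register field and ORing in the minor. A standalone sketch of the pack/unpack round trip (the shift value here is illustrative, not necessarily the real register layout):

#include <stdint.h>
#include <stdio.h>

#define MAJOR_SHIFT 8	/* illustrative field width */

static uint32_t pack_version(uint32_t major, uint32_t minor)
{
	return (major << MAJOR_SHIFT) | minor;
}

int main(void)
{
	uint32_t v = pack_version(2, 0);

	printf("packed: 0x%x major: %u minor: %u\n",
	       v, v >> MAJOR_SHIFT, v & ((1u << MAJOR_SHIFT) - 1));
	return 0;
}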