- * @post_cq: update cq tail
  */
 static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
-                                                      struct i40iw_cq_poll_info *info,
-                                                      bool post_cq)
+                                                      struct i40iw_cq_poll_info *info)
 {
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;
 
-               if (post_cq) {
-                       I40IW_RING_MOVE_TAIL(cq->cq_ring);
-                       set_64bit_val(cq->shadow_area, 0,
-                                     I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
-               }
+               I40IW_RING_MOVE_TAIL(cq->cq_ring);
+               set_64bit_val(cq->shadow_area, 0,
+                             I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        } else {
                if (info->is_srq)
                        return ret_code;
 
        void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
                                           enum i40iw_completion_notify);
        enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
-                                                       struct i40iw_cq_poll_info *, bool);
+                                                       struct i40iw_cq_poll_info *);
        enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
        void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
 };
 
 
        spin_lock_irqsave(&iwcq->lock, flags);
        while (cqe_count < num_entries) {
-               ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+               ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
                if (ret == I40IW_ERR_QUEUE_EMPTY) {
                        break;
                } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {