return new_pkts;
 }
 
+/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current read index in the descriptor ring.
+ * @desc_used: Count of descriptors consumed so far.
+ *
+ * Unmap the DMA buffer of the current packet and clear its page reference,
+ * then advance the ring read index (wrapping at the end of the ring) and the
+ * count of descriptors used.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+                             struct octep_rx_buffer *buff_info,
+                             u32 *read_idx, u32 *desc_used)
+{
+       dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+                      PAGE_SIZE, DMA_FROM_DEVICE);
+       buff_info->page = NULL;
+       (*read_idx)++;
+       (*desc_used)++;
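+       /* Wrap the read index when it reaches the end of the descriptor ring */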
+       if (*read_idx == oq->max_count)
+               *read_idx = 0;
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
        desc_used = 0;
        for (pkt = 0; pkt < pkts_to_process; pkt++) {
                buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
-               dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
                resp_hw = page_address(buff_info->page);
-               buff_info->page = NULL;
 
                /* Swap the length field that is in Big-Endian to CPU */
                buff_info->len = be64_to_cpu(resp_hw->length);
                        data_offset = OCTEP_OQ_RESP_HW_SIZE;
                        rx_ol_flags = 0;
                }
+
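+               /* Unmap the head buffer and advance to the next ring descriptor */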
+               octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
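+               /* Build the SKB over the page and reserve the response header(s) */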
+               skb = build_skb((void *)resp_hw, PAGE_SIZE);
+               skb_reserve(skb, data_offset);
+
                rx_bytes += buff_info->len;
 
                if (buff_info->len <= oq->max_single_buffer_size) {
-                       skb = build_skb((void *)resp_hw, PAGE_SIZE);
-                       skb_reserve(skb, data_offset);
                        skb_put(skb, buff_info->len);
-                       read_idx++;
-                       desc_used++;
-                       if (read_idx == oq->max_count)
-                               read_idx = 0;
                } else {
                        struct skb_shared_info *shinfo;
                        u16 data_len;
 
-                       skb = build_skb((void *)resp_hw, PAGE_SIZE);
-                       skb_reserve(skb, data_offset);
                        /* Head fragment includes response header(s);
                          * subsequent fragments contain only data.
                         */
                        skb_put(skb, oq->max_single_buffer_size);
-                       read_idx++;
-                       desc_used++;
-                       if (read_idx == oq->max_count)
-                               read_idx = 0;
-
                        shinfo = skb_shinfo(skb);
                        data_len = buff_info->len - oq->max_single_buffer_size;
                        while (data_len) {
-                               dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-                                              PAGE_SIZE, DMA_FROM_DEVICE);
                                buff_info = (struct octep_rx_buffer *)
                                            &oq->buff_info[read_idx];
                                if (data_len < oq->buffer_size) {
                                                buff_info->page, 0,
                                                buff_info->len,
                                                buff_info->len);
-                               buff_info->page = NULL;
-                               read_idx++;
-                               desc_used++;
-                               if (read_idx == oq->max_count)
-                                       read_idx = 0;
+
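+                               /* Unmap the fragment and move to the next descriptor */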
+                               octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
                        }
                }