rx->data.page_info = NULL;
 }
 
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+       ctx->skb_head = NULL;
+       ctx->skb_tail = NULL;
+       ctx->total_size = 0;
+       ctx->frag_cnt = 0;
+       ctx->drop_pkt = false;
+}
+
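+/* Restore the GQI ring's software state (seqno, cnt, skb context) to its
+ * freshly-allocated defaults.
+ */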
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+       rx->desc.seqno = 1;
+       rx->cnt = 0;
+       gve_rx_ctx_clear(&rx->ctx);
+}
+
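+/* Zero the descriptor ring and queue resources and reinitialize the software
+ * state so a stopped GQI ring can later be restarted without reallocation.
+ */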
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       const u32 slots = priv->rx_desc_cnt;
+       size_t size;
+
+       /* Reset desc ring */
+       if (rx->desc.desc_ring) {
+               size = slots * sizeof(rx->desc.desc_ring[0]);
+               memset(rx->desc.desc_ring, 0, size);
+       }
+
+       /* Reset q_resources */
+       if (rx->q_resources)
+               memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+       gve_rx_init_ring_state_gqi(rx);
+}
+
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 {
        int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
 
        gve_remove_napi(priv, ntfy_idx);
        gve_rx_remove_from_block(priv, idx);
+       gve_rx_reset_ring_gqi(priv, idx);
 }
 
 static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
        return err;
 }
 
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
-       ctx->skb_head = NULL;
-       ctx->skb_tail = NULL;
-       ctx->total_size = 0;
-       ctx->frag_cnt = 0;
-       ctx->drop_pkt = false;
-}
-
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
 {
        int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                err = -ENOMEM;
                goto abort_with_q_resources;
        }
-       rx->cnt = 0;
        rx->db_threshold = slots / 2;
-       rx->desc.seqno = 1;
+       gve_rx_init_ring_state_gqi(rx);
 
        rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
-       gve_rx_ctx_clear(&rx->ctx);
 
        }
 }
 
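+/* Restore the DQO buffer and completion queue software state and rebuild the
+ * free list of buffer IDs, as after a fresh allocation.
+ */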
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+                                      const u32 buffer_queue_slots,
+                                      const u32 completion_queue_slots)
+{
+       int i;
+
+       /* Set buffer queue state */
+       rx->dqo.bufq.mask = buffer_queue_slots - 1;
+       rx->dqo.bufq.head = 0;
+       rx->dqo.bufq.tail = 0;
+
+       /* Set completion queue state */
+       rx->dqo.complq.num_free_slots = completion_queue_slots;
+       rx->dqo.complq.mask = completion_queue_slots - 1;
+       rx->dqo.complq.cur_gen_bit = 0;
+       rx->dqo.complq.head = 0;
+
+       /* Set RX SKB context */
+       rx->ctx.skb_head = NULL;
+       rx->ctx.skb_tail = NULL;
+
+       /* Set up linked list of buffer IDs */
+       if (rx->dqo.buf_states) {
+               for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+                       rx->dqo.buf_states[i].next = i + 1;
+               rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+       }
+
+       rx->dqo.free_buf_states = 0;
+       rx->dqo.recycled_buf_states.head = -1;
+       rx->dqo.recycled_buf_states.tail = -1;
+       rx->dqo.used_buf_states.head = -1;
+       rx->dqo.used_buf_states.tail = -1;
+}
+
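+/* Zero the descriptor rings and queue resources, free any pages still held
+ * by the buffer states, and reinitialize the software state so a stopped DQO
+ * ring can later be restarted without reallocation.
+ */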
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       size_t size;
+       int i;
+
+       const u32 buffer_queue_slots = priv->rx_desc_cnt;
+       const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+       /* Reset buffer queue */
+       if (rx->dqo.bufq.desc_ring) {
+               size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+                       buffer_queue_slots;
+               memset(rx->dqo.bufq.desc_ring, 0, size);
+       }
+
+       /* Reset completion queue */
+       if (rx->dqo.complq.desc_ring) {
+               size = sizeof(rx->dqo.complq.desc_ring[0]) *
+                       completion_queue_slots;
+               memset(rx->dqo.complq.desc_ring, 0, size);
+       }
+
+       /* Reset q_resources */
+       if (rx->q_resources)
+               memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+       /* Reset buf states */
+       if (rx->dqo.buf_states) {
+               for (i = 0; i < rx->dqo.num_buf_states; i++) {
+                       struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+                       if (bs->page_info.page)
+                               gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+               }
+       }
+
+       gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+                                  completion_queue_slots);
+}
+
 void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 {
        int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
 
        gve_remove_napi(priv, ntfy_idx);
        gve_rx_remove_from_block(priv, idx);
+       gve_rx_reset_ring_dqo(priv, idx);
 }
 
 static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
        netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+                                const u32 buf_count)
 {
        struct device *hdev = &priv->pdev->dev;
-       int buf_count = rx->dqo.bufq.mask + 1;
 
        rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
                                                   &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
 {
        struct device *hdev = &priv->pdev->dev;
        size_t size;
-       int i;
 
        const u32 buffer_queue_slots = cfg->ring_size;
        const u32 completion_queue_slots = cfg->ring_size;
        memset(rx, 0, sizeof(*rx));
        rx->gve = priv;
        rx->q_num = idx;
-       rx->dqo.bufq.mask = buffer_queue_slots - 1;
-       rx->dqo.complq.num_free_slots = completion_queue_slots;
-       rx->dqo.complq.mask = completion_queue_slots - 1;
-       rx->ctx.skb_head = NULL;
-       rx->ctx.skb_tail = NULL;
 
        rx->dqo.num_buf_states = cfg->raw_addressing ?
                min_t(s16, S16_MAX, buffer_queue_slots * 4) :
 
        /* Allocate header buffers for header-split */
        if (cfg->enable_header_split)
-               if (gve_rx_alloc_hdr_bufs(priv, rx))
+               if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
                        goto err;
 
-       /* Set up linked list of buffer IDs */
-       for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
-               rx->dqo.buf_states[i].next = i + 1;
-
-       rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
-       rx->dqo.recycled_buf_states.head = -1;
-       rx->dqo.recycled_buf_states.tail = -1;
-       rx->dqo.used_buf_states.head = -1;
-       rx->dqo.used_buf_states.tail = -1;
-
        /* Allocate RX completion queue */
        size = sizeof(rx->dqo.complq.desc_ring[0]) *
                completion_queue_slots;
        if (!rx->q_resources)
                goto err;
 
+       gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+                                  completion_queue_slots);
+
        return 0;
 
 err: