 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
 
 #define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
 
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
        u16 tx_desc_cnt; /* num desc per ring */
        u16 rx_desc_cnt; /* num desc per ring */
        u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
-       u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
-       u16 rx_data_slot_cnt; /* rx buffer length */
        u64 max_registered_pages;
        u64 num_registered_pages; /* num pages registered with NIC */
        struct bpf_prog *xdp_prog; /* XDP BPF program */
        return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
+{
+       /* For DQO, the page count must be larger than the ring size to allow
+        * for out-of-order completions. Set it to twice the ring size.
+        */
+       return 2 * rx_desc_cnt;
+}
+
 /* Returns a pointer to the next available tx qpl in the list of qpls */
 static inline
 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
 
        if (dev_op_dqo_qpl) {
                priv->tx_pages_per_qpl =
                        be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
-               priv->rx_pages_per_qpl =
-                       be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
                if (priv->tx_pages_per_qpl == 0)
                        priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
-               if (priv->rx_pages_per_qpl == 0)
-                       priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
        }
 
        if (dev_op_buffer_sizes &&
        mac = descriptor->mac;
        dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
        priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
-       priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
-       if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
-               dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
-                       priv->rx_data_slot_cnt);
-               priv->rx_desc_cnt = priv->rx_data_slot_cnt;
-       }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
        gve_enable_supported_features(priv, supported_features_mask,
 
        return err;
 }
 
-static int gve_alloc_qpls(struct gve_priv *priv,
-                         struct gve_qpls_alloc_cfg *cfg)
+static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
+                         struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
        int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
        int rx_start_id, tx_num_qpls, rx_num_qpls;
        struct gve_queue_page_list *qpls;
-       int page_count;
+       u32 page_count;
        int err;
 
        if (cfg->raw_addressing)
        /* For GQI_QPL number of pages allocated have 1:1 relationship with
         * number of descriptors. For DQO, number of pages required are
         * more than descriptors (because of out of order completions).
+        * Set it to twice the number of descriptors.
         */
-       page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+       if (cfg->is_gqi)
+               page_count = rx_alloc_cfg->ring_size;
+       else
+               page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
        rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
        err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
        if (err)
 {
        int err;
 
-       err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+       err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
        if (err) {
                netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
                return err;
 
                                 int idx)
 {
        struct device *hdev = &priv->pdev->dev;
-       u32 slots = priv->rx_data_slot_cnt;
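+       /* GQI uses one data slot per descriptor, so size the slot array
+        * directly from the configured ring size.
+        */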
+       u32 slots = cfg->ring_size;
        int filled_pages;
        size_t bytes;
        int err;
 
                        return err;
        } else {
                idx = rx->dqo.next_qpl_page_idx;
-               if (idx >= priv->rx_pages_per_qpl) {
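+               /* The DQO QPL holds twice the ring size in pages; indexing
+                * past that count means the QPL pages are exhausted.
+                */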
+               if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
                        net_err_ratelimited("%s: Out of QPL pages\n",
                                            priv->dev->name);
                        return -ENOMEM;
 
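+       /* QPL mode tracks one buffer state per QPL page; raw addressing
+        * allocates four buffer states per buffer queue slot, capped at
+        * S16_MAX.
+        */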
        rx->dqo.num_buf_states = cfg->raw_addressing ?
                min_t(s16, S16_MAX, buffer_queue_slots * 4) :
-               priv->rx_pages_per_qpl;
+               gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
        rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
                                      sizeof(rx->dqo.buf_states[0]),
                                      GFP_KERNEL);