        return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
 }
 
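+/* Length of a Modify Ring option that carries no min ring sizes: the
+ * supported features mask (4 bytes) plus the max RX/TX ring sizes
+ * (2 bytes each).
+ */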
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE     8
+
 static
 void gve_parse_device_option(struct gve_priv *priv,
                             struct gve_device_descriptor *device_descriptor,
                             struct gve_device_option_dqo_rda **dev_op_dqo_rda,
                             struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
                             struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
-                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+                            struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
        u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
        u16 option_length = be16_to_cpu(option->option_length);
                                 "Buffer Sizes");
                *dev_op_buffer_sizes = (void *)(option + 1);
                break;
+       case GVE_DEV_OPT_ID_MODIFY_RING:
+               if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_modify_ring)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+               }
+
+               *dev_op_modify_ring = (void *)(option + 1);
+
+               /* device has not provided min ring size */
+               if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+                       priv->default_min_ring_size = true;
+               break;
        default:
                /* If we don't recognize the option just continue
                 * without doing anything.
                           struct gve_device_option_dqo_rda **dev_op_dqo_rda,
                           struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
                           struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
-                          struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+                          struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+                          struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
        const int num_options = be16_to_cpu(descriptor->num_device_options);
        struct gve_device_option *dev_opt;
                gve_parse_device_option(priv, descriptor, dev_opt,
                                        dev_op_gqi_rda, dev_op_gqi_qpl,
                                        dev_op_dqo_rda, dev_op_jumbo_frames,
-                                       dev_op_dqo_qpl, dev_op_buffer_sizes);
+                                       dev_op_dqo_qpl, dev_op_buffer_sizes,
+                                       dev_op_modify_ring);
                dev_opt = next_opt;
        }
 
 {
        priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
        priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+
+       /* Default ring size ranges: the descriptor counts serve as both
+        * min and max unless the Modify Ring device option overrides them.
+        */
+       priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+       priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+       priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+       priv->min_rx_desc_cnt = priv->rx_desc_cnt;
 }
 
 static void gve_enable_supported_features(struct gve_priv *priv,
                                          const struct gve_device_option_dqo_qpl
                                          *dev_op_dqo_qpl,
                                          const struct gve_device_option_buffer_sizes
-                                         *dev_op_buffer_sizes)
+                                         *dev_op_buffer_sizes,
+                                         const struct gve_device_option_modify_ring
+                                         *dev_op_modify_ring)
 {
        /* Before control reaches this point, the page-size-capped max MTU from
         * the gve_device_descriptor field has already been stored in
                         "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
                         priv->max_rx_buffer_size, priv->header_buf_size);
        }
+
+       /* Read and store ring size ranges given by device */
+       if (dev_op_modify_ring &&
+           (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+               priv->modify_ring_size_enabled = true;
+
+               /* Max ring size for DQO QPL must not be overwritten due to the device limit */
+               if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+                       priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+                       priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+               }
+               if (priv->default_min_ring_size) {
+                       /* If device hasn't provided minimums, use default minimums */
+                       priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+                       priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+               } else {
+                       priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+                       priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+               }
+       }
 }
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
        struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
        struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+       struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
        struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
        struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
        struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
 
        err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
                                         &dev_op_gqi_qpl, &dev_op_dqo_rda,
-                                        &dev_op_jumbo_frames,
-                                        &dev_op_dqo_qpl,
-                                        &dev_op_buffer_sizes);
+                                        &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+                                        &dev_op_buffer_sizes,
+                                        &dev_op_modify_ring);
        if (err)
                goto free_device_descriptor;
 
 
        gve_enable_supported_features(priv, supported_features_mask,
                                      dev_op_jumbo_frames, dev_op_dqo_qpl,
-                                     dev_op_buffer_sizes);
+                                     dev_op_buffer_sizes, dev_op_modify_ring);
 
 free_device_descriptor:
        dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
 
 
 static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
 
+struct gve_device_option_modify_ring {
+       __be32 supported_features_mask;
+       __be16 max_rx_ring_size;
+       __be16 max_tx_ring_size;
+       __be16 min_rx_ring_size;
+       __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
  *       mapped and read/updated by the device.
  *
  * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
  *       the device for read/write and data is copied from/to SKBs.
  */
 enum gve_dev_opt_id {
-       GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
-       GVE_DEV_OPT_ID_GQI_RDA = 0x2,
-       GVE_DEV_OPT_ID_GQI_QPL = 0x3,
-       GVE_DEV_OPT_ID_DQO_RDA = 0x4,
-       GVE_DEV_OPT_ID_DQO_QPL = 0x7,
-       GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
-       GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+       GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING       = 0x1,
+       GVE_DEV_OPT_ID_GQI_RDA                  = 0x2,
+       GVE_DEV_OPT_ID_GQI_QPL                  = 0x3,
+       GVE_DEV_OPT_ID_DQO_RDA                  = 0x4,
+       GVE_DEV_OPT_ID_MODIFY_RING              = 0x6,
+       GVE_DEV_OPT_ID_DQO_QPL                  = 0x7,
+       GVE_DEV_OPT_ID_JUMBO_FRAMES             = 0x8,
+       GVE_DEV_OPT_ID_BUFFER_SIZES             = 0xa,
 };
 
 enum gve_dev_opt_req_feat_mask {
-       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
-       GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING    = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA               = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL               = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA               = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES          = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL               = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES          = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING           = 0x0,
 };
 
 enum gve_sup_feature_mask {
-       GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
-       GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+       GVE_SUP_MODIFY_RING_MASK        = 1 << 0,
+       GVE_SUP_JUMBO_FRAMES_MASK       = 1 << 2,
+       GVE_SUP_BUFFER_SIZES_MASK       = 1 << 4,
 };
 
 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0