 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/etherdevice.h>
 "Expected: length=%d, feature_mask=%x.\n" \
 "Actual: length=%d, feature_mask=%x.\n"
 
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
                                              struct gve_device_option *option)
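
The body of gve_get_next_option() is elided from this hunk. For reference, a minimal sketch of the bounds check it performs, assuming descriptor->total_length covers the descriptor header plus all options; returning NULL signals the "options exceed device_descriptor's total length" error handled by the caller below:

	static
	struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
						      struct gve_device_option *option)
	{
		void *option_end, *descriptor_end;

		/* This option ends after its 8-byte header plus its
		 * variable-length payload.
		 */
		option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
		/* total_length bounds everything the device wrote. */
		descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

		/* NULL tells the caller the options overran the descriptor. */
		return option_end > descriptor_end ? NULL
						   : (struct gve_device_option *)option_end;
	}
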
 static
 void gve_parse_device_option(struct gve_priv *priv,
                             struct gve_device_descriptor *device_descriptor,
-                            struct gve_device_option *option)
+                            struct gve_device_option *option,
+                            struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                            struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                            struct gve_device_option_dqo_rda **dev_op_dqo_rda)
 {
+       u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
        u16 option_length = be16_to_cpu(option->option_length);
        u16 option_id = be16_to_cpu(option->option_id);
 
+       /* If the length or feature mask doesn't match, continue without
+        * enabling the feature.
+        */
        switch (option_id) {
-       case GVE_DEV_OPT_ID_RAW_ADDRESSING:
-               /* If the length or feature mask doesn't match,
-                * continue without enabling the feature.
-                */
-               if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
-                   option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
-                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
-                                GVE_DEV_OPT_LEN_RAW_ADDRESSING,
-                                cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
-                                option_length, option->feat_mask);
-                       priv->raw_addressing = 0;
-               } else {
-                       dev_info(&priv->pdev->dev,
-                                "Raw addressing device option enabled.\n");
-                       priv->raw_addressing = 1;
+       case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+               if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "Raw Addressing",
+                                GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               dev_info(&priv->pdev->dev,
+                        "Gqi raw addressing device option enabled.\n");
+               priv->raw_addressing = 1;
+               break;
+       case GVE_DEV_OPT_ID_GQI_RDA:
+               if (option_length < sizeof(**dev_op_gqi_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+               }
+               *dev_op_gqi_rda = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_GQI_QPL:
+               if (option_length < sizeof(**dev_op_gqi_qpl) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_qpl)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+               }
+               *dev_op_gqi_qpl = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_DQO_RDA:
+               if (option_length < sizeof(**dev_op_dqo_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_dqo_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
                }
+               *dev_op_dqo_rda = (void *)(option + 1);
                break;
        default:
                /* If we don't recognize the option just continue
                 * without doing anything.
                 */
                dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
                        option_id);
        }
 }
 
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+                          struct gve_device_descriptor *descriptor,
+                          struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                          struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                          struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+       const int num_options = be16_to_cpu(descriptor->num_device_options);
+       struct gve_device_option *dev_opt;
+       int i;
+
+       /* The options struct directly follows the device descriptor. */
+       dev_opt = (void *)(descriptor + 1);
+       for (i = 0; i < num_options; i++) {
+               struct gve_device_option *next_opt;
+
+               next_opt = gve_get_next_option(descriptor, dev_opt);
+               if (!next_opt) {
+                       dev_err(&priv->dev->dev,
+                               "options exceed device_descriptor's total length.\n");
+                       return -EINVAL;
+               }
+
+               gve_parse_device_option(priv, descriptor, dev_opt,
+                                       dev_op_gqi_rda, dev_op_gqi_qpl,
+                                       dev_op_dqo_rda);
+               dev_opt = next_opt;
+       }
+
+       return 0;
+}
+
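
For orientation, the layout gve_process_device_options() walks, inferred from the pointer arithmetic above ((void *)(descriptor + 1) and (void *)(option + 1)); everything sits in the single DMA'd descriptor page:

	/*
	 * +------------------------------------------+ <- descriptor
	 * | struct gve_device_descriptor             |    (total_length spans it all)
	 * +------------------------------------------+ <- dev_opt = descriptor + 1
	 * | struct gve_device_option (header)        |    (id, length, req feat mask)
	 * +------------------------------------------+ <- payload = option + 1
	 * | payload, option_length bytes             |    (e.g. ..._gqi_rda struct)
	 * +------------------------------------------+ <- next option header
	 * | ...                                      |
	 * +------------------------------------------+ <- descriptor + total_length
	 */
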
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 {
        priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+       struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+       struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+       struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
        struct gve_device_descriptor *descriptor;
-       struct gve_device_option *dev_opt;
        union gve_adminq_command cmd;
        dma_addr_t descriptor_bus;
-       u16 num_options;
        int err = 0;
        u8 *mac;
        u16 mtu;
-       int i;
 
        memset(&cmd, 0, sizeof(cmd));
        descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
        if (err)
                goto free_device_descriptor;
 
+       priv->raw_addressing = 0;
+       err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+                                        &dev_op_gqi_qpl, &dev_op_dqo_rda);
+       if (err)
+               goto free_device_descriptor;
+
+       /* If the GQI_RAW_ADDRESSING option already selected GQI RDA
+        * (raw_addressing == 1), keep that format. Otherwise choose the queue
+        * format in priority order: DqoRda, GqiRda, GqiQpl. Use GqiQpl as the
+        * default.
+        */
+       if (priv->raw_addressing == 1) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+       } else if (dev_op_dqo_rda) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with DQO RDA queue format.\n");
+       } else if (dev_op_gqi_rda) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+               priv->raw_addressing = 1;
+       } else {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI QPL queue format.\n");
+       }
+
        priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
        if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
                dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
                priv->rx_desc_cnt = priv->rx_data_slot_cnt;
        }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
-       dev_opt = (void *)(descriptor + 1);
-
-       num_options = be16_to_cpu(descriptor->num_device_options);
-       for (i = 0; i < num_options; i++) {
-               struct gve_device_option *next_opt;
-
-               next_opt = gve_get_next_option(descriptor, dev_opt);
-               if (!next_opt) {
-                       dev_err(&priv->dev->dev,
-                               "options exceed device_descriptor's total length.\n");
-                       err = -EINVAL;
-                       goto free_device_descriptor;
-               }
-
-               gve_parse_device_option(priv, descriptor, dev_opt);
-               dev_opt = next_opt;
-       }
 
 free_device_descriptor:
-       dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+       dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                          descriptor_bus);
        return err;
 }
 
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_ADMINQ_H
 struct gve_device_option {
        __be16 option_id;
        __be16 option_length;
-       __be32 feat_mask;
+       __be32 required_features_mask;
 };
 
 static_assert(sizeof(struct gve_device_option) == 8);
 
-#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
-#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
-#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
+struct gve_device_option_gqi_rda {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
+
+struct gve_device_option_gqi_qpl {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
+
+struct gve_device_option_dqo_rda {
+       __be32 supported_features_mask;
+       __be16 tx_comp_ring_entries;
+       __be16 rx_buff_ring_entries;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+
+/* Terminology:
+ *
+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
+ *       mapped and read/updated by the device.
+ *
+ * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
+ *       the device for read/write and data is copied from/to SKBs.
+ */
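+
An illustrative sketch only, not code from this driver: the practical difference between the two modes on a transmit path, using the standard DMA API (dev, qpl_page_vaddr, and offset are hypothetical placeholders):

	/* RDA: DMA-map the skb buffer itself; the device reads it in place. */
	dma_addr_t addr = dma_map_single(dev, skb->data, skb_headlen(skb),
					 DMA_TO_DEVICE);

	/* QPL: copy into a bounce page from the pre-registered queue page
	 * list; the page was DMA-mapped once at queue setup, so no per-packet
	 * mapping is needed.
	 */
	memcpy(qpl_page_vaddr + offset, skb->data, skb_headlen(skb));
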
+enum gve_dev_opt_id {
+       GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+       GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+       GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+       GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+};
+
+enum gve_dev_opt_req_feat_mask {
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+};
+
+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
 
 struct gve_adminq_configure_device_resources {
        __be64 counter_array;