const char *cmd_buf)
 {
        struct hns3_nic_priv *priv = h->priv;
-       struct hns3_nic_ring_data *ring_data;
        struct hns3_enet_ring *ring;
        u32 base_add_l, base_add_h;
        u32 queue_num, queue_max;
        u32 value, i = 0;
        int cnt;
 
-       if (!priv->ring_data) {
-               dev_err(&h->pdev->dev, "ring_data is NULL\n");
+       if (!priv->ring) {
+               dev_err(&h->pdev->dev, "priv->ring is NULL\n");
                return -EFAULT;
        }
 
                return -EINVAL;
        }
 
-       ring_data = priv->ring_data;
        for (i = queue_num; i < queue_max; i++) {
                /* Each cycle needs to determine whether the instance is reset,
                 * to prevent reference to invalid memory. And need to ensure
                    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
                        return -EPERM;
 
-               ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+               ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
                base_add_h = readl_relaxed(ring->tqp->io_base +
                                           HNS3_RING_RX_RING_BASEADDR_H_REG);
                base_add_l = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
                dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
 
-               ring = ring_data[i].ring;
+               ring = &priv->ring[i];
                base_add_h = readl_relaxed(ring->tqp->io_base +
                                           HNS3_RING_TX_RING_BASEADDR_H_REG);
                base_add_l = readl_relaxed(ring->tqp->io_base +
 static int hns3_dbg_queue_map(struct hnae3_handle *h)
 {
        struct hns3_nic_priv *priv = h->priv;
-       struct hns3_nic_ring_data *ring_data;
        int i;
 
        if (!h->ae_algo->ops->get_global_queue_id)
                u16 global_qid;
 
                global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
-               ring_data = &priv->ring_data[i];
-               if (!ring_data || !ring_data->ring ||
-                   !ring_data->ring->tqp_vector)
+               if (!priv->ring || !priv->ring[i].tqp_vector)
                        continue;
 
                dev_info(&h->pdev->dev,
                         "      %4d            %4d            %4d\n",
-                        i, global_qid,
-                        ring_data->ring->tqp_vector->vector_irq);
+                        i, global_qid, priv->ring[i].tqp_vector->vector_irq);
        }
 
        return 0;
 static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 {
        struct hns3_nic_priv *priv = h->priv;
-       struct hns3_nic_ring_data *ring_data;
        struct hns3_desc *rx_desc, *tx_desc;
        struct device *dev = &h->pdev->dev;
        struct hns3_enet_ring *ring;
                return -EINVAL;
        }
 
-       ring_data = priv->ring_data;
-       ring  = ring_data[q_num].ring;
+       ring  = &priv->ring[q_num];
        value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
        tx_index = (cnt == 1) ? value : tx_index;
 
        dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
        dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
 
-       ring  = ring_data[q_num + h->kinfo.num_tqps].ring;
+       ring  = &priv->ring[q_num + h->kinfo.num_tqps];
        value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
        rx_index = (cnt == 1) ? value : tx_index;
        rx_desc  = &ring->desc[rx_index];
 
 
        for (i = 0; i < h->kinfo.num_tqps; i++) {
                dev_queue = netdev_get_tx_queue(ndev,
-                                               priv->ring_data[i].queue_index);
+                                               priv->ring[i].queue_index);
                netdev_tx_reset_queue(dev_queue);
        }
 }
 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
-       struct hns3_nic_ring_data *ring_data =
-               &tx_ring_data(priv, skb->queue_mapping);
-       struct hns3_enet_ring *ring = ring_data->ring;
+       struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
        struct netdev_queue *dev_queue;
        int pre_ntu, next_to_use_head;
        struct sk_buff *frag_skb;
                                cpu_to_le16(BIT(HNS3_TXD_FE_B));
 
        /* Complete translate all packets */
-       dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+       dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
        netdev_tx_sent_queue(dev_queue, skb->len);
 
        wmb(); /* Commit all data before submit */
        return NETDEV_TX_OK;
 
 out_net_tx_busy:
-       netif_stop_subqueue(netdev, ring_data->queue_index);
+       netif_stop_subqueue(netdev, ring->queue_index);
        smp_mb(); /* Commit all data before submit */
 
        return NETDEV_TX_BUSY;
 
        for (idx = 0; idx < queue_num; idx++) {
                /* fetch the tx stats */
-               ring = priv->ring_data[idx].ring;
+               ring = &priv->ring[idx];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        tx_bytes += ring->stats.tx_bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
                /* fetch the rx stats */
-               ring = priv->ring_data[idx + queue_num].ring;
+               ring = &priv->ring[idx + queue_num];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        rx_bytes += ring->stats.rx_bytes;
 
        priv->tx_timeout_count++;
 
-       tx_ring = priv->ring_data[timeout_queue].ring;
+       tx_ring = &priv->ring[timeout_queue];
        napi = &tx_ring->tqp_vector->napi;
 
        netdev_info(ndev,
                tqp_vector = &priv->tqp_vector[vector_i];
 
                hns3_add_ring_to_group(&tqp_vector->tx_group,
-                                      priv->ring_data[i].ring);
+                                      &priv->ring[i]);
 
                hns3_add_ring_to_group(&tqp_vector->rx_group,
-                                      priv->ring_data[i + tqp_num].ring);
+                                      &priv->ring[i + tqp_num]);
 
-               priv->ring_data[i].ring->tqp_vector = tqp_vector;
-               priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+               priv->ring[i].tqp_vector = tqp_vector;
+               priv->ring[i + tqp_num].tqp_vector = tqp_vector;
                tqp_vector->num_tqps++;
        }
 
        return 0;
 }
 
-static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-                            unsigned int ring_type)
+/* Initialize the TX or RX ring backing queue @q.  Rings are no longer
+ * individually devm_kzalloc'ed; each is a slot in the flat priv->ring
+ * array (TX at [tqp_index], RX at [tqp_index + num_tqps]), so this
+ * function can no longer fail and becomes void.
+ */
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+                             unsigned int ring_type)
 {
-       struct hns3_nic_ring_data *ring_data = priv->ring_data;
        int queue_num = priv->ae_handle->kinfo.num_tqps;
-       struct pci_dev *pdev = priv->ae_handle->pdev;
        struct hns3_enet_ring *ring;
        int desc_num;
 
-       ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
-       if (!ring)
-               return -ENOMEM;
-
       if (ring_type == HNAE3_RING_TYPE_TX) {
+               /* TX half of the array: indices [0, queue_num). */
+               ring = &priv->ring[q->tqp_index];
                desc_num = priv->ae_handle->kinfo.num_tx_desc;
-               ring_data[q->tqp_index].ring = ring;
-               ring_data[q->tqp_index].queue_index = q->tqp_index;
+               ring->queue_index = q->tqp_index;
                ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
        } else {
+               /* RX half of the array: indices [queue_num, 2 * queue_num). */
+               ring = &priv->ring[q->tqp_index + queue_num];
                desc_num = priv->ae_handle->kinfo.num_rx_desc;
-               ring_data[q->tqp_index + queue_num].ring = ring;
-               ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
+               ring->queue_index = q->tqp_index;
                ring->io_base = q->io_base;
        }
 
        ring->desc_num = desc_num;
        ring->next_to_use = 0;
        ring->next_to_clean = 0;
-
-       return 0;
 }
 
-static int hns3_queue_to_ring(struct hnae3_queue *tqp,
-                             struct hns3_nic_priv *priv)
+/* Set up both the TX and the RX ring of one hardware queue.  With the
+ * per-ring allocation gone, hns3_ring_get_cfg() cannot fail, so the old
+ * error unwind (which freed only the TX ring) is removed and the
+ * function becomes void.
+ */
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+                              struct hns3_nic_priv *priv)
 {
-       int ret;
-
-       ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
-       if (ret)
-               return ret;
-
-       ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
-       if (ret) {
-               devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
-               return ret;
-       }
-
-       return 0;
+       hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+       hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
 }
 
 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
 {
        struct hnae3_handle *h = priv->ae_handle;
        struct pci_dev *pdev = h->pdev;
-       int i, ret;
+       int i;
 
-       priv->ring_data =  devm_kzalloc(&pdev->dev,
-                                       array3_size(h->kinfo.num_tqps,
-                                                   sizeof(*priv->ring_data),
-                                                   2),
-                                       GFP_KERNEL);
-       if (!priv->ring_data)
+       /* One flat, device-managed array holds both ring sets:
+        * TX rings at [0, num_tqps), RX rings at [num_tqps, 2 * num_tqps).
+        * array3_size() guards the num_tqps * 2 * sizeof() multiplication
+        * against overflow.
+        */
+       priv->ring = devm_kzalloc(&pdev->dev,
+                                 array3_size(h->kinfo.num_tqps,
+                                             sizeof(*priv->ring), 2),
+                                 GFP_KERNEL);
+       if (!priv->ring)
                return -ENOMEM;
 
-       for (i = 0; i < h->kinfo.num_tqps; i++) {
-               ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
-               if (ret)
-                       goto err;
-       }
+       /* hns3_queue_to_ring() can no longer fail, so no unwind path. */
+       for (i = 0; i < h->kinfo.num_tqps; i++)
+               hns3_queue_to_ring(h->kinfo.tqp[i], priv);
 
        return 0;
-err:
-       while (i--) {
-               devm_kfree(priv->dev, priv->ring_data[i].ring);
-               devm_kfree(priv->dev,
-                          priv->ring_data[i + h->kinfo.num_tqps].ring);
-       }
-
-       devm_kfree(&pdev->dev, priv->ring_data);
-       priv->ring_data = NULL;
-       return ret;
 }
 
 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 {
-       struct hnae3_handle *h = priv->ae_handle;
-       int i;
-
-       if (!priv->ring_data)
+       /* All rings live in one devm-allocated array now, so a single
+        * devm_kfree releases everything; the per-ring free loop is gone.
+        * NOTE(review): the array was allocated against &pdev->dev but is
+        * freed via priv->dev — assumes they are the same device; confirm
+        * against hns3_client_init().
+        */
+       if (!priv->ring)
                return;
 
-       for (i = 0; i < h->kinfo.num_tqps; i++) {
-               devm_kfree(priv->dev, priv->ring_data[i].ring);
-               devm_kfree(priv->dev,
-                          priv->ring_data[i + h->kinfo.num_tqps].ring);
-       }
-       devm_kfree(priv->dev, priv->ring_data);
-       priv->ring_data = NULL;
+       devm_kfree(priv->dev, priv->ring);
+       priv->ring = NULL;
 }
 
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
                for (j = 0; j < tc_info->tqp_count; j++) {
                        struct hnae3_queue *q;
 
-                       q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+                       q = priv->ring[tc_info->tqp_offset + j].tqp;
                        hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
                                       tc_info->tc);
                }
        int ret;
 
        for (i = 0; i < ring_num; i++) {
-               ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+               ret = hns3_alloc_ring_memory(&priv->ring[i]);
                if (ret) {
                        dev_err(priv->dev,
                                "Alloc ring memory fail! ret=%d\n", ret);
                        goto out_when_alloc_ring_memory;
                }
 
-               u64_stats_init(&priv->ring_data[i].ring->syncp);
+               u64_stats_init(&priv->ring[i].syncp);
        }
 
        return 0;
 
 out_when_alloc_ring_memory:
        for (j = i - 1; j >= 0; j--)
-               hns3_fini_ring(priv->ring_data[j].ring);
+               hns3_fini_ring(&priv->ring[j]);
 
        return -ENOMEM;
 }
        int i;
 
        for (i = 0; i < h->kinfo.num_tqps; i++) {
-               hns3_fini_ring(priv->ring_data[i].ring);
-               hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+               hns3_fini_ring(&priv->ring[i]);
+               hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
        }
        return 0;
 }
        ret = hns3_init_all_ring(priv);
        if (ret) {
                ret = -ENOMEM;
-               goto out_init_ring_data;
+               goto out_init_ring;
        }
 
        ret = hns3_init_phy(netdev);
        hns3_uninit_phy(netdev);
 out_init_phy:
        hns3_uninit_all_ring(priv);
-out_init_ring_data:
+out_init_ring:
        hns3_nic_uninit_vector_data(priv);
 out_init_vector_data:
        hns3_nic_dealloc_vector_data(priv);
 out_alloc_vector_data:
-       priv->ring_data = NULL;
+       priv->ring = NULL;
 out_get_ring_cfg:
        priv->ae_handle = NULL;
        free_netdev(netdev);
        for (i = 0; i < h->kinfo.num_tqps; i++) {
                struct hns3_enet_ring *ring;
 
-               ring = priv->ring_data[i].ring;
+               ring = &priv->ring[i];
                hns3_clear_tx_ring(ring);
 
-               ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+               ring = &priv->ring[i + h->kinfo.num_tqps];
                /* Continue to clear other rings even if clearing some
                 * rings failed.
                 */
                if (ret)
                        return ret;
 
-               hns3_init_ring_hw(priv->ring_data[i].ring);
+               hns3_init_ring_hw(&priv->ring[i]);
 
                /* We need to clear tx ring here because self test will
                 * use the ring and will not run down before up
                 */
-               hns3_clear_tx_ring(priv->ring_data[i].ring);
-               priv->ring_data[i].ring->next_to_clean = 0;
-               priv->ring_data[i].ring->next_to_use = 0;
+               hns3_clear_tx_ring(&priv->ring[i]);
+               priv->ring[i].next_to_clean = 0;
+               priv->ring[i].next_to_use = 0;
 
-               rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+               rx_ring = &priv->ring[i + h->kinfo.num_tqps];
                hns3_init_ring_hw(rx_ring);
                ret = hns3_clear_rx_ring(rx_ring);
                if (ret)
 
 
        kinfo = &h->kinfo;
        for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
-               struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+               struct hns3_enet_ring *ring = &priv->ring[i];
                struct hns3_enet_ring_group *rx_group;
                u64 pre_rx_pkt;
 
        u32 i;
 
        for (i = start_ringid; i <= end_ringid; i++) {
-               struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+               struct hns3_enet_ring *ring = &priv->ring[i];
 
                hns3_clean_tx_ring(ring);
        }
 
        /* get stats for Tx */
        for (i = 0; i < kinfo->num_tqps; i++) {
-               ring = nic_priv->ring_data[i].ring;
+               ring = &nic_priv->ring[i];
                for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
                        stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
                        *data++ = *(u64 *)stat;
 
        /* get stats for Rx */
        for (i = 0; i < kinfo->num_tqps; i++) {
-               ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+               ring = &nic_priv->ring[i + kinfo->num_tqps];
                for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
                        stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
                        *data++ = *(u64 *)stat;
        param->tx_max_pending = HNS3_RING_MAX_PENDING;
        param->rx_max_pending = HNS3_RING_MAX_PENDING;
 
-       param->tx_pending = priv->ring_data[0].ring->desc_num;
-       param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+       param->tx_pending = priv->ring[0].desc_num;
+       param->rx_pending = priv->ring[queue_num].desc_num;
 }
 
 static void hns3_get_pauseparam(struct net_device *netdev,
        h->kinfo.num_rx_desc = rx_desc_num;
 
        for (i = 0; i < h->kinfo.num_tqps; i++) {
-               priv->ring_data[i].ring->desc_num = tx_desc_num;
-               priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
-                       rx_desc_num;
+               priv->ring[i].desc_num = tx_desc_num;
+               priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
        }
 }
 
                return NULL;
 
        for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
-               memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+               memcpy(&tmp_rings[i], &priv->ring[i],
                       sizeof(struct hns3_enet_ring));
                tmp_rings[i].skb = NULL;
        }
        /* Hardware requires that its descriptors must be multiple of eight */
        new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
        new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
-       old_tx_desc_num = priv->ring_data[0].ring->desc_num;
-       old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+       old_tx_desc_num = priv->ring[0].desc_num;
+       old_rx_desc_num = priv->ring[queue_num].desc_num;
        if (old_tx_desc_num == new_tx_desc_num &&
            old_rx_desc_num == new_rx_desc_num)
                return 0;
                hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
                                            old_rx_desc_num);
                for (i = 0; i < h->kinfo.num_tqps * 2; i++)
-                       memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+                       memcpy(&priv->ring[i], &tmp_rings[i],
                               sizeof(struct hns3_enet_ring));
        } else {
                for (i = 0; i < h->kinfo.num_tqps * 2; i++)
                return -EINVAL;
        }
 
-       tx_vector = priv->ring_data[queue].ring->tqp_vector;
-       rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+       tx_vector = priv->ring[queue].tqp_vector;
+       rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
        cmd->use_adaptive_tx_coalesce =
                        tx_vector->tx_group.coal.gl_adapt_enable;
        struct hnae3_handle *h = priv->ae_handle;
        int queue_num = h->kinfo.num_tqps;
 
-       tx_vector = priv->ring_data[queue].ring->tqp_vector;
-       rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+       tx_vector = priv->ring[queue].tqp_vector;
+       rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
        tx_vector->tx_group.coal.gl_adapt_enable =
                                cmd->use_adaptive_tx_coalesce;