www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Revert "Merge branch 'there-are-some-bugfix-for-the-hns3-ethernet-driver'"
authorJakub Kicinski <kuba@kernel.org>
Tue, 5 Nov 2024 02:03:52 +0000 (18:03 -0800)
committerJakub Kicinski <kuba@kernel.org>
Tue, 5 Nov 2024 02:07:54 +0000 (18:07 -0800)
This reverts commit d80a3091308491455b6501b1c4b68698c4a7cd24, reversing
changes made to 637f41476384c76d3cd7dcf5947caf2c8b8d7a9b:

2cf246143519 ("net: hns3: fix kernel crash when 1588 is sent on HIP08 devices")
3e22b7de34cb ("net: hns3: fixed hclge_fetch_pf_reg accesses bar space out of bounds issue")
d1c2e2961ab4 ("net: hns3: initialize reset_timer before hclgevf_misc_irq_init()")
5f62009ff108 ("net: hns3: don't auto enable misc vector")
2758f18a83ef ("net: hns3: Resolved the issue that the debugfs query result is inconsistent.")
662ecfc46690 ("net: hns3: fix missing features due to dev->features configuration too early")
3e0f7cc887b7 ("net: hns3: fixed reset failure issues caused by the incorrect reset type")
f2c14899caba ("net: hns3: add sync command to sync io-pgtable")
e6ab19443b36 ("net: hns3: default enable tx bounce buffer when smmu enabled")

The series is making the driver poke into IOMMU internals instead of
implementing appropriate IOMMU workarounds.

Link: https://lore.kernel.org/069c9838-b781-4012-934a-d2626fa78212@arm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c

index 841e5af7b2beeff047d0835275f57adceaf6ba46..807eb3bbb11c0408fbdcfa678586646fb92f8861 100644 (file)
@@ -1293,10 +1293,8 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 
                /* save the buffer addr until the last read operation */
                *save_buf = read_buf;
-       }
 
-       /* get data ready for the first time to read */
-       if (!*ppos) {
+               /* get data ready for the first time to read */
                ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
                                        read_buf, hns3_dbg_cmd[index].buf_len);
                if (ret)
index b09f0cca34dc68604dc00b7e7ed233a16fcdadd7..4cbc4d069a1f369eaec2fec6b3412a0fce186782 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/irq.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
@@ -381,24 +380,6 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
 #define HNS3_INVALID_PTYPE \
                ARRAY_SIZE(hns3_rx_ptype_tbl)
 
-static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
-{
-       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct iommu_iotlb_gather iotlb_gather;
-       size_t granule;
-
-       if (!domain || !iommu_is_dma_domain(domain))
-               return;
-
-       granule = 1 << __ffs(domain->pgsize_bitmap);
-       iova = ALIGN_DOWN(iova, granule);
-       iotlb_gather.start = iova;
-       iotlb_gather.end = iova + granule - 1;
-       iotlb_gather.pgsize = granule;
-
-       iommu_iotlb_sync(domain, &iotlb_gather);
-}
-
 static irqreturn_t hns3_irq_handle(int irq, void *vector)
 {
        struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -1051,8 +1032,6 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
        u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
-       struct net_device *netdev = ring_to_netdev(ring);
-       struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hns3_tx_spare *tx_spare;
        struct page *page;
        dma_addr_t dma;
@@ -1094,7 +1073,6 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
        tx_spare->buf = page_address(page);
        tx_spare->len = PAGE_SIZE << order;
        ring->tx_spare = tx_spare;
-       ring->tx_copybreak = priv->tx_copybreak;
        return;
 
 dma_mapping_error:
@@ -1746,9 +1724,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
                                  unsigned int type)
 {
        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
-       struct hnae3_handle *handle = ring->tqp->handle;
        struct device *dev = ring_to_dev(ring);
-       struct hnae3_ae_dev *ae_dev;
        unsigned int size;
        dma_addr_t dma;
 
@@ -1780,13 +1756,6 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
                return -ENOMEM;
        }
 
-       /* Add a SYNC command to sync io-pgtale to avoid errors in pgtable
-        * prefetch
-        */
-       ae_dev = hns3_get_ae_dev(handle);
-       if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
-               hns3_dma_map_sync(dev, dma);
-
        desc_cb->priv = priv;
        desc_cb->length = size;
        desc_cb->dma = dma;
@@ -2483,6 +2452,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
                        return ret;
        }
 
+       netdev->features = features;
        return 0;
 }
 
@@ -4898,30 +4868,6 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
        devm_kfree(&pdev->dev, priv->tqp_vector);
 }
 
-static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
-{
-#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
-#define HNS3_MAX_PACKET_SIZE (64 * 1024)
-
-       struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
-       struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
-       struct hnae3_handle *handle = priv->ae_handle;
-
-       if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
-               return;
-
-       if (!(domain && iommu_is_dma_domain(domain)))
-               return;
-
-       priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
-       priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
-
-       if (priv->tx_copybreak < priv->min_tx_copybreak)
-               priv->tx_copybreak = priv->min_tx_copybreak;
-       if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
-               handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
-}
-
 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
                              unsigned int ring_type)
 {
@@ -5155,7 +5101,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
        int i, j;
        int ret;
 
-       hns3_update_tx_spare_buf_config(priv);
        for (i = 0; i < ring_num; i++) {
                ret = hns3_alloc_ring_memory(&priv->ring[i]);
                if (ret) {
@@ -5360,8 +5305,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
        priv->ae_handle = handle;
        priv->tx_timeout_count = 0;
        priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
-       priv->min_tx_copybreak = 0;
-       priv->min_tx_spare_buf_size = 0;
        set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 
        handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
index caf7a4df8585275c8ae47b5f78d613bf26d5a425..d36c4ed16d8dd25251df92377f91a24cf3961e01 100644 (file)
@@ -596,8 +596,6 @@ struct hns3_nic_priv {
        struct hns3_enet_coalesce rx_coal;
        u32 tx_copybreak;
        u32 rx_copybreak;
-       u32 min_tx_copybreak;
-       u32 min_tx_spare_buf_size;
 };
 
 union l3_hdr_info {
index 97eaeec1952bb572534f87338982962073ee89f8..b1e9883473476956d793f198c1c45f01fc16b343 100644 (file)
@@ -1933,31 +1933,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
        return ret;
 }
 
-static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
-{
-       struct hns3_nic_priv *priv = netdev_priv(netdev);
-
-       if (copybreak < priv->min_tx_copybreak) {
-               netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
-                          copybreak, priv->min_tx_copybreak);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
-{
-       struct hns3_nic_priv *priv = netdev_priv(netdev);
-
-       if (buf_size < priv->min_tx_spare_buf_size) {
-               netdev_err(netdev,
-                          "tx spare buf size %u should be no less than %u!\n",
-                          buf_size, priv->min_tx_spare_buf_size);
-               return -EINVAL;
-       }
-       return 0;
-}
-
 static int hns3_set_tunable(struct net_device *netdev,
                            const struct ethtool_tunable *tuna,
                            const void *data)
@@ -1974,10 +1949,6 @@ static int hns3_set_tunable(struct net_device *netdev,
 
        switch (tuna->id) {
        case ETHTOOL_TX_COPYBREAK:
-               ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
-               if (ret)
-                       return ret;
-
                priv->tx_copybreak = *(u32 *)data;
 
                for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1992,10 +1963,6 @@ static int hns3_set_tunable(struct net_device *netdev,
 
                break;
        case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
-               ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
-               if (ret)
-                       return ret;
-
                old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
                new_tx_spare_buf_size = *(u32 *)data;
                netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
index 728f4777e51f0a1ee9f3c59d6d8f473ebc0b7c64..bd86efd92a5a7d0eb92b97b82b5e03dc875d1049 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -3585,17 +3584,6 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
        return ret;
 }
 
-static void hclge_set_reset_pending(struct hclge_dev *hdev,
-                                   enum hnae3_reset_type reset_type)
-{
-       /* When an incorrect reset type is executed, the get_reset_level
-        * function generates the HNAE3_NONE_RESET flag. As a result, this
-        * type do not need to pending.
-        */
-       if (reset_type != HNAE3_NONE_RESET)
-               set_bit(reset_type, &hdev->reset_pending);
-}
-
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
        u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3616,7 +3604,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
         */
        if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
                dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
-               hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
+               set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
                *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
                hdev->rst_stats.imp_rst_cnt++;
@@ -3626,7 +3614,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
                dev_info(&hdev->pdev->dev, "global reset interrupt\n");
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
-               hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
+               set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                hdev->rst_stats.global_rst_cnt++;
                return HCLGE_VECTOR0_EVENT_RST;
@@ -3781,7 +3769,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
        snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
                 HCLGE_NAME, pci_name(hdev->pdev));
        ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
-                         IRQ_NOAUTOEN, hdev->misc_vector.name, hdev);
+                         0, hdev->misc_vector.name, hdev);
        if (ret) {
                hclge_free_vector(hdev, 0);
                dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4074,7 +4062,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
        case HNAE3_FUNC_RESET:
                dev_info(&pdev->dev, "PF reset requested\n");
                /* schedule again to check later */
-               hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
+               set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
                hclge_reset_task_schedule(hdev);
                break;
        default:
@@ -4108,8 +4096,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                clear_bit(HNAE3_FLR_RESET, addr);
        }
 
-       clear_bit(HNAE3_NONE_RESET, addr);
-
        if (hdev->reset_type != HNAE3_NONE_RESET &&
            rst_level < hdev->reset_type)
                return HNAE3_NONE_RESET;
@@ -4251,7 +4237,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
                return false;
        } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
                hdev->rst_stats.reset_fail_cnt++;
-               hclge_set_reset_pending(hdev, hdev->reset_type);
+               set_bit(hdev->reset_type, &hdev->reset_pending);
                dev_info(&hdev->pdev->dev,
                         "re-schedule reset task(%u)\n",
                         hdev->rst_stats.reset_fail_cnt);
@@ -4494,20 +4480,8 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
                                        enum hnae3_reset_type rst_type)
 {
-#define HCLGE_SUPPORT_RESET_TYPE \
-       (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
-       BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
-
        struct hclge_dev *hdev = ae_dev->priv;
 
-       if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
-               /* To prevent reset triggered by hclge_reset_event */
-               set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
-               dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
-                        rst_type);
-               return;
-       }
-
        set_bit(rst_type, &hdev->default_reset_request);
 }
 
@@ -11917,6 +11891,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 
        hclge_init_rxd_adv_layout(hdev);
 
+       /* Enable MISC vector(vector0) */
+       hclge_enable_vector(&hdev->misc_vector, true);
+
        ret = hclge_init_wol(hdev);
        if (ret)
                dev_warn(&pdev->dev,
@@ -11929,10 +11906,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        hclge_state_init(hdev);
        hdev->last_reset_time = jiffies;
 
-       /* Enable MISC vector(vector0) */
-       enable_irq(hdev->misc_vector.vector_irq);
-       hclge_enable_vector(&hdev->misc_vector, true);
-
        dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);
 
@@ -12338,7 +12311,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 
        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
-       disable_irq(hdev->misc_vector.vector_irq);
+       synchronize_irq(hdev->misc_vector.vector_irq);
 
        /* Disable all hw interrupts */
        hclge_config_mac_tnl_int(hdev, false);
index bab16c2191b2f09363740c8cd510673285f4e798..5505caea88e981e0bd9407ae9156f45d660edc51 100644 (file)
@@ -58,9 +58,6 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
        struct hclge_dev *hdev = vport->back;
        struct hclge_ptp *ptp = hdev->ptp;
 
-       if (!ptp)
-               return false;
-
        if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
            test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
                ptp->tx_skipped++;
index 8c057192aae6e16abe24fdd1bbed3f088c111f8e..43c1c18fa81f8d572d37d15f2c7cc6d4dcbf3391 100644 (file)
@@ -510,9 +510,9 @@ out:
 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
                              struct hnae3_knic_private_info *kinfo)
 {
+#define HCLGE_RING_REG_OFFSET          0x200
 #define HCLGE_RING_INT_REG_OFFSET      0x4
 
-       struct hnae3_queue *tqp;
        int i, j, reg_num;
        int data_num_sum;
        u32 *reg = data;
@@ -533,11 +533,10 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
        reg_num = ARRAY_SIZE(ring_reg_addr_list);
        for (j = 0; j < kinfo->num_tqps; j++) {
                reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
-               tqp = kinfo->tqp[j];
                for (i = 0; i < reg_num; i++)
-                       *reg++ = readl_relaxed(tqp->io_base -
-                                              HCLGE_TQP_REG_OFFSET +
-                                              ring_reg_addr_list[i]);
+                       *reg++ = hclge_read_dev(&hdev->hw,
+                                               ring_reg_addr_list[i] +
+                                               HCLGE_RING_REG_OFFSET * j);
        }
        data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
 
index 896f1eb172d30e0769000db6c71a177f08133508..094a7c7b55921f9a0ddb3637ebe8d5ca9eb46c38 100644 (file)
@@ -1395,17 +1395,6 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
        return ret;
 }
 
-static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
-                                     enum hnae3_reset_type reset_type)
-{
-       /* When an incorrect reset type is executed, the get_reset_level
-        * function generates the HNAE3_NONE_RESET flag. As a result, this
-        * type do not need to pending.
-        */
-       if (reset_type != HNAE3_NONE_RESET)
-               set_bit(reset_type, &hdev->reset_pending);
-}
-
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RESET_WAIT_US  20000
@@ -1555,7 +1544,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
                hdev->rst_stats.rst_fail_cnt);
 
        if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
-               hclgevf_set_reset_pending(hdev, hdev->reset_type);
+               set_bit(hdev->reset_type, &hdev->reset_pending);
 
        if (hclgevf_is_reset_pending(hdev)) {
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1675,8 +1664,6 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
                clear_bit(HNAE3_FLR_RESET, addr);
        }
 
-       clear_bit(HNAE3_NONE_RESET, addr);
-
        return rst_level;
 }
 
@@ -1686,15 +1673,14 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
        struct hclgevf_dev *hdev = ae_dev->priv;
 
+       dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+
        if (hdev->default_reset_request)
                hdev->reset_level =
                        hclgevf_get_reset_level(&hdev->default_reset_request);
        else
                hdev->reset_level = HNAE3_VF_FUNC_RESET;
 
-       dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
-                hdev->reset_level);
-
        /* reset of this VF requested */
        set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
        hclgevf_reset_task_schedule(hdev);
@@ -1705,20 +1691,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
                                          enum hnae3_reset_type rst_type)
 {
-#define HCLGEVF_SUPPORT_RESET_TYPE \
-       (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
-       BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
-       BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
-
        struct hclgevf_dev *hdev = ae_dev->priv;
 
-       if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
-               /* To prevent reset triggered by hclge_reset_event */
-               set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
-               dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
-                        rst_type);
-               return;
-       }
        set_bit(rst_type, &hdev->default_reset_request);
 }
 
@@ -1875,14 +1849,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
                 */
                if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
                        /* prepare for full reset of stack + pcie interface */
-                       hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
+                       set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
 
                        /* "defer" schedule the reset task again */
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                } else {
                        hdev->reset_attempts++;
 
-                       hclgevf_set_reset_pending(hdev, hdev->reset_level);
+                       set_bit(hdev->reset_level, &hdev->reset_pending);
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                }
                hclgevf_reset_task_schedule(hdev);
@@ -2005,7 +1979,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
                rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
                dev_info(&hdev->pdev->dev,
                         "receive reset interrupt 0x%x!\n", rst_ing_reg);
-               hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
+               set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
                *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2315,7 +2289,6 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
        clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
        INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
-       timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
        mutex_init(&hdev->mbx_resp.mbx_mutex);
        sema_init(&hdev->reset_sem, 1);
@@ -3015,6 +2988,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
                 HCLGEVF_DRIVER_NAME);
 
        hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+       timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
        return 0;
 
index 7d9d9dbc75603a3d6e8d2add3be51776bc1400ce..6db415d8b9176cfb2b480518991fa2f14f097551 100644 (file)
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
 void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
                      void *data)
 {
+#define HCLGEVF_RING_REG_OFFSET                0x200
 #define HCLGEVF_RING_INT_REG_OFFSET    0x4
 
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-       struct hnae3_queue *tqp;
        int i, j, reg_um;
        u32 *reg = data;
 
@@ -147,11 +147,10 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
        reg_um = ARRAY_SIZE(ring_reg_addr_list);
        for (j = 0; j < hdev->num_tqps; j++) {
                reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
-               tqp = &hdev->htqp[j].q;
                for (i = 0; i < reg_um; i++)
-                       *reg++ = readl_relaxed(tqp->io_base -
-                                              HCLGEVF_TQP_REG_OFFSET +
-                                              ring_reg_addr_list[i]);
+                       *reg++ = hclgevf_read_dev(&hdev->hw,
+                                                 ring_reg_addr_list[i] +
+                                                 HCLGEVF_RING_REG_OFFSET * j);
        }
 
        reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);