www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
wifi: ath12k: refactor rx descriptor CMEM configuration
authorKarthikeyan Periyasamy <quic_periyasa@quicinc.com>
Tue, 21 May 2024 08:08:10 +0000 (11:08 +0300)
committerKalle Valo <quic_kvalo@quicinc.com>
Sat, 25 May 2024 08:54:41 +0000 (11:54 +0300)
Currently, the data path Rx descriptors are placed in CMEM memory at a fixed
Rx offset. This Rx descriptor CMEM placement does not meet the requirements
for supporting inter-device MLO. In inter-device MLO, multiple devices
participate in a group, and the device-specific Rx descriptor buffers are
delivered to the REO rings of multiple devices. To distinguish between
device-specific Rx descriptor buffers, the CMEM setup configuration needs to
choose a different placement based on a unique identifier (the device index).

Below is a representation of the CMEM configuration placement:

Intra-Device MLO scenario:

        Device 0                                Device 1

     +-------------+                         +-------------+
     |             |                         |             |
     |     Tx      |                         |     Tx      |
     |             |                         |             |
     +-------------+                         +-------------+
     |             |                         |             |
     |     Rx      |                         |     Rx      |
     |             |                         |             |
     +-------------+                         +-------------+

Inter-Device MLO scenario:

        Device 0                                Device 1

     +-------------+                         +-------------+
     |             |                         |             |
     |     Tx      |                         |     Tx      |
     |             |                         |             |
     +-------------+                         +-------------+
     |             |                         |             |
     |     Rx      |                         |   Reserved  |
     |  Device 0   |                         |             |
     |             |                         |             |
     +-------------+                         +-------------+
     |             |                         |     Rx      |
     |   Reserved  |                         |   Device 1  |
     |             |                         |             |
     +-------------+                         +-------------+

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.0.1-00029-QCAHKSWPL_SILICONZ-1
Tested-on: WCN7850 HW2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3

Signed-off-by: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://msgid.link/20240516000807.1704913-2-quic_periyasa@quicinc.com
drivers/net/wireless/ath/ath12k/dp.c
drivers/net/wireless/ath/ath12k/dp.h

index c04fa3eb57d690cdbdf7a72ce33efce4311febf8..61aa78d8bd8c8fef4872e15e63a32a76c927c934 100644 (file)
@@ -1349,13 +1349,14 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
 struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
                                                  u32 cookie)
 {
+       struct ath12k_dp *dp = &ab->dp;
        struct ath12k_rx_desc_info **desc_addr_ptr;
        u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
 
        ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
        spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
 
-       start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
+       start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
        end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
 
        if (ppt_idx < start_ppt_idx ||
@@ -1363,6 +1364,7 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
            spt_idx > ATH12K_MAX_SPT_ENTRIES)
                return NULL;
 
+       ppt_idx = ppt_idx - dp->rx_ppt_base;
        desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
 
        return *desc_addr_ptr;
@@ -1397,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
        struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
        struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
        u32 i, j, pool_id, tx_spt_page;
-       u32 ppt_idx;
+       u32 ppt_idx, cookie_ppt_idx;
 
        spin_lock_bh(&dp->rx_desc_lock);
 
@@ -1412,10 +1414,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
                }
 
                ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+               cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
                dp->spt_info->rxbaddr[i] = &rx_descs[0];
 
                for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-                       rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+                       rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
                        rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
                        list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
 
@@ -1476,6 +1479,7 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
                end = start + ATH12K_NUM_TX_SPT_PAGES;
                break;
        case ATH12K_DP_RX_DESC:
+               cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
                start = ATH12K_RX_SPT_PAGE_OFFSET;
                end = start + ATH12K_NUM_RX_SPT_PAGES;
                break;
@@ -1518,6 +1522,8 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
                return -ENOMEM;
        }
 
+       dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+
        for (i = 0; i < dp->num_spt_pages; i++) {
                dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
                                                           ATH12K_PAGE_SIZE,
index 43589c3dd4bc134867edeca07726cbea8582f85c..742094545089f8602a3731c4b74da4894c987e75 100644 (file)
@@ -350,6 +350,7 @@ struct ath12k_dp {
        struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
        struct ath12k_spt_info *spt_info;
        u32 num_spt_pages;
+       u32 rx_ppt_base;
        struct list_head rx_desc_free_list;
        /* protects the free desc list */
        spinlock_t rx_desc_lock;