www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
idpf: set scheduling mode for completion queue
author: Michal Kubiak <michal.kubiak@intel.com>
Mon, 23 Oct 2023 20:26:54 +0000 (13:26 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 23 Oct 2023 22:55:31 +0000 (15:55 -0700)
The HW must be programmed differently for queue-based scheduling mode.
To program the completion queue context correctly, the control plane
must know the scheduling mode not only for the Tx queue, but also for
the completion queue.
Unfortunately, the driver currently sets the scheduling mode only for
the Tx queues.

Propagate the scheduling mode data for the completion queue as
well when sending the queue configuration messages.

Fixes: 1c325aac10a8 ("idpf: configure resources for TX queues")
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Reviewed-by: Alan Brady <alan.brady@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20231023202655.173369-2-jacob.e.keller@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c

index 6fa79898c42c50c6d075c0155db4c84db1d61fa8..58c5412d317361345a210bd6c6d4c75f883325a5 100644 (file)
@@ -1160,6 +1160,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
  */
 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 {
+       bool flow_sch_en;
        int err, i;
 
        vport->txq_grps = kcalloc(vport->num_txq_grp,
@@ -1167,6 +1168,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
        if (!vport->txq_grps)
                return -ENOMEM;
 
+       flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+                                      VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
                struct idpf_adapter *adapter = vport->adapter;
@@ -1195,8 +1199,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
                        q->txq_grp = tx_qgrp;
                        hash_init(q->sched_buf_hash);
 
-                       if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
-                                            VIRTCHNL2_CAP_SPLITQ_QSCHED))
+                       if (flow_sch_en)
                                set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
                }
 
@@ -1215,6 +1218,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
                tx_qgrp->complq->desc_count = vport->complq_desc_count;
                tx_qgrp->complq->vport = vport;
                tx_qgrp->complq->txq_grp = tx_qgrp;
+
+               if (flow_sch_en)
+                       __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
        }
 
        return 0;
index 9bc85b2f1709daf950ce0849ee455b5dcb64e729..e276b5360c2ed81be070f7a01b4e0b9cb3a8c06f 100644 (file)
@@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
        /* Populate the queue info buffer with all queue context info */
        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-               int j;
+               int j, sched_mode;
 
                for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
                        qi[k].queue_id =
@@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
                qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
                qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
 
+               if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+                       sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+               else
+                       sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+               qi[k].sched_mode = cpu_to_le16(sched_mode);
+
                k++;
        }