/**
 * idpf_txq_group_alloc - Allocate all txq group resources
 * @vport: vport to allocate txq groups for
 * @num_txq: number of txqs to allocate for each group
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
+	bool flow_sch_en;
	int err, i;

	vport->txq_grps = kcalloc(vport->num_txq_grp,
				  sizeof(*vport->txq_grps), GFP_KERNEL);
	if (!vport->txq_grps)
		return -ENOMEM;

+	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
		struct idpf_adapter *adapter = vport->adapter;
[...]
			q->txq_grp = tx_qgrp;
			hash_init(q->sched_buf_hash);

-			if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
-					     VIRTCHNL2_CAP_SPLITQ_QSCHED))
+			if (flow_sch_en)
				set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
		}
[...]
		tx_qgrp->complq->desc_count = vport->complq_desc_count;
		tx_qgrp->complq->vport = vport;
		tx_qgrp->complq->txq_grp = tx_qgrp;
+
+		if (flow_sch_en)
+			__set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
	}

	return 0;
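The hunk above hoists the VIRTCHNL2_CAP_SPLITQ_QSCHED capability check into a single flow_sch_en lookup and then mirrors the per-txq flow-scheduling flag onto the completion queue. Note that the completion queue uses the non-atomic __set_bit() while the pre-existing txq path keeps the atomic set_bit(); at this point the queue group is still private to the allocating thread, so the cheaper non-atomic variant is safe. A minimal sketch of that flag pattern, using stand-in type and flag names rather than the driver's real definitions:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* Stand-ins for the driver's queue flag definitions (hypothetical names). */
#define SKETCH_Q_FLOW_SCH_EN	0
#define SKETCH_Q_FLAGS_NBITS	BITS_PER_LONG

struct sketch_queue {
	DECLARE_BITMAP(flags, SKETCH_Q_FLAGS_NBITS);
};

/*
 * Runs while the queue is still private to the allocating thread, so the
 * non-atomic __set_bit() suffices; a queue already visible to concurrent
 * writers would need the atomic set_bit() instead.
 */
static void sketch_mark_flow_sched(struct sketch_queue *q, bool flow_sch_en)
{
	if (flow_sch_en)
		__set_bit(SKETCH_Q_FLOW_SCH_EN, q->flags);
}

The second hunk below appears to come from the virtchnl queue-configuration path, where the flag set here is translated into the scheduling mode reported to the device in the VIRTCHNL2_OP_CONFIG_TX_QUEUES message.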
	/* Populate the queue info buffer with all queue context info */
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-		int j;
+		int j, sched_mode;

		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
			qi[k].queue_id =
				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
[...]
		}
[...]
		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+		if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+		else
+			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		qi[k].sched_mode = cpu_to_le16(sched_mode);
+
		k++;
	}
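The flag-to-mode translation added above mirrors the one the txq loop performs per queue, so both sites could in principle share a helper. A hypothetical helper, not part of the driver, assuming only test_bit() and the VIRTCHNL2_TXQ_SCHED_MODE_* defines already used in this patch:

/*
 * Hypothetical helper centralizing the bitmap-to-virtchnl2 mapping. Flow
 * scheduling lets the device complete Tx buffers out of order; queue mode
 * completes them in ring order.
 */
static u16 idpf_q_sched_mode(const unsigned long *flags)
{
	return test_bit(__IDPF_Q_FLOW_SCH_EN, flags) ?
	       VIRTCHNL2_TXQ_SCHED_MODE_FLOW :
	       VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
}

With such a helper, the addition above would collapse to qi[k].sched_mode = cpu_to_le16(idpf_q_sched_mode(tx_qgrp->complq->flags)); the patch's local sched_mode variable achieves the same readability while keeping each branch on a single line.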