void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
                               struct qed_queue_cid *p_cid)
 {
-       /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
-       if ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
-           IS_PF(p_hwfn->cdev))
-               qed_cxt_release_cid(p_hwfn, p_cid->cid);
+       bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
+
+       if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+               _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
 
        /* For PF's VFs we maintain the index inside queue-zone in IOV */
        if (p_cid->vfid == QED_QUEUE_CID_SELF)
                     struct qed_queue_cid_vf_params *p_vf_params)
 {
        struct qed_queue_cid *p_cid;
+       u8 vfid = QED_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;
 
-       /* Currently, PF doesn't need to allocate CIDs for any VF */
-       if (p_vf_params)
-               b_legacy_vf = true;
+       /* In case of legacy VFs, the CID can be derived from the additional
+        * VF parameters - the VF assumes queue X uses CID X, so we can simply
+        * use the vf_qid for this purpose as well.
+        */
+       if (p_vf_params) {
+               vfid = p_vf_params->vfid;
+
+               if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
+                       b_legacy_vf = true;
+                       cid = p_vf_params->vf_qid;
+               }
+       }
+
        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VF's don't need a CID as the queue configuration will be done
         * by PF.
         */
        if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
-               if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+               if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+                                        &cid, vfid)) {
                        DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
                        return NULL;
                }
        p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                      p_params, b_is_rx, p_vf_params);
        if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
-               qed_cxt_release_cid(p_hwfn, cid);
+               _qed_cxt_release_cid(p_hwfn, cid, vfid);
 
        return p_cid;
 }
 
 
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
-       u8 legacy = QED_QCID_LEGACY_VF_CID;
+       u8 legacy = 0;
 
        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                legacy |= QED_QCID_LEGACY_VF_RX_PROD;
 
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               legacy |= QED_QCID_LEGACY_VF_CID;
+
        return legacy;
 }
 
        p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
                                         p_req->num_vlan_filters);
 
+       p_resp->num_cids =
+           min_t(u8, p_req->num_cids,
+                 p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+
        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
         */
            p_resp->num_sbs < p_req->num_sbs ||
            p_resp->num_mac_filters < p_req->num_mac_filters ||
            p_resp->num_vlan_filters < p_req->num_vlan_filters ||
-           p_resp->num_mc_filters < p_req->num_mc_filters) {
+           p_resp->num_mc_filters < p_req->num_mc_filters ||
+           p_resp->num_cids < p_req->num_cids) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
-                          "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+                          "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
                           p_vf->abs_vf_id,
                           p_req->num_rxqs,
                           p_resp->num_rxqs,
                           p_resp->num_mac_filters,
                           p_req->num_vlan_filters,
                           p_resp->num_vlan_filters,
-                          p_req->num_mc_filters, p_resp->num_mc_filters);
+                          p_req->num_mc_filters,
+                          p_resp->num_mc_filters,
+                          p_req->num_cids, p_resp->num_cids);
 
                /* Some legacy OSes are incapable of correctly handling this
                 * failure.
        if (p_hwfn->cdev->num_hwfns > 1)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
+       /* Share our ability to use multiple queue-ids only with VFs
+        * that request it.
+        */
+       if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+               pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
        qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
 
        memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
                             struct qed_vf_info *p_vf, bool b_is_tx)
 {
-       if (b_is_tx)
-               return QED_IOV_LEGACY_QID_TX;
-       else
-               return QED_IOV_LEGACY_QID_RX;
+       struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+       struct vfpf_qid_tlv *p_qid_tlv;
+
+       /* Search for the qid if the VF published that it's going to provide it */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+               if (b_is_tx)
+                       return QED_IOV_LEGACY_QID_TX;
+               else
+                       return QED_IOV_LEGACY_QID_RX;
+       }
+
+       p_qid_tlv = (struct vfpf_qid_tlv *)
+                   qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                            CHANNEL_TLV_QID);
+       if (!p_qid_tlv) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF[%2x]: Failed to provide qid\n",
+                          p_vf->relative_vf_id);
+
+               return QED_IOV_QID_INVALID;
+       }
+
+       if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF[%02x]: Provided qid out-of-bounds %02x\n",
+                          p_vf->relative_vf_id, p_qid_tlv->qid);
+               return QED_IOV_QID_INVALID;
+       }
+
+       return p_qid_tlv->qid;
 }
 
 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
                goto out;
 
        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == QED_IOV_QID_INVALID)
+               goto out;
+
        p_queue = &vf->vf_queues[req->rx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
 
        vf_legacy = qed_vf_calculate_legacy(vf);
 
        req = &mbx->req_virt->start_txq;
 
        if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
-                                 QED_IOV_VALIDATE_Q_DISABLE) ||
+                                 QED_IOV_VALIDATE_Q_NA) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == QED_IOV_QID_INVALID)
+               goto out;
+
        p_queue = &vf->vf_queues[req->tx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
 
        vf_legacy = qed_vf_calculate_legacy(vf);
 
        struct qed_vf_queue *p_queue;
        int rc = 0;
 
-       if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
-                                 QED_IOV_VALIDATE_Q_ENABLE)) {
+       if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
-                          "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
-                          vf->relative_vf_id, rxq_id);
+                          "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+                          vf->relative_vf_id, rxq_id, qid_usage_idx);
                return -EINVAL;
        }
 
        p_queue = &vf->vf_queues[rxq_id];
 
+       /* We've validated the index and the existence of the active RXQ -
+        * now we need to make sure that it's using the correct qid.
+        */
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           p_queue->cids[qid_usage_idx].b_is_tx) {
+               struct qed_queue_cid *p_cid;
+
+               p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+                          vf->relative_vf_id,
+                          rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
+               return -EINVAL;
+       }
+
+       /* Now that we know we have a valid Rx-queue - close it */
        rc = qed_eth_rx_queue_stop(p_hwfn,
                                   p_queue->cids[qid_usage_idx].p_cid,
                                   false, cqe_completion);
        struct qed_vf_queue *p_queue;
        int rc = 0;
 
-       if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
-                                 QED_IOV_VALIDATE_Q_ENABLE))
+       if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
                return -EINVAL;
 
        p_queue = &vf->vf_queues[txq_id];
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           !p_queue->cids[qid_usage_idx].b_is_tx)
+               return -EINVAL;
 
        rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
        if (rc)
 
        /* Find which qid-index is associated with the queue */
        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == QED_IOV_QID_INVALID)
+               goto out;
 
        rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
                                  qid_usage_idx, req->cqe_completion);
 
        /* Find which qid-index is associated with the queue */
        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == QED_IOV_QID_INVALID)
+               goto out;
 
        rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
        if (!rc)
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == QED_IOV_QID_INVALID)
+               goto out;
 
-       /* Validate inputs */
-       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+       /* There shouldn't exist a VF that uses queue-qids yet uses this
+        * API with multiple Rx queues. Validate this.
+        */
+       if ((vf->acquire.vfdev_info.capabilities &
+            VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF[%d] supports QIDs but sends multiple queues\n",
+                          vf->relative_vf_id);
+               goto out;
+       }
+
+       /* Validate inputs - for the legacy case this is still true since
+        * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+        */
+       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
                if (!qed_iov_validate_rxq(p_hwfn, vf, i,
-                                         QED_IOV_VALIDATE_Q_ENABLE)) {
-                       DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-                               vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+                                         QED_IOV_VALIDATE_Q_NA) ||
+                   !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+                   vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                                  vf->relative_vf_id, req->rx_qid,
+                                  req->num_rxqs);
                        goto out;
                }
+       }
 
        /* Prepare the handlers */
        for (i = 0; i < req->num_rxqs; i++) {
 
        return rc;
 }
 
+static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
+                             struct qed_queue_cid *p_cid)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_qid_tlv *p_qid_tlv;
+
+       /* Only add QIDs for the queue if it was negotiated with PF */
+       if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+             PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+               return;
+
+       p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+                               CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+       p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
 #define VF_ACQUIRE_THRESH 3
 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
                                          struct vf_pf_resc_request *p_req,
 {
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
-                  "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+                  "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
                   p_req->num_rxqs,
                   p_resp->num_rxqs,
                   p_req->num_rxqs,
                   p_resp->num_mac_filters,
                   p_req->num_vlan_filters,
                   p_resp->num_vlan_filters,
-                  p_req->num_mc_filters, p_resp->num_mc_filters);
+                  p_req->num_mc_filters,
+                  p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
 
        /* humble our request */
        p_req->num_txqs = p_resp->num_txqs;
        p_req->num_mac_filters = p_resp->num_mac_filters;
        p_req->num_vlan_filters = p_resp->num_vlan_filters;
        p_req->num_mc_filters = p_resp->num_mc_filters;
+       p_req->num_cids = p_resp->num_cids;
 }
 
 static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
        p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
        p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+       p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
 
        req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
        req->vfdev_info.fw_major = FW_MAJOR_VERSION;
        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
                p_iov->b_pre_fp_hsi = true;
 
+       /* In case PF doesn't support multi-queue Tx, update the number of
+        * CIDs to reflect the number of queues [older PFs didn't fill that
+        * field].
+        */
+       if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+               resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
+
        /* Update bulletin board size with response from PF */
        p_iov->bulletin.size = resp->bulletin_size;
 
                __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                                  (u32 *)(&init_prod_val));
        }
+
+       qed_vf_pf_add_qid(p_hwfn, p_cid);
+
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
        req->num_rxqs = 1;
        req->cqe_completion = cqe_completion;
 
+       qed_vf_pf_add_qid(p_hwfn, p_cid);
+
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
        req->hw_sb = p_cid->sb_igu_id;
        req->sb_index = p_cid->sb_idx;
 
+       qed_vf_pf_add_qid(p_hwfn, p_cid);
+
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
        req->tx_qid = p_cid->rel.queue_id;
        req->num_txqs = 1;
 
+       qed_vf_pf_add_qid(p_hwfn, p_cid);
+
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));