/* Skip the default queue, and update the outbound handler
         * queues if they changed.
         */
-       cqicb = (struct cqicb *)&qdev->rx_ring[1];
+       cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
        if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
-           le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
-               for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
+               le16_to_cpu(cqicb->pkt_delay) !=
+                               qdev->tx_max_coalesced_frames) {
+               for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
                        rx_ring = &qdev->rx_ring[i];
                        cqicb = (struct cqicb *)rx_ring;
                        cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
        }
 
        /* Update the inbound (RSS) handler queues if they changed. */
-       cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+       cqicb = (struct cqicb *)&qdev->rx_ring[0];
        if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
-           le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
-               for (i = qdev->rss_ring_first_cq_id;
-                    i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
-                    i++) {
+               le16_to_cpu(cqicb->pkt_delay) !=
+                                       qdev->rx_max_coalesced_frames) {
+               for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
                        rx_ring = &qdev->rx_ring[i];
                        cqicb = (struct cqicb *)rx_ring;
                        cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
 
                                cam_output = (CAM_OUT_ROUTE_NIC |
                                              (qdev->
                                               func << CAM_OUT_FUNC_SHIFT) |
-                                             (qdev->
-                                              rss_ring_first_cq_id <<
-                                              CAM_OUT_CQ_ID_SHIFT));
+                                               (0 << CAM_OUT_CQ_ID_SHIFT));
                                if (qdev->vlgrp)
                                        cam_output |= CAM_OUT_RV;
                                /* route to NIC core */
 
        qdev->stats.rx_packets++;
        qdev->stats.rx_bytes += skb->len;
-       skb_record_rx_queue(skb,
-               rx_ring->cq_id - qdev->rss_ring_first_cq_id);
+       skb_record_rx_queue(skb, rx_ring->cq_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (qdev->vlgrp &&
                        (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
                                ql_disable_completion_interrupt(qdev,
                                                                intr_context->
                                                                intr);
-                               if (i < qdev->rss_ring_first_cq_id)
+                               if (i >= qdev->rss_ring_count)
                                        queue_delayed_work_on(rx_ring->cpu,
                                                              qdev->q_workqueue,
                                                              &rx_ring->rx_work,
                            INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
                            i;
 
-                       if (i == 0) {
+                       if (i < qdev->rss_ring_count) {
                                /*
-                                * Default queue handles bcast/mcast plus
-                                * async events.  Needs buffers.
+                                * Inbound queues handle unicast frames only.
                                 */
-                               intr_context->handler = qlge_isr;
-                               sprintf(intr_context->name, "%s-default-queue",
-                                       qdev->ndev->name);
-                       } else if (i < qdev->rss_ring_first_cq_id) {
+                               intr_context->handler = qlge_msix_rx_isr;
+                               sprintf(intr_context->name, "%s-rx-%d",
+                                       qdev->ndev->name, i);
+                       } else {
                                /*
                                 * Outbound queue is for outbound completions only.
                                 */
                                intr_context->handler = qlge_msix_tx_isr;
                                sprintf(intr_context->name, "%s-tx-%d",
                                        qdev->ndev->name, i);
-                       } else {
-                               /*
-                                * Inbound queues handle unicast frames only.
-                                */
-                               intr_context->handler = qlge_msix_rx_isr;
-                               sprintf(intr_context->name, "%s-rx-%d",
-                                       qdev->ndev->name, i);
                        }
                }
        } else {
 
        memset((void *)ricb, 0, sizeof(*ricb));
 
-       ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
+       ricb->base_cq = RSS_L4K;
        ricb->flags =
            (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
             RSS_RT6);
        }
 
        /* Start NAPI for the RSS queues. */
-       for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
+       for (i = 0; i < qdev->rss_ring_count; i++) {
                QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
                        i);
                napi_enable(&qdev->rx_ring[i].napi);
                 * environment.  Outbound completion processing
                 * is done in interrupt context.
                 */
-               if (i >= qdev->rss_ring_first_cq_id) {
+               if (i < qdev->rss_ring_count) {
                        napi_disable(&rx_ring->napi);
                } else {
                        cancel_delayed_work_sync(&rx_ring->rx_work);
 
        /* Call netif_napi_del() from common point.
         */
-       for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+       for (i = 0; i < qdev->rss_ring_count; i++)
                netif_napi_del(&qdev->rx_ring[i].napi);
 
        ql_free_rx_buffers(qdev);
        qdev->tx_ring_count = cpu_cnt;
        /* Allocate inbound completion (RSS) ring for each CPU. */
        qdev->rss_ring_count = cpu_cnt;
-       /* cq_id for the first inbound ring handler. */
-       qdev->rss_ring_first_cq_id = cpu_cnt + 1;
        /*
         * qdev->rx_ring_count:
         * Total number of rx_rings.  This includes the one
         * handler rx_rings, and the number of inbound
         * completion handler rx_rings.
         */
-       qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+       qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
 
        for (i = 0; i < qdev->tx_ring_count; i++) {
                tx_ring = &qdev->tx_ring[i];
                 * The completion queue ID for the tx rings start
-                * immediately after the default Q ID, which is zero.
+                * immediately after the rss rings.
                 */
-               tx_ring->cq_id = i + 1;
+               tx_ring->cq_id = i + qdev->rss_ring_count;
        }
 
        for (i = 0; i < qdev->rx_ring_count; i++) {
                rx_ring->qdev = qdev;
                rx_ring->cq_id = i;
                rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
-               if (i == 0) {   /* Default queue at index 0. */
-                       /*
-                        * Default queue handles bcast/mcast plus
-                        * async events.  Needs buffers.
-                        */
+               if (i < qdev->rss_ring_count) {
+                       /* Inbound completions (RSS) queues */
                        rx_ring->cq_len = qdev->rx_ring_size;
                        rx_ring->cq_size =
                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
                        rx_ring->sbq_size =
                            rx_ring->sbq_len * sizeof(__le64);
                        rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
-                       rx_ring->type = DEFAULT_Q;
-               } else if (i < qdev->rss_ring_first_cq_id) {
+                       rx_ring->type = RX_Q;
+               } else {
                        /*
                         * Outbound queue handles outbound completions only.
                         */
                        rx_ring->sbq_size = 0;
                        rx_ring->sbq_buf_size = 0;
                        rx_ring->type = TX_Q;
-               } else {        /* Inbound completions (RSS) queues */
-                       /*
-                        * Inbound queues handle unicast frames only.
-                        */
-                       rx_ring->cq_len = qdev->rx_ring_size;
-                       rx_ring->cq_size =
-                           rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
-                       rx_ring->lbq_len = NUM_LARGE_BUFFERS;
-                       rx_ring->lbq_size =
-                           rx_ring->lbq_len * sizeof(__le64);
-                       rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
-                       rx_ring->sbq_len = NUM_SMALL_BUFFERS;
-                       rx_ring->sbq_size =
-                           rx_ring->sbq_len * sizeof(__le64);
-                       rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
-                       rx_ring->type = RX_Q;
                }
        }
        return 0;