destroy_workqueue(lio->rxq_status_wq.wq);
        }
  }
+ 
+ /* Runs in interrupt context. */
+ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
+ {
+       struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+       struct net_device *netdev;
+       struct lio *lio;
+ 
+       netdev = oct->props[iq->ifidx].netdev;
+ 
+       /* This is needed because the first IQ does not have
+        * a netdev associated with it.
+        */
+       if (!netdev)
+               return;
+ 
+       lio = GET_LIO(netdev);
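+       /* Wake any stopped TX (sub)queue once the link is up and its
+        * instruction queue has room again.
+        */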
+       if (netif_is_multiqueue(netdev)) {
+               if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+                   lio->linfo.link.s.link_up &&
+                   (!octnet_iq_is_full(oct, iq_num))) {
+                       netif_wake_subqueue(netdev, iq->q_index);
+                       INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+                                                 tx_restart, 1);
+               }
+       } else if (netif_queue_stopped(netdev) &&
+                  lio->linfo.link.s.link_up &&
+                  (!octnet_iq_is_full(oct, lio->txq))) {
+               INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+                                         tx_restart, 1);
+               netif_wake_queue(netdev);
+       }
+ }
+ 
+ /**
+  * \brief Setup output queue
+  * @param oct octeon device
+  * @param q_no which queue
+  * @param num_descs how many descriptors
+  * @param desc_size size of each descriptor
+  * @param app_ctx application context
+  */
+ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+                            int desc_size, void *app_ctx)
+ {
+       int ret_val;
+ 
+       dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+       /* droq creation and local register settings. */
+       ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+       if (ret_val < 0)
+               return ret_val;
+ 
+       if (ret_val == 1) {
+               dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+               return 0;
+       }
+ 
+       /* Enable the droq queues */
+       octeon_set_droq_pkt_op(oct, q_no, 1);
+ 
+       /* Send Credit for Octeon Output queues. Credits are always
+        * sent after the output queue is enabled.
+        */
+       writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
+ 
+       return ret_val;
+ }
+ 
+ /** Routine to push packets arriving on Octeon interface up to the network layer.
+  * @param oct_id   - octeon device id.
+  * @param skbuff   - skbuff struct to be passed to network layer.
+  * @param len      - size of total data received.
+  * @param rh       - Control header associated with the packet
+  * @param param    - additional control data with the packet
+  * @param arg      - farg registered in droq_ops
+  */
+ static void
+ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+                    void *skbuff,
+                    u32 len,
+                    union octeon_rh *rh,
+                    void *param,
+                    void *arg)
+ {
+       struct net_device *netdev = (struct net_device *)arg;
+       struct octeon_droq *droq =
+           container_of(param, struct octeon_droq, napi);
+       struct sk_buff *skb = (struct sk_buff *)skbuff;
+       struct skb_shared_hwtstamps *shhwtstamps;
+       struct napi_struct *napi = param;
+       u16 vtag = 0;
+       u32 r_dh_off;
+       u64 ns;
+ 
+       if (netdev) {
+               struct lio *lio = GET_LIO(netdev);
+               struct octeon_device *oct = lio->oct_dev;
+               int packet_was_received;
+ 
+               /* Do not proceed if the interface is not in RUNNING state. */
+               if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+                       recv_buffer_free(skb);
+                       droq->stats.rx_dropped++;
+                       return;
+               }
+ 
+               skb->dev = netdev;
+ 
+               skb_record_rx_queue(skb, droq->q_no);
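+               /* Large packets: copy only the first MIN_SKB_SIZE bytes into
+                * the linear area and attach the rest of the receive page as a
+                * frag. Small packets are copied entirely and the page is
+                * released.
+                */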
+               if (likely(len > MIN_SKB_SIZE)) {
+                       struct octeon_skb_page_info *pg_info;
+                       unsigned char *va;
+ 
+                       pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+                       if (pg_info->page) {
+                               /* For Paged allocation use the frags */
+                               va = page_address(pg_info->page) +
+                                       pg_info->page_offset;
+                               memcpy(skb->data, va, MIN_SKB_SIZE);
+                               skb_put(skb, MIN_SKB_SIZE);
+                               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                                               pg_info->page,
+                                               pg_info->page_offset +
+                                               MIN_SKB_SIZE,
+                                               len - MIN_SKB_SIZE,
+                                               LIO_RXBUFFER_SZ);
+                       }
+               } else {
+                       struct octeon_skb_page_info *pg_info =
+                               ((struct octeon_skb_page_info *)(skb->cb));
+                       skb_copy_to_linear_data(skb, page_address(pg_info->page)
+                                               + pg_info->page_offset, len);
+                       skb_put(skb, len);
+                       put_page(pg_info->page);
+               }
+ 
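+               /* Byte offset of the last 8-byte metadata word the firmware
+                * prepends to the packet; the optional timestamp and hash
+                * below are parsed from the end of that metadata, and all of
+                * it is stripped with skb_pull() afterwards.
+                */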
+               r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+ 
+               if (oct->ptp_enable) {
+                       if (rh->r_dh.has_hwtstamp) {
+                               /* timestamp is included by the hardware at
+                                * the beginning of the packet.
+                                */
+                               if (ifstate_check
+                                       (lio,
+                                        LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+                                       /* Nanoseconds are in the first 64-bits
+                                        * of the packet.
+                                        */
+                                       memcpy(&ns, (skb->data + r_dh_off),
+                                              sizeof(ns));
+                                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
+                                       shhwtstamps = skb_hwtstamps(skb);
+                                       shhwtstamps->hwtstamp =
+                                               ns_to_ktime(ns +
+                                                           lio->ptp_adjust);
+                               }
+                       }
+               }
+ 
+               if (rh->r_dh.has_hash) {
+                       __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+                       u32 hash = be32_to_cpu(*hash_be);
+ 
+                       skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
+               }
+ 
+               skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+               skb->protocol = eth_type_trans(skb, skb->dev);
+ 
+               if ((netdev->features & NETIF_F_RXCSUM) &&
+                   (((rh->r_dh.encap_on) &&
+                     (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+                    (!(rh->r_dh.encap_on) &&
+                     (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+                       /* checksum has already been verified */
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb->ip_summed = CHECKSUM_NONE;
+ 
+               /* Set the encapsulation field based on the status received
+                * from the firmware.
+                */
+               if (rh->r_dh.encap_on) {
+                       skb->encapsulation = 1;
+                       skb->csum_level = 1;
+                       droq->stats.rx_vxlan++;
+               }
+ 
+               /* inbound VLAN tag */
+               if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+                   rh->r_dh.vlan) {
+                       u16 priority = rh->r_dh.priority;
+                       u16 vid = rh->r_dh.vlan;
+ 
+                       vtag = (priority << VLAN_PRIO_SHIFT) | vid;
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+               }
+ 
+               packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
+ 
+               if (packet_was_received) {
+                       droq->stats.rx_bytes_received += len;
+                       droq->stats.rx_pkts_received++;
+               } else {
+                       droq->stats.rx_dropped++;
+                       netif_info(lio, rx_err, lio->netdev,
+                                  "droq:%d  error rx_dropped:%llu\n",
+                                  droq->q_no, droq->stats.rx_dropped);
+               }
+ 
+       } else {
+               recv_buffer_free(skb);
+       }
+ }
+ 
+ /**
+  * \brief wrapper for calling napi_schedule
+  * @param param parameters to pass to napi_schedule
+  *
+  * Used when scheduling on different CPUs
+  */
+ static void napi_schedule_wrapper(void *param)
+ {
+       struct napi_struct *napi = param;
+ 
+       napi_schedule(napi);
+ }
+ 
+ /**
+  * \brief callback when receive interrupt occurs and we are in NAPI mode
+  * @param arg pointer to octeon output queue
+  */
+ static void liquidio_napi_drv_callback(void *arg)
+ {
+       struct octeon_device *oct;
+       struct octeon_droq *droq = arg;
+       int this_cpu = smp_processor_id();
+ 
+       oct = droq->oct_dev;
+ 
+       if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
+           droq->cpu_id == this_cpu) {
+               napi_schedule_irqoff(&droq->napi);
+       } else {
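+               /* This droq is bound to another CPU; bounce the NAPI schedule
+                * to it with an async cross-CPU call.
+                */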
+               call_single_data_t *csd = &droq->csd;
+ 
+               csd->func = napi_schedule_wrapper;
+               csd->info = &droq->napi;
+               csd->flags = 0;
+ 
+               smp_call_function_single_async(droq->cpu_id, csd);
+       }
+ }
+ 
+ /**
+  * \brief Entry point for NAPI polling
+  * @param napi NAPI structure
+  * @param budget maximum number of items to process
+  */
+ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+ {
+       struct octeon_instr_queue *iq;
+       struct octeon_device *oct;
+       struct octeon_droq *droq;
+       int tx_done = 0, iq_no;
+       int work_done;
+ 
+       droq = container_of(napi, struct octeon_droq, napi);
+       oct = droq->oct_dev;
+       iq_no = droq->q_no;
+ 
+       /* Handle Droq descriptors */
+       work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+                                                POLL_EVENT_PROCESS_PKTS,
+                                                budget);
+ 
+       /* Flush the instruction queue */
+       iq = oct->instr_queue[iq_no];
+       if (iq) {
+               /* TODO: move this check to inside octeon_flush_iq,
+                * once check_db_timeout is removed
+                */
+               if (atomic_read(&iq->instr_pending))
+                       /* Process iq buffers with in the budget limits */
+                       tx_done = octeon_flush_iq(oct, iq, budget);
+               else
+                       tx_done = 1;
+               /* Update the iq read index rather than waiting for the next
+                * interrupt. Return if tx_done is false.
+                */
+               /* sub-queue status update */
+               lio_update_txq_status(oct, iq_no);
+       } else {
+               dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
+                       __func__, iq_no);
+       }
+ 
+ #define MAX_REG_CNT  2000000U
+       /* force enable interrupt if reg cnts are high to avoid wraparound */
+       if ((work_done < budget && tx_done) ||
+           (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+           (droq->pkt_count >= MAX_REG_CNT)) {
+               tx_done = 1;
+               napi_complete_done(napi, work_done);
+ 
+               octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+                                            POLL_EVENT_ENABLE_INTR, 0);
+               return 0;
+       }
+ 
+       return (!tx_done) ? (budget) : (work_done);
+ }
+ 
+ /**
+  * \brief Setup input and output queues
+  * @param octeon_dev octeon device
+  * @param ifidx Interface index
+  *
+  * Note: Queues are with respect to the octeon device. Thus
+  * an input queue is for egress packets, and output queues
+  * are for ingress packets.
+  */
+ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+                            u32 num_iqs, u32 num_oqs)
+ {
+       struct octeon_droq_ops droq_ops;
+       struct net_device *netdev;
+       struct octeon_droq *droq;
+       struct napi_struct *napi;
+       int cpu_id_modulus;
+       int num_tx_descs;
+       struct lio *lio;
+       int retval = 0;
+       int q, q_no;
+       int cpu_id;
+ 
+       netdev = octeon_dev->props[ifidx].netdev;
+ 
+       lio = GET_LIO(netdev);
+ 
+       memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+ 
+       droq_ops.fptr = liquidio_push_packet;
+       droq_ops.farg = netdev;
+ 
+       droq_ops.poll_mode = 1;
+       droq_ops.napi_fn = liquidio_napi_drv_callback;
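+       /* Spread droq NAPI processing across the present CPUs, round-robin. */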
+       cpu_id = 0;
+       cpu_id_modulus = num_present_cpus();
+ 
+       /* set up DROQs. */
+       for (q = 0; q < num_oqs; q++) {
+               q_no = lio->linfo.rxpciq[q].s.q_no;
+               dev_dbg(&octeon_dev->pci_dev->dev,
+                       "%s index:%d linfo.rxpciq.s.q_no:%d\n",
+                       __func__, q, q_no);
+               retval = octeon_setup_droq(
+                   octeon_dev, q_no,
+                   CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
+                                               lio->ifidx),
+                   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
+                                                  lio->ifidx),
+                   NULL);
+               if (retval) {
+                       dev_err(&octeon_dev->pci_dev->dev,
+                               "%s : Runtime DROQ(RxQ) creation failed.\n",
+                               __func__);
+                       return 1;
+               }
+ 
+               droq = octeon_dev->droq[q_no];
+               napi = &droq->napi;
+               dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
+                       (u64)netdev, (u64)octeon_dev);
+               netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ 
+               /* designate a CPU for this droq */
+               droq->cpu_id = cpu_id;
+               cpu_id++;
+               if (cpu_id >= cpu_id_modulus)
+                       cpu_id = 0;
+ 
+               octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+       }
+ 
+       if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
+               /* 23XX PF/VF can send/recv control messages (via the first
+                * PF/VF-owned droq) from the firmware even if the ethX
+                * interface is down, so that's why poll_mode must be off
+                * for the first droq.
+                */
+               octeon_dev->droq[0]->ops.poll_mode = 0;
+       }
+ 
+       /* set up IQs. */
+       for (q = 0; q < num_iqs; q++) {
+               num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
+                   octeon_get_conf(octeon_dev), lio->ifidx);
+               retval = octeon_setup_iq(octeon_dev, ifidx, q,
+                                        lio->linfo.txpciq[q], num_tx_descs,
+                                        netdev_get_tx_queue(netdev, q));
+               if (retval) {
+                       dev_err(&octeon_dev->pci_dev->dev,
+                               " %s : Runtime IQ(TxQ) creation failed.\n",
+                               __func__);
+                       return 1;
+               }
+ 
+               /* XPS */
+               if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
+                   octeon_dev->ioq_vector) {
+                       struct octeon_ioq_vector    *ioq_vector;
+ 
+                       ioq_vector = &octeon_dev->ioq_vector[q];
+                       netif_set_xps_queue(netdev,
+                                           &ioq_vector->affinity_mask,
+                                           ioq_vector->iq_index);
+               }
+       }
+ 
+       return 0;
+ }
+ 
+ static
+ int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+ {
+       struct octeon_device *oct = droq->oct_dev;
+       struct octeon_device_priv *oct_priv =
+           (struct octeon_device_priv *)oct->priv;
+ 
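+       /* In poll (NAPI) mode, napi_fn was set to liquidio_napi_drv_callback
+        * by liquidio_setup_io_queues.
+        */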
+       if (droq->ops.poll_mode) {
+               droq->ops.napi_fn(droq);
+       } else {
+               if (ret & MSIX_PO_INT) {
+                       if (OCTEON_CN23XX_VF(oct))
+                               dev_err(&oct->pci_dev->dev,
+                                       "Should not get RX when poll mode = 0 for VF\n");
+                       tasklet_schedule(&oct_priv->droq_tasklet);
+                       return 1;
+               }
+               /* this will be flushed periodically by check iq db */
+               if (ret & MSIX_PI_INT)
+                       return 0;
+       }
+ 
+       return 0;
+ }
+ 
+ irqreturn_t
+ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+ {
+       struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+       struct octeon_device *oct = ioq_vector->oct_dev;
+       struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+       u64 ret;
+ 
+       ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+ 
+       if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
+               liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+ 
+       return IRQ_HANDLED;
+ }
+ 
+ /**
+  * \brief Droq packet processor scheduler
+  * @param oct octeon device
+  */
+ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+ {
+       struct octeon_device_priv *oct_priv =
+               (struct octeon_device_priv *)oct->priv;
+       struct octeon_droq *droq;
+       u64 oq_no;
+ 
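+       /* Schedule NAPI (or the droq tasklet) for every droq whose interrupt
+        * bit is set.
+        */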
+       if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+               for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+                    oq_no++) {
+                       if (!(oct->droq_intr & BIT_ULL(oq_no)))
+                               continue;
+ 
+                       droq = oct->droq[oq_no];
+ 
+                       if (droq->ops.poll_mode) {
+                               droq->ops.napi_fn(droq);
+                               oct_priv->napi_mask |= (1 << oq_no);
+                       } else {
+                               tasklet_schedule(&oct_priv->droq_tasklet);
+                       }
+               }
+       }
+ }
+ 
+ /**
+  * \brief Interrupt handler for octeon
+  * @param irq unused
+  * @param dev octeon device
+  */
+ static
+ irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+                                        void *dev)
+ {
+       struct octeon_device *oct = (struct octeon_device *)dev;
+       irqreturn_t ret;
+ 
+       /* Disable our interrupts for the duration of ISR */
+       oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+ 
+       ret = oct->fn_list.process_interrupt_regs(oct);
+ 
+       if (ret == IRQ_HANDLED)
+               liquidio_schedule_droq_pkt_handlers(oct);
+ 
+       /* Re-enable our interrupts  */
+       if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+               oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+ 
+       return ret;
+ }
+ 
+ /**
+  * \brief Setup interrupt for octeon device
+  * @param oct octeon device
+  *
+  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
+  */
+ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
+ {
+       struct msix_entry *msix_entries;
+       char *queue_irq_names = NULL;
+       int i, num_interrupts = 0;
+       int num_alloc_ioq_vectors;
+       char *aux_irq_name = NULL;
+       int num_ioq_vectors;
+       int irqret, err;
+ 
+       oct->num_msix_irqs = num_ioqs;
+       if (oct->msix_on) {
+               if (OCTEON_CN23XX_PF(oct)) {
+                       num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
+ 
+                       /* one non-ioq interrupt for handling
+                        * sli_mac_pf_int_sum
+                        */
+                       oct->num_msix_irqs += 1;
+               } else if (OCTEON_CN23XX_VF(oct)) {
+                       num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
+               }
+ 
+               /* allocate storage for the names assigned to each irq */
+               oct->irq_name_storage =
+                       kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
+               if (!oct->irq_name_storage) {
+                       dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+                       return -ENOMEM;
+               }
+ 
+               queue_irq_names = oct->irq_name_storage;
+ 
+               if (OCTEON_CN23XX_PF(oct))
+                       aux_irq_name = &queue_irq_names
+                               [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+ 
+               oct->msix_entries = kcalloc(oct->num_msix_irqs,
+                                           sizeof(struct msix_entry),
+                                           GFP_KERNEL);
+               if (!oct->msix_entries) {
+                       dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return -ENOMEM;
+               }
+ 
+               msix_entries = (struct msix_entry *)oct->msix_entries;
+ 
+               /* Assumption: the PF's MSI-X vector entries start at pf_srn
+                * (with the last, non-ioq entry at trs) rather than at 0.
+                * If that changes, update this code.
+                */
+               if (OCTEON_CN23XX_PF(oct)) {
+                       for (i = 0; i < oct->num_msix_irqs - 1; i++)
+                               msix_entries[i].entry =
+                                       oct->sriov_info.pf_srn + i;
+ 
+                       msix_entries[oct->num_msix_irqs - 1].entry =
+                               oct->sriov_info.trs;
+               } else if (OCTEON_CN23XX_VF(oct)) {
+                       for (i = 0; i < oct->num_msix_irqs; i++)
+                               msix_entries[i].entry = i;
+               }
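+               /* min == max: require exactly num_msix_irqs vectors or fail. */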
+               num_alloc_ioq_vectors = pci_enable_msix_range(
+                                               oct->pci_dev, msix_entries,
+                                               oct->num_msix_irqs,
+                                               oct->num_msix_irqs);
+               if (num_alloc_ioq_vectors < 0) {
+                       dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+                       kfree(oct->msix_entries);
+                       oct->msix_entries = NULL;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return num_alloc_ioq_vectors;
+               }
+ 
+               dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+ 
+               num_ioq_vectors = oct->num_msix_irqs;
+               /* For PF, there is one non-ioq interrupt handler. */
+               if (OCTEON_CN23XX_PF(oct)) {
+                       num_ioq_vectors -= 1;
+ 
+                       snprintf(aux_irq_name, INTRNAMSIZ,
+                                "LiquidIO%u-pf%u-aux", oct->octeon_id,
+                                oct->pf_num);
+                       irqret = request_irq(
+                                       msix_entries[num_ioq_vectors].vector,
+                                       liquidio_legacy_intr_handler, 0,
+                                       aux_irq_name, oct);
+                       if (irqret) {
+                               dev_err(&oct->pci_dev->dev,
+                                       "Request_irq failed for MSIX interrupt Error: %d\n",
+                                       irqret);
+                               pci_disable_msix(oct->pci_dev);
+                               kfree(oct->msix_entries);
+                               kfree(oct->irq_name_storage);
+                               oct->irq_name_storage = NULL;
+                               oct->msix_entries = NULL;
+                               return irqret;
+                       }
+               }
+               for (i = 0 ; i < num_ioq_vectors ; i++) {
+                       if (OCTEON_CN23XX_PF(oct))
+                               snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+                                        INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
+                                        oct->octeon_id, oct->pf_num, i);
+ 
+                       if (OCTEON_CN23XX_VF(oct))
+                               snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+                                        INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
+                                        oct->octeon_id, oct->vf_num, i);
+ 
+                       irqret = request_irq(msix_entries[i].vector,
+                                            liquidio_msix_intr_handler, 0,
+                                            &queue_irq_names[IRQ_NAME_OFF(i)],
+                                            &oct->ioq_vector[i]);
+ 
+                       if (irqret) {
+                               dev_err(&oct->pci_dev->dev,
+                                       "Request_irq failed for MSIX interrupt Error: %d\n",
+                                       irqret);
+                               /* Free the non-ioq irq vector here. */
+                               free_irq(msix_entries[num_ioq_vectors].vector,
+                                        oct);
+ 
+                               while (i) {
+                                       i--;
+                                       /* Clear the affinity mask. */
+                                       irq_set_affinity_hint(
+                                                     msix_entries[i].vector,
+                                                     NULL);
+                                       free_irq(msix_entries[i].vector,
+                                                &oct->ioq_vector[i]);
+                               }
+                               pci_disable_msix(oct->pci_dev);
+                               kfree(oct->msix_entries);
+                               kfree(oct->irq_name_storage);
+                               oct->irq_name_storage = NULL;
+                               oct->msix_entries = NULL;
+                               return irqret;
+                       }
+                       oct->ioq_vector[i].vector = msix_entries[i].vector;
+                       /* assign the cpu mask for this msix interrupt vector */
+                       irq_set_affinity_hint(msix_entries[i].vector,
+                                             &oct->ioq_vector[i].affinity_mask
+                                             );
+               }
+               dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+                       oct->octeon_id);
+       } else {
+               err = pci_enable_msi(oct->pci_dev);
+               if (err)
+                       dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+                                err);
+               else
+                       oct->flags |= LIO_FLAG_MSI_ENABLED;
+ 
+               /* allocate storage for the names assigned to the irq */
+               oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+               if (!oct->irq_name_storage)
+                       return -ENOMEM;
+ 
+               queue_irq_names = oct->irq_name_storage;
+ 
+               if (OCTEON_CN23XX_PF(oct))
+                       snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+                                "LiquidIO%u-pf%u-rxtx-%u",
+                                oct->octeon_id, oct->pf_num, 0);
+ 
+               if (OCTEON_CN23XX_VF(oct))
+                       snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+                                "LiquidIO%u-vf%u-rxtx-%u",
+                                oct->octeon_id, oct->vf_num, 0);
+ 
+               irqret = request_irq(oct->pci_dev->irq,
+                                    liquidio_legacy_intr_handler,
+                                    IRQF_SHARED,
+                                    &queue_irq_names[IRQ_NAME_OFF(0)], oct);
+               if (irqret) {
+                       if (oct->flags & LIO_FLAG_MSI_ENABLED)
+                               pci_disable_msi(oct->pci_dev);
+                       dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+                               irqret);
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return irqret;
+               }
+       }
+       return 0;
+ }