return IRQ_HANDLED;
 }
+
+/**
+ * \brief Droq packet processor scheduler
+ * @param oct octeon device
+ */
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+       struct octeon_device_priv *oct_priv =
+               (struct octeon_device_priv *)oct->priv;
+       struct octeon_droq *droq;
+       u64 oq_no;
+
+       if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+               for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+                    oq_no++) {
+                       if (!(oct->droq_intr & BIT_ULL(oq_no)))
+                               continue;
+
+                       droq = oct->droq[oq_no];
+
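+                       /* Queues running in NAPI poll mode are handed to
+                        * their napi_fn callback and recorded in napi_mask;
+                        * the rest are drained by the droq tasklet.
+                        */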
+                       if (droq->ops.poll_mode) {
+                               droq->ops.napi_fn(droq);
+                               oct_priv->napi_mask |= (1 << oq_no);
+                       } else {
+                               tasklet_schedule(&oct_priv->droq_tasklet);
+                       }
+               }
+       }
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+                                        void *dev)
+{
+       struct octeon_device *oct = (struct octeon_device *)dev;
+       irqreturn_t ret;
+
+       /* Disable our interrupts for the duration of ISR */
+       oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+       ret = oct->fn_list.process_interrupt_regs(oct);
+
+       if (ret == IRQ_HANDLED)
+               liquidio_schedule_droq_pkt_handlers(oct);
+
+       /* Re-enable our interrupts  */
+       if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+               oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+       return ret;
+}
 
        }
 }
 
-/**
- * \brief Droq packet processor sceduler
- * @param oct octeon device
- */
-static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
-{
-       struct octeon_device_priv *oct_priv =
-               (struct octeon_device_priv *)oct->priv;
-       u64 oq_no;
-       struct octeon_droq *droq;
-
-       if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
-               for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
-                    oq_no++) {
-                       if (!(oct->droq_intr & BIT_ULL(oq_no)))
-                               continue;
-
-                       droq = oct->droq[oq_no];
-
-                       if (droq->ops.poll_mode) {
-                               droq->ops.napi_fn(droq);
-                               oct_priv->napi_mask |= (1 << oq_no);
-                       } else {
-                               tasklet_schedule(&oct_priv->droq_tasklet);
-                       }
-               }
-       }
-}
-
-/**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
- */
-static
-irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
-                                        void *dev)
-{
-       struct octeon_device *oct = (struct octeon_device *)dev;
-       irqreturn_t ret;
-
-       /* Disable our interrupts for the duration of ISR */
-       oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
-
-       ret = oct->fn_list.process_interrupt_regs(oct);
-
-       if (ret == IRQ_HANDLED)
-               liquidio_schedule_droq_pkt_handlers(oct);
-
-       /* Re-enable our interrupts  */
-       if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
-               oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
-
-       return ret;
-}
-
 /**
  * \brief Setup interrupt for octeon device
  * @param oct octeon device
 
 irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
                                       void *dev);
 
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+                                        void *dev);
+
 /**
  * \brief Register ethtool operations
  * @param netdev    pointer to network device