 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
 {
-       unsigned long flags = 0;
        struct sk_buff **skb_ptr = NULL;
        struct sk_buff **temp;
 #define NR_SKB_COMPLETED 128
                more = 0;
                skb_ptr = completed;
 
-               if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+               if (__netif_tx_trylock(fifo->txq)) {
                        vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
                                                NR_SKB_COMPLETED, &more);
-                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
+                       __netif_tx_unlock(fifo->txq);
                }
+
                 /* free SKBs */
                for (temp = completed; temp != skb_ptr; temp++)
                        dev_kfree_skb_irq(*temp);
-       } while (more) ;
+       } while (more);
 }
 
 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
        }
 }
 
-/*
- * MultiQ manipulation helper functions
- */
-static inline int vxge_netif_queue_stopped(struct vxge_fifo *fifo)
-{
-       struct net_device *dev = fifo->ndev;
-       struct netdev_queue *txq = NULL;
-       int vpath_no = fifo->driver_id;
-       int ret = 0;
-
-       if (fifo->tx_steering_type)
-               txq = netdev_get_tx_queue(dev, vpath_no);
-       else
-               txq = netdev_get_tx_queue(dev, 0);
-
-       ret = netif_tx_queue_stopped(txq);
-       return ret;
-}
-
-void vxge_stop_tx_queue(struct vxge_fifo *fifo)
-{
-       struct net_device *dev = fifo->ndev;
-       struct netdev_queue *txq = NULL;
-
-       if (fifo->tx_steering_type)
-               txq = netdev_get_tx_queue(dev, fifo->driver_id);
-       else
-               txq = netdev_get_tx_queue(dev, 0);
-
-       netif_tx_stop_queue(txq);
-}
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo)
-{
-       struct net_device *dev = fifo->ndev;
-       struct netdev_queue *txq = NULL;
-       int vpath_no = fifo->driver_id;
-
-       if (fifo->tx_steering_type)
-               txq = netdev_get_tx_queue(dev, vpath_no);
-       else
-               txq = netdev_get_tx_queue(dev, 0);
-
-       if (netif_tx_queue_stopped(txq))
-               netif_tx_wake_queue(txq);
-}
-
 /*
  * vxge_callback_link_up
  *
                                &dtr, &t_code) == VXGE_HW_OK);
 
        *skb_ptr = done_skb;
-       vxge_wake_tx_queue(fifo);
+       if (netif_tx_queue_stopped(fifo->txq))
+               netif_tx_wake_queue(fifo->txq);
 
        vxge_debug_entryexit(VXGE_TRACE,
                                "%s: %s:%d  Exiting...",
 }
 
 /* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
-       int *do_lock)
+static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
 {
        u16 queue_len, counter = 0;
        if (skb->protocol == htons(ETH_P_IP)) {
                                vdev->vpath_selector[queue_len - 1];
                        if (counter >= queue_len)
                                counter = queue_len - 1;
-
-                       if (ip->protocol == IPPROTO_UDP) {
-#ifdef NETIF_F_LLTX
-                               *do_lock = 0;
-#endif
-                       }
                }
        }
        return counter;
  *
  * This function is the Tx entry point of the driver. Neterion NIC supports
  * certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
- * NOTE: when device cant queue the pkt, just the trans_start variable will
- * not be upadted.
 */
 static netdev_tx_t
 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vxge_tx_priv *txdl_priv = NULL;
        struct __vxge_hw_fifo *fifo_hw;
        int offload_type;
-       unsigned long flags = 0;
        int vpath_no = 0;
-       int do_spin_tx_lock = 1;
 
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                        dev->name, __func__, __LINE__);
        if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
                vpath_no = skb_get_queue_mapping(skb);
        else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
-               vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);
+               vpath_no = vxge_get_vpath_no(vdev, skb);
 
        vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
 
        fifo = &vdev->vpaths[vpath_no].fifo;
        fifo_hw = fifo->handle;
 
-       if (do_spin_tx_lock)
-               spin_lock_irqsave(&fifo->tx_lock, flags);
-       else {
-               if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-                       return NETDEV_TX_LOCKED;
-       }
-
-       if (vxge_netif_queue_stopped(fifo)) {
-               spin_unlock_irqrestore(&fifo->tx_lock, flags);
+       if (netif_tx_queue_stopped(fifo->txq))
                return NETDEV_TX_BUSY;
-       }
 
        avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
        if (avail == 0) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: No free TXDs available", dev->name);
                fifo->stats.txd_not_free++;
-               vxge_stop_tx_queue(fifo);
-               goto _exit2;
+               goto _exit0;
        }
 
        /* Last TXD?  Stop tx queue to avoid dropping packets.  TX
         * completion will resume the queue.
         */
        if (avail == 1)
-               vxge_stop_tx_queue(fifo);
+               netif_tx_stop_queue(fifo->txq);
 
        status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
        if (unlikely(status != VXGE_HW_OK)) {
                vxge_debug_tx(VXGE_ERR,
                   "%s: Out of descriptors .", dev->name);
                fifo->stats.txd_out_of_desc++;
-               vxge_stop_tx_queue(fifo);
-               goto _exit2;
+               goto _exit0;
        }
 
        vxge_debug_tx(VXGE_TRACE,
 
        if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-               vxge_stop_tx_queue(fifo);
                fifo->stats.pci_map_fail++;
-               goto _exit2;
+               goto _exit0;
        }
 
        txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
                if (!frag->size)
                        continue;
 
-               dma_pointer =
-                       (u64)pci_map_page(fifo->pdev, frag->page,
+               dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
                                frag->page_offset, frag->size,
                                PCI_DMA_TODEVICE);
 
                if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
-                       goto _exit0;
+                       goto _exit2;
                vxge_debug_tx(VXGE_TRACE,
                        "%s: %s:%d frag = %d dma_pointer = 0x%llx",
                                dev->name, __func__, __LINE__, i,
        offload_type = vxge_offload_type(skb);
 
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-
                int mss = vxge_tcp_mss(skb);
                if (mss) {
-                       vxge_debug_tx(VXGE_TRACE,
-                               "%s: %s:%d mss = %d",
+                       vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
                                dev->name, __func__, __LINE__, mss);
                        vxge_hw_fifo_txdl_mss_set(dtr, mss);
                } else {
                                        VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
 
        vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-#ifdef NETIF_F_LLTX
-       dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
-#endif
-       spin_unlock_irqrestore(&fifo->tx_lock, flags);
 
-       VXGE_COMPLETE_VPATH_TX(fifo);
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
                dev->name, __func__, __LINE__);
        return NETDEV_TX_OK;
 
-_exit0:
+_exit2:
        vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-
 _exit1:
        j = 0;
        frag = &skb_shinfo(skb)->frags[0];
        }
 
        vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit2:
+_exit0:
+       netif_tx_stop_queue(fifo->txq);
        dev_kfree_skb(skb);
-       spin_unlock_irqrestore(&fifo->tx_lock, flags);
-       VXGE_COMPLETE_VPATH_TX(fifo);
 
        return NETDEV_TX_OK;
 }
        clear_bit(vp_id, &vdev->vp_reset);
 
        /* Start the vpath queue */
-       vxge_wake_tx_queue(&vpath->fifo);
+       if (netif_tx_queue_stopped(vpath->fifo.txq))
+               netif_tx_wake_queue(vpath->fifo.txq);
 
        return ret;
 }
                                vdev->config.tx_steering_type;
                        vpath->fifo.ndev = vdev->ndev;
                        vpath->fifo.pdev = vdev->pdev;
+                       if (vdev->config.tx_steering_type)
+                               vpath->fifo.txq =
+                                       netdev_get_tx_queue(vdev->ndev, i);
+                       else
+                               vpath->fifo.txq =
+                                       netdev_get_tx_queue(vdev->ndev, 0);
                        vpath->fifo.indicate_max_pkts =
                                vdev->config.fifo_indicate_max_pkts;
                        vpath->ring.rx_vector_no = 0;
                                        vxge_vpath_intr_disable(vdev, i);
 
                                        /* stop the queue for this vpath */
-                                       vxge_stop_tx_queue(&vpath->fifo);
+                                       netif_tx_stop_queue(vpath->fifo.txq);
                                        continue;
                                }
                        }
                goto out1;
        }
 
-
        if (vdev->config.intr_type != MSI_X) {
                netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
                        vdev->config.napi_weight);
        struct net_device *ndev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxgedev *vdev;
-       int i, ret = 0, no_of_queue = 1;
+       int ret = 0, no_of_queue = 1;
        u64 stat;
 
        *vdev_out = NULL;
        if (vdev->config.gro_enable)
                ndev->features |= NETIF_F_GRO;
 
-#ifdef NETIF_F_LLTX
-       ndev->features |= NETIF_F_LLTX;
-#endif
-
-       for (i = 0; i < no_of_vpath; i++)
-               spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
-
        if (register_netdev(ndev)) {
                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                        "%s: %s : device registration failed!",
 {
        struct net_device *dev = hldev->ndev;
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxge_vpath *vpath = NULL;
        int vpath_idx;
 
        vxge_debug_entryexit(vdev->level_trace,
         */
        vdev->cric_err_event = type;
 
-       for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
-               if (vdev->vpaths[vpath_idx].device_id == vp_id)
+       for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+               vpath = &vdev->vpaths[vpath_idx];
+               if (vpath->device_id == vp_id)
                        break;
+       }
 
        if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
                if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
                                vxge_vpath_intr_disable(vdev, vpath_idx);
 
                                /* stop the queue for this vpath */
-                               vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
-                                                       fifo);
+                               netif_tx_stop_queue(vpath->fifo.txq);
                        }
                }
        }
 
                vdev->vpaths[j].is_configured = 1;
                vdev->vpaths[j].device_id = i;
-               vdev->vpaths[j].fifo.driver_id = j;
                vdev->vpaths[j].ring.driver_id = j;
                vdev->vpaths[j].vdev = vdev;
                vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;