                u32 reg_ier = AT91_IRQ_ERR_FRAME;
                reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                at91_write(priv, AT91_IER, reg_ier);
        }
 
 
 
 end:
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* enable all IRQs if we are not in bus off state */
                if (priv->can.state != CAN_STATE_BUS_OFF)
                        c_can_irq_control(priv, true);
 
                work_done += flexcan_poll_bus_err(dev, reg_esr);
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* enable IRQs */
                flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
                flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
 
                work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ifi_canfd_irq_enable(ndev, 1);
        }
 
 
        /* We have processed all packets that the adapter had, but it
         * was less than our budget, stop polling */
        if (received < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, received);
 
        spin_lock_irqsave(&mod->lock, flags);
 
 
                work_done += m_can_do_rx_poll(dev, (quota - work_done));
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                m_can_enable_all_interrupts(priv);
        }
 
 
        }
        /* All packets processed */
        if (num_pkts < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_pkts);
                priv->ier |= RCAR_CAN_IER_RXFIE;
                writeb(priv->ier, &priv->regs->ier);
        }
 
 
        /* All packets processed */
        if (num_pkts < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_pkts);
                /* Enable Rx FIFO interrupts */
                rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
                                   RCANFD_RFCC_RFIE);
 
                can_led_event(ndev, CAN_LED_EVENT_RX);
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
                ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                iowrite32(TYPHOON_INTR_NONE,
                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(tp->ioaddr);
 
        }
 
        if (i < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, i);
                if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
                        enable_irq(IRQ_MAC_RX);
        }
 
        et131x_handle_send_pkts(adapter);
 
        if (work_done < budget) {
-               napi_complete(&adapter->napi);
+               napi_complete_done(&adapter->napi, work_done);
                et131x_enable_interrupts(adapter);
        }
 
 
 
        if (rxcomplete < budget) {
 
-               napi_complete(napi);
+               napi_complete_done(napi, rxcomplete);
 
                netdev_dbg(priv->dev,
                           "NAPI Complete, did %d packets with budget %d\n",
 
        processed = xgene_enet_process_ring(ring, budget);
 
        if (processed != budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, processed);
                enable_irq(ring->irq);
        }
 
 
                        work_done = budget;
 
                if (work_done < budget) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                        1U << self->aq_ring_param.vec_idx);
                }
 
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
 
        if (!tx_complete || work == budget)
                return budget;
 
-       napi_complete(&np->napi);
+       napi_complete_done(&np->napi, work);
 
        /* enable interrupt */
        if (alx->flags & ALX_FLAG_USING_MSIX) {
 
 
        if (work_done < budget) {
 quit_polling:
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                adapter->hw.intr_mask |= ISR_RX_PKT;
                AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
        }
 
        /* If no Tx and not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
 quit_polling:
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
                AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
                /* test debug */
 
        if (work_done >= budget)
                return work_done;
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        /* re-enable Interrupt */
        if (likely(adapter->int_enabled))
                atlx_imr_set(adapter, IMR_NORMAL_MASK);
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                b44_enable_ints(bp);
        }
 
 
 
        /* no more packet in rx/tx queue, remove device from poll
         * queue */
-       napi_complete(napi);
+       napi_complete_done(napi, rx_work_done);
 
        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 
                return weight;
 
        if (handled < weight) {
-               napi_complete(napi);
+               napi_complete_done(napi, handled);
                bgmac_chip_intrs_on(bgmac);
        }
 
 
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {
 
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                bnapi->last_status_idx);
 
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 
                 * has been updated when NAPI was scheduled.
                 */
                if (IS_FCOE_FP(fp)) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, rx_work_done);
                } else {
                        bnx2x_update_fpsb_idx(fp);
                        /* bnx2x_has_rx_work() reads the status block,
 
        }
 
        if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_pkts);
                BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
        }
        return rx_pkts;
 
        sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
 #ifdef CONFIG_SBMAC_COALESCE
                __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
 
                return rcvd;
 
 poll_exit:
-       napi_complete(napi);
+       napi_complete_done(napi, rcvd);
 
        rx_ctrl->rx_complete++;
 
 
 
        work_done = bp->macbgem_ops.mog_rx(bp, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* Packets received while interrupts were disabled */
                status = macb_readl(bp, RSR);
 
        work_done = xgmac_rx(priv, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
        }
        return work_done;
 
        }
 
        if ((work_done < budget) && (tx_done)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                             POLL_EVENT_ENABLE_INTR, 0);
                return 0;
 
        }
 
        if ((work_done < budget) && (tx_done)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                             POLL_EVENT_ENABLE_INTR, 0);
                return 0;
 
 
        if (work_done < budget) {
                /* We stopped because no more packets were available. */
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);
 
 
        if (work_done < budget) {
                /* Slow packet rate, exit polling */
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* Re-enable interrupts */
                cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
                                               cq->cq_idx);
 
        int work_done = process_responses(adapter, budget);
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                writel(adapter->sge->respQ.cidx,
                       adapter->regs + A_SG_SLEEPING);
        }
 
                __skb_queue_head_init(&queue);
                skb_queue_splice_init(&q->rx_queue, &queue);
                if (skb_queue_empty(&queue)) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        spin_unlock_irq(&q->lock);
                        return work_done;
                }
        int work_done = process_responses(adap, qs, budget);
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /*
                 * Because we don't atomically flush the following
 
        u32 val;
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                intr_params = rspq->next_intr_params;
                rspq->next_intr_params = rspq->intr_params;
        } else
 
                 * exit polling
                 */
 
-               napi_complete(napi);
+               napi_complete_done(napi, rq_work_done);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
                 * exit polling
                 */
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
 
 
          /* Remove us from polling list and enable RX intr. */
 
-         napi_complete(napi);
-         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+       napi_complete_done(napi, work_done);
+       iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
          /* The last op happens after poll completion. Which means the following:
           * 1. it can race with disabling irqs in irq handler
           * before we did napi_complete(). See? We would lose it. */
 
          /* remove ourselves from the polling list */
-         napi_complete(napi);
+         napi_complete_done(napi, work_done);
 
          return work_done;
 }
 
                /* We processed all packets available.  Tell NAPI it can
                 * stop polling then re-enable rx interrupts.
                 */
-               napi_complete(napi);
+               napi_complete_done(napi, npackets);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
 
                be_process_mcc(adapter);
 
        if (max_work < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, max_work);
 
                /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
                 * delay via a delay multiplier encoding value
 
        tx_work_done = ethoc_tx(priv->netdev, budget);
 
        if (rx_work_done < budget && tx_work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_work_done);
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        }
 
 
        if (work_done < budget) {
                u32 buf_int_enable_value = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* set tx_done and rx_rdy bits */
                buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
 
        int cleaned = qman_p_poll_dqrr(np->p, budget);
 
        if (cleaned < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, cleaned);
                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
 
        } else if (np->down) {
 
        fec_enet_tx(ndev);
 
        if (pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
        return pkts;
 
 
        if (received < budget && tx_left) {
                /* done */
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                (*fep->ops->napi_enable)(dev);
 
                return received;
 
 
        if (work_done < budget) {
                u32 imask;
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
 
 
        if (!num_act_queues) {
                u32 imask;
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
 
                howmany += ucc_geth_rx(ugeth, i, budget - howmany);
 
        if (howmany < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, howmany);
                setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
        }
 
 
                priv->reg_inten |= RCV_INT;
                writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
        }
-       napi_complete(napi);
+       napi_complete_done(napi, rx);
 done:
        /* clean up tx descriptors and start a new timer if necessary */
        tx_remaining = hip04_tx_reclaim(ndev, false);
 
        } while (ints & DEF_INT_MASK);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                hisi_femac_irq_enable(priv, DEF_INT_MASK &
                                        (~IRQ_INT_TX_PER_PACKET));
        }
 
        } while (ints & DEF_INT_MASK);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                hix5hd2_irq_enable(priv);
        }
 
 
        ibmveth_replenish_task(adapter);
 
        if (frames_processed < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, frames_processed);
 
                /* We think we are done - reenable interrupts,
                 * then check once more to make sure we are done.
 
 
        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
-               napi_complete(napi);
+               napi_complete_done(napi, frames_processed);
                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
                    napi_reschedule(napi)) {
                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                e100_enable_irq(nic);
        }
 
 
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        ixgb_irq_enable(adapter);
        }
 
 
        work_done = korina_rx(dev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                writel(readl(&lp->rx_dma_regs->dmasm) &
                        ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 
 {
        struct ltq_etop_chan *ch = container_of(napi,
                                struct ltq_etop_chan, napi);
-       int rx = 0;
-       int complete = 0;
+       int work_done = 0;
 
-       while ((rx < budget) && !complete) {
+       while (work_done < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
 
-               if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
-                       ltq_etop_hw_receive(ch);
-                       rx++;
-               } else {
-                       complete = 1;
-               }
+               if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+                       break;
+               ltq_etop_hw_receive(ch);
+               work_done++;
        }
-       if (complete || !rx) {
-               napi_complete(&ch->napi);
+       if (work_done < budget) {
+               napi_complete_done(&ch->napi, work_done);
                ltq_dma_ack_irq(&ch->dma);
        }
-       return rx;
+       return work_done;
 }
 
 static int
 
        if (work_done < budget) {
                if (mp->oom)
                        mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                wrlp(mp, INT_MASK, mp->int_mask);
        }
 
 
                        rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
        }
 
-       budget -= rx_done;
-
-       if (budget > 0) {
+       if (rx_done < budget) {
                cause_rx_tx = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
 
                if (pp->neta_armada3700) {
                        unsigned long flags;
 
 
        if (budget > 0) {
                cause_rx = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
 
                mvpp2_interrupts_enable(port);
        }
 
        }
        work_done = rxq_process(dev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                wrl(pep, INT_MASK, ALL_INTS);
        }
 
 
        }
 
        if (rx < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx);
        }
 
        priv->reg_imr |= RPKT_FINISH_M;
 
 
        myri10ge_ss_unlock_napi(ss);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                put_be32(htonl(3), ss->irq_claim);
        }
        return work_done;
 
                np->intr_status = readl(ioaddr + IntrStatus);
        } while (np->intr_status);
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
 
        /* Reenable interrupts providing nothing is trying to shut
         * the chip down. */
 
        s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /*Re Enable MSI-Rx Vector*/
                addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
                addr += 7 - ring->ring_no;
                        break;
        }
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /* Re enable the Rx interrupts for the ring */
                writeq(0, &bar0->rx_traffic_mask);
                readl(&bar0->rx_traffic_mask);
 
        vxge_hw_vpath_poll_rx(ring->handle);
        pkts_processed = ring->pkts_processed;
 
-       if (ring->pkts_processed < budget_org) {
-               napi_complete(napi);
+       if (pkts_processed < budget_org) {
+               napi_complete_done(napi, pkts_processed);
 
                /* Re enable the Rx interrupts for the vpath */
                vxge_hw_channel_msix_unmask(
        VXGE_COMPLETE_ALL_TX(vdev);
 
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /* Re enable the Rx interrupts for the ring */
                vxge_hw_device_unmask_all(hldev);
                vxge_hw_device_flush_io(hldev);
 
        if (rx_work < budget) {
                /* re-enable interrupts
                   (msix not enabled in napi) */
-               napi_complete(napi);
+               napi_complete_done(napi, rx_work);
 
                writel(np->irqmask, base + NvRegIrqMask);
        }
 
        rx_done = __lpc_handle_recv(ndev, budget);
 
        if (rx_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
                lpc_eth_enable_int(pldat->net_base);
        }
 
 
                poll_end_flag = true;
 
        if (poll_end_flag) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                pch_gbe_irq_enable(adapter);
        }
 
 
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
-               napi_complete(napi);
+               napi_complete_done(napi, pkts);
 
                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
 
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__NX_DEV_UP, &adapter->state))
                        netxen_nic_enable_int(sds_ring);
        }
 
                        qede_rx_int(fp, budget) : 0;
        if (rx_work_done < budget) {
                if (!qede_poll_is_more_work(fp)) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, rx_work_done);
 
                        /* Update and reenable interrupts */
                        qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
 
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_sds_intr(adapter, sds_ring);
                        qlcnic_enable_tx_intr(adapter, tx_ring);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_sds_intr(adapter, sds_ring);
        }
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
 
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
 
        adapter = sds_ring->adapter;
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_sds_intr(adapter, sds_ring);
        }
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ql_enable_completion_interrupt(qdev, rx_ring->irq);
        }
        return work_done;
 
        emac_mac_rx_process(adpt, rx_q, &work_done, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                irq->mask |= rx_q->intr;
                writel(irq->mask, adpt->base + EMAC_INT_MASK);
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                rtl_irq_enable(tp, enable_mask);
                mmiowb();
 
        }
 
        if (credits < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, credits);
 
        rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
 
 
 
        work_done = sxgbe_rx(priv, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
        }
 
 
                 * since efx_nic_eventq_read_ack() will have no effect if
                 * interrupts have already been disabled.
                 */
-               napi_complete(napi);
+               napi_complete_done(napi, spent);
                efx_nic_eventq_read_ack(channel);
        }
 
 
                 * since ef4_nic_eventq_read_ack() will have no effect if
                 * interrupts have already been disabled.
                 */
-               napi_complete(napi);
+               napi_complete_done(napi, spent);
                ef4_nic_eventq_read_ack(channel);
        }
 
 
        smsc9420_pci_flush_write(pd);
 
        if (work_done < budget) {
-               napi_complete(&pd->napi);
+               napi_complete_done(&pd->napi, work_done);
 
                /* re-enable RX DMA interrupts */
                dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
 
 
        work_done = stmmac_rx(priv, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                stmmac_enable_dma_irq(priv);
        }
        return work_done;
 
        work_done = niu_poll_core(np, lp, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                niu_ldg_rearm(np, lp, 1);
        }
        return work_done;
 
                gp->status = readl(gp->regs + GREG_STAT);
        } while (gp->status & GREG_STAT_NAPI);
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        gem_enable_ints(gp);
 
        return work_done;
 
        int processed = vnet_event_napi(port, budget);
 
        if (processed < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, processed);
                port->rx_event &= ~LDC_EVENT_DATA_READY;
                vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
        }
 
                 * device lock and allow waiting tasks (eg rmmod) to advance) */
                priv->napi_stop = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                bdx_enable_interrupts(priv);
        }
        return work_done;
 
        }
 
        if (num_rx < budget) {
-               napi_complete(napi_rx);
+               napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
                if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
 
                                        &emac_rxhost_errcodes[cause][0], ch);
                }
        } else if (num_rx_pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_rx_pkts);
                emac_int_enable(priv);
        }
 
 
 
        netcp_rxpool_refill(netcp);
        if (packets < budget) {
-               napi_complete(&netcp->rx_napi);
+               napi_complete_done(&netcp->rx_napi, packets);
                knav_queue_enable_notify(netcp->rx_queue);
        }
 
 
        }
 
        /* There are no packets left. */
-       napi_complete(&info_mpipe->napi);
+       napi_complete_done(&info_mpipe->napi, work);
 
        md = &mpipe_data[instance];
        /* Re-enable hypervisor interrupts. */
 
                }
        }
 
-       napi_complete(&info->napi);
+       napi_complete_done(&info->napi, work);
 
        if (!priv->active)
                goto done;
 
        }
 
        if (packets_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, packets_done);
                gelic_card_rx_irq_on(card);
        }
        return packets_done;
 
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
        if (packets_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, packets_done);
                spider_net_rx_irq_on(card);
                card->ignore_rx_ramfull = 0;
        }
 
        spin_unlock(&lp->rx_lock);
 
        if (received < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                /* enable interrupts */
                tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
        }
 
 
        if (num_received < budget) {
                data->rxpending = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, num_received);
 
                TSI_WRITE(TSI108_EC_INTMASK,
                                     TSI_READ(TSI108_EC_INTMASK)
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                iowrite16(enable_mask, ioaddr + IntrEnable);
                mmiowb();
        }
 
        velocity_tx_srv(vptr);
        /* If budget not fully consumed, exit the polling mode */
        if (rx_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
                mac_enable_int(vptr->mac_regs);
        }
        spin_unlock_irqrestore(&vptr->lock, flags);
 
        }
 
        if (rx_count < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                w5100_enable_intr(priv);
        }
 
 
        }
 
        if (rx_count < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                w5300_write(priv, W5300_IMR, IR_S0);
                mmiowb();
        }
 
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                if (adapter->unset_rx_last) {
                        adapter->rx_last_jiffies = jiffies;
 
        rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
 
        if (rxd_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rxd_done);
                vmxnet3_enable_all_intrs(rx_queue->adapter);
        }
        return rxd_done;
        rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
 
        if (rxd_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rxd_done);
                vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
        }
        return rxd_done;
 
        howmany += hdlc_rx_done(priv, budget - howmany);
 
        if (howmany < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, howmany);
                qe_setbits32(priv->uccf->p_uccm,
                             (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
        }
 
                received = sca_rx_done(port, budget);
 
        if (received < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                enable_intr(port);
        }
 
 
        done = ath10k_htt_txrx_compl_task(ar, budget);
 
        if (done < budget) {
-               napi_complete(ctx);
+               napi_complete_done(ctx, done);
                /* In case of MSI, it is possible that interrupts are received
                 * while NAPI poll is inprogress. So pending interrupts that are
                 * received after processing all copy engine pipes by NAPI poll
 
        done = budget - quota;
 
        if (done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, done);
                wil6210_unmask_irq_rx(wil);
                wil_dbg_txrx(wil, "NAPI RX complete\n");
        }
 
        work_done = xenvif_tx_action(queue, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                xenvif_napi_schedule_or_enable_events(queue);
        }
 
 
        if (work_done < budget) {
                int more_to_do = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
                if (more_to_do)
 
 
        if (rx_count < budget) {
                /* No more work */
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                enable_irq(rx_group->irq);
        }
        return rx_count;
 
 
        /* If there aren't any more packets to receive stop the poll */
        if (rx_count < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
 
        return rx_count;
 }
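
The conversion in every hunk above has the same shape: a NAPI poll handler that, having used less than its budget, reports the amount of work it actually performed through napi_complete_done() before re-enabling the device interrupt, instead of calling the bare napi_complete(). The sketch below is illustrative only; struct foo_priv, foo_clean_rx() and foo_irq_enable() are hypothetical placeholders and do not belong to any driver touched by this patch.

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device-specific state would live here */
};

/* Hypothetical helpers, assumed to exist in the placeholder driver. */
int foo_clean_rx(struct foo_priv *priv, int budget);
void foo_irq_enable(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done;

	/* Receive at most 'budget' packets. */
	work_done = foo_clean_rx(priv, budget);

	/* Only complete NAPI when the budget was not exhausted, and pass
	 * the work actually done so the core can use it (for example for
	 * deferred GRO flushing via gro_flush_timeout) rather than the
	 * zero it has to assume with plain napi_complete().
	 */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		foo_irq_enable(priv);
	}

	return work_done;
}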