/* intrmod: max. packets to trigger interrupt */
 #define LIO_INTRMOD_RXMAXCNT_TRIGGER   384
 /* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER   1
+#define LIO_INTRMOD_RXMINCNT_TRIGGER   0
 /* intrmod: max. time to trigger interrupt */
 #define LIO_INTRMOD_RXMAXTMR_TRIGGER   128
 /* 66xx:intrmod: min. time to trigger interrupt
 
                        return octeon_dev->octeon_id;
        return -1;
 }
+
+/**
+ * lio_enable_irq - write back the pending queue counts to the hardware
+ * @droq: output queue whose pkts_sent register is acknowledged (may be NULL)
+ * @iq: instruction queue whose inst_cnt register is acknowledged (may be NULL)
+ *
+ * Writes the software-accumulated counts (droq->pkt_count / iq->pkt_in_done)
+ * to the corresponding hardware registers and resets the software counters,
+ * each under its own queue lock. NOTE(review): presumably this re-arms the
+ * interrupt for the queue, per the function name — confirm against hw docs.
+ */
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+       /* the whole thing needs to be atomic, ideally */
+       if (droq) {
+               spin_lock_bh(&droq->lock);
+               writel(droq->pkt_count, droq->pkts_sent_reg);
+               droq->pkt_count = 0;
+               spin_unlock_bh(&droq->lock);
+       }
+       if (iq) {
+               spin_lock_bh(&iq->lock);
+               writel(iq->pkt_in_done, iq->inst_cnt_reg);
+               iq->pkt_in_done = 0;
+               spin_unlock_bh(&iq->lock);
+       }
+}
 
        return fn_arg;
 }
 
-/** Check for packets on Droq. This function should be called with
- * lock held.
+/** Check for packets on Droq. This function should be called with lock held.
  *  @param  droq - Droq on which count is checked.
  *  @return Returns packet count.
  */
 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
 {
        u32 pkt_count = 0;
+       u32 last_count;
 
        pkt_count = readl(droq->pkts_sent_reg);
-       if (pkt_count) {
-               atomic_add(pkt_count, &droq->pkts_pending);
-               writel(pkt_count, droq->pkts_sent_reg);
-       }
 
-       return pkt_count;
+       /* the register is not cleared here, so it reads back as a running
+        * total; report only the delta since the last observed value
+        */
+       last_count = pkt_count - droq->pkt_count;
+       droq->pkt_count = pkt_count;
+
+       /* the count is written back to the register at napi irq enable
+        * or at the end of the droq tasklet, not here
+        */
+       if (last_count)
+               atomic_add(last_count, &droq->pkts_pending);
+
+       return last_count;
 }
 
 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
        u32 pkt_count = 0, pkts_processed = 0;
        struct list_head *tmp, *tmp2;
 
+       /* Grab the droq lock */
+       spin_lock(&droq->lock);
+
+       octeon_droq_check_hw_for_pkts(droq);
        pkt_count = atomic_read(&droq->pkts_pending);
-       if (!pkt_count)
+
+       if (!pkt_count) {
+               spin_unlock(&droq->lock);
                return 0;
+       }
 
        if (pkt_count > budget)
                pkt_count = budget;
 
-       /* Grab the droq lock */
-       spin_lock(&droq->lock);
-
        pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
 
        atomic_sub(pkts_processed, &droq->pkts_pending);
        spin_lock(&droq->lock);
 
        while (total_pkts_processed < budget) {
+               octeon_droq_check_hw_for_pkts(droq);
+
                pkts_available =
                        CVM_MIN((budget - total_pkts_processed),
                                (u32)(atomic_read(&droq->pkts_pending)));
                atomic_sub(pkts_processed, &droq->pkts_pending);
 
                total_pkts_processed += pkts_processed;
-
-               octeon_droq_check_hw_for_pkts(droq);
        }
 
        spin_unlock(&droq->lock);