{
        int cpu;
 
+       atomic64_set(&iovad->fq_flush_start_cnt,  0);
+       atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
        iovad->fq = alloc_percpu(struct iova_fq);
        if (!iovad->fq)
                return -ENOMEM;
 
 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 {
+       u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
        unsigned idx;
 
        fq_ring_for_each(idx, fq) {
 
+               if (fq->entries[idx].counter >= counter)
+                       break;
+
                if (iovad->entry_dtor)
                        iovad->entry_dtor(fq->entries[idx].data);
 
                free_iova_fast(iovad,
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);
+
+               fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
        }
+}
 
-       fq->head = 0;
-       fq->tail = 0;
+static void iova_domain_flush(struct iova_domain *iovad)
+{
+       atomic64_inc(&iovad->fq_flush_start_cnt);
+       iovad->flush_cb(iovad);
+       atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
 static void fq_destroy_all_entries(struct iova_domain *iovad)
        struct iova_fq *fq = get_cpu_ptr(iovad->fq);
        unsigned idx;
 
+       /*
+        * First remove all entries from the flush queue that have already been
+        * flushed out on another CPU. This makes the fq_full() check below less
+        * likely to be true.
+        */
+       fq_ring_free(iovad, fq);
+
        if (fq_full(fq)) {
-               iovad->flush_cb(iovad);
+               iova_domain_flush(iovad);
                fq_ring_free(iovad, fq);
        }
 
        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].data     = data;
+       fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
        put_cpu_ptr(iovad->fq);
 }
 
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rbtree.h>
+#include <linux/atomic.h>
 #include <linux/dma-mapping.h>
 
 /* iova structure */
        unsigned long iova_pfn;
        unsigned long pages;
        unsigned long data;
+       u64 counter; /* Flush counter when this entry was added */
 };
 
 /* Per-CPU Flush Queue structure */
                                           iova entry */
 
        struct iova_fq __percpu *fq;    /* Flush Queue */
+
+       atomic64_t      fq_flush_start_cnt;     /* Number of TLB flushes that
+                                                  have been started */
+
+       atomic64_t      fq_flush_finish_cnt;    /* Number of TLB flushes that
+                                                  have been finished */
 };
 
 static inline unsigned long iova_size(struct iova *iova)