static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
+static void fq_flush_timeout(unsigned long data);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        if (!iovad->fq)
                return;
 
+       if (timer_pending(&iovad->fq_timer))
+               del_timer(&iovad->fq_timer);
+
        fq_destroy_all_entries(iovad);
+
        free_percpu(iovad->fq);
 
        iovad->fq         = NULL;
                spin_lock_init(&fq->lock);
        }
 
+       setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+       atomic_set(&iovad->fq_timer_on, 0);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(init_iova_flush_queue);
        }
 }
 
+/*
+ * Timer callback used to periodically drain the per-CPU flush queues.
+ * 'data' is the iova_domain pointer passed via setup_timer() in
+ * init_iova_flush_queue().
+ */
+static void fq_flush_timeout(unsigned long data)
+{
+       struct iova_domain *iovad = (struct iova_domain *)data;
+       int cpu;
+
+       /*
+        * Clear fq_timer_on before flushing so that a concurrent
+        * queue_iova() (see its atomic_cmpxchg) can re-arm the timer
+        * for entries queued while/after this flush runs.
+        */
+       atomic_set(&iovad->fq_timer_on, 0);
+       iova_domain_flush(iovad);
+
+       /* Reap every per-CPU queue under its own lock, IRQs disabled. */
+       for_each_possible_cpu(cpu) {
+               unsigned long flags;
+               struct iova_fq *fq;
+
+               fq = per_cpu_ptr(iovad->fq, cpu);
+               spin_lock_irqsave(&fq->lock, flags);
+               fq_ring_free(iovad, fq);
+               spin_unlock_irqrestore(&fq->lock, flags);
+       }
+}
+
 void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
                unsigned long data)
        fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
        spin_unlock_irqrestore(&fq->lock, flags);
+
+       if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+               mod_timer(&iovad->fq_timer,
+                         jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+
        put_cpu_ptr(iovad->fq);
 }
 EXPORT_SYMBOL_GPL(queue_iova);
 
 /* Number of entries per Flush Queue */
 #define IOVA_FQ_SIZE   256
 
+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
+#define IOVA_FQ_TIMEOUT        10
+
 /* Flush Queue entry for defered flushing */
 struct iova_fq_entry {
        unsigned long iova_pfn;
 
        atomic64_t      fq_flush_finish_cnt;    /* Number of TLB flushes that
                                                   have been finished */
+
+       struct timer_list fq_timer;             /* Timer to regularly empty the
+                                                  flush-queues */
+       atomic_t fq_timer_on;                   /* 1 when timer is active, 0
+                                                  when not */
 };
 
 static inline unsigned long iova_size(struct iova *iova)