struct dma_page *d_page, *next;
        enum pool_type type;
        bool is_cached = false;
-       unsigned count = 0, i;
+       unsigned count = 0, i, npages = 0;
        unsigned long irq_flags;
 
        type = ttm_to_type(ttm->page_flags, ttm->caching_state);
        } else {
                pool->npages_free += count;
                list_splice(&ttm_dma->pages_list, &pool->free_list);
+               npages = count;
                if (pool->npages_free > _manager->options.max_size) {
-                       count = pool->npages_free - _manager->options.max_size;
+                       npages = pool->npages_free - _manager->options.max_size;
+                       /* free at least NUM_PAGES_TO_ALLOC number of pages
+                        * to reduce calls to set_memory_wb */
+                       if (npages < NUM_PAGES_TO_ALLOC)
+                               npages = NUM_PAGES_TO_ALLOC;
                }
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
                ttm_dma->dma_address[i] = 0;
        }
 
-       /* shrink pool if necessary */
-       if (count)
-               ttm_dma_page_pool_free(pool, count);
+       /* shrink pool if necessary (only on !is_cached pools) */
+       if (npages)
+               ttm_dma_page_pool_free(pool, npages);
        ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);