return 0;
 }
 
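+/* Report the shrinker's current tunables (seek cost and batch size). */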
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+       struct drm_i915_private *i915 = node_to_i915(m->private);
+
+       seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+       seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+
+       return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        {"i915_dmc_info", i915_dmc_info, 0},
        {"i915_display_info", i915_display_info, 0},
        {"i915_engine_info", i915_engine_info, 0},
+       {"i915_shrinker_info", i915_shrinker_info, 0},
        {"i915_semaphore_status", i915_semaphore_status, 0},
        {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
 
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-       struct drm_i915_private *dev_priv =
+       struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_i915_gem_object *obj;
+       unsigned long num_objects = 0;
        unsigned long count = 0;
 
-       spin_lock(&dev_priv->mm.obj_lock);
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
-               if (can_release_pages(obj))
+       spin_lock(&i915->mm.obj_lock);
+       list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+               if (can_release_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
+                       num_objects++;
+               }
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
-               if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+       list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+               if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
-       spin_unlock(&dev_priv->mm.obj_lock);
+                       num_objects++;
+               }
+       spin_unlock(&i915->mm.obj_lock);
+
+       /* Update our preferred vmscan batch size for the next pass.
+        * Our rough guess for an effective batch size is two available
+        * GEM objects' worth of pages; that is, we don't want the
+        * shrinker to fire until the reclaim is worth the cost of
+        * freeing an entire GEM object.
+        */
+       if (num_objects) {
+               unsigned long avg = 2 * count / num_objects;
+
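+               /* Smooth against the previous batch size, never dropping
+                * below the vmscan default of SHRINK_BATCH (128) pages.
+                */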
+               i915->mm.shrinker.batch =
+                       max((i915->mm.shrinker.batch + avg) >> 1,
+                           128ul /* default SHRINK_BATCH */);
+       }
 
        return count;
 }
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
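+       /* Start with a batch of 4096 pages, i.e. 16MiB with 4KiB pages;
+        * i915_gem_shrinker_count() refines the estimate on each pass.
+        */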
+       dev_priv->mm.shrinker.batch = 4096;
        WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;