struct drm_i915_gem_object *obj;
        struct file_stats stats;
        struct intel_engine_cs *ring;
-       int i;
+       int i, j;
 
        memset(&stats, 0, sizeof(stats));
 
        for_each_ring(ring, dev_priv, i) {
-               list_for_each_entry(obj,
-                                   &ring->batch_pool.cache_list,
-                                   batch_pool_list)
-                       per_file_stats(0, obj, &stats);
+               for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+                       list_for_each_entry(obj,
+                                           &ring->batch_pool.cache_list[j],
+                                           batch_pool_link)
+                               per_file_stats(0, obj, &stats);
+               }
        }
 
        print_file_stats(m, "batch pool", stats);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *ring;
-       int count = 0;
-       int ret, i;
+       int total = 0;
+       int ret, i, j;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
        for_each_ring(ring, dev_priv, i) {
-               seq_printf(m, "%s cache:\n", ring->name);
-               list_for_each_entry(obj,
-                                   &ring->batch_pool.cache_list,
-                                   batch_pool_list) {
-                       seq_puts(m, "   ");
-                       describe_obj(m, obj);
-                       seq_putc(m, '\n');
-                       count++;
+               for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+                       int count;
+
+                       count = 0;
+                       list_for_each_entry(obj,
+                                           &ring->batch_pool.cache_list[j],
+                                           batch_pool_link)
+                               count++;
+                       seq_printf(m, "%s cache[%d]: %d objects\n",
+                                  ring->name, j, count);
+
+                       list_for_each_entry(obj,
+                                           &ring->batch_pool.cache_list[j],
+                                           batch_pool_link) {
+                               seq_puts(m, "   ");
+                               describe_obj(m, obj);
+                               seq_putc(m, '\n');
+                       }
+
+                       total += count;
                }
        }
 
-       seq_printf(m, "total: %d\n", count);
+       seq_printf(m, "total: %d\n", total);
 
        mutex_unlock(&dev->struct_mutex);
 
 
 void i915_gem_batch_pool_init(struct drm_device *dev,
                               struct i915_gem_batch_pool *pool)
 {
+       int n;
+
        pool->dev = dev;
-       INIT_LIST_HEAD(&pool->cache_list);
+
+       /* cache_list is now an array: one list head per object-size bucket
+        * (see the power-of-two bucketing where objects are looked up).
+        */
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+               INIT_LIST_HEAD(&pool->cache_list[n]);
 }
 
 /**
  */
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
+       int n;
+
        WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
+       /* Empty every size bucket, dropping the pool's reference on each
+        * cached object.
+        */
-       while (!list_empty(&pool->cache_list)) {
-               struct drm_i915_gem_object *obj =
-                       list_first_entry(&pool->cache_list,
-                                        struct drm_i915_gem_object,
-                                        batch_pool_list);
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+               while (!list_empty(&pool->cache_list[n])) {
+                       struct drm_i915_gem_object *obj =
+                               list_first_entry(&pool->cache_list[n],
+                                                struct drm_i915_gem_object,
+                                                batch_pool_link);
 
-               list_del(&obj->batch_pool_list);
-               drm_gem_object_unreference(&obj->base);
+                       list_del(&obj->batch_pool_link);
+                       drm_gem_object_unreference(&obj->base);
+               }
        }
 }
 
 {
        struct drm_i915_gem_object *obj = NULL;
        struct drm_i915_gem_object *tmp, *next;
+       struct list_head *list;
+       int n;
 
        WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
-       list_for_each_entry_safe(tmp, next,
-                                &pool->cache_list, batch_pool_list) {
+       /* Compute a power-of-two bucket, but throw everything greater than
+        * 16KiB into the same bucket: i.e. the buckets hold objects of
+        * (1 page, 2 pages, 4 pages, 8+ pages).
+        */
+       n = fls(size >> PAGE_SHIFT) - 1;
+       if (n >= ARRAY_SIZE(pool->cache_list))
+               n = ARRAY_SIZE(pool->cache_list) - 1;
+       list = &pool->cache_list[n];
+
+       list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
                /* The batches are strictly LRU ordered */
                if (tmp->active)
                        break;
 
                /* While we're looping, do some clean up */
                if (tmp->madv == __I915_MADV_PURGED) {
-                       list_del(&tmp->batch_pool_list);
+                       list_del(&tmp->batch_pool_link);
                        drm_gem_object_unreference(&tmp->base);
                        continue;
                }
 
-               /*
-                * Select a buffer that is at least as big as needed
-                * but not 'too much' bigger. A better way to do this
-                * might be to bucket the pool objects based on size.
-                */
-               if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
+               if (tmp->base.size >= size) {
                        obj = tmp;
                        break;
                }
                obj->madv = I915_MADV_DONTNEED;
        }
 
-       list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+       list_move_tail(&obj->batch_pool_link, list);
        i915_gem_object_pin_pages(obj);
        return obj;
 }