batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);
 
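+       /*
+        * The shadow batch (dest_obj) comes from the batch pool with its
+        * pages already pinned; complain if a caller breaks that contract.
+        */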
+       if (WARN_ON(dest_obj->pages_pin_count == 0))
+               return ERR_PTR(-ENODEV);
+
        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
                goto unpin_src;
        }
 
-       ret = i915_gem_object_get_pages(dest_obj);
-       if (ret) {
-               DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
-               goto unmap_src;
-       }
-       i915_gem_object_pin_pages(dest_obj);
-
        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
                goto unmap_src;
        }
 
        dst = vmap_batch(dest_obj, 0, batch_len);
        if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-               i915_gem_object_unpin_pages(dest_obj);
                ret = -ENOMEM;
                goto unmap_src;
        }
 
        vunmap(batch_base);
-       i915_gem_object_unpin_pages(shadow_batch_obj);
 
        return ret;
 }
 
                                         struct drm_i915_gem_object,
                                         batch_pool_list);
 
-               WARN_ON(obj->active);
-
-               list_del_init(&obj->batch_pool_list);
+               list_del(&obj->batch_pool_list);
                drm_gem_object_unreference(&obj->base);
        }
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with its pages pinned. The caller must call
+ * i915_gem_object_unpin_pages() on the returned object when done.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
        list_for_each_entry_safe(tmp, next,
-                       &pool->cache_list, batch_pool_list) {
-
+                                &pool->cache_list, batch_pool_list) {
                if (tmp->active)
                        continue;
 
                 * but not 'too much' bigger. A better way to do this
                 * might be to bucket the pool objects based on size.
                 */
-               if (tmp->base.size >= size &&
-                   tmp->base.size <= (2 * size)) {
+               if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
                        obj = tmp;
                        break;
                }
        }
 
-       if (!obj) {
+       if (obj == NULL) {
+               int ret;
+
                obj = i915_gem_alloc_object(pool->dev, size);
-               if (!obj)
+               if (obj == NULL)
                        return ERR_PTR(-ENOMEM);
 
-               list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-       }
-       else
-               /* Keep list in LRU order */
-               list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       /* Don't leak the freshly allocated object */
+                       drm_gem_object_unreference(&obj->base);
+                       return ERR_PTR(ret);
+               }
 
-       obj->madv = I915_MADV_WILLNEED;
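+               /*
+                * Pool buffers are marked purgeable: the shrinker may
+                * reclaim their backing pages whenever they are not pinned.
+                */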
+               obj->madv = I915_MADV_DONTNEED;
+       }
 
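+       /*
+        * Keep the pool in LRU order: the buffer just handed out moves
+        * to the tail, so the scan above finds older entries first.
+        */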
+       list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+       i915_gem_object_pin_pages(obj);
        return obj;
 }
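As an aside, the pin/unpin contract documented above can be summarised
with a minimal caller sketch. This is illustrative only: use_pool_buffer()
is a hypothetical helper, pool and size are whatever the caller has at
hand, and dev->struct_mutex is assumed held, as the kernel-doc requires.

static int use_pool_buffer(struct i915_gem_batch_pool *pool, size_t size)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_batch_pool_get(pool, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* ... fill and submit the shadow batch ... */

        /*
         * The pool hands the object back with its pages pinned; the
         * caller must drop that pin, including on error paths (see the
         * err: label in i915_gem_execbuffer_parse() below).
         */
        i915_gem_object_unpin_pages(obj);
        return 0;
}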
 
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define  __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;
 
-       if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-               obj->madv = I915_MADV_DONTNEED;
-
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-                         __EXEC_OBJECT_HAS_PIN |
-                         __EXEC_OBJECT_PURGEABLE);
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
        if (ret)
                goto err;
 
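+       /* Parsing is done; drop the pages pin taken by batch_pool_get(). */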
+       i915_gem_object_unpin_pages(shadow_batch_obj);
+
        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
        vma->exec_entry = shadow_exec_entry;
-       vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+       vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
        drm_gem_object_reference(&shadow_batch_obj->base);
        list_add_tail(&vma->exec_list, &eb->vmas);
 
        return shadow_batch_obj;
 
 err:
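+       /* The error paths must release the batch_pool_get() pin as well. */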
+       i915_gem_object_unpin_pages(shadow_batch_obj);
        if (ret == -EACCES) /* unhandled chained batch */
                return batch_obj;
        else