return 0;
 
 out_gem_unload:
-       if (dev_priv->mm.inactive_shrinker.shrink)
+       if (dev_priv->mm.inactive_shrinker.scan_objects)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        i915_teardown_sysfs(dev);
 
-       if (dev_priv->mm.inactive_shrinker.shrink)
+       if (dev_priv->mm.inactive_shrinker.scan_objects)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
        mutex_lock(&dev->struct_mutex);
 
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);
 
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
-                                   struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+                                            struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+                                           struct shrink_control *sc);
 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
        return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static void
+static long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj, *next;
+       long freed = 0;
 
        i915_gem_evict_everything(dev_priv->dev);
 
        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-                                global_list)
+                                global_list) {
+               if (obj->pages_pin_count == 0)
+                       freed += obj->base.size >> PAGE_SHIFT;
                i915_gem_object_put_pages(obj);
+       }
+       return freed;
 }
 
 static int
 
        dev_priv->mm.interruptible = true;
 
-       dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+       dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+       dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
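
The hunk above captures the heart of the whole series: the single ->shrink()
callback, which had to both report the cache size and reclaim from it depending
on sc->nr_to_scan, is split into ->count_objects() and ->scan_objects(). As a
minimal sketch of the new shape (hypothetical demo_* names and a stand-in
free routine, not part of this patch):

    #include <linux/shrinker.h>

    static atomic_long_t demo_nr_cached = ATOMIC_LONG_INIT(0);

    static bool demo_free_one_object(void)
    {
            /* stand-in for driver-specific eviction of one object */
            if (atomic_long_read(&demo_nr_cached) == 0)
                    return false;
            atomic_long_dec(&demo_nr_cached);
            return true;
    }

    static unsigned long demo_count(struct shrinker *s,
                                    struct shrink_control *sc)
    {
            /* cheap, non-blocking estimate of what could be freed */
            return atomic_long_read(&demo_nr_cached);
    }

    static unsigned long demo_scan(struct shrinker *s,
                                   struct shrink_control *sc)
    {
            unsigned long freed = 0;

            /* free up to sc->nr_to_scan objects, report what went away */
            while (freed < sc->nr_to_scan && demo_free_one_object())
                    freed++;

            return freed;
    }

    static struct shrinker demo_shrinker = {
            .count_objects  = demo_count,
            .scan_objects   = demo_scan,
            .seeks          = DEFAULT_SEEKS,
    };

Registration is unchanged: register_shrinker(&demo_shrinker) on setup and
unregister_shrinker() on teardown, exactly as the i915 hunks above do.
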
 #endif
 }
 
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct drm_i915_private *dev_priv =
                container_of(shrinker,
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
-       int nr_to_scan = sc->nr_to_scan;
        bool unlock = true;
-       int cnt;
+       unsigned long count;
 
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;

                if (dev_priv->mm.shrinker_no_lock_stealing)
                        return 0;

                unlock = false;
        }
 
-       if (nr_to_scan) {
-               nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
-               if (nr_to_scan > 0)
-                       nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
-                                                       false);
-               if (nr_to_scan > 0)
-                       i915_gem_shrink_all(dev_priv);
-       }
-
-       cnt = 0;
+       count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
-                       cnt += obj->base.size >> PAGE_SHIFT;
+                       count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (obj->active)
                        continue;
 
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
-                       cnt += obj->base.size >> PAGE_SHIFT;
+                       count += obj->base.size >> PAGE_SHIFT;
        }
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
-       return cnt;
+       return count;
 }
 
 /* All the new VM stuff */
        return 0;
 }
 
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(shrinker,
+                            struct drm_i915_private,
+                            mm.inactive_shrinker);
+       struct drm_device *dev = dev_priv->dev;
+       int nr_to_scan = sc->nr_to_scan;
+       unsigned long freed;
+       bool unlock = true;
+
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               if (!mutex_is_locked_by(&dev->struct_mutex, current))
+                       return 0;
+
+               if (dev_priv->mm.shrinker_no_lock_stealing)
+                       return 0;
+
+               unlock = false;
+       }
+
+       freed = i915_gem_purge(dev_priv, nr_to_scan);
+       if (freed < nr_to_scan)
+               freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+                                          false);
+       if (freed < nr_to_scan)
+               freed += i915_gem_shrink_all(dev_priv);
+
+       if (unlock)
+               mutex_unlock(&dev->struct_mutex);
+       return freed;
+}
+
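For context on how the pair is driven: shrink_slab() first calls
->count_objects() once to size the job, then loops over ->scan_objects() in
batch-sized chunks until the budget is spent or the shrinker signals
SHRINK_STOP. A simplified, hypothetical paraphrase of that caller (the real
code also folds in seeks, deferred work and per-node accounting):

    static unsigned long drive_shrinker(struct shrinker *s,
                                        struct shrink_control *sc,
                                        unsigned long total_scan)
    {
            unsigned long freed = 0;
            long batch = s->batch ? s->batch : SHRINK_BATCH;

            if (s->count_objects(s, sc) == 0)
                    return 0;               /* nothing cached, skip the scan */

            while (total_scan >= batch) {
                    unsigned long ret;

                    sc->nr_to_scan = batch;
                    ret = s->scan_objects(s, sc);
                    if (ret == SHRINK_STOP)
                            break;          /* no progress possible now */
                    freed += ret;
                    total_scan -= batch;
            }
            return freed;
    }

This is why i915_gem_inactive_scan() above no longer returns a cache size:
vmscan only wants to know how much this particular call freed.
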
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
 {
 
        return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-       unsigned i;
-       int total = 0;
-       for (i = 0; i < NUM_POOLS; ++i)
-               total += _manager->pools[i].npages;
-
-       return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-                             struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
+       unsigned long freed = 0;
 
        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
+               freed += nr_free - shrink_pages;
        }
-       /* return estimated number of unused pages in pool */
-       return ttm_pool_get_num_unused_pages();
+       return freed;
+}
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned i;
+       unsigned long count = 0;
+
+       for (i = 0; i < NUM_POOLS; ++i)
+               count += _manager->pools[i].npages;
+
+       return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-       manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+       manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+       manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
 }
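
Until the per-pool rework the XXX above asks for, one hedged way to honour
sc->gfp_mask here (hypothetical sketch, not part of this patch) is to refuse
the scan outright when the reclaim context cannot sleep, since
ttm_page_pool_free() allocates with GFP_KERNEL internally:

    static unsigned long demo_ttm_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
    {
            /* bail out rather than allocate GFP_KERNEL from atomic reclaim */
            if (!(sc->gfp_mask & __GFP_WAIT))
                    return SHRINK_STOP;

            return ttm_pool_shrink_scan(shrink, sc);
    }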
 
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
-       struct device_pools *p;
-       unsigned total = 0;
-
-       mutex_lock(&_manager->lock);
-       list_for_each_entry(p, &_manager->pools, pools)
-               total += p->pool->npages_free;
-       mutex_unlock(&_manager->lock);
-       return total;
-}
-
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
 
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be
+ * really bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers.
  */
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
-                                 struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned idx = 0;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        unsigned shrink_pages = sc->nr_to_scan;
        struct device_pools *p;
+       unsigned long freed = 0;
 
        if (list_empty(&_manager->pools))
-               return 0;
+               return SHRINK_STOP;
 
        mutex_lock(&_manager->lock);
        pool_offset = pool_offset % _manager->npools;
                        continue;
                nr_free = shrink_pages;
                shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+               freed += nr_free - shrink_pages;
+
                pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
                         p->pool->dev_name, p->pool->name, current->pid,
                         nr_free, shrink_pages);
        }
        mutex_unlock(&_manager->lock);
-       /* return estimated number of unused pages in pool */
-       return ttm_dma_pool_get_num_unused_pages();
+       return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       struct device_pools *p;
+       unsigned long count = 0;
+
+       mutex_lock(&_manager->lock);
+       list_for_each_entry(p, &_manager->pools, pools)
+               count += p->pool->npages_free;
+       mutex_unlock(&_manager->lock);
+       return count;
 }
 
 static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-       manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+       manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+       manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
 }
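
Both XXX comments above ask for a shrinker per pool. Roughly, that means
embedding the struct shrinker in each pool and recovering the pool with
container_of(), so every pool is counted and scanned independently instead of
round-robining through a global list. A hypothetical sketch (the demo_* names
are assumptions, not part of this patch):

    struct demo_pool {
            spinlock_t              lock;
            unsigned long           npages_free;
            struct shrinker         shrinker;
    };

    static unsigned long demo_pool_count(struct shrinker *s,
                                         struct shrink_control *sc)
    {
            struct demo_pool *pool = container_of(s, struct demo_pool,
                                                  shrinker);

            return pool->npages_free;
    }

    static unsigned long demo_pool_scan(struct shrinker *s,
                                        struct shrink_control *sc)
    {
            struct demo_pool *pool = container_of(s, struct demo_pool,
                                                  shrinker);
            unsigned long freed = 0;

            spin_lock(&pool->lock);
            while (freed < sc->nr_to_scan && pool->npages_free) {
                    pool->npages_free--;    /* stand-in for real page freeing */
                    freed++;
            }
            spin_unlock(&pool->lock);
            return freed;
    }

    static void demo_pool_register(struct demo_pool *pool)
    {
            pool->shrinker.count_objects = demo_pool_count;
            pool->shrinker.scan_objects = demo_pool_scan;
            pool->shrinker.seeks = 1;
            register_shrinker(&pool->shrinker);
    }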
 
        return 0;
 }
 
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+                                 struct shrink_control *sc)
 {
        struct cache_set *c = container_of(shrink, struct cache_set, shrink);
        struct btree *b, *t;
        unsigned long i, nr = sc->nr_to_scan;
+       unsigned long freed = 0;
 
        if (c->shrinker_disabled)
-               return 0;
+               return SHRINK_STOP;
 
        if (c->try_harder)
-               return 0;
-
-       /*
-        * If nr == 0, we're supposed to return the number of items we have
-        * cached. Not allowed to return -1.
-        */
-       if (!nr)
-               return mca_can_free(c) * c->btree_pages;
+               return SHRINK_STOP;
 
        /* Return -1 if we can't do anything right now */
        if (sc->gfp_mask & __GFP_WAIT)
 
        i = 0;
        list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
-               if (!nr)
+               if (freed >= nr)
                        break;
 
                if (++i > 3 &&
                    !mca_reap(b, NULL, 0)) {
                        mca_data_free(b);
                        rw_unlock(true, b);
-                       --nr;
+                       freed++;
                }
        }
 
        if (list_empty(&c->btree_cache))
                goto out;
 
-       for (i = 0; nr && i < c->bucket_cache_used; i++) {
+       for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
                b = list_first_entry(&c->btree_cache, struct btree, list);
                list_rotate_left(&c->btree_cache);
 
                        mca_bucket_free(b);
                        mca_data_free(b);
                        rw_unlock(true, b);
-                       --nr;
+                       freed++;
                } else
                        b->accessed = 0;
        }
 out:
-       nr = mca_can_free(c) * c->btree_pages;
        mutex_unlock(&c->bucket_lock);
-       return nr;
+       return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+                                  struct shrink_control *sc)
+{
+       struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+       if (c->shrinker_disabled)
+               return 0;
+
+       if (c->try_harder)
+               return 0;
+
+       return mca_can_free(c) * c->btree_pages;
 }
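
The two early returns above illustrate the convention the new API settles on:
->count_objects() returns 0 to say "treat the cache as empty, don't bother
scanning", while ->scan_objects() returns SHRINK_STOP to say "objects may
exist, but I cannot touch them in this context". A hedged restatement with
hypothetical helpers (ex_disabled, ex_nr_cached and ex_free are assumptions):

    static unsigned long ex_count(struct shrinker *s, struct shrink_control *sc)
    {
            if (ex_disabled)
                    return 0;               /* report empty: scan is skipped */

            return ex_nr_cached;            /* a cheap estimate is fine here */
    }

    static unsigned long ex_scan(struct shrinker *s, struct shrink_control *sc)
    {
            if (ex_disabled)
                    return SHRINK_STOP;     /* abort this reclaim pass */

            return ex_free(sc->nr_to_scan); /* number actually freed */
    }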
 
 void bch_btree_cache_free(struct cache_set *c)
                c->verify_data = NULL;
 #endif
 
-       c->shrink.shrink = bch_mca_shrink;
+       c->shrink.count_objects = bch_mca_count;
+       c->shrink.scan_objects = bch_mca_scan;
        c->shrink.seeks = 4;
        c->shrink.batch = c->btree_pages * 2;
        register_shrinker(&c->shrink);
 
                struct shrink_control sc;
                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
-               c->shrink.shrink(&c->shrink, &sc);
+               c->shrink.scan_objects(&c->shrink, &sc);
        }
 
        sysfs_strtoul(congested_read_threshold_us,
 
                                unsigned long max_jiffies)
 {
        if (jiffies - b->last_accessed < max_jiffies)
-               return 1;
+               return 0;
 
        if (!(gfp & __GFP_IO)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
-                       return 1;
+                       return 0;
        }
 
        if (b->hold_count)
-               return 1;
+               return 0;
 
        __make_buffer_clean(b);
        __unlink_buffer(b);
        __free_buffer_wake(b);
 
-       return 0;
+       return 1;
 }
 
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
-                  struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+                  gfp_t gfp_mask)
 {
        int l;
        struct dm_buffer *b, *tmp;
+       long freed = 0;
 
        for (l = 0; l < LIST_SIZE; l++) {
-               list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
-                       if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
-                           !--nr_to_scan)
-                               return;
+               list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+                       freed += __cleanup_old_buffer(b, gfp_mask, 0);
+                       if (!--nr_to_scan)
+                               return freed;
+               }
                dm_bufio_cond_resched();
        }
+       return freed;
 }
 
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct dm_bufio_client *c =
-           container_of(shrinker, struct dm_bufio_client, shrinker);
-       unsigned long r;
-       unsigned long nr_to_scan = sc->nr_to_scan;
+       struct dm_bufio_client *c;
+       unsigned long freed;
 
+       c = container_of(shrink, struct dm_bufio_client, shrinker);
        if (sc->gfp_mask & __GFP_IO)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
-               return !nr_to_scan ? 0 : -1;
+               return SHRINK_STOP;
 
-       if (nr_to_scan)
-               __scan(c, nr_to_scan, sc);
+       freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+       dm_bufio_unlock(c);
+       return freed;
+}
 
-       r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
-       if (r > INT_MAX)
-               r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       struct dm_bufio_client *c;
+       unsigned long count;
 
-       dm_bufio_unlock(c);
+       c = container_of(shrink, struct dm_bufio_client, shrinker);
+       if (sc->gfp_mask & __GFP_IO)
+               dm_bufio_lock(c);
+       else if (!dm_bufio_trylock(c))
+               return 0;
 
-       return r;
+       count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+       dm_bufio_unlock(c);
+       return count;
 }
 
 /*
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);
 
-       c->shrinker.shrink = shrink;
+       c->shrinker.count_objects = dm_bufio_shrink_count;
+       c->shrinker.scan_objects = dm_bufio_shrink_scan;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        register_shrinker(&c->shrinker);
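
Note that dm_bufio_shrink_count() above still takes the client lock just to
read two counters, and reports 0 when the trylock fails. If the buffer counts
were kept in atomics, the count side could be lockless; a hypothetical sketch
(dm-bufio does not do this in this patch):

    struct demo_bufio_client {
            atomic_long_t   n_clean;
            atomic_long_t   n_dirty;
            struct shrinker shrinker;
    };

    static unsigned long demo_bufio_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
    {
            struct demo_bufio_client *c =
                    container_of(shrink, struct demo_bufio_client, shrinker);

            /* stale-but-safe estimate; count_objects only needs a hint */
            return atomic_long_read(&c->n_clean) +
                   atomic_long_read(&c->n_dirty);
    }
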
                        struct dm_buffer *b;
                        b = list_entry(c->lru[LIST_CLEAN].prev,
                                       struct dm_buffer, lru_list);
-                       if (__cleanup_old_buffer(b, 0, max_age * HZ))
+                       if (!__cleanup_old_buffer(b, 0, max_age * HZ))
                                break;
                        dm_bufio_cond_resched();
                }
 
 /*
- * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ * ashmem_shrink_scan - our cache shrinker, called from mm/vmscan.c :: shrink_slab
  *
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
  *
  * 'gfp_mask' is the mask of the allocation that got us into this mess.
  *
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed, or SHRINK_STOP if we cannot
  * proceed without risk of deadlock (due to gfp_mask).
  *
  * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
  * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
  * pages freed.
  */
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct ashmem_range *range, *next;
+       unsigned long freed = 0;
 
        /* We might recurse into filesystem code, so bail out if necessary */
-       if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
-               return -1;
-       if (!sc->nr_to_scan)
-               return lru_count;
+       if (!(sc->gfp_mask & __GFP_FS))
+               return SHRINK_STOP;
 
        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);
 
-               sc->nr_to_scan -= range_size(range);
-               if (sc->nr_to_scan <= 0)
+               freed += range_size(range);
+               if (--sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);
+       return freed;
+}
 
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       /*
+        * Note that lru_count is a count of pages on the lru, not a count of
+        * objects on the list. This means the scan function needs to return the
+        * number of pages freed, not the number of objects scanned.
+        */
        return lru_count;
 }
 
 static struct shrinker ashmem_shrinker = {
-       .shrink = ashmem_shrink,
+       .count_objects = ashmem_shrink_count,
+       .scan_objects = ashmem_shrink_scan,
+       /*
+        * XXX (dchinner): I wish people would comment on why they need such
+        * significant changes to the default value here
+        */
        .seeks = DEFAULT_SEEKS * 4,
 };
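
The XXX above asks why ashmem quadruples DEFAULT_SEEKS. The effect is
mechanical: vmscan divides its scan pressure by .seeks, so a larger value
simply asks less of this cache per reclaim pass. A simplified, hypothetical
paraphrase of the shrink_slab() arithmetic of this era:

    static unsigned long demo_scan_pressure(unsigned long nr_pages_scanned,
                                            unsigned long lru_pages,
                                            unsigned long freeable,
                                            int seeks)
    {
            unsigned long delta;

            delta = (4 * nr_pages_scanned) / seeks; /* bigger seeks, less work */
            delta *= freeable;
            return delta / (lru_pages + 1);
    }

By this measure, DEFAULT_SEEKS * 4 roughly quarters the number of objects that
ashmem_shrink_scan() is asked to free relative to the default.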
 
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
-                               .nr_to_scan = 0,
+                               .nr_to_scan = LONG_MAX,
                        };
 
                        nodes_setall(sc.nodes_to_scan);
-
-                       ret = ashmem_shrink(&ashmem_shrinker, &sc);
-                       sc.nr_to_scan = ret;
-                       ashmem_shrink(&ashmem_shrinker, &sc);
+                       ashmem_shrink_scan(&ashmem_shrinker, &sc);
                }
                break;
        }
 
                        pr_info(x);                     \
        } while (0)
 
-static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+                                 struct shrink_control *sc)
+{
+       return global_page_state(NR_ACTIVE_ANON) +
+               global_page_state(NR_ACTIVE_FILE) +
+               global_page_state(NR_INACTIVE_ANON) +
+               global_page_state(NR_INACTIVE_FILE);
+}
+
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 {
        struct task_struct *tsk;
        struct task_struct *selected = NULL;
-       int rem = 0;
+       unsigned long rem = 0;
        int tasksize;
        int i;
        short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
                        break;
                }
        }
-       if (sc->nr_to_scan > 0)
-               lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
-                               sc->nr_to_scan, sc->gfp_mask, other_free,
-                               other_file, min_score_adj);
-       rem = global_page_state(NR_ACTIVE_ANON) +
-               global_page_state(NR_ACTIVE_FILE) +
-               global_page_state(NR_INACTIVE_ANON) +
-               global_page_state(NR_INACTIVE_FILE);
-       if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
-               lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
-                            sc->nr_to_scan, sc->gfp_mask, rem);
-               return rem;
+
+       lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
+                       sc->nr_to_scan, sc->gfp_mask, other_free,
+                       other_file, min_score_adj);
+
+       if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+               lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
+                            sc->nr_to_scan, sc->gfp_mask);
+               return 0;
        }
+
        selected_oom_score_adj = min_score_adj;
 
        rcu_read_lock();
                lowmem_deathpending_timeout = jiffies + HZ;
                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
-               rem -= selected_tasksize;
+               rem += selected_tasksize;
        }
-       lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+
+       lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
        rcu_read_unlock();
        return rem;
 }
 
 static struct shrinker lowmem_shrinker = {
-       .shrink = lowmem_shrink,
+       .scan_objects = lowmem_scan,
+       .count_objects = lowmem_count,
        .seeks = DEFAULT_SEEKS * 16
 };