}
 }
 
-static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct kvm *kvm;
        int nr_to_scan = sc->nr_to_scan;
-
-       if (nr_to_scan == 0)
-               goto out;
+       unsigned long freed = 0;
 
        raw_spin_lock(&kvm_lock);
 
                        goto unlock;
                }
 
-               prepare_zap_oldest_mmu_page(kvm, &invalid_list);
+               if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+                       freed++;
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 unlock:
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
 
+               /*
+                * unfair on small ones
+                * per-vm shrinkers cry out
+                * sadness comes quickly
+                */
                list_move_tail(&kvm->vm_list, &vm_list);
                break;
        }
 
        raw_spin_unlock(&kvm_lock);
+       return freed;
-
-out:
+}
+
+static unsigned long
+mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
-       .shrink = mmu_shrink,
+       .count_objects = mmu_shrink_count,
+       .scan_objects = mmu_shrink_scan,
        .seeks = DEFAULT_SEEKS * 10,
 };
 
 
 /*
  * Remove stale credentials. Avoid sleeping inside the loop.
  */
-static int
+static long
 rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
 {
        spinlock_t *cache_lock;
        struct rpc_cred *cred, *next;
        unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
+       long freed = 0;
 
        list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
 
                 */
                if (time_in_range(cred->cr_expire, expired, jiffies) &&
                    test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
-                       return 0;
+                       break;
 
                list_del_init(&cred->cr_lru);
                number_cred_unused--;
+               freed++;
                if (atomic_read(&cred->cr_count) != 0)
                        continue;
 
                }
                spin_unlock(cache_lock);
        }
-       return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
+       return freed;
 }
 
 /*
  * Run memory cache shrinker.
  */
-static int
-rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+rpcauth_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
        LIST_HEAD(free);
-       int res;
-       int nr_to_scan = sc->nr_to_scan;
-       gfp_t gfp_mask = sc->gfp_mask;
+       unsigned long freed;
+
+       if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+               return SHRINK_STOP;
 
-       if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-               return (nr_to_scan == 0) ? 0 : -1;
+       /* nothing left, don't come back */
        if (list_empty(&cred_unused))
-               return 0;
+               return SHRINK_STOP;
+
        spin_lock(&rpc_credcache_lock);
-       res = rpcauth_prune_expired(&free, nr_to_scan);
+       freed = rpcauth_prune_expired(&free, sc->nr_to_scan);
        spin_unlock(&rpc_credcache_lock);
        rpcauth_destroy_credlist(&free);
-       return res;
+
+       return freed;
+}
+
+static unsigned long
+rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 /*
 }
 
 static struct shrinker rpc_cred_shrinker = {
-       .shrink = rpcauth_cache_shrinker,
+       .count_objects = rpcauth_cache_shrink_count,
+       .scan_objects = rpcauth_cache_shrink_scan,
        .seeks = DEFAULT_SEEKS,
 };