        if (!sc)
                return -ENOMEM;
 
+       /* Need to ensure auto-resizing doesn't interfere */
+       mutex_lock(&conf->cache_size_mutex);
+
        for (i = conf->max_nr_stripes; i; i--) {
                nsh = alloc_stripe(sc, GFP_KERNEL);
                if (!nsh)
                        break;
                list_add(&nsh->lru, &newstripes);
        }
        if (i) {
                /* didn't get enough, give up */
                while (!list_empty(&newstripes)) {
                        nsh = list_entry(newstripes.next, struct stripe_head, lru);
                        list_del(&nsh->lru);
                        kmem_cache_free(sc, nsh);
                }
                kmem_cache_destroy(sc);
+               mutex_unlock(&conf->cache_size_mutex);
                return -ENOMEM;
        }
        /* Step 2 - Must use GFP_NOIO now.
         * OK, we have enough stripes, start collecting inactive
         * stripes and copying them over
         */
        } else
                err = -ENOMEM;
 
+       mutex_unlock(&conf->cache_size_mutex);
        /* Step 4, return new stripes to service */
        while(!list_empty(&newstripes)) {
                nsh = list_entry(newstripes.next, struct stripe_head, lru);
        pr_debug("%d stripes handled\n", handled);
 
        spin_unlock_irq(&conf->device_lock);
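+       /* Grow the cache only when no explicit resize holds the mutex;
+        * the trylock keeps raid5d from blocking behind a resize.
+        */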
-       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+           mutex_trylock(&conf->cache_size_mutex)) {
                grow_one_stripe(conf, __GFP_NOWARN);
                /* Set flag even if allocation failed.  This helps
                 * slow down allocation requests when mem is short
                 */
                set_bit(R5_DID_ALLOC, &conf->cache_state);
+               mutex_unlock(&conf->cache_size_mutex);
        }
 
        async_tx_issue_pending_all();
                return -EINVAL;
 
        conf->min_nr_stripes = size;
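+       /* Serialise with raid5d and the shrinker while stripes are dropped */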
+       mutex_lock(&conf->cache_size_mutex);
        while (size < conf->max_nr_stripes &&
               drop_one_stripe(conf))
                ;
+       mutex_unlock(&conf->cache_size_mutex);
 
 
        err = md_allow_write(mddev);
        if (err)
                return err;
 
+       mutex_lock(&conf->cache_size_mutex);
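+       /* With the mutex held, the shrinker cannot drop stripes while we grow */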
        while (size > conf->max_nr_stripes)
                if (!grow_one_stripe(conf, GFP_KERNEL))
                        break;
+       mutex_unlock(&conf->cache_size_mutex);
 
        return 0;
 }
                                      struct shrink_control *sc)
 {
        struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-       int ret = 0;
-       while (ret < sc->nr_to_scan) {
-               if (drop_one_stripe(conf) == 0)
-                       return SHRINK_STOP;
-               ret++;
+       unsigned long ret = SHRINK_STOP;
+
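+       /* If a resize already holds the mutex, return SHRINK_STOP rather
+        * than block memory reclaim.
+        */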
+       if (mutex_trylock(&conf->cache_size_mutex)) {
+               ret = 0;
+               while (ret < sc->nr_to_scan) {
+                       if (drop_one_stripe(conf) == 0) {
+                               ret = SHRINK_STOP;
+                               break;
+                       }
+                       ret++;
+               }
+               mutex_unlock(&conf->cache_size_mutex);
        }
        return ret;
 }
                goto abort;
        spin_lock_init(&conf->device_lock);
        seqcount_init(&conf->gen_lock);
+       mutex_init(&conf->cache_size_mutex);
        init_waitqueue_head(&conf->wait_for_quiescent);
        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
                init_waitqueue_head(&conf->wait_for_stripe[i]);