/*----------------------------------------------------------------*/
 
+#define THROTTLE_THRESHOLD (1 * HZ)
+
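+/*
+ * If a single pass of the worker runs for longer than
+ * THROTTLE_THRESHOLD, the worker takes this rw_semaphore for write and
+ * holds it until the pass completes.  The bio submission paths take it
+ * for read, so incoming IO is held back while the worker is behind.
+ */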
+struct throttle {
+       struct rw_semaphore lock;
+       unsigned long threshold;
+       bool throttle_applied;
+};
+
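+/*
+ * threshold need not be set here: throttle_work_start() arms it before
+ * throttle_work_update() reads it.
+ */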
+static void throttle_init(struct throttle *t)
+{
+       init_rwsem(&t->lock);
+       t->throttle_applied = false;
+}
+
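+/* Called by the worker at the start of each pass. */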
+static void throttle_work_start(struct throttle *t)
+{
+       t->threshold = jiffies + THROTTLE_THRESHOLD;
+}
+
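+/*
+ * Called by the worker between units of work.  Applies the throttle,
+ * at most once per pass, once the pass has overrun the threshold
+ * (time_after() keeps the jiffies comparison wrap-safe).
+ */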
+static void throttle_work_update(struct throttle *t)
+{
+       if (!t->throttle_applied && time_after(jiffies, t->threshold)) {
+               down_write(&t->lock);
+               t->throttle_applied = true;
+       }
+}
+
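+/* Called at the end of a pass; lifts the throttle if it was applied. */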
+static void throttle_work_complete(struct throttle *t)
+{
+       if (t->throttle_applied) {
+               t->throttle_applied = false;
+               up_write(&t->lock);
+       }
+}
+
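+/*
+ * Bio submission takes the lock for read, so it only blocks while the
+ * worker holds the throttle for write.
+ */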
+static void throttle_lock(struct throttle *t)
+{
+       down_read(&t->lock);
+}
+
+static void throttle_unlock(struct throttle *t)
+{
+       up_read(&t->lock);
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * A pool device ties together a metadata device and a data device.  It
  * also provides the interface for creating and destroying internal
        struct dm_kcopyd_client *copier;
 
        struct workqueue_struct *wq;
+       struct throttle throttle;
        struct work_struct worker;
        struct delayed_work waker;
        struct delayed_work no_space_timeout;
                        pool->process_bio(tc, bio);
 
                if ((count++ & 127) == 0) {
+                       throttle_work_update(&pool->throttle);
                        dm_pool_issue_prefetches(pool->pmd);
                }
        }
 {
        struct pool *pool = container_of(ws, struct pool, worker);
 
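+       /* Arm the throttle; the updates below check it between phases. */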
+       throttle_work_start(&pool->throttle);
        dm_pool_issue_prefetches(pool->pmd);
+       throttle_work_update(&pool->throttle);
        process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
+       throttle_work_update(&pool->throttle);
        process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
+       throttle_work_update(&pool->throttle);
        process_deferred_bios(pool);
+       throttle_work_complete(&pool->throttle);
 }
 
 /*
        wake_worker(pool);
 }
 
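+/* As thin_defer_bio(), but subject to the worker's throttle. */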
+static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
+{
+       struct pool *pool = tc->pool;
+
+       throttle_lock(&pool->throttle);
+       thin_defer_bio(tc, bio);
+       throttle_unlock(&pool->throttle);
+}
+
 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        }
 
        if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
-               thin_defer_bio(tc, bio);
+               thin_defer_bio_with_throttle(tc, bio);
                return DM_MAPIO_SUBMITTED;
        }
 
                goto bad_wq;
        }
 
+       throttle_init(&pool->throttle);
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
        INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);