unsigned                sequential_cutoff;
        unsigned                readahead;
 
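+       /* when set, new I/O to the backing device is failed immediately */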
+       unsigned                io_disable:1;
        unsigned                verify:1;
        unsigned                bypass_torture_test:1;
 
        unsigned                writeback_rate_minimum;
 
        enum stop_on_failure    stop_when_cache_set_failed;
+#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
+       atomic_t                io_errors;
+       unsigned                error_limit;
 };
 
 enum alloc_reserve {
 
 /* Forward declarations */
 
+void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
 void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
                              blk_status_t, const char *);
                         struct bkey *, int, bool);
 bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
                       unsigned, unsigned, bool);
+bool bch_cached_dev_error(struct cached_dev *dc);
 
 __printf(2, 3)
 bool bch_cache_set_error(struct cache_set *, const char *, ...);
 
 }
 
 /* IO errors */
+void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
+{
+       char buf[BDEVNAME_SIZE];
+       unsigned errors;
+
+       WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+
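+       /*
+        * Count this error against dc->error_limit: log each error until
+        * the limit is reached, then disable the whole cached device via
+        * bch_cached_dev_error().
+        */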
+       errors = atomic_add_return(1, &dc->io_errors);
+       if (errors < dc->error_limit)
+               pr_err("%s: IO error on backing device, unrecoverable\n",
+                       bio_devname(bio, buf));
+       else
+               bch_cached_dev_error(dc);
+}
 
 void bch_count_io_errors(struct cache *ca,
                         blk_status_t error,
 
 
        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
+               struct cached_dev *dc = container_of(s->d,
+                                                    struct cached_dev, disk);
                /*
                 * If a bio has REQ_PREFLUSH for writeback mode, it is
                 * specially assembled in cached_dev_write() for a non-zero
                }
                s->recoverable = false;
                /* should count I/O error for backing device here */
+               bch_count_backing_io_errors(dc, bio);
        }
 
        bio_put(bio);
                            bio_data_dir(bio),
                            &ddip->d->disk->part0, ddip->start_time);
 
-       kfree(ddip);
+       if (bio->bi_status) {
+               struct cached_dev *dc = container_of(ddip->d,
+                                                    struct cached_dev, disk);
+               /* should count I/O error for backing device here */
+               bch_count_backing_io_errors(dc, bio);
+       }
 
+       kfree(ddip);
        bio->bi_end_io(bio);
 }
 
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);
 
-       if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
+       if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
+                    dc->io_disable)) {
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
 
                max(dc->disk.disk->queue->backing_dev_info->ra_pages,
                    q->backing_dev_info->ra_pages);
 
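+       /* no backing device I/O errors yet, and I/O is not disabled */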
+       atomic_set(&dc->io_errors, 0);
+       dc->io_disable = false;
+       dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
        /* default to auto */
        dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
 
        return flash_dev_run(c, u);
 }
 
+bool bch_cached_dev_error(struct cached_dev *dc)
+{
+       char name[BDEVNAME_SIZE];
+
+       if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
+               return false;
+
+       dc->io_disable = true;
+       /* make sure other CPUs see io_disable before the device is stopped */
+       smp_mb();
+
+       pr_err("stop %s: too many IO errors on backing device %s\n",
+               dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+
+       bcache_device_stop(&dc->disk);
+       return true;
+}
+
 /* Cache set */
 
 __printf(2, 3)
 
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,    dc->writeback_rate.rate << 9);
-
+       sysfs_hprint(io_errors,         atomic_read(&dc->io_errors));
+       sysfs_printf(io_error_limit,    "%i", dc->error_limit);
+       sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        d_strtoul(writeback_rate_i_term_inverse);
        d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
+       sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
+
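+       /* allow io_disable to be flipped manually from sysfs */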
+       if (attr == &sysfs_io_disable) {
+               int v = strtoul_or_return(buf);
+
+               dc->io_disable = v ? 1 : 0;
+       }
+
        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);
 
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_debug,
+       &sysfs_io_errors,
+       &sysfs_io_error_limit,
+       &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,