        struct block_device *md_dev;
        struct dm_pool_metadata *pmd;
 
-       uint32_t sectors_per_block;
-       unsigned block_shift;
-       dm_block_t offset_mask;
        dm_block_t low_water_blocks;
+       uint32_t sectors_per_block;
 
        struct pool_features pf;
        unsigned low_water_triggered:1; /* A dm event has been sent */
        struct work_struct worker;
        struct delayed_work waker;
 
-       unsigned ref_count;
        unsigned long last_commit_jiffies;
+       unsigned ref_count;
 
        spinlock_t lock;
        struct bio_list deferred_bios;
 
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
-       return bio->bi_sector >> tc->pool->block_shift;
+       sector_t block_nr = bio->bi_sector;
+
+       (void) sector_div(block_nr, tc->pool->sectors_per_block);
+
+       return block_nr;
 }
 
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
        struct pool *pool = tc->pool;
+       sector_t bi_sector = bio->bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
-       bio->bi_sector = (block << pool->block_shift) +
-               (bio->bi_sector & pool->offset_mask);
+       bio->bi_sector = (block * pool->sectors_per_block) +
+                        sector_div(bi_sector, pool->sectors_per_block);
 }
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
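
For power-of-2 block sizes the new division must agree with the old shift/mask code, and remap() must still reconstruct the original data-device sector from block number plus offset. A quick equivalence check (the sample numbers are arbitrary; plain / and % stand in for what sector_div() computes):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;
typedef uint64_t dm_block_t;

int main(void)
{
	const uint32_t sectors_per_block = 128;	/* a power of 2: 64KiB blocks */
	const unsigned block_shift = 7;		/* ffs(128) - 1 */
	const sector_t offset_mask = sectors_per_block - 1;
	const sector_t bi_sector = 987654321;

	/* Old scheme: shift out the block number, mask off the offset. */
	dm_block_t old_block = bi_sector >> block_shift;
	sector_t old_offset = bi_sector & offset_mask;

	/* New scheme: division and remainder, as sector_div() computes. */
	dm_block_t new_block = bi_sector / sectors_per_block;
	sector_t new_offset = bi_sector % sectors_per_block;

	assert(old_block == new_block && old_offset == new_offset);

	/* remap() then rebuilds the data-device sector from the pair. */
	assert(new_block * sectors_per_block + new_offset == bi_sector);
	return 0;
}
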
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-       return !(bio->bi_sector & pool->offset_mask) &&
-               (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
+       sector_t bi_sector = bio->bi_sector;
 
+       return !sector_div(bi_sector, pool->sectors_per_block) &&
+               (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
                         * part of the discard that is in a subsequent
                         * block.
                         */
-                       sector_t offset = bio->bi_sector - (block << pool->block_shift);
-                       unsigned remaining = (pool->sectors_per_block - offset) << 9;
+                       sector_t offset = bio->bi_sector - (block * pool->sectors_per_block);
+                       unsigned remaining = (pool->sectors_per_block - offset) << SECTOR_SHIFT;
                        bio->bi_size = min(bio->bi_size, remaining);
 
                        cell_release_singleton(cell, bio);
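
The clamp above keeps a misaligned discard from spilling into the next block: offset is how far into its block the bio starts, and remaining is the byte count from there to the block boundary. Worked through with assumed numbers (a 24-sector block, a discard starting 10 sectors into block 100):

#include <assert.h>
#include <stdint.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors */

int main(void)
{
	const uint32_t sectors_per_block = 24;	/* 12KiB, not a power of 2 */
	const uint64_t block = 100;		/* block containing bi_sector */
	const uint64_t bi_sector = 2410;	/* 10 sectors into block 100 */
	uint32_t bi_size = 64 << SECTOR_SHIFT;	/* 64-sector discard, in bytes */

	/* How far into its block the bio starts, in sectors. */
	uint64_t offset = bi_sector - block * sectors_per_block;
	assert(offset == 10);

	/* Bytes between bi_sector and the end of the block. */
	uint32_t remaining = (sectors_per_block - offset) << SECTOR_SHIFT;
	assert(remaining == 14 * 512);

	/* min(bi_size, remaining): the discard no longer touches block 101. */
	if (bi_size > remaining)
		bi_size = remaining;
	assert(bi_size == 7168);
	return 0;
}
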
 
        pool->pmd = pmd;
        pool->sectors_per_block = block_size;
-       pool->block_shift = ffs(block_size) - 1;
-       pool->offset_mask = block_size - 1;
        pool->low_water_blocks = 0;
        pool_features_init(&pool->pf);
        pool->prison = prison_create(PRISON_CELLS);
        if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
            block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
            block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
-           !is_power_of_2(block_size)) {
+           block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
                ti->error = "Invalid block size";
                r = -EINVAL;
                goto out;
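
Since block sizes no longer have to be powers of 2, the constructor now only requires a multiple of DATA_DEV_BLOCK_SIZE_MIN_SECTORS; the bitwise test is equivalent to a modulo check precisely because that minimum (64KiB, i.e. 128 sectors, in this version) is itself a power of 2. A sketch of the test, with a hypothetical aligned_to_min() helper:

#include <assert.h>
#include <stdint.h>

#define SECTOR_SHIFT 9
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS ((64 * 1024) >> SECTOR_SHIFT) /* 128 */

/* x & (m - 1) == x % m only because m is a power of 2. */
static int aligned_to_min(uint32_t block_size)
{
	return !(block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1));
}

int main(void)
{
	assert(aligned_to_min(128));	/* power of 2: still accepted */
	assert(aligned_to_min(384));	/* 192KiB: newly accepted */
	assert(!aligned_to_min(200));	/* not a multiple of 128: rejected */
	return 0;
}
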
                goto out_flags_changed;
        }
 
+       /*
+        * The block layer requires discard_granularity to be a power of 2.
+        */
+       if (pf.discard_enabled && !is_power_of_2(block_size)) {
+               ti->error = "Discard support must be disabled when the block size is not a power of 2";
+               r = -EINVAL;
+               goto out_flags_changed;
+       }
+
        pt->pool = pool;
        pt->ti = ti;
        pt->metadata_dev = metadata_dev;
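
The new rejection above relies on the kernel's is_power_of_2() from <linux/log2.h>, the usual n & (n - 1) test. The pool derives discard_granularity from its block size, and per the comment the block layer assumes a power-of-2 granularity, so a pool with, say, a 384-sector block size has to be created with discards disabled (the ignore_discard pool feature) or the constructor fails. A sketch of the test itself:

#include <assert.h>
#include <stdint.h>

/* Same test as the kernel's is_power_of_2() in <linux/log2.h>. */
static inline int is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(is_power_of_2(256));	/* discard stays available */
	assert(!is_power_of_2(384));	/* ctr fails unless discards are off */
	return 0;
}
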
        int r;
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
-       dm_block_t data_size, sb_data_size;
+       sector_t data_size = ti->len;
+       dm_block_t sb_data_size;
 
        /*
         * Take control of the pool object.
        if (r)
                return r;
 
-       data_size = ti->len >> pool->block_shift;
+       (void) sector_div(data_size, pool->sectors_per_block);
+
        r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
        if (r) {
                DMERR("failed to retrieve data device size");
 
        if (data_size < sb_data_size) {
                DMERR("pool target too small, is %llu blocks (expected %llu)",
-                     data_size, sb_data_size);
+                     (unsigned long long)data_size, sb_data_size);
                return -EINVAL;
 
        } else if (data_size > sb_data_size) {
 static int thin_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
 {
-       dm_block_t blocks;
+       sector_t blocks;
        struct thin_c *tc = ti->private;
+       struct pool *pool = tc->pool;
 
        /*
         * We can't call dm_pool_get_data_dev_size() since that blocks.  So
         * we follow a more convoluted path through to the pool's target.
         */
-       if (!tc->pool->ti)
+       if (!pool->ti)
                return 0;       /* nothing is bound */
 
-       blocks = tc->pool->ti->len >> tc->pool->block_shift;
+       blocks = pool->ti->len;
+       (void) sector_div(blocks, pool->sectors_per_block);
        if (blocks)
-               return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
+               return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
 
        return 0;
 }
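
Note the rounding here: sector_div() leaves the quotient in blocks, so the length handed to the callout is rounded down to a whole number of blocks and any partial trailing block is never reported. With assumed numbers:

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

int main(void)
{
	const uint32_t sectors_per_block = 24;
	sector_t len = 1000;		/* pool target length in sectors */

	sector_t blocks = len / sectors_per_block;	/* sector_div() quotient */
	assert(blocks == 41);

	/* Only 41 * 24 = 984 sectors are reported to the callout; the
	 * 16-sector tail that cannot hold a whole block is dropped. */
	assert(blocks * sectors_per_block == 984);
	return 0;
}
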
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 1, 0},
+       .version = {1, 2, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,