struct request_queue *q;
        size_t n;
 
-       if (!d->stripe_size_bits)
-               d->stripe_size_bits = 31;
+       if (!d->stripe_size)
+               d->stripe_size = 1U << 31;
 
-       d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
-               d->stripe_size_bits;
+       d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
 
        if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
                return -ENOMEM;
 
        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);
 
-       sysfs_hprint(stripe_size,       (1 << dc->disk.stripe_size_bits) << 9);
+       sysfs_hprint(stripe_size,       dc->disk.stripe_size << 9);
        var_printf(partial_stripes_expensive,   "%u");
 
        var_printf(sequential_merge,    "%i");
 
 
 static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
 {
-       uint64_t stripe;
+       uint64_t stripe = KEY_START(k);
        unsigned nr_sectors = KEY_SIZE(k);
        struct cached_dev *dc = container_of(buf, struct cached_dev,
                                             writeback_keys);
-       unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
 
        if (!KEY_DIRTY(k))
                return false;
 
-       stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
-       while (1) {
-               if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
-                   stripe_size)
-                       return false;
+       do_div(stripe, dc->disk.stripe_size);
 
-               if (nr_sectors <= stripe_size)
+       while (1) {
+               if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
+                   dc->disk.stripe_size)
                        return true;
 
-               nr_sectors -= stripe_size;
+               if (nr_sectors <= dc->disk.stripe_size)
+                       return false;
+
+               nr_sectors -= dc->disk.stripe_size;
                stripe++;
        }
 }
 
                for (i = 0; i < dc->disk.nr_stripes; i++)
                        if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-                           1 << dc->disk.stripe_size_bits)
+                           dc->disk.stripe_size)
                                goto full_stripes;
 
                goto normal_refill;
 full_stripes:
+               searched_from_start = false;    /* not searching entire btree */
                bch_refill_keybuf(dc->disk.c, buf, &end,
                                  dirty_full_stripe_pred);
        } else {
                                  uint64_t offset, int nr_sectors)
 {
        struct bcache_device *d = c->devices[inode];
-       unsigned stripe_size, stripe_offset;
-       uint64_t stripe;
+       unsigned stripe_offset;
+       uint64_t stripe = offset;
 
        if (!d)
                return;
 
-       stripe_size = 1 << d->stripe_size_bits;
-       stripe = offset >> d->stripe_size_bits;
-       stripe_offset = offset & (stripe_size - 1);
+       /*
+        * do_div() writes the quotient back into @stripe and returns the
+        * remainder, so this stays correct even when stripe_size is not a
+        * power of two (masking with stripe_size - 1 would not).
+        */
+       stripe_offset = do_div(stripe, d->stripe_size);
 
        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
-                             stripe_size - stripe_offset);
+                             d->stripe_size - stripe_offset);
 
                if (nr_sectors < 0)
                        s = -s;
 
                                           uint64_t offset,
                                           unsigned nr_sectors)
 {
-       uint64_t stripe = offset >> d->stripe_size_bits;
+       uint64_t stripe = offset;
+
+       do_div(stripe, d->stripe_size);
 
        while (1) {
                if (atomic_read(d->stripe_sectors_dirty + stripe))
                        return true;
 
-               if (nr_sectors <= 1 << d->stripe_size_bits)
+               if (nr_sectors <= d->stripe_size)
                        return false;
 
-               nr_sectors -= 1 << d->stripe_size_bits;
+               nr_sectors -= d->stripe_size;
                stripe++;
        }
 }