cloned_bio->bi_bdev = pd->bdev;
                cloned_bio->bi_private = psd;
                cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-               pd->stats.secs_r += bio->bi_size >> 9;
+               pd->stats.secs_r += bio_sectors(bio);
                pkt_queue_bio(pd, cloned_bio);
                return;
        }
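Every hunk in this change makes the same substitution: the open-coded byte-to-sector shift "bio->bi_size >> 9" is replaced with the bio_sectors() helper, so the 512-byte sector conversion is spelled in one place instead of being hard-coded at each call site. For reference, a sketch of the helper as defined in include/linux/bio.h for this kernel generation (the comment is added here for illustration):

        /* bi_size is the remaining I/O size in bytes; a sector is 512 bytes */
        #define bio_sectors(bio)        ((bio)->bi_size >> 9)

No computed value changes; only the spelling of the conversion does.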
 
 {
        io->bdev = m->dev->bdev;
        io->sector = map_sector(m, bio);
-       io->count = bio->bi_size >> 9;
+       io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
 
 {
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-                                       + (bio->bi_size >> 9));
+                                       + bio_sectors(bio));
         } else{
                sector_t sector = bio->bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
-                                               + (bio->bi_size >> 9));
+                                               + bio_sectors(bio));
        }
 }
 
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n",
               mdname(mddev), chunk_sects / 2,
-              (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
        bio_io_error(bio);
        return;
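The printk() hunk above (and the matching raid10 hunk further down) also changes the unit conversion in the message: the old code printed the request size in KiB as bi_size >> 10, while the new code prints bio_sectors(bio) / 2, which is the same number because two 512-byte sectors make one KiB. A minimal standalone check of that equivalence (illustrative only; check_kib_equivalence is not part of the patch):

        #include <assert.h>

        static void check_kib_equivalence(unsigned int bi_size_bytes)
        {
                unsigned int old_kib = bi_size_bytes >> 10;      /* bytes   -> KiB */
                unsigned int new_kib = (bi_size_bytes >> 9) / 2; /* sectors -> KiB */

                assert(old_kib == new_kib);     /* e.g. 4096 bytes: 4 == 4 */
        }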
 
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_sector,
                         (unsigned long long) bio->bi_sector +
-                        (bio->bi_size >> 9) - 1);
+                        bio_sectors(bio) - 1);
 
                call_bio_endio(r1_bio);
        }
                                         " %llu-%llu\n",
                                         (unsigned long long) mbio->bi_sector,
                                         (unsigned long long) mbio->bi_sector +
-                                        (mbio->bi_size >> 9) - 1);
+                                        bio_sectors(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
        r1_bio->master_bio = bio;
-       r1_bio->sectors = bio->bi_size >> 9;
+       r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
                        r1_bio->master_bio = bio;
-                       r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+                       r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
                        r1_bio->sector = bio->bi_sector + sectors_handled;
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
         */
-       if (sectors_handled < (bio->bi_size >> 9)) {
+       if (sectors_handled < bio_sectors(bio)) {
                r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
                r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
                r1_bio->master_bio = bio;
-               r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
                r1_bio->sector = bio->bi_sector + sectors_handled;
                wbio->bi_rw = WRITE;
                wbio->bi_end_io = end_sync_write;
                atomic_inc(&r1_bio->remaining);
-               md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
                generic_make_request(wbio);
        }
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
                        r1_bio->master_bio = mbio;
-                       r1_bio->sectors = (mbio->bi_size >> 9)
-                                         - sectors_handled;
+                       r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
 
        /* If this request crosses a chunk boundary, we need to
         * split it.  This will only happen for 1 PAGE (or less) requests.
         */
-       if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+       if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
                     > chunk_sects
                     && (conf->geo.near_copies < conf->geo.raid_disks
                         || conf->prev.near_copies < conf->prev.raid_disks))) {
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-                      (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+                      (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
                bio_io_error(bio);
                return;
         */
        wait_barrier(conf);
 
-       sectors = bio->bi_size >> 9;
+       sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_sector < conf->reshape_progress &&
            bio->bi_sector + sectors > conf->reshape_progress) {
                        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
                        r10_bio->master_bio = bio;
-                       r10_bio->sectors = ((bio->bi_size >> 9)
-                                           - sectors_handled);
+                       r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
                        r10_bio->sector = bio->bi_sector + sectors_handled;
         * after checking if we need to go around again.
         */
 
-       if (sectors_handled < (bio->bi_size >> 9)) {
+       if (sectors_handled < bio_sectors(bio)) {
                one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
                r10_bio->master_bio = bio;
-               r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
                r10_bio->mddev = mddev;
                r10_bio->sector = bio->bi_sector + sectors_handled;
                d = r10_bio->devs[i].devnum;
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                atomic_inc(&r10_bio->remaining);
-               md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
                tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                d = r10_bio->devs[i].devnum;
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
-                            tbio->bi_size >> 9);
+                            bio_sectors(tbio));
                generic_make_request(tbio);
        }
 
        wbio2 = r10_bio->devs[1].repl_bio;
        if (wbio->bi_end_io) {
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-               md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
                generic_make_request(wbio);
        }
        if (wbio2 && wbio2->bi_end_io) {
                atomic_inc(&conf->mirrors[d].replacement->nr_pending);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
-                            wbio2->bi_size >> 9);
+                            bio_sectors(wbio2));
                generic_make_request(wbio2);
        }
 }
                r10_bio = mempool_alloc(conf->r10bio_pool,
                                        GFP_NOIO);
                r10_bio->master_bio = mbio;
-               r10_bio->sectors = (mbio->bi_size >> 9)
-                       - sectors_handled;
+               r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
                r10_bio->state = 0;
                set_bit(R10BIO_ReadError,
                        &r10_bio->state);
 
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-       int sectors = bio->bi_size >> 9;
+       int sectors = bio_sectors(bio);
        if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
 {
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
-       unsigned int bio_sectors = bio->bi_size >> 9;
+       unsigned int bio_sectors = bio_sectors(bio);
 
        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
                chunk_sectors = mddev->new_chunk_sectors;
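The hunk above leaves a local variable named bio_sectors that is now initialized by the bio_sectors() macro of the same name. That still compiles because a function-like macro only expands where its name is immediately followed by '(': the declarator on the left-hand side is a plain identifier, while the initializer on the right-hand side expands as usual. A minimal sketch of that behaviour (struct bio_stub and in_chunk_stub are illustrative stand-ins, not kernel code):

        #define bio_sectors(bio)        ((bio)->bi_size >> 9)

        struct bio_stub {
                unsigned int bi_size;   /* I/O size in bytes */
        };

        static unsigned int in_chunk_stub(struct bio_stub *bio)
        {
                unsigned int bio_sectors = bio_sectors(bio);    /* RHS expands, LHS does not */

                return bio_sectors;
        }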
 {
        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-       if ((bi->bi_size>>9) > queue_max_sectors(q))
+       if (bio_sectors(bi) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
        if (bi->bi_phys_segments > queue_max_segments(q))
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
 
        }
 
        prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-       if ((bio->bi_size >> 9) > max_sectors)
+       if (bio_sectors(bio) > max_sectors)
                return 0;
 
        if (!q->merge_bvec_fn)
 
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
+               __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
+               __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
+               __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
+               __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
        TP_fast_assign(
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio ? bio->bi_sector : 0;
-               __entry->nr_sector      = bio ? bio->bi_size >> 9 : 0;
+               __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
+               __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);