}
        return 0;
 }
+
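+/*
+ * Convenience wrapper for callers that only care whether the range
+ * [s, s + sectors) overlaps any bad blocks, not where they begin or how
+ * long they run.  The return value mirrors is_badblock(): 0 for a clean
+ * range, 1 if it overlaps only acknowledged bad blocks, and -1 if any
+ * overlapping bad block is still unacknowledged.
+ */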
+static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
+                                   int sectors)
+{
+       sector_t first_bad;
+       int bad_sectors;
+
+       return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
+}
+
 extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                              int is_new);
 extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 
                 * to user-side. So if something waits for IO, then it
                 * will wait for the 'master' bio.
                 */
-               sector_t first_bad;
-               int bad_sectors;
-
                r1_bio->bios[mirror] = NULL;
                to_put = bio;
                /*
                        set_bit(R1BIO_Uptodate, &r1_bio->state);
 
                /* Maybe we can clear some bad blocks. */
-               if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
-                               &first_bad, &bad_sectors) && !discard_error) {
+               if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+                   !discard_error) {
                        r1_bio->bios[mirror] = IO_MADE_GOOD;
                        set_bit(R1BIO_MadeGood, &r1_bio->state);
                }
        struct r1bio *r1_bio = get_resync_r1bio(bio);
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
-       sector_t first_bad;
-       int bad_sectors;
        struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
        if (!uptodate) {
                        set_bit(MD_RECOVERY_NEEDED, &
                                mddev->recovery);
                set_bit(R1BIO_WriteError, &r1_bio->state);
-       } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
-                              &first_bad, &bad_sectors) &&
-                  !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
-                               r1_bio->sector,
-                               r1_bio->sectors,
-                               &first_bad, &bad_sectors)
-               )
+       } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+                  !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+                                     r1_bio->sector, r1_bio->sectors)) {
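+               /*
+                * The write hit a range this rdev has marked bad, but the
+                * disk we resynced from is clean there, so the data is now
+                * good and the bad blocks can be cleared later.
+                */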
                set_bit(R1BIO_MadeGood, &r1_bio->state);
+       }
 
        put_sync_write_buf(r1_bio, uptodate);
 }
                        s = PAGE_SIZE >> 9;
 
                do {
-                       sector_t first_bad;
-                       int bad_sectors;
-
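+                       /*
+                        * Look for an in-sync device with no bad blocks over
+                        * this range to read a good copy from.
+                        */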
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
                            (test_bit(In_sync, &rdev->flags) ||
                             (!test_bit(Faulty, &rdev->flags) &&
                              rdev->recovery_offset >= sect + s)) &&
-                           is_badblock(rdev, sect, s,
-                                       &first_bad, &bad_sectors) == 0) {
+                           rdev_has_badblock(rdev, sect, s) == 0) {
                                atomic_inc(&rdev->nr_pending);
                                if (sync_page_io(rdev, sect, s<<9,
                                         conf->tmppage, REQ_OP_READ, false))
 
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
-                */
-               sector_t first_bad;
-               int bad_sectors;
-
-               /*
+                *
                 * Do not set R10BIO_Uptodate if the current device is
                 * rebuilding or Faulty. This is because we cannot use
                 * such a device to properly read the data back (we could
                        set_bit(R10BIO_Uptodate, &r10_bio->state);
 
                /* Maybe we can clear some bad blocks. */
-               if (is_badblock(rdev,
-                               r10_bio->devs[slot].addr,
-                               r10_bio->sectors,
-                               &first_bad, &bad_sectors) && !discard_error) {
+               if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+                                     r10_bio->sectors) &&
+                   !discard_error) {
                        bio_put(bio);
                        if (repl)
                                r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
                }
 
                if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
-                       sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
-                       int bad_sectors;
-                       int is_bad;
 
                        /*
                         * Discard request doesn't care about the write result
                        if (!r10_bio->sectors)
                                continue;
 
-                       is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
-                                            &first_bad, &bad_sectors);
-                       if (is_bad < 0) {
+                       if (rdev_has_badblock(rdev, dev_sector,
+                                             r10_bio->sectors) < 0) {
                                /*
                                 * Mustn't write here until the bad block
                                 * is acknowledged
        struct mddev *mddev = r10_bio->mddev;
        struct r10conf *conf = mddev->private;
        int d;
-       sector_t first_bad;
-       int bad_sectors;
        int slot;
        int repl;
        struct md_rdev *rdev = NULL;
                                        &rdev->mddev->recovery);
                        set_bit(R10BIO_WriteError, &r10_bio->state);
                }
-       } else if (is_badblock(rdev,
-                            r10_bio->devs[slot].addr,
-                            r10_bio->sectors,
-                            &first_bad, &bad_sectors))
+       } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+                                    r10_bio->sectors)) {
                set_bit(R10BIO_MadeGood, &r10_bio->state);
+       }
 
        rdev_dec_pending(rdev, mddev);
 
 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
                            int sectors, struct page *page, enum req_op op)
 {
-       sector_t first_bad;
-       int bad_sectors;
-
-       if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
-           && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
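+       /*
+        * Never read from a known-bad range, and only rewrite one if this
+        * device hasn't already seen a write error.
+        */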
+       if (rdev_has_badblock(rdev, sector, sectors) &&
+           (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
                return -1;
        if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
                /* success */
                        s = PAGE_SIZE >> 9;
 
                do {
-                       sector_t first_bad;
-                       int bad_sectors;
-
                        d = r10_bio->devs[sl].devnum;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
                            test_bit(In_sync, &rdev->flags) &&
                            !test_bit(Faulty, &rdev->flags) &&
-                           is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
-                                       &first_bad, &bad_sectors) == 0) {
+                           rdev_has_badblock(rdev,
+                                             r10_bio->devs[sl].addr + sect,
+                                             s) == 0) {
                                atomic_inc(&rdev->nr_pending);
                                success = sync_page_io(rdev,
                                                       r10_bio->devs[sl].addr +
 
                 */
                while (op_is_write(op) && rdev &&
                       test_bit(WriteErrorSeen, &rdev->flags)) {
-                       sector_t first_bad;
-                       int bad_sectors;
-                       int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-                                             &first_bad, &bad_sectors);
+                       int bad = rdev_has_badblock(rdev, sh->sector,
+                                                   RAID5_STRIPE_SECTORS(conf));
                        if (!bad)
                                break;
 
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
        struct md_rdev *rdev;
-       sector_t first_bad;
-       int bad_sectors;
        int replacement = 0;
 
        for (i = 0 ; i < disks; i++) {
        if (replacement) {
                if (bi->bi_status)
                        md_error(conf->mddev, rdev);
-               else if (is_badblock(rdev, sh->sector,
-                                    RAID5_STRIPE_SECTORS(conf),
-                                    &first_bad, &bad_sectors))
+               else if (rdev_has_badblock(rdev, sh->sector,
+                                          RAID5_STRIPE_SECTORS(conf)))
                        set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
        } else {
                if (bi->bi_status) {
                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
                                set_bit(MD_RECOVERY_NEEDED,
                                        &rdev->mddev->recovery);
-               } else if (is_badblock(rdev, sh->sector,
-                                      RAID5_STRIPE_SECTORS(conf),
-                                      &first_bad, &bad_sectors)) {
+               } else if (rdev_has_badblock(rdev, sh->sector,
+                                            RAID5_STRIPE_SECTORS(conf))) {
                        set_bit(R5_MadeGood, &sh->dev[i].flags);
                        if (test_bit(R5_ReadError, &sh->dev[i].flags))
                                /* That was a successful write so make
        /* Now to look around and see what can be done */
        for (i=disks; i--; ) {
                struct md_rdev *rdev;
-               sector_t first_bad;
-               int bad_sectors;
                int is_bad = 0;
 
                dev = &sh->dev[i];
                rdev = conf->disks[i].replacement;
                if (rdev && !test_bit(Faulty, &rdev->flags) &&
                    rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
-                   !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-                                &first_bad, &bad_sectors))
+                   !rdev_has_badblock(rdev, sh->sector,
+                                      RAID5_STRIPE_SECTORS(conf)))
                        set_bit(R5_ReadRepl, &dev->flags);
                else {
                        if (rdev && !test_bit(Faulty, &rdev->flags))
                if (rdev && test_bit(Faulty, &rdev->flags))
                        rdev = NULL;
                if (rdev) {
-                       is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
-                                            &first_bad, &bad_sectors);
+                       is_bad = rdev_has_badblock(rdev, sh->sector,
+                                                  RAID5_STRIPE_SECTORS(conf));
                        if (s->blocked_rdev == NULL
                            && (test_bit(Blocked, &rdev->flags)
                                || is_bad < 0)) {
        struct r5conf *conf = mddev->private;
        struct bio *align_bio;
        struct md_rdev *rdev;
-       sector_t sector, end_sector, first_bad;
-       int bad_sectors, dd_idx;
+       sector_t sector, end_sector;
+       int dd_idx;
        bool did_inc;
 
        if (!in_chunk_boundary(mddev, raid_bio)) {
 
        atomic_inc(&rdev->nr_pending);
 
-       if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
-                       &bad_sectors)) {
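+       /*
+        * The aligned fast path can't read across bad blocks; bail out and
+        * let the caller fall back to the normal stripe handling.
+        */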
+       if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
                rdev_dec_pending(rdev, mddev);
                return 0;
        }