if (bio_has_data(bio)) {
                rq->nr_phys_segments = bio_phys_segments(q, bio);
-               rq->nr_hw_segments = bio_hw_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
        rq->current_nr_sectors = bio_cur_sectors(bio);
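
With the hw counter gone, request prep carries a single segment count over from the bio. As a user-space sketch of the resulting shape (struct layouts and names below are illustrative stand-ins, not the kernel's types):

/* Toy model of the post-patch request prep: one physical-segment
 * count is copied from the bio, nothing else is tracked. */
#include <stdio.h>

struct toy_bio     { unsigned short phys_segs; unsigned int size; };
struct toy_request { unsigned short nr_phys_segments; unsigned int data_len; };

static void toy_rq_bio_prep(struct toy_request *rq, const struct toy_bio *bio)
{
	rq->nr_phys_segments = bio->phys_segs;	/* the only count left */
	rq->data_len = bio->size;
}

int main(void)
{
	struct toy_bio bio = { 4, 16384 };
	struct toy_request rq;

	toy_rq_bio_prep(&rq, &bio);
	printf("%u segs, %u bytes\n", (unsigned)rq.nr_phys_segments, rq.data_len);
	return 0;
}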
 
 void blk_recalc_rq_segments(struct request *rq)
 {
        int nr_phys_segs;
-       int nr_hw_segs;
        unsigned int phys_size;
-       unsigned int hw_size;
        struct bio_vec *bv, *bvprv = NULL;
        int seg_size;
-       int hw_seg_size;
        int cluster;
        struct req_iterator iter;
        int high, highprv = 1;
        struct request_queue *q = rq->q;

        if (!rq->bio)
                return;
 
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-       hw_seg_size = seg_size = 0;
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       seg_size = 0;
+       phys_size = nr_phys_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
                 * considered part of another segment, since that might
                 * change with the bounce page.
                 */
                                goto new_segment;
 
                        seg_size += bv->bv_len;
-                       hw_seg_size += bv->bv_len;
                        bvprv = bv;
                        continue;
                }
 new_segment:
-               if (nr_hw_segs == 1 &&
-                   hw_seg_size > rq->bio->bi_hw_front_size)
-                       rq->bio->bi_hw_front_size = hw_seg_size;
-               hw_seg_size = bv->bv_len;
-               nr_hw_segs++;
-
                nr_phys_segs++;
                bvprv = bv;
                seg_size = bv->bv_len;
                highprv = high;
        }
 
-       if (nr_hw_segs == 1 &&
-           hw_seg_size > rq->bio->bi_hw_front_size)
-               rq->bio->bi_hw_front_size = hw_seg_size;
-       if (hw_seg_size > rq->biotail->bi_hw_back_size)
-               rq->biotail->bi_hw_back_size = hw_seg_size;
        rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
 }
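
After the patch the loop answers one question: how many physically contiguous scatter entries does the request resolve to? Below is a minimal compilable model of that counting rule, assuming a flat bvec array and a single max_segment_size limit; the high-page and segment-boundary checks are deliberately left out:

#include <stdio.h>

struct bvec { unsigned long addr; unsigned int len; };

static int count_phys_segs(const struct bvec *bv, int n,
			   unsigned int max_seg_size, int cluster)
{
	unsigned int seg_size = 0;
	int i, nr = 0;

	for (i = 0; i < n; i++) {
		if (cluster && nr &&
		    bv[i - 1].addr + bv[i - 1].len == bv[i].addr &&
		    seg_size + bv[i].len <= max_seg_size) {
			seg_size += bv[i].len;	/* merge into current segment */
			continue;
		}
		nr++;				/* start a new segment */
		seg_size = bv[i].len;
	}
	return nr;
}

int main(void)
{
	struct bvec bv[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 },	/* contiguous pair */
		{ 0x8000, 0x1000 },			/* gap: new segment */
	};

	printf("%d segments\n", count_phys_segs(bv, 3, 0x10000, 1));
	return 0;
}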
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct request rq;
        struct bio *nxt = bio->bi_next;

        rq.q = q;
        rq.bio = rq.biotail = bio;
        bio->bi_next = NULL;
        blk_recalc_rq_segments(&rq);
        bio->bi_next = nxt;
        bio->bi_phys_segments = rq.nr_phys_segments;
-       bio->bi_hw_segments = rq.nr_hw_segments;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
 {
-       int nr_hw_segs = bio_hw_segments(q, bio);
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
-        * This will form the start of a new hw segment.  Bump both
-        * counters.
+        * This will form the start of a new segment.  Bump the
+        * segment counter.
         */
-       req->nr_hw_segments += nr_hw_segs;
        req->nr_phys_segments += nr_phys_segs;
        return 1;
 }
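
One count now feeds both tests, so the gate is effectively a comparison against the smaller of the two queue limits. A self-contained sketch with made-up limit values:

#include <stdio.h>
#include <stdbool.h>

/* Sketch of the simplified merge gate: one segment count, two queue
 * limits.  Equivalent to a single test against the smaller limit. */
static bool may_add_segs(unsigned cur, unsigned add,
			 unsigned max_phys, unsigned max_hw)
{
	return cur + add <= max_phys && cur + add <= max_hw;
}

int main(void)
{
	/* queue allowing 128 phys / 64 hw segments: hw binds first */
	printf("%d\n", may_add_segs(60, 3, 128, 64));	/* 1: fits */
	printf("%d\n", may_add_segs(62, 3, 128, 64));	/* 0: over max_hw */
	return 0;
}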
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
        int total_phys_segments;
-       int total_hw_segments;
 
        /*
-        * First check if the either of the requests are re-queued
+        * First check if either of the requests is re-queued
        if (total_phys_segments > q->max_phys_segments)
                return 0;
 
-       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-
-       if (total_hw_segments > q->max_hw_segments)
+       if (total_phys_segments > q->max_hw_segments)
                return 0;
 
        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
-       req->nr_hw_segments = total_hw_segments;
        return 1;
 }
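
The request-request path applies the same pair of limits to the summed physical counts; the elided context above also decrements the total by one when the boundary segments are physically contiguous (the blk_phys_contig_segment() check). A toy worked example:

#include <stdio.h>

/* Toy total for a request-request merge.  The -1 models the case
 * where the trailing and leading bvecs are physically contiguous and
 * collapse into one segment. */
int main(void)
{
	unsigned req_segs = 3, next_segs = 2;
	int boundary_contig = 1;

	unsigned total = req_segs + next_segs - (boundary_contig ? 1 : 0);
	printf("total_phys_segments = %u\n", total);	/* 4 */
	return 0;
}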
 
 
                         * device can handle
                         */
                        rq->nr_phys_segments++;
-                       rq->nr_hw_segments++;
                }
 
                if (!q->prep_rq_fn)
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
-                               --rq->nr_hw_segments;
                        }
 
                        rq = NULL;
 
                                        sbio->bi_size = r1_bio->sectors << 9;
                                        sbio->bi_idx = 0;
                                        sbio->bi_phys_segments = 0;
-                                       sbio->bi_hw_segments = 0;
-                                       sbio->bi_hw_front_size = 0;
-                                       sbio->bi_hw_back_size = 0;
                                        sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
                                        sbio->bi_flags |= 1 << BIO_UPTODATE;
                                        sbio->bi_next = NULL;
 
                tbio->bi_size = r10_bio->sectors << 9;
                tbio->bi_idx = 0;
                tbio->bi_phys_segments = 0;
-               tbio->bi_hw_segments = 0;
-               tbio->bi_hw_front_size = 0;
-               tbio->bi_hw_back_size = 0;
                tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
                tbio->bi_flags |= 1 << BIO_UPTODATE;
                tbio->bi_next = NULL;
 
        return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
-{
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-
-       return bio->bi_hw_segments;
-}
-
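
Any straggling caller of the removed helper has a one-line migration; a hypothetical out-of-tree user would switch to the physical count:

	-	segs = bio_hw_segments(q, bio);
	+	segs = bio_phys_segments(q, bio);

bio_phys_segments() keeps the lazy revalidation, calling blk_recount_segments() whenever BIO_SEG_VALID is unset.
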
 /**
  *     __bio_clone     -       clone a bio
  *     @bio: destination bio
         */
 
        while (bio->bi_phys_segments >= q->max_phys_segments
-              || bio->bi_hw_segments >= q->max_hw_segments) {
+              || bio->bi_phys_segments >= q->max_hw_segments) {
 
                if (retried_segments)
                        return 0;
 
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
-       bio->bi_hw_segments++;
  done:
        bio->bi_size += len;
        return len;
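
The add-page path above now retries and rejects on one counter only. A compilable user-space model of the gate, with hypothetical names and limits (the real loop also recounts segments once via retried_segments before giving up):

#include <stdio.h>
#include <stdbool.h>

struct toy_bio { unsigned short phys_segs; unsigned int size; };

/* Sketch: one counter tested against both queue limits before the
 * page is accepted. */
static bool toy_add_page(struct toy_bio *bio, unsigned int len,
			 unsigned short max_phys, unsigned short max_hw)
{
	if (bio->phys_segs >= max_phys || bio->phys_segs >= max_hw)
		return false;		/* a queue limit would be exceeded */
	bio->phys_segs++;		/* one counter bumped, not two */
	bio->size += len;
	return true;
}

int main(void)
{
	struct toy_bio bio = { 0, 0 };

	while (toy_add_page(&bio, 4096, 4, 8))
		;
	printf("%u segs, %u bytes\n", (unsigned)bio.phys_segs, bio.size);
	return 0;
}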
 EXPORT_SYMBOL(__bio_clone);
 EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 
         */
        unsigned short          bi_phys_segments;
 
-       /* Number of segments after physical and DMA remapping
-        * hardware coalescing is performed.
-        */
-       unsigned short          bi_hw_segments;
-
        unsigned int            bi_size;        /* residual I/O count */
 
-       /*
-        * To keep track of the max hw size, we account for the
-        * sizes of the first and last virtually mergeable segments
-        * in this bio
-        */
-       unsigned int            bi_hw_front_size;
-       unsigned int            bi_hw_back_size;
-
        unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
 
        struct bio_vec          *bi_io_vec;     /* the actual vec list */
 #define BIO_UPTODATE   0       /* ok after I/O completion */
 #define BIO_RW_BLOCK   1       /* RW_AHEAD set, and read/write would block */
#define BIO_EOF                2       /* out-of-bounds error */
-#define BIO_SEG_VALID  3       /* nr_hw_seg valid */
+#define BIO_SEG_VALID  3       /* bi_phys_segments valid */
 #define BIO_CLONED     4       /* doesn't own data */
 #define BIO_BOUNCED    5       /* bio is a bounce bio */
 #define BIO_USER_MAPPED 6      /* contains user pages */
 extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern void __bio_clone(struct bio *, struct bio *);
 extern struct bio *bio_clone(struct bio *, gfp_t);
 
         */
        unsigned short nr_phys_segments;
 
-       /* Number of scatter-gather addr+len pairs after
-        * physical and DMA remapping hardware coalescing is performed.
-        * This is the number of scatter-gather entries the driver
-        * will actually have to deal with after DMA mapping is done.
-        */
-       unsigned short nr_hw_segments;
-
        unsigned short ioprio;
 
        void *special;