}
        ubd_dev->queue->queuedata = ubd_dev;
 
-       blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+       blk_queue_max_segments(ubd_dev->queue, MAX_SG);
        err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
        if(err){
                *error_out = "Failed to register device";
 
         * limitation.
         */
        blk_recalc_rq_segments(rq);
-       if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-           rq->nr_phys_segments > queue_max_hw_segments(q)) {
+       if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }
 
 {
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-           req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                total_phys_segments--;
        }
 
-       if (total_phys_segments > queue_max_phys_segments(q))
-               return 0;
-
-       if (total_phys_segments > queue_max_hw_segments(q))
+       if (total_phys_segments > queue_max_segments(q))
                return 0;
 
        /* Merge is OK... */
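To make the consolidation concrete: the merge-path hunks above collapse
the old two-limit comparison into a single segment budget. A minimal
user-space sketch of that check, with illustrative names standing in for
rq->nr_phys_segments, bio_phys_segments() and queue_max_segments():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the post-patch merge check: one limit instead of
 * separate phys/hw comparisons. */
static bool segments_fit(unsigned short req_segs, unsigned short bio_segs,
                         unsigned short max_segs)
{
        return req_segs + bio_segs <= max_segs;
}

int main(void)
{
        printf("%d\n", segments_fit(126, 2, 128)); /* 1: exactly fits */
        printf("%d\n", segments_fit(127, 2, 128)); /* 0: would exceed  */
        return 0;
}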
 
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-       lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-       lim->max_hw_segments = MAX_HW_SEGMENTS;
+       lim->max_segments = BLK_MAX_SEGMENTS;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = BLK_DEF_MAX_SECTORS;
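For reference, both retired constants (MAX_PHYS_SEGMENTS and
MAX_HW_SEGMENTS) were defined as 128, so the consolidated default keeps
the same effective limit. The corresponding blkdev.h definition, shown
here for context only (it lives outside this hunk):

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,  /* replaces MAX_PHYS/HW_SEGMENTS */
};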
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-                                unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
        if (!max_segments) {
                max_segments = 1;
                       __func__, max_segments);
        }
 
-       q->limits.max_phys_segments = max_segments;
+       q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-                              unsigned short max_segments)
-{
-       if (!max_segments) {
-               max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n",
-                      __func__, max_segments);
-       }
-
-       q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
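With the setters collapsed, a driver advertises its scatter-gather
capability with a single call. A hedged illustration (hypothetical
driver; FOO_MAX_SG is an assumed per-device constant, not part of this
patch):

#include <linux/blkdev.h>

#define FOO_MAX_SG      64      /* assumed hardware S/G table size */

static void foo_setup_queue(struct request_queue *q)
{
        /* One limit now covers what max_phys_segments and
         * max_hw_segments used to express separately. */
        blk_queue_max_segments(q, FOO_MAX_SG);
        blk_queue_max_segment_size(q, 65536);
}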
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
 
-       t->max_phys_segments = min_not_zero(t->max_phys_segments,
-                                           b->max_phys_segments);
-
-       t->max_hw_segments = min_not_zero(t->max_hw_segments,
-                                         b->max_hw_segments);
+       t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support, otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
 {
-       if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+       if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
-       blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-       blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+       blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
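Restating the note above as code, a hypothetical ordering sketch
(foo_drain_needed, FOO_MAX_SG and the drain buffer names are assumed,
not from this patch):

static int foo_init_drain(struct request_queue *q, void *drain_buf,
                          unsigned int drain_len)
{
        blk_queue_max_segments(q, FOO_MAX_SG);  /* full hardware limit */
        if (blk_queue_dma_drain(q, foo_drain_needed, drain_buf, drain_len))
                return -EINVAL;         /* queue needs >= 2 segments */
        /* Any later change must leave one slot for the drain buffer: */
        blk_queue_max_segments(q, FOO_MAX_SG - 1);
        return 0;
}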
 
        }
 
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-       blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+       blk_queue_max_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)*ap->host->dev->dma_mask,
 
        blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
        RequestQueue->queuedata = Controller;
-       blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-       blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+       blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
        blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
        disk->queue = RequestQueue;
        sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
 
        blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
        /* This is a hardware imposed limit. */
-       blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
-
-       /* This is a limit in the driver and could be eliminated. */
-       blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+       blk_queue_max_segments(disk->queue, h->maxsgentries);
 
        blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
 
                blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 
        /* This is a hardware imposed limit. */
-       blk_queue_max_hw_segments(q, SG_MAX);
+       blk_queue_max_segments(q, SG_MAX);
 
-       /* This is a driver limit and could be eliminated. */
-       blk_queue_max_phys_segments(q, SG_MAX);
-       
        init_timer(&hba[i]->timer);
        hba[i]->timer.expires = jiffies + IDA_TIMER;
        hba[i]->timer.data = (unsigned long)hba[i];
 
        max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
        blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-       blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+       blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_max_segment_size(q, max_seg_s);
        blk_queue_logical_block_size(q, 512);
        blk_queue_segment_boundary(q, PAGE_SIZE-1);
 
                return -ENOMEM;
        }
 
-       blk_queue_max_phys_segments(pf_queue, cluster);
-       blk_queue_max_hw_segments(pf_queue, cluster);
+       blk_queue_max_segments(pf_queue, cluster);
 
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
                struct gendisk *disk = pf->disk;
 
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
        if ((pd->settings.size << 9) / CD_FRAMESIZE
-           <= queue_max_phys_segments(q)) {
+           <= queue_max_segments(q)) {
                /*
                 * The cdrom device can handle one segment/frame
                 */
                clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                return 0;
        } else if ((pd->settings.size << 9) / PAGE_SIZE
-                  <= queue_max_phys_segments(q)) {
+                  <= queue_max_segments(q)) {
                /*
                 * We can handle this case at the expense of some extra memory
                 * copies during write operations
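Concrete numbers for the check above, assuming the common 64 KiB packet
(pd->settings.size == 128 sectors, i.e. 65536 bytes) and 4 KiB pages:

        65536 / CD_FRAMESIZE (2048) = 32 segments at one segment/frame
        65536 / PAGE_SIZE    (4096) = 16 segments in the copying fallback

So a queue advertising at least 32 segments handles such a packet without
merging, while one advertising 16 to 31 segments falls back to
PACKET_MERGE_SEGS and pays for the extra copies on writes.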
 
        blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                          ps3disk_prepare_flush);
 
-       blk_queue_max_phys_segments(queue, -1);
-       blk_queue_max_hw_segments(queue, -1);
+       blk_queue_max_segments(queue, -1);
        blk_queue_max_segment_size(queue, dev->bounce_size);
 
        gendisk = alloc_disk(PS3DISK_MINORS);
 
        priv->queue = queue;
        queue->queuedata = dev;
        blk_queue_make_request(queue, ps3vram_make_request);
-       blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
+       blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
        blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
        blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
 
 
 
        port->disk = g;
 
-       blk_queue_max_hw_segments(q, port->ring_cookies);
-       blk_queue_max_phys_segments(q, port->ring_cookies);
+       blk_queue_max_segments(q, port->ring_cookies);
        blk_queue_max_hw_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
        g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
 
                        break;
                }
                disk->queue = q;
-               blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-               blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+               blk_queue_max_segments(q, CARM_MAX_REQ_SG);
                blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
 
                q->queuedata = port;
 
        disk->queue = q;
 
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-       blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-       blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+       blk_queue_max_segments(q, UB_MAX_REQ_SG);
        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
        blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
        blk_queue_logical_block_size(q, lun->capacity.bsize);
 
        }
 
        d->disk = g;
-       blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-       blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+       blk_queue_max_segments(q, VIOMAXBLOCKDMA);
        blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
        g->major = VIODASD_MAJOR;
        g->first_minor = dev_no << PARTITION_SHIFT;
 
        blk_queue_max_segment_size(rq, PAGE_SIZE);
 
        /* Ensure a merged request will fit in a single I/O ring slot. */
-       blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-       blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
        /* Make sure buffer addresses are sector-aligned. */
        blk_queue_dma_alignment(rq, 511);
 
 {
        blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
        /* using DMA so memory will need to be contiguous */
-       blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+       blk_queue_max_segments(gd.gdrom_rq, 1);
        /* set a large max size to get most from DMA */
        blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
        gd.disk->queue = gd.gdrom_rq;
 
        gendisk->first_minor = deviceno;
        strncpy(gendisk->disk_name, c->name,
                        sizeof(gendisk->disk_name));
-       blk_queue_max_hw_segments(q, 1);
-       blk_queue_max_phys_segments(q, 1);
+       blk_queue_max_segments(q, 1);
        blk_queue_max_hw_sectors(q, 4096 / 512);
        gendisk->queue = q;
        gendisk->fops = &viocd_fops;
 
                max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */
 
-       blk_queue_max_hw_segments(q, max_sg_entries);
-       blk_queue_max_phys_segments(q, max_sg_entries);
+       blk_queue_max_segments(q, max_sg_entries);
 
        /* assign drive queue */
        drive->queue = q;
 
        if ((bi->bi_size>>9) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > queue_max_phys_segments(q))
+       if (bi->bi_phys_segments > queue_max_segments(q))
                return 0;
 
        if (q->merge_bvec_fn)
 
 
        blk_queue_bounce_limit(msb->queue, limit);
        blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-       blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-       blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+       blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
        blk_queue_max_segment_size(msb->queue,
                                   MSPRO_BLOCK_MAX_PAGES * msb->page_size);
 
 
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;
 
-       blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_sectors(queue, max_sectors);
-       blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+       blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
 
        osm_debug("max sectors = %d\n", queue->max_sectors);
        osm_debug("phys segments = %d\n", queue->max_phys_segments);
 
                if (mq->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-                       blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-                       blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+                       blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);
 
                        mq->sg = kmalloc(sizeof(struct scatterlist),
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
-               blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-               blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+               blk_queue_max_segments(mq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
                mq->sg = kmalloc(sizeof(struct scatterlist) *
 
        blk_queue_logical_block_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
        blk_queue_max_hw_sectors(block->request_queue, max);
-       blk_queue_max_phys_segments(block->request_queue, -1L);
-       blk_queue_max_hw_segments(block->request_queue, -1L);
+       blk_queue_max_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segement into
         * one idaw/tidaw
         */
 
 
        blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
        blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-       blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-       blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+       blk_queue_max_segments(blkdat->request_queue, -1L);
        blk_queue_max_segment_size(blkdat->request_queue, -1L);
        blk_queue_segment_boundary(blkdat->request_queue, -1L);
 
 
                if (tgt->service_parms.class3_parms[0] & 0x80000000)
                        rport->supported_classes |= FC_COS_CLASS3;
                if (rport->rqst_q)
-                       blk_queue_max_hw_segments(rport->rqst_q, 1);
+                       blk_queue_max_segments(rport->rqst_q, 1);
        } else
                tgt_dbg(tgt, "rport add failed\n");
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
        }
 
        if (shost_to_fc_host(shost)->rqst_q)
-               blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+               blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
        dev_set_drvdata(dev, vhost);
        spin_lock(&ibmvfc_driver_lock);
        list_add_tail(&vhost->queue, &ibmvfc_head);
 
        /*
         * this limit is imposed by hardware restrictions
         */
-       blk_queue_max_hw_segments(q, shost->sg_tablesize);
-       blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+       blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+                                       SCSI_MAX_SG_CHAIN_SEGMENTS));
 
        blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
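The min_t() above folds the old pair of calls into one policy: the queue
limit is the adapter's sg_tablesize, capped by the kernel's chained
scatterlist ceiling. A worked illustration (adapter value hypothetical;
SCSI_MAX_SG_CHAIN_SEGMENTS was 2048 on architectures with SG chaining at
the time):

        unsigned short sg_tablesize = 4096; /* hypothetical large adapter */
        unsigned short limit = min_t(unsigned short, sg_tablesize, 2048);
        /* limit == 2048: the chaining ceiling caps the hardware value */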
 
        if (list_empty(&sdp->sfds)) {   /* no existing opens on this device */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
-               sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                                       queue_max_phys_segments(q));
+               sdp->sg_tablesize = queue_max_segments(q);
        }
        if ((sfp = sg_add_sfp(sdp, dev)))
                filp->private_data = sfp;
        sdp->device = scsidp;
        INIT_LIST_HEAD(&sdp->sfds);
        init_waitqueue_head(&sdp->o_excl_wait);
-       sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                               queue_max_phys_segments(q));
+       sdp->sg_tablesize = queue_max_segments(q);
        sdp->index = k;
        kref_init(&sdp->d_ref);
 
 
                return -ENODEV;
        }
 
-       i = min(queue_max_hw_segments(SDp->request_queue),
-               queue_max_phys_segments(SDp->request_queue));
+       i = queue_max_segments(SDp->request_queue);
        if (st_max_sg_segs < i)
                i = st_max_sg_segs;
        buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
 
        blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
 
        blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-       blk_queue_max_phys_segments(blkdev->gd->queue,
-                                   MAX_MULTIPAGE_BUFFER_COUNT);
-       blk_queue_max_hw_segments(blkdev->gd->queue,
-                                 MAX_MULTIPAGE_BUFFER_COUNT);
+       blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
        blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
        blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
        blk_queue_dma_alignment(blkdev->gd->queue, 511);
 
        int nr_pages;
 
        nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (nr_pages > queue_max_phys_segments(q))
-               nr_pages = queue_max_phys_segments(q);
-       if (nr_pages > queue_max_hw_segments(q))
-               nr_pages = queue_max_hw_segments(q);
+       if (nr_pages > queue_max_segments(q))
+               nr_pages = queue_max_segments(q);
 
        return nr_pages;
 }
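A self-contained sketch of the computation above with illustrative
limits (a 1024-sector, i.e. 512 KiB, maximum transfer and a 128-segment
queue; 4 KiB pages assumed):

#include <stdio.h>

int main(void)
{
        unsigned int max_sectors = 1024, max_segments = 128;
        unsigned int page_size = 4096, page_shift = 12;
        unsigned int nr_pages =
                ((max_sectors << 9) + page_size - 1) >> page_shift;
        if (nr_pages > max_segments)    /* single clamp now */
                nr_pages = max_segments;
        printf("%u\n", nr_pages);       /* prints 128 */
        return 0;
}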
         * make this too complex.
         */
 
-       while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-              || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+       while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
                if (retried_segments)
                        return 0;
 
        unsigned int            discard_alignment;
 
        unsigned short          logical_block_size;
-       unsigned short          max_hw_segments;
-       unsigned short          max_phys_segments;
+       unsigned short          max_segments;
 
        unsigned char           misaligned;
        unsigned char           discard_misaligned;
        blk_queue_max_hw_sectors(q, max);
 }
 
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
        return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
-{
-       return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-       return q->limits.max_phys_segments;
+       return q->limits.max_segments;
 }
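On the read side the same consolidation applies: consumers such as sg
and st above, which used to take min(queue_max_hw_segments(q),
queue_max_phys_segments(q)), now need only a single read:

        sdp->sg_tablesize = queue_max_segments(q);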
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
 
 /* defines for max_sectors and max_phys_segments */
 #define I2O_MAX_SECTORS                        1024
 #define I2O_MAX_SECTORS_LIMITED                128
-#define I2O_MAX_PHYS_SEGMENTS          MAX_PHYS_SEGMENTS
+#define I2O_MAX_PHYS_SEGMENTS          BLK_MAX_SEGMENTS
 
 /*
  *     Message structures