if (q->id < 0)
                goto fail_q;
 
+       q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       if (!q->bio_split)
+               goto fail_id;
+
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
 
        err = bdi_init(&q->backing_dev_info);
        if (err)
-               goto fail_id;
+               goto fail_split;
 
        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
 
 fail_bdi:
        bdi_destroy(&q->backing_dev_info);
+fail_split:
+       bioset_free(q->bio_split);
 fail_id:
        ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
        struct request *req;
        unsigned int request_count = 0;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
                goto end_io;
        }
 
-       if (likely(bio_is_rw(bio) &&
-                  nr_sectors > queue_max_hw_sectors(q))) {
-               printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-                      bdevname(bio->bi_bdev, b),
-                      bio_sectors(bio),
-                      queue_max_hw_sectors(q));
-               goto end_io;
-       }
-
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
 
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+                                        struct bio *bio,
+                                        struct bio_set *bs)
+{
+       unsigned int max_discard_sectors, granularity;
+       int alignment;
+       sector_t tmp;
+       unsigned split_sectors;
+
+       /* Zero-sector (unknown) and one-sector granularities are the same.  */
+       granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+       max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+       max_discard_sectors -= max_discard_sectors % granularity;
+
+       if (unlikely(!max_discard_sectors)) {
+               /* XXX: warn */
+               return NULL;
+       }
+
+       if (bio_sectors(bio) <= max_discard_sectors)
+               return NULL;
+
+       split_sectors = max_discard_sectors;
+
+       /*
+        * If the next starting sector would be misaligned, stop the discard at
+        * the previous aligned sector.
+        */
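+       /*
+        * Illustrative example (values not taken from this patch): with
+        * granularity = 8, alignment = 2 and a bio starting at sector 11,
+        * a granularity-rounded maximum of 96 sectors gives
+        * tmp = (11 + 96 - 2) % 8 = 1, so split_sectors is trimmed to 95
+        * and the remainder resumes at sector 106, back on an 8 * n + 2
+        * boundary.
+        */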
+       alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+       tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+       tmp = sector_div(tmp, granularity);
+
+       if (split_sectors > tmp)
+               split_sectors -= tmp;
+
+       return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+                                           struct bio *bio,
+                                           struct bio_set *bs)
+{
+       if (!q->limits.max_write_same_sectors)
+               return NULL;
+
+       if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+               return NULL;
+
+       return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+                                        struct bio *bio,
+                                        struct bio_set *bs)
+{
+       struct bio *split;
+       struct bio_vec bv, bvprv;
+       struct bvec_iter iter;
+       unsigned seg_size = 0, nsegs = 0;
+       int prev = 0;
+
+       struct bvec_merge_data bvm = {
+               .bi_bdev        = bio->bi_bdev,
+               .bi_sector      = bio->bi_iter.bi_sector,
+               .bi_size        = 0,
+               .bi_rw          = bio->bi_rw,
+       };
+
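+       /*
+        * Walk the segments, tracking how many hardware segments and how
+        * many sectors the front of the bio would need.  Split at the
+        * first bvec that the driver's merge_bvec_fn rejects, that would
+        * push the bio past max_sectors, that would create an SG gap, or
+        * that cannot be merged into an existing segment once the segment
+        * count limit has been reached.
+        */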
+       bio_for_each_segment(bv, bio, iter) {
+               if (q->merge_bvec_fn &&
+                   q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                       goto split;
+
+               bvm.bi_size += bv.bv_len;
+
+               if (bvm.bi_size >> 9 > queue_max_sectors(q))
+                       goto split;
+
+               /*
+                * If the queue doesn't support SG gaps and adding this
+                * offset would create a gap, disallow it.
+                */
+               if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+                   prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
+                       goto split;
+
+               if (prev && blk_queue_cluster(q)) {
+                       if (seg_size + bv.bv_len > queue_max_segment_size(q))
+                               goto new_segment;
+                       if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+                               goto new_segment;
+                       if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+                               goto new_segment;
+
+                       seg_size += bv.bv_len;
+                       bvprv = bv;
+                       prev = 1;
+                       continue;
+               }
+new_segment:
+               if (nsegs == queue_max_segments(q))
+                       goto split;
+
+               nsegs++;
+               bvprv = bv;
+               prev = 1;
+               seg_size = bv.bv_len;
+       }
+
+       return NULL;
+split:
+       split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
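+       /*
+        * @iter still points at the first bvec that did not fit, so trim
+        * the clone to everything in front of it and advance the original
+        * bio's iterator to resume exactly there.
+        */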
+       split->bi_iter.bi_size -= iter.bi_size;
+       bio->bi_iter = iter;
+
+       if (bio_integrity(bio)) {
+               bio_integrity_advance(bio, split->bi_iter.bi_size);
+               bio_integrity_trim(split, 0, bio_sectors(split));
+       }
+
+       return split;
+}
+
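+/**
+ * blk_queue_split - split a bio that exceeds the queue's limits
+ * @q:   request queue the bio is being submitted to
+ * @bio: in/out: bio to check; updated to the piece to process next
+ * @bs:  bioset from which a split-off front piece is allocated
+ *
+ * If *@bio is too large for @q, a front piece that fits is split off, the
+ * remainder is resubmitted via generic_make_request(), and *@bio is updated
+ * to point at the front piece.
+ */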
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+                    struct bio_set *bs)
+{
+       struct bio *split;
+
+       if ((*bio)->bi_rw & REQ_DISCARD)
+               split = blk_bio_discard_split(q, *bio, bs);
+       else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+               split = blk_bio_write_same_split(q, *bio, bs);
+       else
+               split = blk_bio_segment_split(q, *bio, bs);
+
+       if (split) {
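+               /*
+                * The remainder keeps the original bi_end_io; chaining the
+                * front piece to it ensures that completion is not signalled
+                * until both halves have finished.  Resubmit the remainder
+                * and let the caller carry on with the front piece.
+                */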
+               bio_chain(split, *bio);
+               generic_make_request(*bio);
+               *bio = split;
+       }
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, high, highprv = 1;
+       int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
-       high = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                        if (no_sg_merge)
                                goto new_segment;
 
-                       /*
-                        * the trick here is making sure that a high page is
-                        * never considered part of another segment, since
-                        * that might change with the bounce page.
-                        */
-                       high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-                       if (!high && !highprv && cluster) {
+                       if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
 
                        nr_phys_segs++;
                        bvprv = bv;
+                       prev = 1;
                        seg_size = bv.bv_len;
-                       highprv = high;
                }
                bbio = bio;
        }
 
                return;
        }
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return;
                return;
        }
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, NULL))
                return;
 
 
        blk_trace_shutdown(q);
 
+       if (q->bio_split)
+               bioset_free(q->bio_split);
+
        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
        struct drbd_device *device = (struct drbd_device *) q->queuedata;
        unsigned long start_jif;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        start_jif = jiffies;
 
        /*
 
        char b[BDEVNAME_SIZE];
        struct bio *split;
 
+       blk_queue_bounce(q, &bio);
+
+       blk_queue_split(q, &bio, q->bio_split);
+
        pd = q->queuedata;
        if (!pd) {
                pr_err("%s incorrect request queue\n",
                goto end_io;
        }
 
-       blk_queue_bounce(q, &bio);
-
        do {
                sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
                sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 
 
        dev_dbg(&dev->core, "%s\n", __func__);
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        spin_lock_irq(&priv->lock);
        busy = !bio_list_empty(&priv->list);
        bio_list_add(&priv->list, bio);
 
        struct rsxx_bio_meta *bio_meta;
        int st = -EINVAL;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        might_sleep();
 
        if (!card)
 
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio->bi_iter.bi_size);
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        spin_lock_irq(&card->lock);
        *card->biotail = bio;
        bio->bi_next = NULL;
 
        if (unlikely(!zram_meta_get(zram)))
                goto error;
 
+       blk_queue_split(queue, &bio, queue->bio_split);
+
        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
 
 
        map = dm_get_live_table(md, &srcu_idx);
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
        /* if we're suspended, we have to queue this io for later */
 
        unsigned int sectors;
        int cpu;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
 
        unsigned long source_addr;
        unsigned long bytes_done;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
        if (dev_info == NULL)
 
        unsigned long page_addr;
        unsigned long bytes;
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        if ((bio->bi_iter.bi_sector & 7) != 0 ||
            (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
 
        int rw = bio_rw(old_bio);
        int inactive;
 
+       blk_queue_split(q, &old_bio, q->bio_split);
+
        if (!lo)
                goto err;
 
 
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
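+       /* Bioset from which blk_queue_split() allocates split bios. */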
+       struct bio_set          *bio_split;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
 extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+                           struct bio_set *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,