Contains the value 1 while the device is suspended.
                Otherwise it contains 0. Read-only attribute.
 Users:         util-linux, device-mapper udev rules
+
+What:          /sys/block/dm-<num>/dm/rq_based_seq_io_merge_deadline
+Date:          March 2015
+KernelVersion: 4.1
+Contact:       dm-devel@redhat.com
+Description:   Allow control over how long a request that is a
+               reasonable merge candidate can be queued on the request
+               queue.  The resolution of this deadline is in
+               microseconds (ranging from 1 to 100000 usecs).
+               Setting this attribute to 0 (the default) will disable
+               request-based DM's merge heuristic and associated extra
+               accounting.  This attribute is not applicable to
+               bio-based DM devices, so it will only ever report 0 for
+               them.
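
For illustration, a minimal userspace sketch of driving the new attribute
(the dm-0 name and the 20 usec value are hypothetical placeholders, not
part of this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical device; substitute the actual dm-<num>. */
            int fd = open("/sys/block/dm-0/dm/rq_based_seq_io_merge_deadline",
                          O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Writing "0" disables the heuristic; values > 100000 are clamped. */
            if (write(fd, "20", 2) != 2)
                    perror("write");
            close(fd);
            return 0;
    }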
 
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
        struct task_struct *kworker_task;
 
        /* for request-based merge heuristic in dm_request_fn() */
-       sector_t last_rq_pos;
+       unsigned seq_rq_merge_deadline_usecs;
        int last_rq_rw;
+       sector_t last_rq_pos;
+       ktime_t last_rq_start_time;
 };
 
 /*
        blk_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);
 
-       md->last_rq_pos = rq_end_sector(orig);
-       md->last_rq_rw = rq_data_dir(orig);
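+       /*
+        * Only track the last request's end position and direction when
+        * the merge heuristic is enabled, so the extra accounting is not
+        * paid otherwise.
+        */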
+       if (md->seq_rq_merge_deadline_usecs) {
+               md->last_rq_pos = rq_end_sector(orig);
+               md->last_rq_rw = rq_data_dir(orig);
+               md->last_rq_start_time = ktime_get();
+       }
 
        /*
         * Hold the md reference here for the in-flight I/O.
        dm_get(md);
 }
 
+/* Hard upper bound on the merge deadline (100 msecs); larger stores are clamped. */
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+       return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+                                                    const char *buf, size_t count)
+{
+       unsigned deadline;
+
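+       /* Only request-based DM uses this; silently ignore writes otherwise. */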
+       if (!dm_request_based(md))
+               return count;
+
+       if (kstrtouint(buf, 10, &deadline))
+               return -EINVAL;
+
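+       /* Clamp to the documented maximum rather than rejecting the write. */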
+       if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+               deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+       md->seq_rq_merge_deadline_usecs = deadline;
+
+       return count;
+}
+
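+/*
+ * Returns true if the request was peeked from the queue before the
+ * configured merge deadline (measured from when the last request was
+ * started) expired, i.e. dispatch may still be delayed in the hope of
+ * a merge.
+ */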
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+       ktime_t kt_deadline;
+
+       if (!md->seq_rq_merge_deadline_usecs)
+               return false;
+
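+       /* Deadline is the last request's start time plus the configured window. */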
+       kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+       kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+       return !ktime_after(ktime_get(), kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
                        continue;
                }
 
-               if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
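+               /*
+                * Hold back dispatch when the peeked request looks like a
+                * merge candidate: other I/O is in flight, it carries a
+                * single bio, and it is sequential with (and in the same
+                * direction as) the last request started.
+                */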
+               if (dm_request_peeked_before_merge_deadline(md) &&
+                   md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
                    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
                        goto delay_and_out;
 
        if (!q)
                return 0;
 
+       /* disable dm_request_fn's merge heuristic by default */
+       md->seq_rq_merge_deadline_usecs = 0;
+
        md->queue = q;
        dm_init_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);