        while (true) {
                start = read_seqcount_begin(&rq->gstate_seq);
                gstate = READ_ONCE(rq->gstate);
-               deadline = rq->deadline;
+               deadline = blk_rq_deadline(rq);
                if (!read_seqcount_retry(&rq->gstate_seq, start))
                        break;
                cond_resched();
        }
 
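The loop above is the read side of a seqcount: gstate and the deadline are snapshotted together, and the read is retried until a pass completes without a writer bumping gstate_seq in between. A minimal userspace sketch of the same retry idiom, using C11 atomics instead of the kernel's seqcount API (struct req and req_read_deadline() are illustrative names only, and a production seqlock also needs the data reads fenced, which read_seqcount_begin()/read_seqcount_retry() take care of):

#include <stdatomic.h>

struct req {
        atomic_uint seq;        /* even: stable, odd: writer in progress */
        unsigned long deadline;
};

static unsigned long req_read_deadline(struct req *r)
{
        unsigned int start;
        unsigned long deadline;

        do {
                /* wait for an even (stable) sequence count */
                while ((start = atomic_load_explicit(&r->seq,
                                memory_order_acquire)) & 1)
                        ;
                deadline = r->deadline;
                /* a bumped count means a writer raced with us: retry */
        } while (atomic_load_explicit(&r->seq,
                        memory_order_acquire) != start);

        return deadline;
}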
 static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
                          unsigned int *next_set)
 {
-       if (time_after_eq(jiffies, rq->deadline)) {
+       const unsigned long deadline = blk_rq_deadline(rq);
+
+       if (time_after_eq(jiffies, deadline)) {
                list_del_init(&rq->timeout_list);
 
                /*
                 * Check if we raced with end io completion
                 */
                if (!blk_mark_rq_complete(rq))
                        blk_rq_timed_out(rq);
-       } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
-               *next_timeout = rq->deadline;
+       } else if (!*next_set || time_after(*next_timeout, deadline)) {
+               *next_timeout = deadline;
                *next_set = 1;
        }
 }
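blk_rq_check_expired() either expires a request whose deadline has passed or folds the deadline into the running "soonest pending deadline", which the caller then uses to re-arm the queue timer once for the whole timeout list. A self-contained sketch of that min-tracking idiom (scan_deadlines() and the flat array are made up for illustration; note the kernel compares with time_after()/time_after_eq() so jiffies wraparound is handled, which the plain comparisons below ignore):

/* Return the earliest not-yet-expired deadline; *any_pending tells the
 * caller whether a timer needs to be re-armed at all. Illustrative only. */
static unsigned long scan_deadlines(const unsigned long *dl, int n,
                                    unsigned long now, int *any_pending)
{
        unsigned long next = 0;
        int set = 0;

        for (int i = 0; i < n; i++) {
                if (now >= dl[i])
                        continue;               /* expired: would be timed out */
                if (!set || dl[i] < next) {
                        next = dl[i];           /* new soonest deadline */
                        set = 1;
                }
        }
        *any_pending = set;
        return next;
}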
                /*
                 * All we need to ensure is that timeout scan takes place
                 * immediately and that scan sees the new timeout value.
                 * No need for fancy synchronizations.
                 */
-               req->deadline = jiffies;
+               blk_rq_set_deadline(req, jiffies);
                mod_timer(&req->q->timeout, 0);
        } else {
                if (blk_mark_rq_complete(req))
                        return;
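The mq branch of this abort path leans on a simple invariant: once the deadline is rewritten to the current jiffies value, every subsequent scan sees time_after_eq(jiffies, deadline) as true, so arming the queue timer with an expiry that is already in the past is all it takes to get the request expired on the very next scan. Condensed (same calls as in the hunk above, shown only to isolate the pattern):

blk_rq_set_deadline(req, jiffies);      /* request now looks overdue to any scan */
mod_timer(&req->q->timeout, 0);         /* expiry in the past: fires on next tick */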
        if (!req->timeout)
                req->timeout = q->rq_timeout;
 
-       req->deadline = jiffies + req->timeout;
+       blk_rq_set_deadline(req, jiffies + req->timeout);
        req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
        /*
         * If the timer isn't already pending or this timeout is earlier
         * than an existing one, modify the timer. Round up to next nearest
         * second.
         */
-       expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
+       expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
 
        if (!timer_pending(&q->timeout) ||
            time_before(expiry, q->timeout.expires)) {
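Note how the deadline is deliberately coarsened before arming the timer: round_jiffies_up() pushes the expiry to a whole-second boundary so that requests issued close together expire in the same timer invocation, and the timer is only modified when none is pending or the new expiry is earlier. A sketch of that rounding, assuming HZ timer ticks per second (round_up_to_second() is a stand-in; the real round_jiffies_up() additionally skews the boundary per CPU):

/* Round a jiffies value up to the next whole-second boundary so nearby
 * deadlines collapse into a single timer expiry. */
static unsigned long round_up_to_second(unsigned long j)
{
        return (j + HZ - 1) / HZ * HZ;
}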
 
                q->last_merge = NULL;
 }
 
+/*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+       rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+       return rq->__deadline & ~0x1UL;
+}
+
 /*
  * Internal io_context interface
  */
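
With bit 0 masked off by both accessors, the legacy path is free to reuse that bit as an atomic "request completed" flag, at the cost of one jiffy of deadline resolution and the caveat spelled out in the comment: blk_rq_set_deadline() wipes the bit, so its callers must tolerate losing a concurrently-set completed mark. A plausible reimplementation of blk_mark_rq_complete()/blk_clear_rq_complete() on top of the stolen bit, sketched with the kernel's atomic bitops (this is where the series is headed, but treat the bodies as a sketch rather than part of this hunk):

static inline int blk_mark_rq_complete(struct request *rq)
{
        /* returns nonzero if e.g. the timeout path already claimed it */
        return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(0, &rq->__deadline);
}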