block: defer timeouts to a workqueue
author    Christoph Hellwig <hch@lst.de>
          Fri, 23 Jun 2017 17:41:41 +0000 (10:41 -0700)
committer Ashok Vairavan <ashok.vairavan@oracle.com>
          Thu, 29 Jun 2017 19:09:53 +0000 (12:09 -0700)
Timer context is not very useful for drivers to perform any meaningful abort
action from.  So instead of calling the driver from this useless context,
defer it to a workqueue as soon as possible.

Note that while a delayed_work item would seem the right thing here, I didn't
dare to use it due to the magic in blk_add_timer that pokes deep into timer
internals.  But maybe this encourages Tejun to add a sensible API for that to
the workqueue API and we'll all be fine in the end :)

Contains a major update from Keith Busch:

"This patch removes synchronizing the timeout work so that the timer can
 start a freeze on its own queue. The timer enters the queue, so timer
 context can only start a freeze, but not wait for frozen."
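
Schematically, the guard added in the hunks below looks like this (the
function skeleton and the blk_queue_enter()/blk_queue_exit() calls are taken
from the patch; the comments are editorial):

	static void blk_mq_timeout_work(struct work_struct *work)
	{
		struct request_queue *q =
			container_of(work, struct request_queue, timeout_work);

		/*
		 * Grab a queue usage reference without blocking.  If that
		 * fails (a freeze is already in progress, or the queue is
		 * going away), bail out: the timeout handling itself may
		 * start a freeze of this queue, so it must only start a
		 * freeze and never wait for the queue to become frozen.
		 */
		if (blk_queue_enter(q, true))
			return;

		/* ... check the hardware queues for expired requests ... */

		blk_queue_exit(q);
	}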

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 287922eb0b186e2a5bf54fdd04b734c25c90035c)

Orabug: 25654233

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
block/blk-core.c
block/blk-mq.c
block/blk-timeout.c
block/blk.h
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 5c451b5ee89e7ac29589c28dac2e2c8b5700c905..f2dae161d5a0ba1e77ac1e6d56bc2680c3608a31 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -637,6 +637,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
+static void blk_rq_timed_out_timer(unsigned long data)
+{
+       struct request_queue *q = (struct request_queue *)data;
+
+       kblockd_schedule_work(&q->timeout_work);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -793,6 +800,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
                goto fail;
 
+       INIT_WORK(&q->timeout_work, blk_timeout_work);
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unprep_rq_fn         = NULL;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1a9dba2f5a464870c3ae28594466de46a158833d..2ba612246e3119bca1f28369918cb456cfabc949 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -616,9 +616,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static void blk_mq_rq_timer(unsigned long priv)
+static void blk_mq_timeout_work(struct work_struct *work)
 {
-       struct request_queue *q = (struct request_queue *)priv;
+       struct request_queue *q =
+               container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
@@ -626,6 +627,9 @@ static void blk_mq_rq_timer(unsigned long priv)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
+       if (blk_queue_enter(q, true))
+               return;
+
        queue_for_each_hw_ctx(q, hctx, i) {
                /*
                 * If not software queues are currently mapped to this
@@ -647,6 +651,7 @@ static void blk_mq_rq_timer(unsigned long priv)
                                blk_mq_tag_idle(hctx);
                }
        }
+       blk_queue_exit(q);
 }
 
 /*
@@ -1960,7 +1965,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (!q->nr_hw_queues)
                goto err_hctxs;
 
-       setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+       INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d988c4f84749065a66977b825c98b5..bf7bc7e46520a2fc4f0cd777d9f27deeac769e49 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
        }
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-       struct request_queue *q = (struct request_queue *) data;
+       struct request_queue *q =
+               container_of(work, struct request_queue, timeout_work);
        unsigned long flags, next = 0;
        struct request *rq, *tmp;
        int next_set = 0;
 
+       if (blk_queue_enter(q, true))
+               return;
        spin_lock_irqsave(q->queue_lock, flags);
 
        list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_exit(q);
 }
 
 /**
diff --git a/block/blk.h b/block/blk.h
index a86adf704f7c1e37cb29026b5c4ee6f3c9fd88c4..8ab0e2523408a2a9a77d38cc61aeaa8002e01db7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -87,7 +87,7 @@ static inline void blk_queue_enter_live(struct request_queue *q)
        percpu_ref_get(&q->q_usage_counter);
 }
 
-void blk_rq_timed_out_timer(unsigned long data);
+void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 143578081b32fc029abb94e5d6de527c5a751f50..55db2417916a4e7c3bd708dbed7792acee3d05fb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,6 +509,7 @@ struct request_queue {
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
+       UEK_KABI_EXTEND(struct work_struct  timeout_work)
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */