From: Christoph Hellwig
Date: Fri, 23 Jun 2017 17:41:41 +0000 (-0700)
Subject: block: defer timeouts to a workqueue
X-Git-Tag: v4.1.12-105.0.20170705_2000~11
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=0016903caec5582f66c6993eac302deb2cf49e4c;p=users%2Fjedix%2Flinux-maple.git

block: defer timeouts to a workqueue

Timer context is not very useful for drivers to perform any meaningful
abort action from.  So instead of calling the driver from this useless
context, defer it to a workqueue as soon as possible.

Note that while a delayed_work item would seem the right thing here, I
didn't dare to use it due to the magic in blk_add_timer that pokes deep
into timer internals.  But maybe this encourages Tejun to add a sensible
API for that to the workqueue API and we'll all be fine in the end :)

Contains a major update from Keith Busch:

"This patch removes synchronizing the timeout work so that the timer can
 start a freeze on its own queue.  The timer enters the queue, so timer
 context can only start a freeze, but not wait for frozen."

Signed-off-by: Christoph Hellwig
Acked-by: Keith Busch
Signed-off-by: Jens Axboe

(cherry picked from commit 287922eb0b186e2a5bf54fdd04b734c25c90035c)

Orabug: 25654233

Signed-off-by: Ashok Vairavan
Reviewed-by: Jack Vogel
---

diff --git a/block/blk-core.c b/block/blk-core.c
index 5c451b5ee89e..f2dae161d5a0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -637,6 +637,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
+static void blk_rq_timed_out_timer(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+
+	kblockd_schedule_work(&q->timeout_work);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -793,6 +800,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 		goto fail;
 
+	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1a9dba2f5a46..2ba612246e31 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -616,9 +616,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-static void blk_mq_rq_timer(unsigned long priv)
+static void blk_mq_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *)priv;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	struct blk_mq_timeout_data data = {
 		.next		= 0,
 		.next_set	= 0,
@@ -626,6 +627,9 @@ static void blk_mq_rq_timer(unsigned long priv)
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	if (blk_queue_enter(q, true))
+		return;
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
 		 * If not software queues are currently mapped to this
@@ -647,6 +651,7 @@ static void blk_mq_rq_timer(unsigned long priv)
 				blk_mq_tag_idle(hctx);
 		}
 	}
+	blk_queue_exit(q);
 }
 
 /*
@@ -1960,7 +1965,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
-	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d9..bf7bc7e46520 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
 	}
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *) data;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 	int next_set = 0;
 
+	if (blk_queue_enter(q, true))
+		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_exit(q);
 }
 
 /**
diff --git a/block/blk.h b/block/blk.h
index a86adf704f7c..8ab0e2523408 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -87,7 +87,7 @@ static inline void blk_queue_enter_live(struct request_queue *q)
 	percpu_ref_get(&q->q_usage_counter);
 }
 
-void blk_rq_timed_out_timer(unsigned long data);
+void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 143578081b32..55db2417916a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,6 +509,7 @@ struct request_queue {
 
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
+	UEK_KABI_EXTEND(struct work_struct timeout_work)
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
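
For context, the pattern this patch applies is: the timer callback runs in
atomic (softirq) context where a driver cannot sleep, so it does nothing but
schedule a work item, and the real timeout/abort handling then runs from the
workqueue in process context.  Below is a minimal, self-contained sketch of
that hand-off using the setup_timer()-style API this tree uses; the sketch_*
names are invented for illustration only and are not part of the block layer
or of this patch.

/*
 * Sketch only: the timer fires in softirq context and merely schedules
 * work; the work handler runs in process context and may sleep.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct sketch_dev {
	struct timer_list timer;		/* fires in atomic context */
	struct work_struct timeout_work;	/* runs in process context */
} sdev;

/* Timer callback: cannot sleep, so only hand off to the workqueue. */
static void sketch_timer_fn(unsigned long data)
{
	struct sketch_dev *dev = (struct sketch_dev *)data;

	schedule_work(&dev->timeout_work);
}

/* Work callback: free to sleep while doing meaningful abort handling. */
static void sketch_timeout_work(struct work_struct *work)
{
	struct sketch_dev *dev = container_of(work, struct sketch_dev,
					      timeout_work);

	pr_info("sketch: handling timeout for %p in process context\n", dev);
}

static int __init sketch_init(void)
{
	INIT_WORK(&sdev.timeout_work, sketch_timeout_work);
	setup_timer(&sdev.timer, sketch_timer_fn, (unsigned long)&sdev);
	mod_timer(&sdev.timer, jiffies + 5 * HZ);
	return 0;
}

static void __exit sketch_exit(void)
{
	del_timer_sync(&sdev.timer);
	cancel_work_sync(&sdev.timeout_work);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

In the patch itself the same split appears as blk_rq_timed_out_timer(), whose
only job is kblockd_schedule_work(&q->timeout_work), on one side, and the
converted work handlers blk_timeout_work() and blk_mq_timeout_work() on the
other.  The work handlers additionally bracket themselves with
blk_queue_enter(q, true) and blk_queue_exit(q): per Keith Busch's update, the
non-waiting enter lets the handler back off immediately if the queue is being
frozen, so timeout handling never has to wait for a freeze to complete.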