__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
init_waitqueue_head(&q->mq_freeze_wq);
- mutex_init(&q->mq_freeze_lock);
- if (blkcg_init_queue(q))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->q_usage_counter,
+ blk_queue_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
+ if (blkcg_init_queue(q))
+ goto fail_ref;
+
return q;
+ fail_ref:
+ percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
bdi_destroy(&q->backing_dev_info);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
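
/*
 * For reference: the comment above defers leaving atomic mode to queue
 * registration. A sketch of the blk_register_queue() side, assuming it
 * follows the same rework:
 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}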
- static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
- {
- while (true) {
- int ret;
-
- if (percpu_ref_tryget_live(&q->mq_usage_counter))
- return 0;
-
- if (!(gfp & __GFP_WAIT))
- return -EBUSY;
-
- ret = wait_event_interruptible(q->mq_freeze_wq,
- !q->mq_freeze_depth || blk_queue_dying(q));
- if (blk_queue_dying(q))
- return -ENODEV;
- if (ret)
- return ret;
- }
- }
-
- static void blk_mq_queue_exit(struct request_queue *q)
- {
- percpu_ref_put(&q->mq_usage_counter);
- }
-
- static void blk_mq_usage_counter_release(struct percpu_ref *ref)
- {
- struct request_queue *q =
- container_of(ref, struct request_queue, mq_usage_counter);
-
- wake_up_all(&q->mq_freeze_wq);
- }
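
/*
 * For reference: the blk-core.c helpers that replace the three functions
 * removed above, including the blk_queue_usage_counter_release() callback
 * passed to percpu_ref_init() earlier. A sketch, assuming they move over
 * unchanged apart from the q_usage_counter rename and the atomic
 * mq_freeze_depth:
 */
int blk_queue_enter(struct request_queue *q, gfp_t gfp)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (!(gfp & __GFP_WAIT))
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}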
-
void blk_mq_freeze_queue_start(struct request_queue *q)
{
- bool freeze;
+ int freeze_depth;
- mutex_lock(&q->mq_freeze_lock);
- freeze = !q->mq_freeze_depth++;
- if (freeze) {
- percpu_ref_kill(&q->mq_usage_counter);
+ freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+ if (freeze_depth == 1) {
+ percpu_ref_kill(&q->q_usage_counter);
blk_mq_run_hw_queues(q, false);
}
- mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
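
/*
 * For reference: how a started freeze completes. A sketch, assuming the
 * existing freeze-wait helper is updated for the renamed counter:
 */
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}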
void blk_mq_unfreeze_queue(struct request_queue *q)
{
- bool wake;
+ int freeze_depth;
- mutex_lock(&q->mq_freeze_lock);
- wake = !--q->mq_freeze_depth;
- WARN_ON_ONCE(q->mq_freeze_depth < 0);
- if (wake) {
- percpu_ref_reinit(&q->mq_usage_counter);
+ freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+ WARN_ON_ONCE(freeze_depth < 0);
+ if (!freeze_depth) {
+ percpu_ref_reinit(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
- mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
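
/*
 * For reference: the submission side that the freeze gates. A sketch of
 * the generic_make_request() hot path, assuming it takes the reference
 * per bio as in this rework; percpu_ref_kill() above makes the tryget
 * fail, so new I/O blocks in blk_queue_enter() until the unfreeze:
 */
	if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
		q->make_request_fn(q, bio);
		blk_queue_exit(q);
	}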
rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
0, 1, _THIS_IP_);
/* we abort the update if there was an IO error */
if (ioend->io_error) {
- xfs_trans_cancel(tp, 0);
+ xfs_trans_cancel(tp);
return ioend->io_error;
}

return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
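
/*
 * The one-argument call above implies a prototype change. A sketch of
 * the matching xfs_trans.h hunk, assuming the flags argument is dropped:
 */
- void xfs_trans_cancel(xfs_trans_t *, int);
+ void xfs_trans_cancel(struct xfs_trans *);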