www.infradead.org Git - nvme.git/commitdiff
block: force noio scope in blk_mq_freeze_queue
authorChristoph Hellwig <hch@lst.de>
Fri, 31 Jan 2025 12:03:47 +0000 (13:03 +0100)
committerJens Axboe <axboe@kernel.dk>
Fri, 31 Jan 2025 14:20:08 +0000 (07:20 -0700)
When block drivers or the core block code perform allocations with a
frozen queue, this could try to recurse into the block device to
reclaim memory and deadlock.  Thus all allocations done by a process
that froze a queue need to be done without __GFP_IO and __GFP_FS.
Instead of trying to track all of them down, force a noio scope as
part of freezing the queue.

Note that nvme is a bit of a mess here due to the non-owner freezes,
and they will be addressed separately.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250131120352.1315351-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
26 files changed:
block/blk-cgroup.c
block/blk-iocost.c
block/blk-iolatency.c
block/blk-mq.c
block/blk-pm.c
block/blk-rq-qos.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-zoned.c
block/elevator.c
drivers/block/aoe/aoedev.c
drivers/block/ataflop.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/block/sunvdc.c
drivers/block/swim3.c
drivers/block/virtio_blk.c
drivers/mtd/mtd_blkdevs.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/ufs/core/ufs-sysfs.c
include/linux/blk-mq.h

index 45a395862fbc88f448fe281eeac620710bc1587d..c795fa3a30e1a6e16ad7d795f9ae0c30df3b104e 100644 (file)
@@ -1545,6 +1545,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
        struct request_queue *q = disk->queue;
        struct blkg_policy_data *pd_prealloc = NULL;
        struct blkcg_gq *blkg, *pinned_blkg = NULL;
+       unsigned int memflags;
        int ret;
 
        if (blkcg_policy_enabled(q, pol))
@@ -1559,7 +1560,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
                return -EINVAL;
 
        if (queue_is_mq(q))
-               blk_mq_freeze_queue(q);
+               memflags = blk_mq_freeze_queue(q);
 retry:
        spin_lock_irq(&q->queue_lock);
 
@@ -1623,7 +1624,7 @@ retry:
        spin_unlock_irq(&q->queue_lock);
 out:
        if (queue_is_mq(q))
-               blk_mq_unfreeze_queue(q);
+               blk_mq_unfreeze_queue(q, memflags);
        if (pinned_blkg)
                blkg_put(pinned_blkg);
        if (pd_prealloc)
@@ -1667,12 +1668,13 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 {
        struct request_queue *q = disk->queue;
        struct blkcg_gq *blkg;
+       unsigned int memflags;
 
        if (!blkcg_policy_enabled(q, pol))
                return;
 
        if (queue_is_mq(q))
-               blk_mq_freeze_queue(q);
+               memflags = blk_mq_freeze_queue(q);
 
        mutex_lock(&q->blkcg_mutex);
        spin_lock_irq(&q->queue_lock);
@@ -1696,7 +1698,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
        mutex_unlock(&q->blkcg_mutex);
 
        if (queue_is_mq(q))
-               blk_mq_unfreeze_queue(q);
+               blk_mq_unfreeze_queue(q, memflags);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
 
index a5894ec9696e7e8c1011cbda1d849562e1732d31..65a1d4427ccf4beefeaf2e5b3478b8d300f47189 100644 (file)
@@ -3224,6 +3224,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
        u32 qos[NR_QOS_PARAMS];
        bool enable, user;
        char *body, *p;
+       unsigned int memflags;
        int ret;
 
        blkg_conf_init(&ctx, input);
@@ -3247,7 +3248,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                ioc = q_to_ioc(disk->queue);
        }
 
-       blk_mq_freeze_queue(disk->queue);
+       memflags = blk_mq_freeze_queue(disk->queue);
        blk_mq_quiesce_queue(disk->queue);
 
        spin_lock_irq(&ioc->lock);
@@ -3347,7 +3348,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                wbt_enable_default(disk);
 
        blk_mq_unquiesce_queue(disk->queue);
-       blk_mq_unfreeze_queue(disk->queue);
+       blk_mq_unfreeze_queue(disk->queue, memflags);
 
        blkg_conf_exit(&ctx);
        return nbytes;
@@ -3355,7 +3356,7 @@ einval:
        spin_unlock_irq(&ioc->lock);
 
        blk_mq_unquiesce_queue(disk->queue);
-       blk_mq_unfreeze_queue(disk->queue);
+       blk_mq_unfreeze_queue(disk->queue, memflags);
 
        ret = -EINVAL;
 err:
@@ -3414,6 +3415,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
 {
        struct blkg_conf_ctx ctx;
        struct request_queue *q;
+       unsigned int memflags;
        struct ioc *ioc;
        u64 u[NR_I_LCOEFS];
        bool user;
@@ -3441,7 +3443,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
                ioc = q_to_ioc(q);
        }
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
        spin_lock_irq(&ioc->lock);
@@ -3493,7 +3495,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
        spin_unlock_irq(&ioc->lock);
 
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        blkg_conf_exit(&ctx);
        return nbytes;
@@ -3502,7 +3504,7 @@ einval:
        spin_unlock_irq(&ioc->lock);
 
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        ret = -EINVAL;
 err:
index ebb522788d9780f6d4b452b826f113957be02772..42c1e0b9a68f2e5fcc87ba8b1c00e3653ccc1759 100644 (file)
@@ -749,9 +749,11 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
         */
        enabled = atomic_read(&blkiolat->enable_cnt);
        if (enabled != blkiolat->enabled) {
-               blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
+               unsigned int memflags;
+
+               memflags = blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
                blkiolat->enabled = enabled;
-               blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
+               blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue, memflags);
        }
 }
 
index da39a1cac70229f08cd2ebadc63ad2efcc906100..40490ac8804570e579b257abd078e13d859de988 100644 (file)
@@ -210,12 +210,12 @@ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
 
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
 {
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_nomemsave);
 
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
@@ -236,12 +236,12 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
        return unfreeze;
 }
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
 {
        if (__blk_mq_unfreeze_queue(q, false))
                blk_unfreeze_release_lock(q);
 }
-EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_nomemrestore);
 
 /*
  * non_owner variant of blk_freeze_queue_start
@@ -4223,13 +4223,14 @@ static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
                                         bool shared)
 {
        struct request_queue *q;
+       unsigned int memflags;
 
        lockdep_assert_held(&set->tag_list_lock);
 
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
-               blk_mq_freeze_queue(q);
+               memflags = blk_mq_freeze_queue(q);
                queue_set_hctx_shared(q, shared);
-               blk_mq_unfreeze_queue(q);
+               blk_mq_unfreeze_queue(q, memflags);
        }
 }
 
@@ -4992,6 +4993,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        struct request_queue *q;
        LIST_HEAD(head);
        int prev_nr_hw_queues = set->nr_hw_queues;
+       unsigned int memflags;
        int i;
 
        lockdep_assert_held(&set->tag_list_lock);
@@ -5003,8 +5005,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                return;
 
+       memflags = memalloc_noio_save();
        list_for_each_entry(q, &set->tag_list, tag_set_list)
-               blk_mq_freeze_queue(q);
+               blk_mq_freeze_queue_nomemsave(q);
+
        /*
         * Switch IO scheduler to 'none', cleaning up the data associated
         * with the previous scheduler. We will switch back once we are done
@@ -5052,7 +5056,8 @@ switch_back:
                blk_mq_elv_switch_back(&head, q);
 
        list_for_each_entry(q, &set->tag_list, tag_set_list)
-               blk_mq_unfreeze_queue(q);
+               blk_mq_unfreeze_queue_nomemrestore(q);
+       memalloc_noio_restore(memflags);
 
        /* Free the excess tags when nr_hw_queues shrink. */
        for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
index 42e8420747153b840317ec01c6020384449d3c3f..8d3e052f91da16a1668b2de4578588a5b55a12e3 100644 (file)
@@ -89,7 +89,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
        if (percpu_ref_is_zero(&q->q_usage_counter))
                ret = 0;
        /* Switch q_usage_counter back to per-cpu mode. */
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue_nomemrestore(q);
 
        if (ret < 0) {
                spin_lock_irq(&q->queue_lock);
index eb9618cd68adfb94fdcaf1101b865777cb196b3a..d4d4f4dc0e23fed61b4d65ab4492c673ba584383 100644 (file)
@@ -299,6 +299,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
                const struct rq_qos_ops *ops)
 {
        struct request_queue *q = disk->queue;
+       unsigned int memflags;
 
        lockdep_assert_held(&q->rq_qos_mutex);
 
@@ -310,14 +311,14 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
         * No IO can be in-flight when adding rqos, so freeze queue, which
         * is fine since we only support rq_qos for blk-mq queue.
         */
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
 
        if (rq_qos_id(q, rqos->id))
                goto ebusy;
        rqos->next = q->rq_qos;
        q->rq_qos = rqos;
 
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        if (rqos->ops->debugfs_attrs) {
                mutex_lock(&q->debugfs_mutex);
@@ -327,7 +328,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
 
        return 0;
 ebusy:
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
        return -EBUSY;
 }
 
@@ -335,17 +336,18 @@ void rq_qos_del(struct rq_qos *rqos)
 {
        struct request_queue *q = rqos->disk->queue;
        struct rq_qos **cur;
+       unsigned int memflags;
 
        lockdep_assert_held(&q->rq_qos_mutex);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
                if (*cur == rqos) {
                        *cur = rqos->next;
                        break;
                }
        }
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_rqos(rqos);
index db12396ff5c793a402e2057949d56e0fd7382d2c..c44dadc35e1ece20be211eb3ad19a8b1ac31ef3b 100644 (file)
@@ -461,11 +461,12 @@ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
 int queue_limits_commit_update_frozen(struct request_queue *q,
                struct queue_limits *lim)
 {
+       unsigned int memflags;
        int ret;
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        ret = queue_limits_commit_update(q, lim);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        return ret;
 }
index 7b970e6765e72a64bd4e2815fb40d54ebae1b857..6f548a4376aa4a4e4e2a0c7a66349059196f3d9c 100644 (file)
@@ -681,7 +681,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
-       unsigned int noio_flag;
+       unsigned int memflags;
        ssize_t res;
 
        if (!entry->store_limit && !entry->store)
@@ -711,11 +711,9 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
        }
 
        mutex_lock(&q->sysfs_lock);
-       blk_mq_freeze_queue(q);
-       noio_flag = memalloc_noio_save();
+       memflags = blk_mq_freeze_queue(q);
        res = entry->store(disk, page, length);
-       memalloc_noio_restore(noio_flag);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
        mutex_unlock(&q->sysfs_lock);
        return res;
 }
index 82dbaefcfa3bf56f0079ef2e043bbc55e320e4af..8d149aff9fd0b7fe23d51116b2353b6cf41801e1 100644 (file)
@@ -1202,6 +1202,7 @@ static int blk_throtl_init(struct gendisk *disk)
 {
        struct request_queue *q = disk->queue;
        struct throtl_data *td;
+       unsigned int memflags;
        int ret;
 
        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
@@ -1215,7 +1216,7 @@ static int blk_throtl_init(struct gendisk *disk)
         * Freeze queue before activating policy, to synchronize with IO path,
         * which is protected by 'q_usage_counter'.
         */
-       blk_mq_freeze_queue(disk->queue);
+       memflags = blk_mq_freeze_queue(disk->queue);
        blk_mq_quiesce_queue(disk->queue);
 
        q->td = td;
@@ -1239,7 +1240,7 @@ static int blk_throtl_init(struct gendisk *disk)
 
 out:
        blk_mq_unquiesce_queue(disk->queue);
-       blk_mq_unfreeze_queue(disk->queue);
+       blk_mq_unfreeze_queue(disk->queue, memflags);
 
        return ret;
 }
index 9d08a54c201eea8840e52c6897ad80b62726f029..761ea662ddc34296b6e8642e937e542b712a5268 100644 (file)
@@ -1717,9 +1717,10 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
        else
                pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
        if (ret) {
-               blk_mq_freeze_queue(q);
+               unsigned int memflags = blk_mq_freeze_queue(q);
+
                disk_free_zone_resources(disk);
-               blk_mq_unfreeze_queue(q);
+               blk_mq_unfreeze_queue(q, memflags);
        }
 
        return ret;
index b81216c48b6bcb638058ccc206dc0b2a90460477..cd2ce492160100e90326ae1dceac6013e624f5d5 100644 (file)
@@ -570,6 +570,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
 void elevator_init_mq(struct request_queue *q)
 {
        struct elevator_type *e;
+       unsigned int memflags;
        int err;
 
        WARN_ON_ONCE(blk_queue_registered(q));
@@ -590,13 +591,13 @@ void elevator_init_mq(struct request_queue *q)
         *
         * Disk isn't added yet, so verifying queue lock only manually.
         */
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
 
        blk_mq_cancel_work_sync(q);
 
        err = blk_mq_init_sched(q, e);
 
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
@@ -614,11 +615,12 @@ void elevator_init_mq(struct request_queue *q)
  */
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
+       unsigned int memflags;
        int ret;
 
        lockdep_assert_held(&q->sysfs_lock);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
        if (q->elevator) {
@@ -639,7 +641,7 @@ int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 out_unfreeze:
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        if (ret) {
                pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
@@ -651,9 +653,11 @@ out_unfreeze:
 
 void elevator_disable(struct request_queue *q)
 {
+       unsigned int memflags;
+
        lockdep_assert_held(&q->sysfs_lock);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
        elv_unregister_queue(q);
@@ -664,7 +668,7 @@ void elevator_disable(struct request_queue *q)
        blk_add_trace_msg(q, "elv switch: none");
 
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 }
 
 /*
index 3523dd82d7a002b217f2f4856e9f42daeba13caa..4db7f6ce8ade074e0b8a8d90d06c7b7a0e7308dd 100644 (file)
@@ -226,10 +226,11 @@ aoedev_downdev(struct aoedev *d)
        /* fast fail all pending I/O */
        if (d->blkq) {
                /* UP is cleared, freeze+quiesce to insure all are errored */
-               blk_mq_freeze_queue(d->blkq);
+               unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
                blk_mq_quiesce_queue(d->blkq);
                blk_mq_unquiesce_queue(d->blkq);
-               blk_mq_unfreeze_queue(d->blkq);
+               blk_mq_unfreeze_queue(d->blkq, memflags);
        }
 
        if (d->gd)
index 110f9aca2667d5ad4ba55ba98a5b877cbc57d5d5..a81ade622a01d9311a412d64bcfd6694a23910b1 100644 (file)
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
        unsigned char   *p;
        int sect, nsect;
        unsigned long   flags;
+       unsigned int memflags;
        int ret;
 
        if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
        }
 
        q = unit[drive].disk[type]->queue;
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
        local_irq_save(flags);
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
        ret = FormatError ? -EIO : 0;
 out:
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
        return ret;
 }
 
index d1f1d6bef2e69608d77cfaff5b796b582fe401bf..c05fe27a96b64f1f1ea3868510fdd0c7f4937f55 100644 (file)
@@ -586,6 +586,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 {
        struct file *file = fget(arg);
        struct file *old_file;
+       unsigned int memflags;
        int error;
        bool partscan;
        bool is_loop;
@@ -623,14 +624,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 
        /* and ... switch */
        disk_force_media_change(lo->lo_disk);
-       blk_mq_freeze_queue(lo->lo_queue);
+       memflags = blk_mq_freeze_queue(lo->lo_queue);
        mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
        lo->lo_backing_file = file;
        lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
        mapping_set_gfp_mask(file->f_mapping,
                             lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
        loop_update_dio(lo);
-       blk_mq_unfreeze_queue(lo->lo_queue);
+       blk_mq_unfreeze_queue(lo->lo_queue, memflags);
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
        loop_global_unlock(lo, is_loop);
 
@@ -1255,6 +1256,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        int err;
        bool partscan = false;
        bool size_changed = false;
+       unsigned int memflags;
 
        err = mutex_lock_killable(&lo->lo_mutex);
        if (err)
@@ -1272,7 +1274,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        }
 
        /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
-       blk_mq_freeze_queue(lo->lo_queue);
+       memflags = blk_mq_freeze_queue(lo->lo_queue);
 
        err = loop_set_status_from_info(lo, info);
        if (err)
@@ -1294,7 +1296,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        loop_update_dio(lo);
 
 out_unfreeze:
-       blk_mq_unfreeze_queue(lo->lo_queue);
+       blk_mq_unfreeze_queue(lo->lo_queue, memflags);
        if (partscan)
                clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 out_unlock:
@@ -1446,6 +1448,7 @@ static int loop_set_capacity(struct loop_device *lo)
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
 {
        bool use_dio = !!arg;
+       unsigned int memflags;
 
        if (lo->lo_state != Lo_bound)
                return -ENXIO;
@@ -1459,18 +1462,19 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
                vfs_fsync(lo->lo_backing_file, 0);
        }
 
-       blk_mq_freeze_queue(lo->lo_queue);
+       memflags = blk_mq_freeze_queue(lo->lo_queue);
        if (use_dio)
                lo->lo_flags |= LO_FLAGS_DIRECT_IO;
        else
                lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
-       blk_mq_unfreeze_queue(lo->lo_queue);
+       blk_mq_unfreeze_queue(lo->lo_queue, memflags);
        return 0;
 }
 
 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 {
        struct queue_limits lim;
+       unsigned int memflags;
        int err = 0;
 
        if (lo->lo_state != Lo_bound)
@@ -1485,10 +1489,10 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
        lim = queue_limits_start_update(lo->lo_queue);
        loop_update_limits(lo, &lim, arg);
 
-       blk_mq_freeze_queue(lo->lo_queue);
+       memflags = blk_mq_freeze_queue(lo->lo_queue);
        err = queue_limits_commit_update(lo->lo_queue, &lim);
        loop_update_dio(lo);
-       blk_mq_unfreeze_queue(lo->lo_queue);
+       blk_mq_unfreeze_queue(lo->lo_queue, memflags);
 
        return err;
 }
index b63a0f29a54abb72d329853b0bc12fef5d5627e1..7bdc7eb808ea93c4cce2422717155050cdc945ca 100644 (file)
@@ -1234,6 +1234,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        struct socket *sock;
        struct nbd_sock **socks;
        struct nbd_sock *nsock;
+       unsigned int memflags;
        int err;
 
        /* Arg will be cast to int, check it to avoid overflow */
@@ -1247,7 +1248,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
         * We need to make sure we don't get any errant requests while we're
         * reallocating the ->socks array.
         */
-       blk_mq_freeze_queue(nbd->disk->queue);
+       memflags = blk_mq_freeze_queue(nbd->disk->queue);
 
        if (!netlink && !nbd->task_setup &&
            !test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1288,12 +1289,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        INIT_WORK(&nsock->work, nbd_pending_cmd_work);
        socks[config->num_connections++] = nsock;
        atomic_inc(&config->live_connections);
-       blk_mq_unfreeze_queue(nbd->disk->queue);
+       blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
 
        return 0;
 
 put_socket:
-       blk_mq_unfreeze_queue(nbd->disk->queue);
+       blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
        sockfd_put(sock);
        return err;
 }
index 5b393e4a1ddfc4eba1a821b9bf8e04585bdf2190..faafd7ff43d6ef53110ab3663cc7ac322214cc8c 100644 (file)
@@ -7281,9 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
                 * Prevent new IO from being queued and wait for existing
                 * IO to complete/fail.
                 */
-               blk_mq_freeze_queue(rbd_dev->disk->queue);
+               unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
                blk_mark_disk_dead(rbd_dev->disk);
-               blk_mq_unfreeze_queue(rbd_dev->disk->queue);
+               blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
        }
 
        del_gendisk(rbd_dev->disk);
index 88dcae6ec575172ed1d4b019f9851c1320f62d4d..05c4aee7f262a94a4b05cd46107d31623edc90d5 100644 (file)
@@ -1113,6 +1113,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
 static void vdc_queue_drain(struct vdc_port *port)
 {
        struct request_queue *q = port->disk->queue;
+       unsigned int memflags;
 
        /*
         * Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,12 +1122,12 @@ static void vdc_queue_drain(struct vdc_port *port)
        port->drain = 1;
        spin_unlock_irq(&port->vio.lock);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
        spin_lock_irq(&port->vio.lock);
        port->drain = 0;
-       blk_mq_unquiesce_queue(q);
+       blk_mq_unquiesce_queue(q, memflags);
        blk_mq_unfreeze_queue(q);
 }
 
index 9914153b365b611a02b9dc5fb11dcf483c830404..3aedcb5add61f7a51fd2cb6f265980b19c41bff4 100644 (file)
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
 static void release_drive(struct floppy_state *fs)
 {
        struct request_queue *q = disks[fs->index]->queue;
+       unsigned int memflags;
        unsigned long flags;
 
        swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
        fs->state = idle;
        spin_unlock_irqrestore(&swim3_lock, flags);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
        blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 }
 
 static int fd_eject(struct floppy_state *fs)
index bbaa26b523b8dd18e20497e7980196b7be5b745a..a4af39fc7ea28a83f9f77312b7145d20aeab25f6 100644 (file)
@@ -1584,11 +1584,12 @@ static int virtblk_freeze(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
        struct request_queue *q = vblk->disk->queue;
+       unsigned int memflags;
 
        /* Ensure no requests in virtqueues before deleting vqs. */
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue_nowait(q);
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        /* Ensure we don't receive any more interrupts */
        virtio_reset_device(vdev);
index ee7e1d9089861ac88f3d53d0ad2273d30a6fdcad..847c11542f024c3d1333c24fb8ea9f3479506be0 100644 (file)
@@ -404,6 +404,7 @@ out_list_del:
 int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
 {
        unsigned long flags;
+       unsigned int memflags;
 
        lockdep_assert_held(&mtd_table_mutex);
 
@@ -420,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
        spin_unlock_irqrestore(&old->queue_lock, flags);
 
        /* freeze+quiesce queue to ensure all requests are flushed */
-       blk_mq_freeze_queue(old->rq);
+       memflags = blk_mq_freeze_queue(old->rq);
        blk_mq_quiesce_queue(old->rq);
        blk_mq_unquiesce_queue(old->rq);
-       blk_mq_unfreeze_queue(old->rq);
+       blk_mq_unfreeze_queue(old->rq, memflags);
 
        /* If the device is currently open, tell trans driver to close it,
                then put mtd device, and don't touch it again */
index 76b615d4d5b91e163e5a6e7baf451c959a2c3cab..40046770f1bf0b98261d8b80e21aa0cc04ebb7a0 100644 (file)
@@ -2132,15 +2132,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
                struct nvme_ns_info *info)
 {
        struct queue_limits lim;
+       unsigned int memflags;
        int ret;
 
        lim = queue_limits_start_update(ns->disk->queue);
        nvme_set_ctrl_limits(ns->ctrl, &lim);
 
-       blk_mq_freeze_queue(ns->disk->queue);
+       memflags = blk_mq_freeze_queue(ns->disk->queue);
        ret = queue_limits_commit_update(ns->disk->queue, &lim);
        set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
-       blk_mq_unfreeze_queue(ns->disk->queue);
+       blk_mq_unfreeze_queue(ns->disk->queue, memflags);
 
        /* Hide the block-interface for these devices */
        if (!ret)
@@ -2155,6 +2156,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
        struct nvme_id_ns_nvm *nvm = NULL;
        struct nvme_zone_info zi = {};
        struct nvme_id_ns *id;
+       unsigned int memflags;
        sector_t capacity;
        unsigned lbaf;
        int ret;
@@ -2186,7 +2188,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
        lim = queue_limits_start_update(ns->disk->queue);
 
-       blk_mq_freeze_queue(ns->disk->queue);
+       memflags = blk_mq_freeze_queue(ns->disk->queue);
        ns->head->lba_shift = id->lbaf[lbaf].ds;
        ns->head->nuse = le64_to_cpu(id->nuse);
        capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2219,7 +2221,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
        ret = queue_limits_commit_update(ns->disk->queue, &lim);
        if (ret) {
-               blk_mq_unfreeze_queue(ns->disk->queue);
+               blk_mq_unfreeze_queue(ns->disk->queue, memflags);
                goto out;
        }
 
@@ -2235,7 +2237,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
                ns->head->features |= NVME_NS_DEAC;
        set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
        set_bit(NVME_NS_READY, &ns->flags);
-       blk_mq_unfreeze_queue(ns->disk->queue);
+       blk_mq_unfreeze_queue(ns->disk->queue, memflags);
 
        if (blk_queue_is_zoned(ns->queue)) {
                ret = blk_revalidate_disk_zones(ns->disk);
@@ -2291,9 +2293,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
        if (!ret && nvme_ns_head_multipath(ns->head)) {
                struct queue_limits *ns_lim = &ns->disk->queue->limits;
                struct queue_limits lim;
+               unsigned int memflags;
 
                lim = queue_limits_start_update(ns->head->disk->queue);
-               blk_mq_freeze_queue(ns->head->disk->queue);
+               memflags = blk_mq_freeze_queue(ns->head->disk->queue);
                /*
                 * queue_limits mixes values that are the hardware limitations
                 * for bio splitting with what is the device configuration.
@@ -2325,7 +2328,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
                set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
                nvme_mpath_revalidate_paths(ns);
 
-               blk_mq_unfreeze_queue(ns->head->disk->queue);
+               blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
        }
 
        return ret;
index a85d190942bdf990244c1b40be84130ef5a57e20..2a7635565083046c575efe1793362ae10581defd 100644 (file)
@@ -60,7 +60,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
        lockdep_assert_held(&subsys->lock);
        list_for_each_entry(h, &subsys->nsheads, entry)
                if (h->disk)
-                       blk_mq_unfreeze_queue(h->disk->queue);
+                       blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
 }
 
 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
index 4411426a78948c614481a5aa9c85af6da9178e79..b86e259516a7e93cac70942d5b3273c703c6eb76 100644 (file)
@@ -2723,6 +2723,7 @@ int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
        struct request_queue *q = sdev->request_queue;
+       unsigned int memflags;
        int err;
 
        /*
@@ -2737,7 +2738,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
 
        blk_set_pm_only(q);
 
-       blk_mq_freeze_queue(q);
+       memflags = blk_mq_freeze_queue(q);
        /*
         * Ensure that the effect of blk_set_pm_only() will be visible
         * for percpu_ref_tryget() callers that occur after the queue
@@ -2745,7 +2746,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
         * was called. See also https://lwn.net/Articles/573497/.
         */
        synchronize_rcu();
-       blk_mq_unfreeze_queue(q);
+       blk_mq_unfreeze_queue(q, memflags);
 
        mutex_lock(&sdev->state_mutex);
        err = scsi_device_set_state(sdev, SDEV_QUIESCE);
index 042329b74c6e6829afae5a4feed057bba4bff64b..312d782139548347feb6037d34db1691dad06329 100644 (file)
@@ -220,6 +220,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
        int new_shift = sbitmap_calculate_shift(depth);
        bool need_alloc = !sdev->budget_map.map;
        bool need_free = false;
+       unsigned int memflags;
        int ret;
        struct sbitmap sb_backup;
 
@@ -240,7 +241,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
         * and here disk isn't added yet, so freezing is pretty fast
         */
        if (need_free) {
-               blk_mq_freeze_queue(sdev->request_queue);
+               memflags = blk_mq_freeze_queue(sdev->request_queue);
                sb_backup = sdev->budget_map;
        }
        ret = sbitmap_init_node(&sdev->budget_map,
@@ -256,7 +257,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
                else
                        sbitmap_free(&sb_backup);
                ret = 0;
-               blk_mq_unfreeze_queue(sdev->request_queue);
+               blk_mq_unfreeze_queue(sdev->request_queue, memflags);
        }
        return ret;
 }
index 796e37a1d859f21bb47e899b8fae86fefc7daca6..3438269a54405b568f7ad0fdd254733931a1500f 100644 (file)
@@ -1439,6 +1439,7 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_dev_info *dev_info = &hba->dev_info;
        struct scsi_device *sdev;
+       unsigned int memflags;
        unsigned int rtt;
        int ret;
 
@@ -1458,14 +1459,16 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
 
        ufshcd_rpm_get_sync(hba);
 
+       memflags = memalloc_noio_save();
        shost_for_each_device(sdev, hba->host)
-               blk_mq_freeze_queue(sdev->request_queue);
+               blk_mq_freeze_queue_nomemsave(sdev->request_queue);
 
        ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
 
        shost_for_each_device(sdev, hba->host)
-               blk_mq_unfreeze_queue(sdev->request_queue);
+               blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
+       memalloc_noio_restore(memflags);
 
        ufshcd_rpm_put_sync(hba);
 
index a0a9007cc1e36f89ebb21e699de3234a3cf9ef5b..9ebb53f031cdb519c6b60cc2bc0c4b8b3f4d5ff4 100644 (file)
@@ -900,8 +900,22 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+       unsigned int memflags = memalloc_noio_save();
+
+       blk_mq_freeze_queue_nomemsave(q);
+       return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+       blk_mq_unfreeze_queue_nomemrestore(q);
+       memalloc_noio_restore(memflags);
+}
 void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,