        q->queuedata = dev;
        q->bsg_job_fn = job_fn;
-       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+       blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+       blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
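
For reference, a minimal sketch of the blk_queue_flag_set()/blk_queue_flag_clear() helpers these hunks convert to, assuming the companion blk-core.c change that introduced them (the exact body there may differ slightly):

    void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
    {
            unsigned long flags;

            /* serialize the flag update under the queue lock */
            spin_lock_irqsave(q->queue_lock, flags);
            queue_flag_set(flag, q);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }

    void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            queue_flag_clear(flag, q);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }

Readers of these bits are unaffected: helpers such as blk_queue_discard(q), blk_queue_nonrot(q) and blk_queue_nomerges(q) simply test the corresponding QUEUE_FLAG_* bit, which is why only the set/clear call sites need converting below.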
 
 
                 * topology on all peers. */
                blk_queue_discard_granularity(q, 512);
                q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
                q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
        } else {
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                blk_queue_discard_granularity(q, 0);
                q->limits.max_discard_sectors = 0;
                q->limits.max_write_zeroes_sectors = 0;
 
        blk_mq_freeze_queue(lo->lo_queue);
        lo->use_dio = use_dio;
        if (use_dio) {
-               queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+               blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
                lo->lo_flags |= LO_FLAGS_DIRECT_IO;
        } else {
-               queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+               blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
                lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
        }
        blk_mq_unfreeze_queue(lo->lo_queue);
                q->limits.discard_alignment = 0;
                blk_queue_max_discard_sectors(q, 0);
                blk_queue_max_write_zeroes_sectors(q, 0);
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                return;
        }
 
 
        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
        blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
         * page. For directio mode, merge does help to dispatch bigger request
         * to underlayer disk. We will enable merge once directio is enabled.
         */
-       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+       blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 
        err = -ENOMEM;
        disk = lo->lo_disk = alloc_disk(1 << part_shift);
 
        else
                set_disk_ro(nbd->disk, false);
        if (config->flags & NBD_FLAG_SEND_TRIM)
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (config->flags & NBD_FLAG_SEND_FLUSH) {
                if (config->flags & NBD_FLAG_SEND_FUA)
                        blk_queue_write_cache(nbd->disk->queue, true, true);
                nbd->config = NULL;
 
                nbd->tag_set.timeout = 0;
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
        /*
         * Tell the block layer that we are not a rotational device
         */
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
        disk->queue->limits.discard_granularity = 512;
        blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
        blk_queue_max_segment_size(disk->queue, UINT_MAX);
 
        nullb->q->limits.discard_granularity = nullb->dev->blocksize;
        nullb->q->limits.discard_alignment = nullb->dev->blocksize;
        blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
 static int null_open(struct block_device *bdev, fmode_t mode)
        }
 
        nullb->q->queuedata = nullb;
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
        mutex_lock(&lock);
        nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
 
                goto out_tag_set;
        }
 
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
        /* set io sizes to object size */
        blk_queue_io_opt(q, segment_size);
 
        /* enable the discard support */
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        q->limits.discard_granularity = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
        blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
        blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
        if (rsxx_discard_supported(card)) {
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
                blk_queue_max_discard_sectors(card->queue,
                                                RSXX_HW_BLK_SIZE >> 9);
                card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
 
        /* set optimal I/O size to 8KB */
        blk_queue_io_opt(q, 8192);
 
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
        blk_queue_rq_timeout(q, 8 * HZ);
 
 
        unsigned int segments = info->max_indirect_segments ? :
                                BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+       blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 
        if (info->feature_discard) {
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity;
                rq->limits.discard_alignment = info->discard_alignment;
                if (info->feature_secdiscard)
-                       queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+                       blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
        }
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
                                blkif_req(req)->error = BLK_STS_NOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
-                               queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-                               queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+                               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+                               blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
                        }
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
 
        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resembles non-rotational disks */
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 
        /*
         * To ensure that we always get PAGE_SIZE aligned
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
 
               queue_max_sectors(q) / 2);
 
        if (ata_id_is_ssd(id)) {
-               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+               blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
        /* calculate drive capacity, and select LBA if possible */
 
        q->request_fn = do_ide_request;
        q->initialize_rq_fn = ide_initialize_rq;
        q->cmd_size = sizeof(struct ide_request);
-       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+       blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        if (blk_init_allocated_queue(q) < 0) {
                blk_cleanup_queue(q);
                return 1;
 
        tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
 
        pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        tdisk->disk_name,
 
        q->limits = *limits;
 
        if (!dm_table_supports_discards(t)) {
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                /* Must also clear discard limits... */
                q->limits.max_discard_sectors = 0;
                q->limits.max_hw_discard_sectors = 0;
                q->limits.discard_alignment = 0;
                q->limits.discard_misaligned = 0;
        } else
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 
        if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
        blk_queue_write_cache(q, wc, fua);
 
        if (dm_table_supports_dax(t))
-               queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+               blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        if (dm_table_supports_dax_write_cache(t))
                dax_write_cache(t->md->dax_dev, true);
 
        /* Ensure that all underlying devices are non-rotational. */
        if (dm_table_all_devices_attribute(t, device_is_nonrot))
-               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        else
-               queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 
        if (!dm_table_supports_write_same(t))
                q->limits.max_write_same_sectors = 0;
                q->limits.max_write_zeroes_sectors = 0;
 
        if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-               queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+               blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
        else
-               queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+               blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
        dm_table_verify_integrity(t);
 
         * have it set.
         */
        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
-               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+               blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }
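
The dm-table hunks above set a flag only when every underlying device supports the attribute; dm_table_all_devices_attribute() walks the table's devices with a small predicate per attribute. A sketch of that predicate style, using device_is_nonrot as the example and assuming the usual iterate_devices callout signature:

    static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            /* report whether this underlying queue has QUEUE_FLAG_NONROT set */
            return q && blk_queue_nonrot(q);
    }

dm_table_all_devices_attribute() is expected to return true only if the callout holds for every data device in the table, which is what gates the set vs. clear calls above.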
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
 
        }
 
        if (!discard_supported)
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
        else
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
        /*
         * Here we calculate the device offsets.
 
                if (mddev->degraded)
                        nonrot = false;
                if (nonrot)
-                       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+                       blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
                else
-                       queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+                       blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
                mddev->queue->backing_dev_info->congested_data = mddev;
                mddev->queue->backing_dev_info->congested_fn = md_congested;
        }
 
                                discard_supported = true;
                }
                if (!discard_supported)
-                       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
                else
-                       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+                       blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
        }
 
        /* calculate array device size */
 
                }
        }
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
        return err;
 }
 
        if (mddev->queue) {
                if (discard_supported)
-                       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
                                                mddev->queue);
                else
-                       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
                                                  mddev->queue);
        }
 
 
                break;
        }
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
        print_conf(conf);
        return err;
 
        if (mddev->queue) {
                if (discard_supported)
-                       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
                                                mddev->queue);
                else
-                       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
                                                  mddev->queue);
        }
        /* need to check that every block has at least one working mirror */
 
                if (devices_handle_discard_safely &&
                    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
                    mddev->queue->limits.discard_granularity >= stripe)
-                       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
                                                mddev->queue);
                else
-                       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
                                                mddev->queue);
 
                blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
 
        if (!max_discard)
                return;
 
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
-               queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+               blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }
 
 /**
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);
 
 
        blk_queue_logical_block_size(new->rq, tr->blksize);
 
        blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
        if (tr->discard) {
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
        }
 
 
        blk_queue_make_request(q, nd_blk_make_request);
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        q->queuedata = nsblk;
 
        disk = alloc_disk(0);
 
        blk_queue_make_request(btt->btt_queue, btt_make_request);
        blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
        blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
        btt->btt_queue->queuedata = btt;
 
        set_capacity(btt->btt_disk, 0);
 
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-       queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
 
 
        blk_queue_max_discard_sectors(queue, UINT_MAX);
        blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue);
 
        if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
                blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
        ns->queue = blk_mq_init_queue(ctrl->tagset);
        if (IS_ERR(ns->queue))
                goto out_free_ns;
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
        ns->queue->queuedata = ns;
        ns->ctrl = ctrl;
 
 
        q->queuedata = head;
        blk_queue_make_request(q, nvme_ns_head_make_request);
        q->poll_fn = nvme_ns_head_poll;
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* set to a default value for 512 until disk is validated */
        blk_queue_logical_block_size(q, 512);
 
 
        } else {
                max = block->base->discipline->max_blocks << block->s2b_shift;
        }
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        q->limits.max_dev_sectors = max;
        blk_queue_logical_block_size(q, logical_block_size);
        blk_queue_max_hw_sectors(q, max);
 
                blk_queue_max_discard_sectors(q, max_discard_sectors);
                blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }
 }
 
 
        dev_info->gd->private_data = dev_info;
        blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
        blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
-       queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+       blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 
        seg_byte_size = (dev_info->end - dev_info->start + 1);
        set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
 
        blk_queue_logical_block_size(rq, 1 << 12);
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
 
        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
        if (!bdev->gendisk) {
 
                        put_disk(xpram_disks[i]);
                        goto out;
                }
-               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
-               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
+               blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
+               blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
                blk_queue_make_request(xpram_queues[i], xpram_make_request);
                blk_queue_logical_block_size(xpram_queues[i], 4096);
        }
 
 
        blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
 
-       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+       blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
        blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }
 
 
  * then sending IOs with holes.
  *
  * Though driver can request block layer to disable IO merging by calling-
- * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
+ * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
  * user may tune sysfs parameter- nomerges again to 0 or 1.
  *
  * If in future IO scheduling is enabled with SCSI BLK MQ,
 
                 ** merged and can eliminate holes created during merging
                 ** operation.
                 **/
-               queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
+               blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
                                sdev->request_queue);
                blk_queue_virt_boundary(sdev->request_queue,
                                ioc->page_size - 1);
 
        if (sdebug_verbose)
                pr_info("slave_alloc <%u %u %u %llu>\n",
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
+       blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
        return 0;
 }
 
 
 {
        struct device *dev = shost->dma_dev;
 
-       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+       blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 
        /*
         * this limit is imposed by hardware restrictions
 
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+       blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+       blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        return 0;
 }
 
 
        case SD_LBP_FULL:
        case SD_LBP_DISABLE:
                blk_queue_max_discard_sectors(q, 0);
-               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                return;
 
        case SD_LBP_UNMAP:
        }
 
        blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
        rot = get_unaligned_be16(&buffer[4]);
 
        if (rot == 1) {
-               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+               blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
        if (sdkp->device->type == TYPE_ZBC) {