*/
 static struct workqueue_struct *kblockd_workqueue;
 
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+
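+       /*
+        * queue_flag_set() is not atomic and expects q->queue_lock to be
+        * held; take the lock here on behalf of the caller.
+        */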
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_set(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+
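+       /* Serialize against other queue_flag_*() updates on this queue. */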
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_clear(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag: %false if the flag was not set and
+ * %true if the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+       bool res;
+
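+       /*
+        * Holding q->queue_lock makes the test-and-set appear atomic to
+        * all other code that modifies q->queue_flags under this lock.
+        */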
+       spin_lock_irqsave(q->queue_lock, flags);
+       res = queue_flag_test_and_set(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag: %false if the flag was not set and
+ * %true if the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+       bool res;
+
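+       /* See blk_queue_flag_test_and_set() for the locking rationale. */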
+       spin_lock_irqsave(q->queue_lock, flags);
+       res = queue_flag_test_and_clear(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
 static void blk_clear_congested(struct request_list *rl, int sync)
 {
 #ifdef CONFIG_CGROUP_WRITEBACK
  */
 int blk_set_preempt_only(struct request_queue *q)
 {
-       unsigned long flags;
-       int res;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       return res;
+       return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
 }
 EXPORT_SYMBOL_GPL(blk_set_preempt_only);
 
 void blk_clear_preempt_only(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+       blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
        wake_up_all(&q->mq_freeze_wq);
-       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
 
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-       spin_lock_irq(q->queue_lock);
-       queue_flag_set(QUEUE_FLAG_DYING, q);
-       spin_unlock_irq(q->queue_lock);
+       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 
        /*
         * When queue DYING flag is set, we need to block new req
 
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_set(QUEUE_FLAG_QUIESCED, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 
        /* dispatch requests which are inserted during quiescing */
        blk_mq_run_hw_queues(q, true);
 
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-       spin_lock_irq(q->queue_lock);
        if (queueable)
-               queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+               blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
        else
-               queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
 
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
-       spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
-               queue_flag_set(QUEUE_FLAG_##flag, q);                   \
+               blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
-               queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
-       spin_unlock_irq(q->queue_lock);                                 \
+               blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
 }
 
        if (ret < 0)
                return ret;
 
-       spin_lock_irq(q->queue_lock);
        if (poll_on)
-               queue_flag_set(QUEUE_FLAG_POLL, q);
+               blk_queue_flag_set(QUEUE_FLAG_POLL, q);
        else
-               queue_flag_clear(QUEUE_FLAG_POLL, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
 
        return ret;
 }
        if (set == -1)
                return -EINVAL;
 
-       spin_lock_irq(q->queue_lock);
        if (set)
-               queue_flag_set(QUEUE_FLAG_WC, q);
+               blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WC, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 
        return count;
 }
         */
        mutex_lock(&q->sysfs_lock);
 
-       spin_lock_irq(q->queue_lock);
-       queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
-       spin_unlock_irq(q->queue_lock);
+       blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 
        /*
         * Remove the sysfs attributes before unregistering the queue data
 
                char *p = (char *) buf;
 
                val = simple_strtoul(p, &p, 10);
-               spin_lock_irq(q->queue_lock);
                if (val)
-                       queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+                       blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
                else
-                       queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
-               spin_unlock_irq(q->queue_lock);
+                       blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
        }
 
        return count;
 
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_POLL))
 
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
+
 /*
  * @q->queue_lock is set while a queue is being initialized. Since we know
  * that no other threads access the queue object before @q->queue_lock has