 /* device handler flags */
 #define ALUA_OPTIMIZE_STPG             0x01
 #define ALUA_RTPG_EXT_HDR_UNSUPP       0x02
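+/* ALUA_SYNC_STPG: run this port group's work on the synchronous workqueue */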
+#define ALUA_SYNC_STPG                 0x04
 /* State machine flags */
 #define ALUA_PG_RUN_RTPG               0x10
 #define ALUA_PG_RUN_STPG               0x20
 static LIST_HEAD(port_group_list);
 static DEFINE_SPINLOCK(port_group_lock);
 static struct workqueue_struct *kaluad_wq;
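+/* dedicated workqueue for port groups flagged ALUA_SYNC_STPG */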
+static struct workqueue_struct *kaluad_sync_wq;
 
 struct alua_port_group {
        struct kref             kref;
        int err = SCSI_DH_OK;
        struct alua_queue_data *qdata, *tmp;
        unsigned long flags;
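+       /* default to the regular workqueue; switched below for sync STPG */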
+       struct workqueue_struct *alua_wq = kaluad_wq;
 
        spin_lock_irqsave(&pg->lock, flags);
        sdev = pg->rtpg_sdev;
                spin_unlock_irqrestore(&pg->lock, flags);
                return;
        }
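+       /* requeue on the sync workqueue for port groups flagged ALUA_SYNC_STPG */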
+       if (pg->flags & ALUA_SYNC_STPG)
+               alua_wq = kaluad_sync_wq;
        pg->flags |= ALUA_PG_RUNNING;
        if (pg->flags & ALUA_PG_RUN_RTPG) {
                pg->flags &= ~ALUA_PG_RUN_RTPG;
                        pg->flags &= ~ALUA_PG_RUNNING;
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        spin_unlock_irqrestore(&pg->lock, flags);
-                       queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+                       queue_delayed_work(alua_wq, &pg->rtpg_work,
                                           pg->interval * HZ);
                        return;
                }
                        pg->interval = 0;
                        pg->flags &= ~ALUA_PG_RUNNING;
                        spin_unlock_irqrestore(&pg->lock, flags);
-                       queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+                       queue_delayed_work(alua_wq, &pg->rtpg_work,
                                           pg->interval * HZ);
                        return;
                }
 {
        int start_queue = 0;
        unsigned long flags;
+       struct workqueue_struct *alua_wq = kaluad_wq;
 
        if (!pg)
                return;
                scsi_device_get(sdev);
                start_queue = 1;
        }
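+       /* select the sync workqueue if this port group requires it */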
+       if (pg->flags & ALUA_SYNC_STPG)
+               alua_wq = kaluad_sync_wq;
        spin_unlock_irqrestore(&pg->lock, flags);
 
        if (start_queue &&
-           !queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+           !queue_delayed_work(alua_wq, &pg->rtpg_work,
                                msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
                scsi_device_put(sdev);
                kref_put(&pg->kref, release_port_group);
                /* Temporary failure, bypass */
                return SCSI_DH_DEV_TEMP_BUSY;
        }
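+       /* separate workqueue for port groups using synchronous STPG */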
+       kaluad_sync_wq = create_workqueue("kaluad_sync");
+       if (!kaluad_sync_wq) {
+               destroy_workqueue(kaluad_wq);
+               return SCSI_DH_DEV_TEMP_BUSY;
+       }
        r = scsi_register_device_handler(&alua_dh);
        if (r != 0) {
                printk(KERN_ERR "%s: Failed to register scsi device handler",
                        ALUA_DH_NAME);
+               destroy_workqueue(kaluad_sync_wq);
                destroy_workqueue(kaluad_wq);
        }
        return r;
 static void __exit alua_exit(void)
 {
        scsi_unregister_device_handler(&alua_dh);
+       destroy_workqueue(kaluad_sync_wq);
        destroy_workqueue(kaluad_wq);
 }