+       WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
 };
 
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded.  There are users which expect relatively
+ * short queue flush time.  Don't queue works which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works.  Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs.  Queue
+ * flushing might take relatively long.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_nrt_wq;
+
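
A minimal usage sketch (the callbacks and work item names below are
illustrative, not part of the patch): short-running work goes to
system_wq, long-running work to system_long_wq so it cannot stall
flushers of the default queue.

#include <linux/workqueue.h>

/* hypothetical short-running callback, fine on system_wq */
static void demo_short_fn(struct work_struct *work)
{
        /* brief, bounded-time housekeeping */
}

/* hypothetical long-running callback, belongs on system_long_wq */
static void demo_long_fn(struct work_struct *work)
{
        /* may run for a long time without delaying system_wq flushes */
}

static DECLARE_WORK(demo_short_work, demo_short_fn);
static DECLARE_WORK(demo_long_work, demo_long_fn);

static void demo_queue(void)
{
        queue_work(system_wq, &demo_short_work); /* same as schedule_work() */
        queue_work(system_long_wq, &demo_long_work);
}
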
 extern struct workqueue_struct *
-__create_workqueue_key(const char *name, unsigned int flags, int max_active,
-                      struct lock_class_key *key, const char *lock_name);
+__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
+                     struct lock_class_key *key, const char *lock_name);
 
 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, flags, max_active)            \
+#define alloc_workqueue(name, flags, max_active)               \
 ({                                                             \
        static struct lock_class_key __key;                     \
        const char *__lock_name;                                \
                                                                \
        if (__builtin_constant_p(name))                         \
                __lock_name = (name);                           \
        else                                                    \
                __lock_name = #name;                            \
                                                                \
-       __create_workqueue_key((name), (flags), (max_active),   \
-                               &__key, __lock_name);           \
+       __alloc_workqueue_key((name), (flags), (max_active),    \
+                             &__key, __lock_name);             \
 })
 #else
-#define __create_workqueue(name, flags, max_active)            \
-       __create_workqueue_key((name), (flags), (max_active), NULL, NULL)
+#define alloc_workqueue(name, flags, max_active)               \
+       __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif
 
 #define create_workqueue(name)                                 \
-       __create_workqueue((name), WQ_RESCUER, 1)
+       alloc_workqueue((name), WQ_RESCUER, 1)
 #define create_freezeable_workqueue(name)                      \
-       __create_workqueue((name),                              \
-                          WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
+       alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
 #define create_singlethread_workqueue(name)                    \
-       __create_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
+       alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 
 #endif
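
That is the whole of the interface change in include/linux/workqueue.h.
A sketch of a driver adopting the renamed allocator (the queue name,
flag choice and error handling are illustrative; passing 0 for
max_active requests the default):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_setup(void)
{
        /* WQ_NON_REENTRANT is illustrative; 0 selects default max_active */
        demo_wq = alloc_workqueue("demo", WQ_NON_REENTRANT, 0);
        if (!demo_wq)
                return -ENOMEM;
        return 0;
}

static void demo_teardown(void)
{
        destroy_workqueue(demo_wq);
}

The remaining hunks are against kernel/workqueue.c.
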
 };
 
+struct workqueue_struct *system_wq __read_mostly;
+struct workqueue_struct *system_long_wq __read_mostly;
+struct workqueue_struct *system_nrt_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL_GPL(system_long_wq);
+EXPORT_SYMBOL_GPL(system_nrt_wq);
+
 #define for_each_busy_worker(worker, i, pos, gcwq)                     \
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
-static struct workqueue_struct *keventd_wq __read_mostly;
-
 /**
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  */
 int schedule_work(struct work_struct *work)
 {
-       return queue_work(keventd_wq, work);
+       return queue_work(system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work);
 
  */
 int schedule_work_on(int cpu, struct work_struct *work)
 {
-       return queue_work_on(cpu, keventd_wq, work);
+       return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
 int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
 {
-       return queue_delayed_work(keventd_wq, dwork, delay);
+       return queue_delayed_work(system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
+       return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
  */
 void flush_scheduled_work(void)
 {
-       flush_workqueue(keventd_wq);
+       flush_workqueue(system_wq);
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
 
 int keventd_up(void)
 {
-       return keventd_wq != NULL;
+       return system_wq != NULL;
 }
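
With keventd_wq gone, the schedule_*() helpers above are thin wrappers
around system_wq.  A sketch of the equivalence (the work item is
illustrative):

#include <linux/workqueue.h>

static void demo_tick_fn(struct work_struct *work)
{
        /* periodic housekeeping */
}

static DECLARE_DELAYED_WORK(demo_tick, demo_tick_fn);

static void demo_arm(void)
{
        /* now exactly equivalent to schedule_delayed_work(&demo_tick, HZ) */
        queue_delayed_work(system_wq, &demo_tick, HZ);
}
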
 
 static struct cpu_workqueue_struct *alloc_cwqs(void)
        return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
 }
 
-struct workqueue_struct *__create_workqueue_key(const char *name,
-                                               unsigned int flags,
-                                               int max_active,
-                                               struct lock_class_key *key,
-                                               const char *lock_name)
+struct workqueue_struct *__alloc_workqueue_key(const char *name,
+                                              unsigned int flags,
+                                              int max_active,
+                                              struct lock_class_key *key,
+                                              const char *lock_name)
 {
        struct workqueue_struct *wq;
        unsigned int cpu;
 
+       /* a max_active of 0 requests the default, WQ_DFL_ACTIVE */
+       max_active = max_active ?: WQ_DFL_ACTIVE;
        max_active = wq_clamp_max_active(max_active, name);
 
        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        }
        return NULL;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue_key);
+EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
 
 /**
  * destroy_workqueue - safely terminate a workqueue
                        continue;
 
                debug_work_activate(rebind_work);
-               insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work,
+               insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
                            worker->scheduled.next,
                            work_color_to_flags(WORK_NO_COLOR));
        }
                spin_unlock_irq(&gcwq->lock);
        }
 
-       keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
-       BUG_ON(!keventd_wq);
+       system_wq = alloc_workqueue("events", 0, 0);
+       system_long_wq = alloc_workqueue("events_long", 0, 0);
+       system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
+       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
 }
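
A sketch of where system_nrt_wq earns its keep (the callback is
illustrative): on the ordinary system_wq, a work item that is re-queued
while still running may, in principle, execute on two CPUs at once;
WQ_NON_REENTRANT rules that out, so the callback needs no guard against
itself.

#include <linux/workqueue.h>

static void demo_nrt_fn(struct work_struct *work)
{
        /*
         * Never runs concurrently with itself on another CPU, so
         * state private to this callback needs no reentrancy locking.
         */
}

static DECLARE_WORK(demo_nrt_work, demo_nrt_fn);

static void demo_poke(void)
{
        queue_work(system_nrt_wq, &demo_nrt_work);
}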