        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-               if (force_all_cpus ||
-                   pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+               if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
                    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 void lru_cache_disable(void)
 {
        atomic_inc(&lru_disable_count);
-#ifdef CONFIG_SMP
        /*
-        * lru_add_drain_all in the force mode will schedule draining on
-        * all online CPUs so any calls of lru_cache_disabled wrapped by
-        * local_lock or preemption disabled would be ordered by that.
-        * The atomic operation doesn't need to have stronger ordering
-        * requirements because that is enforced by the scheduling
-        * guarantees.
+        * Readers of lru_disable_count are protected by either disabling
+        * preemption or rcu_read_lock:
+        *
+        * preempt_disable, local_irq_disable  [bh_lru_lock()]
+        * rcu_read_lock                       [rt_spin_lock CONFIG_PREEMPT_RT]
+        * preempt_disable                     [local_lock !CONFIG_PREEMPT_RT]
+        *
+        * Since the v5.1 kernel, synchronize_rcu() is guaranteed to wait on
+        * preempt_disable() regions of code. So any CPU which sees
+        * lru_disable_count = 0 will have exited the critical
+        * section when synchronize_rcu() returns.
         */
+       synchronize_rcu();
+#ifdef CONFIG_SMP
        __lru_add_drain_all(true);
 #else
        lru_add_and_bh_lrus_drain();
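
For illustration, here is a minimal userspace sketch of the reader/writer pairing that the
new comment describes, built on liburcu (an assumption of this sketch, not something the
patch uses), with rcu_read_lock()/synchronize_rcu() standing in for the kernel's
preempt-disabled and local_lock sections. The helper names reader() and
lru_cache_disable_demo(), and the single batched_pages counter replacing the per-CPU
pagevecs, are invented here; the sketch shows the ordering argument only, not the kernel
implementation. Build with: gcc demo.c -lurcu -lpthread

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include <urcu.h>               /* userspace RCU (liburcu), default flavour */

static atomic_int lru_disable_count;    /* mirrors the kernel counter */
static atomic_int batched_pages;        /* stands in for the per-CPU pagevecs */

static int lru_cache_disabled(void)
{
        return atomic_load(&lru_disable_count) != 0;
}

/* Reader: only batch a "page" while caching is not disabled. */
static void *reader(void *arg)
{
        (void)arg;
        rcu_register_thread();
        for (int i = 0; i < 100000; i++) {
                rcu_read_lock();        /* kernel: local_lock()/preempt_disable() */
                if (!lru_cache_disabled())
                        atomic_fetch_add(&batched_pages, 1);
                rcu_read_unlock();
        }
        rcu_unregister_thread();
        return NULL;
}

/* Writer: disable caching, wait out current readers, then drain. */
static void lru_cache_disable_demo(void)
{
        atomic_fetch_add(&lru_disable_count, 1);
        /*
         * Once this returns, every read-side section that could still have
         * seen lru_disable_count == 0 has exited, so the "drain" below
         * cannot race with a reader that believes caching is enabled.
         */
        synchronize_rcu();
        atomic_store(&batched_pages, 0);        /* drain */
}

int main(void)
{
        pthread_t t;

        rcu_register_thread();
        pthread_create(&t, NULL, reader, NULL);
        usleep(1000);
        lru_cache_disable_demo();
        printf("drained, disable_count=%d\n", atomic_load(&lru_disable_count));
        pthread_join(t, NULL);
        rcu_unregister_thread();
        return 0;
}

The same ordering argument is what lets the first hunk drop the "force_all_cpus ||" test:
the removed comment relied on scheduling drain work on every online CPU to order
lru_cache_disable() against preempt-disabled sections, whereas with synchronize_rcu()
supplying that guarantee, drain work only needs to be queued on CPUs whose pagevecs
actually hold pages.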