tools/memory-model: Add example for heuristic lockless reads
author Paul E. McKenney <paulmck@kernel.org>
Thu, 13 May 2021 21:54:58 +0000 (14:54 -0700)
committer Paul E. McKenney <paulmck@kernel.org>
Tue, 25 May 2021 00:13:27 +0000 (17:13 -0700)
This commit adds example code for heuristic lockless reads, based loosely
on the sem_lock() and sem_unlock() functions.

Reported-by: Manfred Spraul <manfred@colorfullife.com>
[ paulmck: Update per Manfred Spraul and Hillf Danton feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
tools/memory-model/Documentation/access-marking.txt

index 58bff26198767e9284202f176c61a5fd26f0fb3b..be7d507997cf8d64b98857695cf40c5807b8ca57 100644 (file)
@@ -319,6 +319,100 @@ of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
 concurrent lockless write.
 
 
+Lock-Protected Writes With Heuristic Lockless Reads
+---------------------------------------------------
+
+For another example, suppose that the code can normally make use of
+a per-data-structure lock, but there are times when a global lock
+is required.  These times are indicated via a global flag.  The code
+might look as follows, and is based loosely on nf_conntrack_lock(),
+nf_conntrack_all_lock(), and nf_conntrack_all_unlock():
+
+       bool global_flag;
+       DEFINE_SPINLOCK(global_lock);
+       struct foo {
+               spinlock_t f_lock;
+               int f_data;
+       };
+
+       /* All foo structures are in the following array. */
+       int nfoo;
+       struct foo *foo_array;
+
+       void do_something_locked(struct foo *fp)
+       {
+               bool gf = true;
+
+               /* IMPORTANT: Heuristic plus spin_lock()! */
+               if (!data_race(global_flag)) {
+                       spin_lock(&fp->f_lock);
+                       if (!smp_load_acquire(&global_flag)) {
+                               do_something(fp);
+                               spin_unlock(&fp->f_lock);
+                               return;
+                       }
+                       spin_unlock(&fp->f_lock);
+               }
+               spin_lock(&global_lock);
+               /* global_lock held, thus global_flag cannot change. */
+               if (!global_flag) {
+                       spin_lock(&fp->f_lock);
+                       spin_unlock(&global_lock);
+                       gf = false;
+               }
+               do_something(fp);
+               if (gf)
+                       spin_unlock(&global_lock);
+               else
+                       spin_unlock(&fp->f_lock);
+       }
+
+       void begin_global(void)
+       {
+               int i;
+
+               spin_lock(&global_lock);
+               WRITE_ONCE(global_flag, true);
+               for (i = 0; i < nfoo; i++) {
+                       /* Wait for pre-existing local locks. */
+                       spin_lock(&foo_array[i].f_lock);
+                       spin_unlock(&foo_array[i].f_lock);
+               }
+       }
+
+       void end_global(void)
+       {
+               smp_store_release(&global_flag, false);
+               /* Pre-existing global lock acquisitions will recheck. */
+               spin_unlock(&global_lock);
+       }
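+
+A global-mode operation might be driven by a wrapper loosely like the
+following sketch, in which do_something_global() and do_global_work()
+are hypothetical and not part of the original example.  Note that
+begin_global() returns with global_lock still held, so the work runs
+with all do_something_locked() critical sections excluded:
+
+       void do_something_global(void)
+       {
+               begin_global();
+               do_global_work(); /* Hypothetical global-mode operation. */
+               end_global();
+       }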
+
+All code paths leading from the do_something_locked() function's first
+read from global_flag acquire a lock, so endless load fusing cannot
+happen.
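+
+For contrast, consider a hypothetical busy-wait loop that is not part
+of the above example, and in which nothing orders successive reads
+from global_flag:
+
+       while (global_flag)  /* BUGGY: loads may be fused. */
+               continue;
+
+Here the compiler is within its rights to fuse all of these plain
+C-language loads into a single load preceding the loop, which could
+then spin forever even after end_global() clears global_flag.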
+
+If the value read from global_flag is true, then global_flag is rechecked
+while holding global_lock, which prevents global_flag from changing.
+If this recheck finds that global_flag is now false, the acquisition
+of ->f_lock prior to the release of global_lock will result in any subsequent
+begin_global() invocation waiting to acquire ->f_lock.
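+
+This case might be pictured by the following interleaving sketch,
+which is not part of the original text and in which time advances
+downward:
+
+       do_something_locked(fp)           begin_global()
+       -----------------------           --------------
+       spin_lock(&global_lock);
+       /* global_flag is false. */
+       spin_lock(&fp->f_lock);
+       spin_unlock(&global_lock);
+                                         spin_lock(&global_lock);
+                                         WRITE_ONCE(global_flag, true);
+                                         spin_lock(&foo_array[i].f_lock);
+                                         /* ...waits: foo_array[i] == fp. */
+       do_something(fp);
+       spin_unlock(&fp->f_lock);
+                                         /* ...acquires, then releases. */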
+
+On the other hand, if the value read from global_flag is false, then
+rechecking global_flag under ->f_lock, combined with the
+synchronization with begin_global(), guarantees that any erroneous
+read will cause the do_something_locked() function's first
+do_something() invocation to happen before begin_global() returns.
+The combination of the smp_load_acquire() in do_something_locked()
+and the smp_store_release() in end_global() guarantees that either
+the do_something_locked() function's first do_something() invocation
+happens after the call to end_global() or that do_something_locked()
+acquires global_lock and rechecks under that lock.
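+
+This release-acquire pairing is an instance of the message-passing
+pattern.  The following sketch is adapted from
+MP+pooncerelease+poacquireonce.litmus in
+tools/memory-model/litmus-tests, with buf and flag standing in for
+the global-mode state and global_flag (note that the flag's sense is
+inverted relative to global_flag):
+
+       C MP+pooncerelease+poacquireonce
+
+       {}
+
+       P0(int *buf, int *flag)
+       {
+               WRITE_ONCE(*buf, 1);
+               smp_store_release(flag, 1);
+       }
+
+       P1(int *buf, int *flag)
+       {
+               int r0;
+               int r1;
+
+               r0 = smp_load_acquire(flag);
+               r1 = READ_ONCE(*buf);
+       }
+
+       exists (1:r0=1 /\ 1:r1=0)
+
+LKMM forbids this test's "exists" clause:  If P1()'s acquire load
+observes P0()'s release store, then P1() must also observe P0()'s
+prior store to buf.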
+
+For this to work, only those foo structures in foo_array[] may be
+passed to do_something_locked().  The reason for this is that the
+synchronization with begin_global() relies on momentarily locking each
+and every foo structure.
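+
+For example, initialization might look something like the following
+sketch, in which init_foo_array() is a hypothetical helper that is
+not part of the original example:
+
+       int init_foo_array(int n)
+       {
+               int i;
+
+               foo_array = kcalloc(n, sizeof(*foo_array), GFP_KERNEL);
+               if (!foo_array)
+                       return -ENOMEM;
+               nfoo = n;
+               for (i = 0; i < nfoo; i++)
+                       spin_lock_init(&foo_array[i].f_lock);
+               return 0;
+       }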
+
+
 Lockless Reads and Writes
 -------------------------