Since commit 70e806e4e645 ("mm: Do early cow for pinned pages during
fork() for ptes"), pages under a FOLL_PIN will not be write protected
during COW for fork().  This means that pages returned from
pin_user_pages(FOLL_WRITE) should not become write protected while the
pin is active.
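
To make the guarantee concrete, here is a minimal illustrative sketch
(not code from this patch; pin_for_dma() is a made-up helper) of the
pattern that relies on it:

	#include <linux/mm.h>

	/* Hypothetical driver helper: pin user pages for DMA.  While the
	 * pins are held, fork()'s COW handling must not write protect
	 * these pages, otherwise device writes could land in a page the
	 * pinning process no longer observes.
	 */
	static int pin_for_dma(unsigned long uaddr, int nr_pages,
			       struct page **pages)
	{
		int rc;

		rc = pin_user_pages_fast(uaddr, nr_pages,
					 FOLL_WRITE | FOLL_LONGTERM, pages);
		if (rc < 0)
			return rc;

		/* ... program the device to DMA into these pages ... */

		unpin_user_pages(pages, rc);
		return 0;
	}
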
However, there is a small race where get_user_pages_fast(FOLL_PIN) can
establish a FOLL_PIN at the same time copy_present_page() is write
protecting it:
        CPU 0                                 CPU 1
   get_user_pages_fast()
    internal_get_user_pages_fast()
                                          copy_page_range()
                                            pte_alloc_map_lock()
                                              copy_present_page()
                                                atomic_read(has_pinned) == 0
                                                page_maybe_dma_pinned() == false
     atomic_set(has_pinned, 1);
     gup_pgd_range()
      gup_pte_range()
       pte_t pte = gup_get_pte(ptep)
       pte_access_permitted(pte)
       try_grab_compound_head()
                                                pte = pte_wrprotect(pte)
                                                set_pte_at();
                                            pte_unmap_unlock()
      // GUP now returns with a write protected page

The first attempt to resolve this by using the write protect caused
problems (and was missing a barrier); see commit f3c64eda3e50 ("mm:
avoid early COW write protect games during fork()").

Instead, wrap copy_p4d_range() with the write side of a seqcount and
check the read side around gup_pgd_range().  If there is a collision
then get_user_pages_fast() fails and falls back to slow GUP.

Slow GUP is safe against this race because copy_page_range() is only
called while holding the exclusive side of the mmap_lock on the src
mm_struct.
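
The seqcount collision pattern, boiled down to a self-contained
userspace sketch (all names here are made up; C11 atomics, which are
sequentially consistent by default, stand in for the kernel's
seqcount_t):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint write_protect_seq;

	/* Write side (the fork() path): bracket the write-protect pass.
	 * An odd count means a pass is in progress.
	 */
	static void wrprotect_begin(void)
	{
		atomic_fetch_add(&write_protect_seq, 1);
	}

	static void wrprotect_end(void)
	{
		atomic_fetch_add(&write_protect_seq, 1);
	}

	/* Read side (the lockless gup_fast() path).  Returns false when
	 * the caller must fall back to the slow path.
	 */
	static bool try_pin_fast(void)
	{
		unsigned int seq = atomic_load(&write_protect_seq);

		if (seq & 1)	/* a write-protect pass is running */
			return false;

		/* ... walk the page tables locklessly, take the pins ... */

		/* A changed count means a write protect may have raced us:
		 * drop the pins and fall back to slow GUP, which serializes
		 * against fork() via the mmap_lock.
		 */
		if (atomic_load(&write_protect_seq) != seq)
			return false;
		return true;
	}
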
[akpm@linux-foundation.org: coding style fixes]
Link: https://lore.kernel.org/r/CAHk-=wi=iCnYCARbPGjkVJu9eyYeZ13N64tZYLdOB8CP5Q_PLw@mail.gmail.com
Link: https://lkml.kernel.org/r/2-v4-908497cf359a+4782-gup_fork_jgg@nvidia.com
Fixes: f3c64eda3e50 ("mm: avoid early COW write protect games during fork()")
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: "Ahmed S. Darwish" <a.darwish@linutronix.de>	[seqcount_t parts]
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kirill Shutemov <kirill@shutemov.name>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Leon Romanovsky <leonro@nvidia.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
        .pgd            = swapper_pg_dir,
        .mm_users       = ATOMIC_INIT(2),
        .mm_count       = ATOMIC_INIT(1),
+       .write_protect_seq = SEQCNT_ZERO(tboot_mm.write_protect_seq),
        MMAP_LOCK_INITIALIZER(init_mm)
        .page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
        .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
 
        .mm_rb                  = RB_ROOT,
        .mm_users               = ATOMIC_INIT(2),
        .mm_count               = ATOMIC_INIT(1),
+       .write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
        MMAP_LOCK_INITIALIZER(efi_mm)
        .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
 
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
+#include <linux/seqlock.h>
 
 #include <asm/mmu.h>
 
                 */
                atomic_t has_pinned;
 
+               /**
+                * @write_protect_seq: Locked when any thread is write
+                * protecting pages mapped by this mm to enforce a later COW,
+                * for instance during page table copying for fork().
+                */
+               seqcount_t write_protect_seq;
+
 #ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
 
        mm->vmacache_seqnum = 0;
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
+       seqcount_init(&mm->write_protect_seq);
        mmap_init_lock(mm);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_state = NULL;
 
 {
        unsigned long flags;
        int nr_pinned = 0;
+       unsigned seq;
 
        if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
            !gup_fast_permitted(start, end))
                return 0;
 
+       if (gup_flags & FOLL_PIN) {
+               seq = raw_read_seqcount(&current->mm->write_protect_seq);
+               if (seq & 1)
+                       return 0;
+       }
+
        /*
         * Disable interrupts. The nested form is used, in order to allow full,
         * general purpose use of this routine.
         */
        local_irq_save(flags);
        gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
        local_irq_restore(flags);
+
+       /*
+        * When pinning pages for DMA there could be a concurrent write protect
+        * from fork() via copy_page_range(); in that case, always fail fast GUP.
+        */
+       if (gup_flags & FOLL_PIN) {
+               if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
+                       unpin_user_pages(pages, nr_pinned);
+                       return 0;
+               }
+       }
        return nr_pinned;
 }
 
 
        .pgd            = swapper_pg_dir,
        .mm_users       = ATOMIC_INIT(2),
        .mm_count       = ATOMIC_INIT(1),
+       .write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
        MMAP_LOCK_INITIALIZER(init_mm)
        .page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
        .arg_lock       =  __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
 
                mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                        0, src_vma, src_mm, addr, end);
                mmu_notifier_invalidate_range_start(&range);
+               /*
+                * Disabling preemption is not needed for the write side, as
+                * the read side doesn't spin, but goes to the mmap_lock.
+                *
+                * Use the raw variant of the seqcount_t write API to avoid
+                * lockdep complaining about preemptibility.
+                */
+               mmap_assert_write_locked(src_mm);
+               raw_write_seqcount_begin(&src_mm->write_protect_seq);
        }
 
        ret = 0;
                }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
 
-       if (is_cow)
+       if (is_cow) {
+               raw_write_seqcount_end(&src_mm->write_protect_seq);
                mmu_notifier_invalidate_range_end(&range);
+       }
        return ret;
 }