www.infradead.org Git - users/dwmw2/linux.git/commitdiff
Merge tag 'random-6.11-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Jul 2024 17:29:50 +0000 (10:29 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Jul 2024 17:29:50 +0000 (10:29 -0700)
Pull random number generator updates from Jason Donenfeld:
 "This adds getrandom() support to the vDSO.

  First, it adds a new kind of mapping to mmap(2), MAP_DROPPABLE, which
  lets the kernel zero out pages anytime under memory pressure, which
  enables allocating memory that never gets swapped to disk but also
  doesn't count as being mlocked.

  Then, the vDSO implementation of getrandom() is introduced in a
  generic manner and hooked into random.c.

  Next, this is implemented on x86. (Also, though it's not ready for
  this pull, somebody has begun an arm64 implementation already)

  Finally, two vDSO selftests are added.

  There are also two housekeeping cleanup commits"

* tag 'random-6.11-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  MAINTAINERS: add random.h headers to RNG subsection
  random: note that RNDGETPOOL was removed in 2.6.9-rc2
  selftests/vDSO: add tests for vgetrandom
  x86: vdso: Wire up getrandom() vDSO implementation
  random: introduce generic vDSO getrandom() implementation
  mm: add MAP_DROPPABLE for designating always lazily freeable mappings

16 files changed:
1  2 
MAINTAINERS
arch/x86/Kconfig
arch/x86/include/asm/vdso/vsyscall.h
fs/proc/task_mmu.c
include/linux/mm.h
include/vdso/datapage.h
mm/ksm.c
mm/madvise.c
mm/memory.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/rmap.c
mm/vmscan.c
tools/testing/selftests/mm/.gitignore
tools/testing/selftests/mm/Makefile

diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc mm/ksm.c
Simple merge
diff --cc mm/madvise.c
Simple merge
diff --cc mm/memory.c
Simple merge
diff --cc mm/mempolicy.c
index 327a19b0883dbb8ba7b4fc19a0ea33033e4d336e,32291ab2596096a70875ae3b8127befcd7ed4473..b858e22b259d8d2698d91de34128f8fefec9c887
@@@ -2303,12 -2298,16 +2303,15 @@@ struct folio *vma_alloc_folio_noprof(gf
  {
        struct mempolicy *pol;
        pgoff_t ilx;
 -      struct page *page;
 +      struct folio *folio;
  
+       if (vma->vm_flags & VM_DROPPABLE)
+               gfp |= __GFP_NOWARN;
        pol = get_vma_policy(vma, addr, order, &ilx);
 -      page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
 -                                     pol, ilx, numa_node_id());
 +      folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
        mpol_cond_put(pol);
 -      return page_rmappable_folio(page);
 +      return folio;
  }
  EXPORT_SYMBOL(vma_alloc_folio_noprof);
  
diff --cc mm/mlock.c
Simple merge
diff --cc mm/mmap.c
Simple merge
diff --cc mm/rmap.c
index 8616308610b9fb64b1465395b75417163ec189c2,1f9b5a9cb121cfaecb628bacb1182acc43f88cc9..2490e727e2dcbc1438a430291977b36bea7887c8
+++ b/mm/rmap.c
@@@ -1394,27 -1384,26 +1394,31 @@@ void folio_add_anon_rmap_pmd(struct fol
   *
   * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
   * This means the inc-and-test can be bypassed.
 - * The folio does not have to be locked.
 + * The folio doesn't necessarily need to be locked while it's exclusive
 + * unless two threads map it concurrently. However, the folio must be
 + * locked if it's shared.
   *
 - * If the folio is pmd-mappable, it is accounted as a THP.  As the folio
 - * is new, it's assumed to be mapped exclusively by a single process.
 + * If the folio is pmd-mappable, it is accounted as a THP.
   */
  void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 -              unsigned long address)
 +              unsigned long address, rmap_t flags)
  {
 -      int nr = folio_nr_pages(folio);
 +      const int nr = folio_nr_pages(folio);
 +      const bool exclusive = flags & RMAP_EXCLUSIVE;
 +      int nr_pmdmapped = 0;
  
        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 +      VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
        VM_BUG_ON_VMA(address < vma->vm_start ||
                        address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
-       if (!folio_test_swapbacked(folio))
 +
 -      if (!(vma->vm_flags & VM_DROPPABLE))
+       /*
+        * VM_DROPPABLE mappings don't swap; instead they're just dropped when
+        * under memory pressure.
+        */
++      if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
                __folio_set_swapbacked(folio);
 -      __folio_set_anon(folio, vma, address, true);
 +      __folio_set_anon(folio, vma, address, exclusive);
  
        if (likely(!folio_test_large(folio))) {
                /* increment count (starts at -1) */
@@@ -1858,8 -1862,15 +1868,13 @@@ static bool try_to_unmap_one(struct fol
                                 * discarded. Remap the page to page table.
                                 */
                                set_pte_at(mm, address, pvmw.pte, pteval);
-                               folio_set_swapbacked(folio);
+                               /*
+                                * Unlike MADV_FREE mappings, VM_DROPPABLE ones
+                                * never get swap backed on failure to drop.
+                                */
+                               if (!(vma->vm_flags & VM_DROPPABLE))
+                                       folio_set_swapbacked(folio);
 -                              ret = false;
 -                              page_vma_mapped_walk_done(&pvmw);
 -                              break;
 +                              goto walk_abort;
                        }
  
                        if (swap_duplicate(entry) < 0) {
diff --cc mm/vmscan.c
Simple merge
Simple merge
index e1aa09ddaa3daa820d5d8ff3a826fe0ec7b5c952,e3e5740e13e1514194259c6bc5c77c288718c9a2..901e0d07765b6f71f288d731741487b6f99907ff
@@@ -75,7 -73,7 +75,8 @@@ TEST_GEN_FILES += ksm_functional_test
  TEST_GEN_FILES += mdwe_test
  TEST_GEN_FILES += hugetlb_fault_after_madv
  TEST_GEN_FILES += hugetlb_madv_vs_map
 +TEST_GEN_FILES += hugetlb_dio
+ TEST_GEN_FILES += droppable
  
  ifneq ($(ARCH),arm64)
  TEST_GEN_FILES += soft-dirty