mm: take placement mappings gap into account
author Rick Edgecombe <rick.p.edgecombe@intel.com>
Tue, 26 Mar 2024 02:16:53 +0000 (19:16 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:28 +0000 (20:56 -0700)
When memory is being placed, mmap() will take care to respect the guard
gaps of certain types of memory (VM_SHADOW_STACK, VM_GROWSUP and
VM_GROWSDOWN).  In order to ensure guard gaps between mappings, mmap()
needs to consider two things:

 1. That the new mapping isn't placed in any existing mapping's guard
    gaps.
 2. That the new mapping isn't placed such that any existing mappings
    end up in *its* guard gaps.

The longstanding behavior of mmap() is to ensure 1, but not to take any
care around 2.  So, for example, if there is a PAGE_SIZE free area and a
PAGE_SIZE mapping of a type that has a guard gap (say, a shadow stack)
is being placed, mmap() may place the shadow stack in the PAGE_SIZE free
area.  Then the mapping that is supposed to have a guard gap will not
have a gap to the adjacent VMA, as illustrated below.
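
Roughly, with a hypothetical layout (a shadow stack's guard gap sits
below its start):

    | prev VMA | PAGE_SIZE free | next VMA |
                      |
                      v
    | prev VMA | shadow stack   | next VMA |

The shadow stack lands flush against prev VMA, and the guard gap that
should separate them is gone.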

For MAP_GROWSDOWN/VM_GROWSDOWN and MAP_GROWSUP/VM_GROWSUP this has not
been a problem in practice, because applications place these kinds of
mappings very early, when there are not many other mappings to find a
space between.  Shadow stacks, however, may be placed throughout the
lifetime of the application.

Use the start_gap field to find a space that includes the guard gap for
the new mapping.  Take care not to interfere with the alignment; a
sketch of the combined arithmetic follows.
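
As an illustration only (a minimal userspace model, not the kernel
code; the field names mirror struct vm_unmapped_area_info and the
address is made up), the bottom-up adjustment amounts to:

    #include <stdio.h>

    struct info {
            unsigned long align_mask;   /* alignment - 1 */
            unsigned long align_offset;
            unsigned long start_gap;    /* guard gap below the mapping */
    };

    static unsigned long place(unsigned long lowest_free, struct info *i)
    {
            /* Reserve the guard gap first... */
            unsigned long gap = lowest_free + i->start_gap;

            /* ...then round up to the alignment, giving the minimum
             * address that fulfills both constraints. */
            gap += (i->align_offset - gap) & i->align_mask;
            return gap;
    }

    int main(void)
    {
            struct info i = { .align_mask = 0xffff, .align_offset = 0,
                              .start_gap = 0x1000 };

            /* 0x10000000 -> gap 0x10001000 -> aligned 0x10010000 */
            printf("place at %#lx\n", place(0x10000000UL, &i));
            return 0;
    }

Doing the alignment after adding start_gap (rather than before) is what
keeps the rounding from eating into the guard gap.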

Link: https://lkml.kernel.org/r/20240326021656.202649-12-rick.p.edgecombe@intel.com
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Helge Deller <deller@gmx.de>
Cc: H. Peter Anvin (Intel) <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/mmap.c

index 1b6903f4c57b45fa32a810f71795fe691151e221..2d5e492ef57ffb8c5e78e205c0fc0447cf65627d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3468,6 +3468,7 @@ struct vm_unmapped_area_info {
        unsigned long high_limit;
        unsigned long align_mask;
        unsigned long align_offset;
+       unsigned long start_gap;
 };
 
 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
index 4ad386f3f63fa57f403a98485ee2000d30a0ba7b..d3c2ca8efa534c4d6b50f9dff50dd7e254005f34 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1579,7 +1579,7 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
        VMA_ITERATOR(vmi, current->mm, 0);
 
        /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask;
+       length = info->length + info->align_mask + info->start_gap;
        if (length < info->length)
                return -ENOMEM;
 
@@ -1591,7 +1591,13 @@ retry:
        if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
                return -ENOMEM;
 
-       gap = vma_iter_addr(&vmi);
+       /*
+        * Adjust for the gap first so it doesn't interfere with the
+        * later alignment. The first step is the minimum needed to
+        * fulfill the start gap, the next step is the minimum to align
+        * that. It is the minimum needed to fulfill both.
+        */
+       gap = vma_iter_addr(&vmi) + info->start_gap;
        gap += (info->align_offset - gap) & info->align_mask;
        tmp = vma_next(&vmi);
        if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
@@ -1630,7 +1636,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        VMA_ITERATOR(vmi, current->mm, 0);
 
        /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask;
+       length = info->length + info->align_mask + info->start_gap;
        if (length < info->length)
                return -ENOMEM;
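
As a usage sketch (hedged: modeled on how an architecture's placement
path might request the gap, not a quote of any particular caller), a
shadow stack allocation could fill in the new field like:

    struct vm_unmapped_area_info info = {};

    info.length = len;
    info.low_limit = low;           /* search bounds, assumed given */
    info.high_limit = high;
    if (vm_flags & VM_SHADOW_STACK) /* one-page guard gap below */
            info.start_gap = PAGE_SIZE;

    return vm_unmapped_area(&info);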