}
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
-// LRH: Needed
void vma_set_page_prot(struct vm_area_struct *vma)
{
unsigned long vm_flags = vma->vm_flags;
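	pgprot_t vm_page_prot;

	/*
	 * Sketch of the remainder of this helper as it appears in mainline
	 * (not changed by this patch): recompute the protection from
	 * vm_flags, let write-notify mappings drop VM_SHARED so the first
	 * write faults, and publish the result with WRITE_ONCE() because
	 * readers may not hold mmap_lock.
	 */
	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}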
/*
* Requires inode->i_mapping->i_mmap_rwsem
*/
-// LRH: Needed
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping)
{
* Unlink a file-based vm structure from its interval tree, to hide
* vma from rmap and vmtruncate before freeing its page tables.
*/
-// LRH: Needed
void unlink_file_vma(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
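	/*
	 * Sketch of the remainder as in mainline (unchanged by this patch):
	 * take i_mmap_rwsem for writing, as __remove_shared_vm_struct()
	 * above requires.
	 */
	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}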
/*
* Close a vm structure and free it, returning the next.
*/
-// LRH: Needed
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *next = vma->vm_next;
return next;
}
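/*
 * Illustrative usage (sketch): exit paths tear down a whole list by
 * repeatedly taking the successor, as the exit path later in this
 * file does:
 *
 *	do {
 *		vma = remove_vma(vma);
 *	} while (vma);
 */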
-// LRH: Needed
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
while (vma) {
struct anon_vma *anon_vma = vma->anon_vma;
-// struct anon_vma_chain *avc;
+ struct anon_vma_chain *avc;
-// pr_cont("vma: %lu-%lu", vma->vm_start, vma->vm_end);
if (anon_vma) {
-// pr_cont(" anon");
anon_vma_lock_read(anon_vma);
-// list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-// anon_vma_interval_tree_verify(avc);
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_verify(avc);
anon_vma_unlock_read(anon_vma);
}
-// pr_cont("\n");
highest_address = vm_end_gap(vma);
vma = vma->vm_next;
}
/*
- * munmap_vma_range() - munmap VMAs that overlap a range.
+ * munmap_vma_range() - munmap VMAs that overlap the range.
* @mm: The mm struct
* @start: The start of the range.
* @len: The length of the range.
 * @pprev: pointer to the pointer that will be set to the previous vm_area_struct
*
+ * Find all the vm_area_structs that overlap the range from @start to
+ * @start + @len and munmap them. Set @pprev to the previous
+ * vm_area_struct.
+ *
* Returns: -ENOMEM on munmap failure or 0 on success.
*/
static inline int
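/*
 * Illustrative usage (sketch): mmap_region() below clears any old
 * mappings in the way before setting up a new one:
 *
 *	if (munmap_vma_range(mm, addr, len, &prev, uf))
 *		return -ENOMEM;
 */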
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
* mm's list and the mm tree. It has already been inserted into the interval tree.
*/
-extern void mt_dump(const struct maple_tree *mt);
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- /* maple tree store is done in the __vma_link call in this
- * call graph */
__insert_vm_struct(mm, insert);
} else if (end_changed && !next) {
mm->highest_vm_end = vm_end_gap(vma);
* parameter) may establish ptes with the wrong permissions of NNNN
* instead of the right permissions of XXXX.
*/
-
-//LRH:
-// p = prev, n = next, a = add, nn = next next
-// 0. Adding page over partial p, cannot merge
-// 1. Adding page between p and n, all become p
-// 2. Adding page between p and n, a merges with p
-// 3. Adding page between p and n, a merges with n
-// 4. Adding page over p, a merges with n
-// 5. Adding page over n, a merges with p
-// 6. Adding page over all of n, p-a-nn all become p
-// 7. Adding page over all of n, p-a all become p
-// 8. Adding page over all of n, a-nn all become nn.
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx)) {
- if (prev && addr < prev->vm_end){ /* case 4 */
+ if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(prev, prev->vm_start,
addr, prev->vm_pgoff, NULL, next);
- }else { /* cases 3, 8 */
+ else { /* cases 3, 8 */
err = __vma_adjust(area, addr, next->vm_end,
next->vm_pgoff - pglen, NULL, next);
/*
/* Clear old maps, set up prev and uf */
if (munmap_vma_range(mm, addr, len, &prev, uf))
return -ENOMEM;
-
/*
* Private writable mapping: check memory availability
*/
EXPORT_SYMBOL(get_unmapped_area);
-/*
- * find_vma() - Find the best VMA for a given address.
+/**
+ * find_vma() - Find the VMA for a given address, or the next VMA after it.
 * @mm: The mm_struct to check
* @addr: The address
*
- * Searches the user memory map for the VMA which either contains this address
- * or is the next VMA after this address.
- *
- * Returns: The VMA or %NULL if the address is higher than any mapping.
+ * Returns: The VMA associated with @addr, or the next VMA after it.
+ * May return %NULL in the case of no VMA at @addr or above.
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
}
EXPORT_SYMBOL(find_vma);
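/*
 * Illustrative sketch (hypothetical helper, not part of this patch):
 * find_vma() may return a VMA that only starts above @addr, so callers
 * that need an overlap must re-check vm_start, in the style of
 * find_vma_intersection() in include/linux/mm.h:
 */
static inline struct vm_area_struct *
find_vma_intersection_sketch(struct mm_struct *mm, unsigned long start_addr,
			     unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	/* The returned VMA may begin past the range; reject it then. */
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}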
-/*
- * find_vma_prev() - Find the best VMA for a given address and the previous VMA.
+/**
+ * find_vma_prev() - Find the VMA for a given address, or the next VMA, and
+ * set @pprev to the previous VMA, if any.
 * @mm: The mm_struct to check
* @addr: The address
* @pprev: The pointer to set to the previous VMA
*
- * Searches the user memory map for the VMA which either contains this address
- * or is the next VMA after this address. Sets %pprev to the previous VMA or
- * NULL if this is the first VMA.
- *
- * Returns: The VMA or %NULL if the address is higher than any mapping.
+ * Returns: The VMA associated with @addr, or the next VMA after it.
+ * May return %NULL in the case of no VMA at @addr or above.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
anon_vma_interval_tree_post_update_vma(vma);
spin_unlock(&mm->page_table_lock);
- error = acct_stack_growth(vma, size, grow);
- if (error)
- goto no_update;
- /*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_sem
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * anon_vma_lock_write() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
- */
- spin_lock(&mm->page_table_lock);
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, grow);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- // Overwrite old entry in mtree.
- __vma_mt_store(mm, vma);
- anon_vma_interval_tree_post_update_vma(vma);
- vma_gap_update(vma);
- spin_unlock(&mm->page_table_lock);
-
- perf_event_mmap(vma);
+ perf_event_mmap(vma);
+ }
+ }
}
-no_update:
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
vma = remove_vma(vma);
} while (vma);
vm_unacct_memory(nr_accounted);
- //validate_mm(mm);
+ validate_mm(mm);
}
/*