struct vm_area_struct *vma;
struct file *exe_file;
struct mm_struct *mm = spu->mm;
- MA_STATE(mas, &mm->mm_mt, spu_ref, spu_ref);
if (!mm)
goto out;
}
mmap_read_lock(mm);
- vma = mas_walk(&mas);
+ vma = find_vma_intersection(mm, spu_ref, spu_ref + 1);
if (!vma)
goto fail_no_image_cookie;
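For context, find_vma_intersection() returns the first VMA overlapping the half-open range [start_addr, end_addr), so (spu_ref, spu_ref + 1) yields the VMA containing spu_ref, matching what the single-index mas_walk() returned here. The long-standing <linux/mm.h> helper is roughly the following (sketch for reference, not part of this patch):

static inline struct vm_area_struct *
find_vma_intersection(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	/* find_vma() returns the first VMA ending above start_addr;
	 * reject it if it starts at or beyond end_addr */
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}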
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
- unsigned long cookie = NO_COOKIE;
+ unsigned long cookie = INVALID_COOKIE;
struct vm_area_struct *vma;
mmap_read_lock(mm);
} else {
/* must be an anonymous map */
*offset = addr;
+ cookie = NO_COOKIE;
}
- } else
- cookie = INVALID_COOKIE;
+ }
mmap_read_unlock(mm);
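With the default flipped to INVALID_COOKIE, only the anonymous-map branch needs to set NO_COOKIE explicitly and the trailing else branch goes away. A sketch of the resulting flow, assuming a find_vma_intersection()-style lookup for the part of the function not shown in this hunk:

	unsigned long cookie = INVALID_COOKIE;	/* default: addr not in any VMA */
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma_intersection(mm, addr, addr + 1);	/* assumed lookup */
	if (vma) {
		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				  vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
			cookie = NO_COOKIE;
		}
	}
	mmap_read_unlock(mm);

	return cookie;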
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int do_mas_munmap(struct ma_state *mas,struct mm_struct *mm,
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
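do_mas_munmap() takes a caller-supplied maple state, presumably so callers such as the brk path can reuse an already-positioned mas. Under that assumption, do_munmap() reduces to a thin wrapper along these lines (sketch, not quoted from the patch):

int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	MA_STATE(mas, &mm->mm_mt, start, start);

	/* downgrade == false: keep mmap_lock held for write throughout */
	return do_mas_munmap(&mas, mm, start, len, uf, false);
}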
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units */
+ * units
+ */
struct file * vm_file; /* File we map to (can be NULL). */
/*
* For areas with an address space and backing store,
__entry->low_limit, __entry->high_limit, __entry->align_mask,
__entry->align_offset)
);
-TRACE_EVENT(vma_mt_erase,
-
- TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),
-
- TP_ARGS(mm, vma),
-
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __field(struct vm_area_struct *, vma)
- __field(unsigned long, vm_start)
- __field(unsigned long, vm_end)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __entry->vma = vma;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end - 1;
- ),
-
- TP_printk("mt_mod %px, (%px), ERASE, %lu, %lu,",
- __entry->mm, __entry->vma,
- (unsigned long) __entry->vm_start,
- (unsigned long) __entry->vm_end
- )
-);
TRACE_EVENT(vma_mt_szero,
TP_PROTO(struct mm_struct *mm, unsigned long start,
}
/*
- * vma_mt_szero() - Clear a given range from the maple tree.
+ * vma_mt_szero() - Set a given range to zero. Used when modifying a
+ * vm_area_struct start or end.
+ *
* @mm: The struct_mm
* @start: The start address to zero
* @end: The end address to zero.
- *
- * Used when modifying a vm_area_struct start or end and there is no maple state
- * pointing to the correct location.
- * Note: the end address is inclusive in the maple tree.
*/
static inline void vma_mt_szero(struct mm_struct *mm, unsigned long start,
unsigned long end)
trace_vma_mt_szero(mm, start, end);
mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
}
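The dropped "end address is inclusive" note is still the key to reading these helpers: a VMA covers the half-open range [vm_start, vm_end), while mtree_store_range() takes an inclusive last index, hence the end - 1 above. The store side follows the same convention (assumed body of vma_mt_store(), which is not shown in this hunk):

	/* vm_end is exclusive; the maple tree's last index is inclusive */
	mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1,
			  vma, GFP_KERNEL);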
+
/*
* vma_mt_store() - Store a given vm_area_struct in the maple tree.
+ *
* @mm: The struct_mm
* @vma: The vm_area_struct to store in the maple tree.
- *
- * Used when there is no maple state pointing to the correct location.
- * Note: the end address is inclusive in the maple tree.
*/
static inline void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
mm->map_count++;
validate_mm(mm);
}
+
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
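For readers newer to the maple tree API: MA_STATE(mas, &mm->mm_mt, first, last) declares an on-stack struct ma_state bound to the mm's tree and positioned over the inclusive range [first, last]; here that is the VMA's [vm_start, vm_end - 1]. A minimal usage sketch with the existing mas_store_gfp() helper (illustrative only, not this function's body):

	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);

	/* write the VMA over exactly its own range in the tree */
	if (mas_store_gfp(&mas, vma, GFP_KERNEL))
		return -ENOMEM;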
max);
tlb_finish_mmu(&tlb, start, end);
}
-
/*
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
* has already been checked or doesn't make sense to fail.
if (likely(vma->vm_start >= newbrk)) { // remove entire mapping(s)
mas->last = oldbrk - 1;
- ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf, true);
+ ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf,
+ true);
goto munmap_full_vma;
}
prev = NULL;
if (rb_prev)
prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
-
- __vma_link_list(mm, vma, prev);
}
/*
/* remove from the MM's tree and list */
rb_erase(&vma->vm_rb, &mm->mm_rb);
-
- __vma_unlink_list(mm, vma);
}
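With the __vma_link_list()/__vma_unlink_list() calls gone, these paths no longer maintain the VMA linked list; ordered walks rely on the maple tree instead, e.g. with the existing mas_for_each() helper (sketch, not code from this patch):

	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct vm_area_struct *vma;

	/* visit every VMA in ascending address order */
	mas_for_each(&mas, vma, ULONG_MAX) {
		/* ... operate on vma ... */
	}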
/*