dma_resv_lock_slow(&obj, &ctx);
fs_reclaim_acquire(GFP_KERNEL);
/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
- i_mmap_lock_write(&mapping);
- i_mmap_unlock_write(&mapping);
+// i_mmap_lock_write(&mapping);
+// i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
__dma_fence_might_wait();
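For context, this first hunk appears to be the lockdep priming sequence in dma_resv_lockdep(): the locks are taken and released once at init so lockdep records the intended ordering, and commenting out the i_mmap_rwsem pair drops that edge from the primed dependency chain. The general priming idea, as a minimal sketch with hypothetical lock names:

#include <linux/init.h>
#include <linux/mutex.h>

/* Sketch only: prime lockdep with an intended lock_a -> lock_b ordering at
 * init time so later inversions are reported even if the real paths never
 * race. lock_a, lock_b and prime_ordering are hypothetical names. */
static DEFINE_MUTEX(lock_a);
static DEFINE_MUTEX(lock_b);

static int __init prime_ordering(void)
{
        mutex_lock(&lock_a);
        mutex_lock(&lock_b);    /* lockdep now knows lock_a -> lock_b */
        mutex_unlock(&lock_b);
        mutex_unlock(&lock_a);
        return 0;
}
subsys_initcall(prime_ordering);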
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
- return mas_find(&vmi->mas, max - 1);
+ struct vm_area_struct *vma;
+
+ rcu_read_lock();
+ vma = mas_find(&vmi->mas, max - 1);
+ rcu_read_unlock();
+ return vma;
}
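Note that the rcu_read_unlock() above happens before the VMA is returned, so the read-side section only covers the maple tree lookup itself; callers still rely on mmap_lock (or per-VMA locking) to keep the returned VMA stable. A minimal caller sketch under that assumption:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Sketch: iterate VMAs with the usual mmap_lock discipline; the iterator
 * helpers' internal RCU sections do not pin the VMA for the caller. */
static void walk_vmas(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                /* inspect vma while mmap_lock keeps it stable */
        }
        mmap_read_unlock(mm);
}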
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
+ struct vm_area_struct *vma;
/*
* Uses mas_find() to get the first VMA when the iterator starts.
* Calling mas_next() could skip the first entry.
*/
- return mas_find(&vmi->mas, ULONG_MAX);
+ rcu_read_lock();
+ vma = mas_find(&vmi->mas, ULONG_MAX);
+ rcu_read_unlock();
+ return vma;
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
- return mas_prev(&vmi->mas, 0);
+ struct vm_area_struct *vma;
+
+ rcu_read_lock();
+ vma = mas_prev(&vmi->mas, 0);
+ rcu_read_unlock();
+ return vma;
}
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
+ mas_lock(&vmi->mas);
__mas_set_range(&vmi->mas, start, end - 1);
mas_store_gfp(&vmi->mas, NULL, gfp);
+ mas_unlock(&vmi->mas);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
if (unlikely(mas_is_span_wr(wr_mas)))
return false;
- wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
- mas->offset);
+ wr_mas->content = mas_slot(mas, wr_mas->slots, mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
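The mas_slot_locked() to mas_slot() switch swaps the accessor that expects the tree lock for the one that is also legal on an RCU-protected walk; very roughly, the dereference underneath has this lockdep-checked shape (the function and parameter names below are illustrative):

#include <linux/lockdep.h>
#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Roughly the shape of the slot dereference involved (illustrative only):
 * valid inside rcu_read_lock() or with the tree's internal spinlock held. */
static void *slot_read(struct maple_tree *mt, void __rcu **slots,
                       unsigned char offset)
{
        return rcu_dereference_check(slots[offset],
                                     lockdep_is_held(&mt->ma_lock));
}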
{
MA_WR_STATE(wr_mas, mas, entry);
+ rcu_read_lock();
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
mas_prealloc_calc(&wr_mas, entry);
+ rcu_read_unlock();
if (!mas->node_request)
return 0;
mmap_read_lock(mm);
arch_exit_mmap(mm);
+ rcu_read_lock();
vma = vma_next(&vmi);
+ rcu_read_unlock();
if (!vma || unlikely(xa_is_zero(vma))) {
/* Can happen if dup_mmap() received an OOM */
mmap_read_unlock(mm);
mmap_write_lock(mm);
goto destroy;
}
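One note on what appears to be the exit_mmap() hunk above: with the vma_next() change earlier, the helper already enters its own RCU read-side section, so the explicit pair around the call merely nests. That is redundant but harmless, since RCU read-side critical sections nest:

#include <linux/rcupdate.h>

/* RCU read-side sections are reentrant, so an outer pair around a helper
 * that also takes rcu_read_lock() internally is legal, just redundant. */
static void nested_rcu_read(void)
{
        rcu_read_lock();
        rcu_read_lock();
        rcu_read_unlock();
        rcu_read_unlock();
}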
vma_start_write(next);
+ mas_lock(mas_detach);
mas_set(mas_detach, vms->vma_count++);
error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
+ mas_unlock(mas_detach);
if (error)
goto munmap_gather_failed;
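mas_lock() takes the maple tree's internal spinlock, and the lock/store/unlock shape added here roughly mirrors what the mtree_*() wrappers do; when the gfp mask allows blocking, mas_store_gfp() can drop and retake that lock to allocate nodes rather than sleeping under it. The same pattern on a standalone tree, as a sketch (tree and function names are illustrative):

#include <linux/gfp.h>
#include <linux/maple_tree.h>

static DEFINE_MTREE(demo_tree);

/* Sketch: store one entry under the tree's internal lock, the same shape
 * used for mas_detach above. demo_tree and demo_store are made-up names. */
static int demo_store(unsigned long index, void *entry)
{
        int err;
        MA_STATE(mas, &demo_tree, index, index);

        mas_lock(&mas);
        err = mas_store_gfp(&mas, entry, GFP_KERNEL);
        mas_unlock(&mas);

        return err;
}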
struct mm_struct *mm, unsigned long start, unsigned long end,
struct list_head *uf, bool unlock)
{
- struct maple_tree mt_detach;
+ struct maple_tree mt_detach =
+ MTREE_INIT(mt_detach, vmi->mas.tree->ma_flags);
MA_STATE(mas_detach, &mt_detach, 0, 0);
- mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
- mt_on_stack(mt_detach);
+// mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+// mt_on_stack(mt_detach);
struct vma_munmap_struct vms;
int error;
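MTREE_INIT() is a struct initializer, so mt_detach is now set up at its declaration rather than via mt_init_flags() afterwards; note that the MT_FLAGS_LOCK_MASK masking from the removed line is gone, so the scratch tree inherits all of the source tree's flags, not just the lock bits. A tiny standalone sketch of declaration-time init (flags and names here are illustrative):

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/xarray.h>

/* Sketch: an on-stack maple tree initialized at declaration; the mtree_*()
 * calls take the tree's lock internally. Purely illustrative. */
static void mtree_init_demo(void)
{
        struct maple_tree tmp = MTREE_INIT(tmp, 0);

        mtree_store(&tmp, 1, xa_mk_value(42), GFP_KERNEL);
        mtree_destroy(&tmp);
}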
{
int ret;
- mas_lock(&vmi->mas);
ret = mas_preallocate(&vmi->mas, vma, GFP_KERNEL | GFP_NOWAIT);
- mas_unlock(&vmi->mas);
return ret;
}
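For reference, the vma_iter_* helpers here wrap the maple tree's two-phase write API: reserve nodes up front with a sleeping allocation, then perform the store with mas_store_prealloc(), which is expected not to fail. A minimal sketch of that pattern (the wrapper name is illustrative):

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* Sketch: preallocate nodes first so the store itself cannot hit an
 * allocation failure. prealloc_store is an illustrative wrapper. */
static int prealloc_store(struct ma_state *mas, void *entry)
{
        int err;

        err = mas_preallocate(mas, entry, GFP_KERNEL);
        if (err)
                return err;

        mas_store_prealloc(mas, entry);
        return 0;
}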
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
- return mas_walk(&vmi->mas);
+ struct vm_area_struct *vma;
+
+ rcu_read_lock();
+ vma = mas_walk(&vmi->mas);
+ rcu_read_unlock();
+ return vma;
}
/* Store a VMA with preallocated memory */
static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
- return mas_prev_range(&vmi->mas, 0);
+ struct vm_area_struct *vma;
+
+ rcu_read_lock();
+ vma = mas_prev_range(&vmi->mas, 0);
+ rcu_read_unlock();
+ return vma;
}
/*