gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
gate_vma.vm_start = 0xffff0000;
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
- gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
return 0;
}
arch_initcall(gate_vma_init);
vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+ init_vm_flags(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
- vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
- VM_DONTEXPAND | VM_DONTDUMP;
+ init_vm_flags(vma, VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTEXPAND | VM_DONTDUMP);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
mmap_write_unlock(current->mm);
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
struct vm_area_struct vma;
vma.vm_mm = tlb->mm;
- vma.vm_flags = 0;
+ init_vm_flags(&vma, 0);
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
return;
return -EINVAL;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
/*
* VM_NOHUGEPAGE and split them.
*/
for_each_vma_range(vmi, vma, addr + len) {
- vma->vm_flags |= VM_NOHUGEPAGE;
+ set_vm_flags(vma, VM_NOHUGEPAGE);
walk_page_vma(vma, &subpage_walk_ops, NULL);
}
}
pfn = paste_addr >> PAGE_SHIFT;
/* flags, page_prot from cxl_mmap(), except we want cachable */
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
- vma->vm_flags &= ~VM_HUGEPAGE;
- vma->vm_flags |= VM_NOHUGEPAGE;
+ mod_vm_flags(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
}
if (vsyscall_mode == XONLY)
- gate_vma.vm_flags = VM_EXEC;
+ init_vm_flags(&gate_vma, VM_EXEC);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
(unsigned long)VSYSCALL_ADDR);
return ret;
vma->vm_ops = &sgx_vm_ops;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
vma->vm_private_data = encl;
return 0;
vma->vm_ops = &sgx_vepc_vm_ops;
/* Don't copy VMA in fork() */
- vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+ set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
vma->vm_private_data = vepc;
return 0;
ret = reserve_pfn_range(paddr, size, prot, 0);
if (ret == 0 && vma)
- vma->vm_flags |= VM_PAT;
+ set_vm_flags(vma, VM_PAT);
return ret;
}
}
free_pfn_range(paddr, size);
if (vma)
- vma->vm_flags &= ~VM_PAT;
+ clear_vm_flags(vma, VM_PAT);
}
/*
*/
void untrack_pfn_moved(struct vm_area_struct *vma)
{
- vma->vm_flags &= ~VM_PAT;
+ clear_vm_flags(vma, VM_PAT);
}
pgprot_t pgprot_writecombine(pgprot_t prot)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = PAGE_READONLY;
return 0;
return -EROFS;
/* changing from read to write with mprotect is not allowed */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
- vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
- vma->vm_flags &= ~VM_MAYWRITE;
+ mod_vm_flags(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
return -EINVAL;
}
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
if (rc < 0)
return rc;
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
return drm_gem_ttm_mmap(obj, vma);
}
address = dev->adev->rmmio_remap.bus_addr;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
- | VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+ | VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
return -ENOMEM;
}
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
- | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
goto err_drm_gem_object_put;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
return ret;
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
{
pgprot_t vm_page_prot;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
vm_page_prot = vm_get_page_prot(vma->vm_flags);
unsigned long vm_size;
int ret;
- vma->vm_flags &= ~VM_PFNMAP;
+ clear_vm_flags(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)fb;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;
i915_gem_object_put(obj);
return -EINVAL;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_PFNMAP;
+ mod_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
+ clear_vm_flags(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}
vma->vm_private_data = bo;
- vma->vm_flags |= VM_PFNMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;
/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
- vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+ mod_vm_flags(vma, VM_PFNMAP, VM_MIXEDMAP);
ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;
/*
if (vma_pages(vma) != 1)
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_ops = &cs_char_vm_ops;
vma->vm_private_data = file->private_data;
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTCOPY);
vma->vm_ops = &msc_mmap_ops;
return ret;
}
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
ret = -EPERM;
goto done;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
goto done;
}
- vma->vm_flags = flags;
+ reset_vm_flags(vma, flags);
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)
}
/* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
- vma->vm_flags &= ~VM_MAYREAD;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
goto bail;
}
/* don't allow them to later change to writable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
start = vma->vm_start;
* Don't allow permission to later change to writable
* with mprotect.
*/
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
} else
goto bail;
len = vma->vm_end - vma->vm_start;
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
ret = 1;
bail:
usnic_dbg("\n");
us_ibdev = to_usdev(context->device);
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
}
/* Map UAR to kernel space, VM_LOCKED? */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma->vm_page_prot))
return ret;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;
/*
* Make sure that vm_areas for 2 buffers won't be merged together
*/
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
/*
* Use common vm_area operations to track buffer refcount.
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ /* using shared anonymous pages */
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &cxl_mmap_vmops;
return 0;
{
struct hl_ts_buff *ts_buff = buf->private;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
block_size, vma->vm_page_prot);
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
!(vma->vm_flags & VM_WRITE))
return -EINVAL;
- vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+ clear_vm_flags(vma, VM_MAYREAD | VM_MAYEXEC);
return 0;
}
if (rc)
return rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxl_vmops;
return 0;
(afu->config.global_mmio_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &global_mmio_vmops;
vma->vm_private_data = afu;
/* Ensure userspace cannot acquire VM_WRITE + VM_SHARED later. */
if (vma->vm_flags & VM_WRITE)
- vma->vm_flags &= ~VM_MAYSHARE;
+ clear_vm_flags(vma, VM_MAYSHARE);
else if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
/* Create write-combine mapping so all clients observe a wipe. */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTDUMP);
return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
}
vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
- VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+ VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_ops = &gru_vm_ops;
if (!qfr)
return -ENOMEM;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
vma->vm_ops = &uacce_vm_ops;
vma->vm_private_data = q;
qfr->type = type;
/* completion area is mapped read-only for user */
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
len, vma->vm_page_prot))
(ctx->psn_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxlflash_vmops;
return 0;
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
out:
buf->boff == offset) {
vm_mem = buf->priv;
ret = frame_mmap(isp, vm_mem->vaddr, vma);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
break;
}
}
* Without VM_SHARED, remap_pfn_range() treats
* this kind of mapping as invalid.
*/
- vma->vm_flags |= VM_SHARED;
+ set_vm_flags(vma, VM_SHARED);
ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT);
mutex_unlock(&isp->mutex);
return ret;
goto error;
}
raw_virt_addr->data_bytes = origin_size;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
mutex_unlock(&isp->mutex);
return 0;
}
vma->vm_private_data = bo;
vma->vm_ops = &hmm_bo_vm_ops;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
/*
* call hmm_bo_vm_open explicitly.
}
vma->vm_ops = &meye_vm_ops;
- vma->vm_flags &= ~VM_IO; /* not I/O memory */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ /* not I/O memory */
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = (void *) (offset / gbufsize);
meye_vm_open(vma);
ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
if (ret)
return ret;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = sbuf;
vma->vm_ops = &stk_v4l_vm_ops;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &tcmu_vm_ops;
vma->vm_private_data = udev;
static int uio_mmap_logical(struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &uio_logical_vm_ops;
return 0;
}
}
}
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &usbdev_vm_ops;
vma->vm_private_data = usbm;
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
return 0;
{
struct vduse_iova_domain *domain = file->private_data;
- vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_private_data = domain;
vma->vm_ops = &vduse_domain_mmap_ops;
* See remap_pfn_range(), called from vfio_pci_fault() but we can't
* change vm_flags within the fault handler. Set them now.
*/
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vfio_pci_mmap_ops;
return 0;
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
#ifndef MMU
/* this is uClinux (no MMU) specific code */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_start = videomemory;
return 0;
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
vma->vm_private_data = info;
return 0;
}
vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &gntalloc_vmops;
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
if (use_ptemod)
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
vma->vm_private_data = map;
if (map->flags) {
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND);
vma->vm_ops = &privcmd_buf_vm_ops;
vma->vm_private_data = vma_priv;
{
/* DONTCOPY is essential for Xen because copy_page_range doesn't know
* how to recreate these mappings */
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
- VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+ VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_ops = &aio_ring_vm_ops;
return 0;
}
* unpopulated ptes via cramfs_read_folio().
*/
int i;
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
return -EINVAL;
vma->vm_ops = &erofs_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
#else
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
- vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+ init_vm_flags(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
err = insert_vm_struct(mm, vma);
}
/* mprotect_fixup is overkill to remove the temporary stack flags */
- vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+ clear_vm_flags(vma, VM_STACK_INCOMPLETE_SETUP);
stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
stack_size = vma->vm_end - vma->vm_start;
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
} else {
vma->vm_ops = &ext4_file_vm_ops;
}
{
file_accessed(file);
vma->vm_ops = &fuse_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ set_vm_flags(vma, VM_MIXEDMAP | VM_HUGEPAGE);
return 0;
}
* way when do_mmap unwinds (may be important on powerpc
* and ia64).
*/
- vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_HUGETLB | VM_DONTEXPAND);
vma->vm_ops = &hugetlb_vm_ops;
ret = seal_check_future_write(info->seals, vma);
* as input to create an allocation policy.
*/
vma_init(&pseudo_vma, mm);
- pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+ init_vm_flags(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pseudo_vma.vm_file = file;
for (index = start; index < end; index++) {
"orangefs_file_mmap: called on %pD\n", file);
/* set the sequential readahead hint */
- vma->vm_flags |= VM_SEQ_READ;
- vma->vm_flags &= ~VM_RAND_READ;
+ mod_vm_flags(vma, VM_SEQ_READ, VM_RAND_READ);
file_accessed(file);
vma->vm_ops = &orangefs_file_vm_ops;
mas_for_each(&mas, vma, ULONG_MAX) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- vma->vm_flags &= ~VM_SOFTDIRTY;
+ clear_vm_flags(vma, VM_SOFTDIRTY);
vma_set_page_prot(vma);
}
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
- vma->vm_flags |= VM_MIXEDMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
vma->vm_ops = &vmcore_mmap_ops;
len = 0;
for_each_vma(vmi, vma) {
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
}
}
mmap_write_unlock(mm);
octx = vma->vm_userfaultfd_ctx.ctx;
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
return 0;
}
} else {
/* Drop uffd context if remap feature not enabled */
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
}
}
prev = vma;
}
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
mmap_write_unlock(mm);
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx.ctx = ctx;
if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
skip:
file_accessed(file);
vma->vm_ops = &xfs_file_vm_ops;
if (IS_DAX(inode))
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
* VM_MAYWRITE as we still want them to be COW-writable.
*/
if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~(VM_MAYWRITE);
+ clear_vm_flags(vma, VM_MAYWRITE);
}
return 0;
if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EPERM;
} else {
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
/* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb,
*/
return -EPERM;
} else {
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
/* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
/* set default open/close callbacks */
vma->vm_ops = &bpf_map_default_vmops;
vma->vm_private_data = map;
- vma->vm_flags &= ~VM_MAYEXEC;
+ clear_vm_flags(vma, VM_MAYEXEC);
if (!(vma->vm_flags & VM_WRITE))
/* disallow re-mapping with PROT_WRITE */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
err = map->ops->map_mmap(map, vma);
if (err)
* Since pinned accounting is per vm we cannot allow fork() to copy our
* vma.
*/
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
goto exit;
}
spin_unlock_irqrestore(&kcov->lock, flags);
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off);
res = vm_insert_page(vma, vma->vm_start + off, page);
return -EINVAL;
vma->vm_ops = &relay_file_mmap_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = buf;
return 0;
/*
* vm_flags is protected by the mmap_lock held in write mode.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
if (!vma->vm_file) {
error = replace_anon_vma_name(vma, anon_name);
if (error)
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
}
/* Defer page refcount checking till we're about to map that page. */
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
vma->vm_pgoff = pfn;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
*/
if (newflags & VM_LOCKED)
newflags |= VM_IO;
- WRITE_ONCE(vma->vm_flags, newflags);
+ reset_vm_flags(vma, newflags);
lru_add_drain();
walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
if (newflags & VM_IO) {
newflags &= ~VM_IO;
- WRITE_ONCE(vma->vm_flags, newflags);
+ reset_vm_flags(vma, newflags);
}
}
if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
/* No work to do, and mlocking twice would be wrong */
- vma->vm_flags = newflags;
+ reset_vm_flags(vma, newflags);
} else {
mlock_vma_pages_range(vma, start, end, newflags);
}
vma->vm_start = addr;
vma->vm_end = end;
- vma->vm_flags = vm_flags;
+ init_vm_flags(vma, vm_flags);
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
* then new mapped in-place (which must be aimed as
* a completely new data area).
*/
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
vma_set_page_prot(vma);
anon_vma_interval_tree_pre_update_vma(vma);
}
vma->vm_end = addr + len;
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
mas_store_prealloc(mas, vma);
if (vma->anon_vma) {
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = addr >> PAGE_SHIFT;
- vma->vm_flags = flags;
+ init_vm_flags(vma, flags);
vma->vm_page_prot = vm_get_page_prot(flags);
mas_set_range(mas, vma->vm_start, addr + len - 1);
if (mas_store_gfp(mas, vma, GFP_KERNEL))
mm->data_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
validate_mm(mm);
return 0;
* vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
*/
- vma->vm_flags = newflags;
+ reset_vm_flags(vma, newflags);
/*
* We want to check manually if we can change individual PTEs writable
* if we can't do that automatically for all PTEs in a mapping. For
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
- vma->vm_flags &= ~VM_ACCOUNT;
+ clear_vm_flags(vma, VM_ACCOUNT);
excess = vma->vm_end - vma->vm_start - old_len;
if (old_addr > vma->vm_start &&
old_addr + old_len < vma->vm_end)
/* Restore VM_ACCOUNT if one or two pieces of vma left */
if (excess) {
- vma->vm_flags |= VM_ACCOUNT;
+ set_vm_flags(vma, VM_ACCOUNT);
if (split)
- find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
+ set_vm_flags(find_vma(mm, vma->vm_end), VM_ACCOUNT);
}
return new_addr;
mmap_write_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)ret);
if (vma)
- vma->vm_flags |= VM_USERMAP;
+ set_vm_flags(vma, VM_USERMAP);
mmap_write_unlock(current->mm);
}
atomic_long_add(total, &mmap_pages_allocated);
- region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+ set_vm_flags(vma, VM_MAPPED_COPY);
+ region->vm_flags = vma->flags;
region->vm_start = (unsigned long) base;
region->vm_end = region->vm_start + len;
region->vm_top = region->vm_start + (total << PAGE_SHIFT);
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
- vma->vm_flags = vm_flags;
+ init_vm_flags(vma, vm_flags);
vma->vm_pgoff = pgoff;
if (file) {
vma->vm_end = start + len;
if (pregion->vm_flags & VM_MAPPED_COPY)
- vma->vm_flags |= VM_MAPPED_COPY;
+ set_vm_flags(vma, VM_MAPPED_COPY);
else {
ret = do_mmap_shared_file(vma);
if (ret < 0) {
if (addr != (pfn << PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
return -EAGAIN;
- vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
+ set_vm_flags(vma, VM_LOCKED | VM_DONTDUMP);
vma->vm_ops = &secretmem_vm_ops;
return 0;
return ret;
/* arm64 - allow memory tagging on RAM-based files */
- vma->vm_flags |= VM_MTE_ALLOWED;
+ set_vm_flags(vma, VM_MTE_ALLOWED);
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
size -= PAGE_SIZE;
} while (size > 0);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
{
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+ clear_vm_flags(vma, VM_MAYWRITE | VM_MAYEXEC);
/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
vma->vm_ops = &tcp_vm_ops;
return 0;
if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* disallow mprotect() turns it into writable */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
return remap_pfn_range(vma, vma->vm_start,
page_to_pfn(status),
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (vma->vm_flags & VM_WRITE)
return -EACCES;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &sel_mmap_policy_ops;
return 0;
}
/* set VM_READ access as well to fix memset() routines that do
reads before writes (to improve performance) */
- area->vm_flags |= VM_READ;
+ set_vm_flags(area, VM_READ);
if (substream == NULL)
return -ENXIO;
runtime = substream->runtime;
return -EINVAL;
area->vm_ops = &snd_pcm_vm_ops_status;
area->vm_private_data = substream;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- area->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ mod_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP,
+ VM_WRITE | VM_MAYWRITE);
return 0;
}
return -EINVAL;
area->vm_ops = &snd_pcm_vm_ops_control;
area->vm_private_data = substream;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
if (!substream->ops->page &&
!snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
return 0;
struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
substream->dma_buffer.addr >> PAGE_SHIFT,
}
area->vm_ops = &usb_stream_hwdep_vm_ops;
- area->vm_flags |= VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTDUMP);
if (!read)
- area->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(area, VM_DONTEXPAND);
area->vm_private_data = us122l;
atomic_inc(&us122l->mmap_count);
out:
}
area->vm_ops = &us428ctls_vm_ops;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
area->vm_private_data = hw->private_data;
return 0;
}
return -ENODEV;
area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
area->vm_private_data = hw->private_data;
return 0;
}
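
For reference, a minimal sketch of the modifier helpers these hunks rely on. The helper names and the write-mode mmap_lock rule come from the hunks above; the bodies and the placement of the locking assertions are an assumption about the real definitions in include/linux/mm.h, not a copy of them:

/*
 * Sketch only (assumed definitions): init_vm_flags() is for a VMA not
 * yet visible to other readers; the remaining helpers modify a mapped
 * VMA and so assert mmap_lock held in write mode.
 */
static inline void init_vm_flags(struct vm_area_struct *vma,
				 unsigned long flags)
{
	vma->vm_flags = flags;
}

static inline void reset_vm_flags(struct vm_area_struct *vma,
				  unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags = flags;
}

static inline void set_vm_flags(struct vm_area_struct *vma,
				unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags |= flags;
}

static inline void clear_vm_flags(struct vm_area_struct *vma,
				  unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags &= ~flags;
}

/* Set @set and clear @clear in one locked update. */
static inline void mod_vm_flags(struct vm_area_struct *vma,
				unsigned long set, unsigned long clear)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags |= set;
	vma->vm_flags &= ~clear;
}

Funneling every update through one of these wrappers is what lets later patches add bookkeeping (or tighten the locking checks) in a single place instead of at every call site converted above.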