mm: replace vma->vm_flags direct modifications with modifier calls
author    Suren Baghdasaryan <surenb@google.com>
          Fri, 28 Oct 2022 22:29:36 +0000 (22:29 +0000)
committer Suren Baghdasaryan <surenb@google.com>
          Wed, 23 Nov 2022 02:09:44 +0000 (02:09 +0000)
Replace direct modifications to vma->vm_flags with calls to modifier
functions so that flag changes can be tracked and vma locking correctness
preserved.
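
For reference, here is a minimal sketch of what these modifier helpers could
look like. The real definitions are added to include/linux/mm.h by an earlier
patch in this series; the bodies below, and the vma_start_write() locking hook
they call, are illustrative assumptions rather than the in-tree implementation:

  /* Illustrative sketch only -- not the actual include/linux/mm.h helpers. */
  static inline void init_vm_flags(struct vm_area_struct *vma,
                                   unsigned long flags)
  {
          /* vma is not yet visible to other users; no locking needed */
          vma->vm_flags = flags;
  }

  static inline void set_vm_flags(struct vm_area_struct *vma,
                                  unsigned long flags)
  {
          vma_start_write(vma);   /* assumed per-VMA write-lock hook */
          vma->vm_flags |= flags;
  }

  static inline void clear_vm_flags(struct vm_area_struct *vma,
                                    unsigned long flags)
  {
          vma_start_write(vma);
          vma->vm_flags &= ~flags;
  }

  /* Set @set and clear @clear in a single locked update. */
  static inline void mod_vm_flags(struct vm_area_struct *vma,
                                  unsigned long set, unsigned long clear)
  {
          vma_start_write(vma);
          vma->vm_flags |= set;
          vma->vm_flags &= ~clear;
  }

  /* Overwrite all flags, as the hfi1 hunk below does via reset_vm_flags(). */
  static inline void reset_vm_flags(struct vm_area_struct *vma,
                                    unsigned long flags)
  {
          vma_start_write(vma);
          vma->vm_flags = flags;
  }

With helpers of this shape, a conversion such as the binder.c hunk below
collapses two open-coded bit operations into one call, e.g.
mod_vm_flags(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE).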

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
121 files changed:
arch/arm/kernel/process.c
arch/ia64/mm/init.c
arch/loongarch/include/asm/tlb.h
arch/powerpc/kvm/book3s_xive_native.c
arch/powerpc/mm/book3s64/subpage_prot.c
arch/powerpc/platforms/book3s/vas-api.c
arch/powerpc/platforms/cell/spufs/file.c
arch/s390/mm/gmap.c
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/kernel/cpu/sgx/driver.c
arch/x86/kernel/cpu/sgx/virt.c
arch/x86/mm/pat/memtype.c
arch/x86/um/mem_32.c
drivers/acpi/pfr_telemetry.c
drivers/android/binder.c
drivers/char/mspec.c
drivers/crypto/hisilicon/qm.c
drivers/dax/device.c
drivers/dma/idxd/cdev.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_dma_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/virtio/virtgpu_vram.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/hsi/clients/cmt_speech.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/stm/core.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/media/v4l2-core/videobuf-dma-contig.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf-vmalloc.c
drivers/misc/cxl/context.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi2/gaudi2.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/ocxl/context.c
drivers/misc/ocxl/sysfs.c
drivers/misc/open-dice.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/uacce/uacce.c
drivers/sbus/char/oradax.c
drivers/scsi/cxlflash/ocxl_hw.c
drivers/scsi/sg.c
drivers/staging/media/atomisp/pci/atomisp_fops.c
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
drivers/staging/media/deprecated/meye/meye.c
drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
drivers/target/target_core_user.c
drivers/uio/uio.c
drivers/usb/core/devio.c
drivers/usb/mon/mon_bin.c
drivers/vdpa/vdpa_user/iova_domain.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vhost/vdpa.c
drivers/video/fbdev/68328fb.c
drivers/video/fbdev/core/fb_defio.c
drivers/xen/gntalloc.c
drivers/xen/gntdev.c
drivers/xen/privcmd-buf.c
drivers/xen/privcmd.c
fs/aio.c
fs/cramfs/inode.c
fs/erofs/data.c
fs/exec.c
fs/ext4/file.c
fs/fuse/dax.c
fs/hugetlbfs/inode.c
fs/orangefs/file.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/userfaultfd.c
fs/xfs/xfs_file.c
include/linux/mm.h
kernel/bpf/ringbuf.c
kernel/bpf/syscall.c
kernel/events/core.c
kernel/kcov.c
kernel/relay.c
mm/madvise.c
mm/memory.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/mremap.c
mm/nommu.c
mm/secretmem.c
mm/shmem.c
mm/vmalloc.c
net/ipv4/tcp.c
security/selinux/selinuxfs.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/soc/pxa/mmp-sspa.c
sound/usb/usx2y/us122l.c
sound/usb/usx2y/usX2Yhwdep.c
sound/usb/usx2y/usx2yhwdeppcm.c

index a2b31d91a1b6e32091edd704310376112d9fbda2..6864fbc7ac8e415a004c9f9dea353819d84e2318 100644 (file)
@@ -316,7 +316,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        gate_vma.vm_start = 0xffff0000;
        gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
-       gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+       init_vm_flags(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
        return 0;
 }
 arch_initcall(gate_vma_init);
index fc4e4217e87ff5a089c6d7b194a199fe23e71ca7..d355e0ce28abbdd0dd77e7e52791aec074097e13 100644 (file)
@@ -109,7 +109,7 @@ ia64_init_addr_space (void)
                vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
-               vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+               init_vm_flags(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                mmap_write_lock(current->mm);
                if (insert_vm_struct(current->mm, vma)) {
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
                        vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-                       vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
-                                       VM_DONTEXPAND | VM_DONTDUMP;
+                       init_vm_flags(vma, VM_READ | VM_MAYREAD | VM_IO |
+                                     VM_DONTEXPAND | VM_DONTDUMP);
                        mmap_write_lock(current->mm);
                        if (insert_vm_struct(current->mm, vma)) {
                                mmap_write_unlock(current->mm);
@@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
        gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
 
        return 0;
index dd24f5898f651cf9e264c9d6bf23fc8e3190e3ac..51e35b44d1050e81a6ef60d872a8920dd902724b 100644 (file)
@@ -149,7 +149,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
        struct vm_area_struct vma;
 
        vma.vm_mm = tlb->mm;
-       vma.vm_flags = 0;
+       init_vm_flags(&vma, 0);
        if (tlb->fullmm) {
                flush_tlb_mm(tlb->mm);
                return;
index 5271c33fe79e4d5f6e83c5afa21a089bfec92c81..a0cb393dcbc55a5c84c550762b726dba388af63b 100644 (file)
@@ -325,7 +325,7 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
                return -EINVAL;
        }
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        /*
index d73b3b4176e81d2112a4e5e1cbd6bdff8d678aa4..b1239b916f9bd1cbc9086199d42b2d1aa37eb724 100644 (file)
@@ -156,7 +156,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
         * VM_NOHUGEPAGE and split them.
         */
        for_each_vma_range(vmi, vma, addr + len) {
-               vma->vm_flags |= VM_NOHUGEPAGE;
+               set_vm_flags(vma, VM_NOHUGEPAGE);
                walk_page_vma(vma, &subpage_walk_ops, NULL);
        }
 }
index 40f5ae5e1238d633ba71a020ba6a5b46ff296468..5a10cefd9ecd4f9f8f8e5ff937ff89c3db9f3477 100644 (file)
@@ -525,7 +525,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
        pfn = paste_addr >> PAGE_SHIFT;
 
        /* flags, page_prot from cxl_mmap(), except we want cachable */
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
 
        prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
index 62d90a5e23d1ef49a2a5387cbe8b2185eb65657a..784fa39a484ae08e0549a8aae44146768e2484b6 100644 (file)
@@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mem_mmap_vmops;
@@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_cntl_mmap_vmops;
@@ -1043,7 +1043,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal1_mmap_vmops;
@@ -1179,7 +1179,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal2_mmap_vmops;
@@ -1302,7 +1302,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mss_mmap_vmops;
@@ -1364,7 +1364,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_psmap_mmap_vmops;
@@ -1424,7 +1424,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mfc_mmap_vmops;
index 02d15c8dc92e994692f2322cf3232f85a032f76a..439638122c9226192b16e26d77c6047e6327a872 100644 (file)
@@ -2518,8 +2518,7 @@ static inline void thp_split_mm(struct mm_struct *mm)
        VMA_ITERATOR(vmi, mm, 0);
 
        for_each_vma(vmi, vma) {
-               vma->vm_flags &= ~VM_HUGEPAGE;
-               vma->vm_flags |= VM_NOHUGEPAGE;
+               mod_vm_flags(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
                walk_page_vma(vma, &thp_split_walk_ops, NULL);
        }
        mm->def_flags |= VM_NOHUGEPAGE;
index 4af81df133ee8dc781c7c787c693d7701ae8793d..e2a1626d86d899d1ce77b5a44af1efcc49d11c9e 100644 (file)
@@ -391,7 +391,7 @@ void __init map_vsyscall(void)
        }
 
        if (vsyscall_mode == XONLY)
-               gate_vma.vm_flags = VM_EXEC;
+               init_vm_flags(&gate_vma, VM_EXEC);
 
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
index aa9b8b8688676fc66ebc6fdd605fe4467991af13..42c0bded93b623425dcdc6bad51442202f7c60da 100644 (file)
@@ -95,7 +95,7 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
                return ret;
 
        vma->vm_ops = &sgx_vm_ops;
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+       set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
        vma->vm_private_data = encl;
 
        return 0;
index 6a77a14eee38c646f88910fa61f17dbee3d75996..0774a0bfeb2870360b45e40e07957798bfd13111 100644 (file)
@@ -105,7 +105,7 @@ static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
 
        vma->vm_ops = &sgx_vepc_vm_ops;
        /* Don't copy VMA in fork() */
-       vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+       set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
        vma->vm_private_data = vepc;
 
        return 0;
index 66a209f7eb86da01b9c27537cb7a4183f657162b..926fa2d3d725c3590cf26135af7086ed797cfe96 100644 (file)
@@ -1046,7 +1046,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 
                ret = reserve_pfn_range(paddr, size, prot, 0);
                if (ret == 0 && vma)
-                       vma->vm_flags |= VM_PAT;
+                       set_vm_flags(vma, VM_PAT);
                return ret;
        }
 
@@ -1112,7 +1112,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
        }
        free_pfn_range(paddr, size);
        if (vma)
-               vma->vm_flags &= ~VM_PAT;
+               clear_vm_flags(vma, VM_PAT);
 }
 
 /*
@@ -1122,7 +1122,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
  */
 void untrack_pfn_moved(struct vm_area_struct *vma)
 {
-       vma->vm_flags &= ~VM_PAT;
+       clear_vm_flags(vma, VM_PAT);
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
index cafd01f730dab4aef0b358aca03507dd3d35587e..bfd2c320ad254bede2bb5d24934ff38c9e739f38 100644 (file)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
        gate_vma.vm_page_prot = PAGE_READONLY;
 
        return 0;
index 9abf350bd7a5a28cc63d8960c395b02a0482b240..3d8b64ba3c301eef83f666c2bbc16ed4dc174e22 100644 (file)
@@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
                return -EROFS;
 
        /* changing from read to write with mprotect is not allowed */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       clear_vm_flags(vma, VM_MAYWRITE);
 
        pfrt_log_dev = to_pfrt_log_dev(file);
 
index 880224ec6abb8e62257857bbac76358e4c006b82..dd6c99223b8ce9c9accb94f62a6b45f3fa4c075b 100644 (file)
@@ -5572,8 +5572,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
                       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
                return -EPERM;
        }
-       vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       mod_vm_flags(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
 
        vma->vm_ops = &binder_vm_ops;
        vma->vm_private_data = proc;
index f8231e2e84beccec68c0440e7068907aee261a42..57bd36a28f957aec2c35267925989dceb0194a75 100644 (file)
@@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
        refcount_set(&vdata->refcnt, 1);
        vma->vm_private_data = vdata;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        if (vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;
index 8b387de69d229b7cb6059a6bc5094e277a4be16b..0070e7f21c1bf791abb27f37a61a868089b0aa3a 100644 (file)
@@ -3334,7 +3334,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
                                return -EINVAL;
                }
 
-               vma->vm_flags |= VM_IO;
+               set_vm_flags(vma, VM_IO);
 
                return remap_pfn_range(vma, vma->vm_start,
                                       phys_base >> PAGE_SHIFT,
index 5494d745ced58ad9fc12bfa2e8fc0ab71ad3548c..6e9726dfaa7eddabda1a234bb97bd6a693ba40dd 100644 (file)
@@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
                return rc;
 
        vma->vm_ops = &dax_vm_ops;
-       vma->vm_flags |= VM_HUGEPAGE;
+       set_vm_flags(vma, VM_HUGEPAGE);
        return 0;
 }
 
index a9b96b18772f322ef4c74ba8acad04ae1f978c99..1d0cc8b11d69e0cd2db23ce0528058b8ff4e7063 100644 (file)
@@ -202,7 +202,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
        if (rc < 0)
                return rc;
 
-       vma->vm_flags |= VM_DONTCOPY;
+       set_vm_flags(vma, VM_DONTCOPY);
        pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
                                IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
index 8ef31d687ef3b26352ce69802b2d576f7096f534..784d65deb5c26faf40a7bd65430ef98ffe02589b 100644 (file)
@@ -256,7 +256,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
         */
        if (is_cow_mapping(vma->vm_flags) &&
            !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
 
        return drm_gem_ttm_mmap(obj, vma);
 }
index 6d291aa6386bd7561a6ed87d5c66ed9409fe2521..7beb8dd6a5e6bb51493b31f020fe41018b2cdc45 100644 (file)
@@ -2879,8 +2879,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
 
        address = dev->adev->rmmio_remap.bus_addr;
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-                               VM_DONTDUMP | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+                               VM_DONTDUMP | VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
index cd4e61bf04939f93fd6387580db8f53961460b82..6cbe47cf9be53be347997a88998b5b70042cb249 100644 (file)
@@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
        address = kfd_get_process_doorbells(pdd);
        if (!address)
                return -ENOMEM;
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-                               VM_DONTDUMP | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+                               VM_DONTDUMP | VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
index 729d26d648af3b7795ab45a090fc332809f4e3c4..95cd20056ceab16eea9a0ec1358f034a5d669cde 100644 (file)
@@ -1052,8 +1052,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
        pfn = __pa(page->kernel_address);
        pfn >>= PAGE_SHIFT;
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
-                      | VM_DONTDUMP | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+                      | VM_DONTDUMP | VM_PFNMAP);
 
        pr_debug("Mapping signal page\n");
        pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
index 951b636772484304a06b32fed2a5f48c852b09ed..4422c44862523b5c3145e37de425b02c2374196b 100644 (file)
@@ -1972,8 +1972,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                return -ENOMEM;
        }
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
-               | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+               | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
index 8b68a3c1e6ab6ea53a712b891c1ba8adbc8d1bbc..ee8270ff593fe0c7a5b381b9385905411f54439b 100644 (file)
@@ -1047,7 +1047,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                        goto err_drm_gem_object_put;
                }
 
-               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+               set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }
index f6901ff97bbb5b55808e3da571b0744bedb18309..7149f05151e2f98778ef1eac1094696a9501c936 100644 (file)
@@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
         * the whole buffer.
         */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_DONTEXPAND;
+       mod_vm_flags(vma, VM_DONTEXPAND, VM_PFNMAP);
 
        if (dma_obj->map_noncoherent) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
index 35138f8a375c3cb509f8b718f73b575654249039..396d52a0374d76e6ccef97a035d0a52e09846aa5 100644 (file)
@@ -627,7 +627,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
                return ret;
        }
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        if (shmem->map_wc)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
index f024dc93939ef3d696ac9c720fe87166223e5a1b..8867bb6c40e362573c64b8750d3dccdf5c853e0f 100644 (file)
@@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
-               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+               clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
@@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
        vma->vm_ops = &drm_vm_dma_ops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        drm_vm_open_locked(dev, vma);
        return 0;
@@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                return -EINVAL;
 
        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
-               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+               clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
@@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
        default:
                return -EINVAL; /* This should never happen. */
        }
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        drm_vm_open_locked(dev, vma);
        return 0;
index cc386f8a7116e6d9bb5504872917281b65903954..b69672b32b2c2e012b6d8c52e6ea82d3bc0ace2a 100644 (file)
@@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
        pgprot_t vm_page_prot;
 
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
 
        vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
index 3e493f48e0d44a07047f0df6b9c849b5b6561dba..c330d415729cdf80c96bdb169cb5f777c85e2149 100644 (file)
@@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
        unsigned long vm_size;
        int ret;
 
-       vma->vm_flags &= ~VM_PFNMAP;
+       clear_vm_flags(vma, VM_PFNMAP);
        vma->vm_pgoff = 0;
 
        vm_size = vma->vm_end - vma->vm_start;
@@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
        if (obj->import_attach)
                return dma_buf_mmap(obj->dma_buf, vma, 0);
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 
        DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
                          exynos_gem->flags);
index aa3ecf771fd3691ceda363648662f50d3af5f0ee..6f97b6d270d6d0859e97d5bd32ec228d06e1ca4d 100644 (file)
@@ -139,7 +139,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
         */
        vma->vm_ops = &psbfb_vm_ops;
        vma->vm_private_data = (void *)fb;
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 
index 9fb4dd63342f3be5a0115fa476f8a5d3a037f224..bced8c30709e642f5caa27dad80f3145e480ebe1 100644 (file)
@@ -102,7 +102,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
        buf = dev_priv->mmap_buffer;
        buf_priv = buf->dev_private;
 
-       vma->vm_flags |= VM_DONTCOPY;
+       set_vm_flags(vma, VM_DONTCOPY);
 
        buf_priv->currently_mapped = I810_BUF_MAPPED;
 
index 73d9eda1d6b7a6fdd237da6bff783e67495648cc..9d0fd7da86f234b49d5be8c01221e43aa06a8fb4 100644 (file)
@@ -984,7 +984,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                        i915_gem_object_put(obj);
                        return -EINVAL;
                }
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
        }
 
        anon = mmap_singleton(to_i915(dev));
@@ -993,7 +993,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return PTR_ERR(anon);
        }
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+       set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
 
        /*
         * We keep the ref on mmo->obj, not vm_file, but we require
index 47e96b0289f98ebc345548f92877c6a7e68f45f6..427089733b87e76423275b6c7b5cbcf33d94c37d 100644 (file)
@@ -158,7 +158,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
         * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
index 1dee0d18abbb601a6f9e40447dda39b0e4e8bba1..8aff3ae909afd9b02460c58a1ac8ae2a6edee24a 100644 (file)
@@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
index cf571796fd26e6ccc1648053b8305eccea90762f..9c0e7d6a378430567ca1abb086144f35e82dfbd3 100644 (file)
@@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP;
+       mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
 
        if (omap_obj->flags & OMAP_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
index da8a69953706d941aabea89208025fba4bdf8ef8..290f2eae937509c04437fd090557fbe3dfd4d34f 100644 (file)
@@ -250,8 +250,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_flags &= ~VM_PFNMAP;
+       mod_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
index 81991090adcc93bc83b12f0107a9eaeb9ecc7121..5a0d2076ca7adb11377df7da7d9b4113cfd81eaa 100644 (file)
@@ -573,7 +573,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
                 * and set the vm_pgoff (used as a fake buffer offset by DRM)
                 * to 0 as we want to map the whole buffer.
                 */
-               vma->vm_flags &= ~VM_PFNMAP;
+               clear_vm_flags(vma, VM_PFNMAP);
                vma->vm_pgoff = 0;
 
                err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@@ -587,8 +587,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);
 
-               vma->vm_flags |= VM_MIXEDMAP;
-               vma->vm_flags &= ~VM_PFNMAP;
+               mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
 
                vma->vm_page_prot = pgprot_writecombine(prot);
        }
index 38119311284d965982a977658c1e4bd6d48de878..6b074023ced11c4f298f99d7c8f86793cc38e54e 100644 (file)
@@ -468,8 +468,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
        vma->vm_private_data = bo;
 
-       vma->vm_flags |= VM_PFNMAP;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_mmap_obj);
index 6b45b0429fef9e828b8a4955ac3a1809f044048c..5498a1dbef63936c6fae9355c5bfd4ab9506b951 100644 (file)
@@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
                return -EINVAL;
 
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        vma->vm_ops = &virtio_gpu_vram_vm_ops;
index 265f7c48d856ec54ed9cfefdb95023cefc6347b8..8c8015528b6f323b02a11cd43cd825c69285b277 100644 (file)
@@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 
        /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
        if (!is_cow_mapping(vma->vm_flags))
-               vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+               mod_vm_flags(vma, VM_PFNMAP, VM_MIXEDMAP);
 
        ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
 
index e31554d7139f194b08c5d72a96e32f289aa145e8..6ba057677ececdb37e21305dd58907a9de423ed3 100644 (file)
@@ -70,8 +70,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       mod_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
        vma->vm_pgoff = 0;
 
        /*
index 8069f795c86493ad0f5e1482ad0514b758309218..952a31e742a187a0d292fba109ae39c6898166d1 100644 (file)
@@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
        if (vma_pages(vma) != 1)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
        vma->vm_ops = &cs_char_vm_ops;
        vma->vm_private_data = file->private_data;
 
index 6c8215a47a601109da29f66269a96f3c2ed3a436..a6f178bf3ded90253f916a85cfd19266827e127a 100644 (file)
@@ -1659,7 +1659,7 @@ out:
                atomic_dec(&msc->user_count);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTCOPY);
        vma->vm_ops = &msc_mmap_ops;
        return ret;
 }
index 2712e699ba08cf2d30415eef43287b36cdc5ec75..9a59e61c4194110b86ba3652be0aff1893da05cf 100644 (file)
@@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
        pm_runtime_get_sync(&stm->dev);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &stm_mmap_vmops;
        vm_iomap_memory(vma, phys, size);
 
index f5f9269fdc16276242e4ddd38dfa4541d8ecc866..7294f2d33bc6d275a0028de3534c9914541bf516 100644 (file)
@@ -403,7 +403,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
                        ret = -EPERM;
                        goto done;
                }
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
                addr = vma->vm_start;
                for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
                        memlen = uctxt->egrbufs.buffers[i].len;
@@ -528,7 +528,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
                goto done;
        }
 
-       vma->vm_flags = flags;
+       reset_vm_flags(vma, flags);
        hfi1_cdbg(PROC,
                  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
                    ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
index c669ef6e47e730dffa584e162c6fbf18ce6204d2..538318c809b38d8c1cd545462bd8a999d502342c 100644 (file)
@@ -2087,7 +2087,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       clear_vm_flags(vma, VM_MAYWRITE);
 
        if (!dev->mdev->clock_info)
                return -EOPNOTSUPP;
@@ -2311,7 +2311,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
 
                /* Don't expose to user-space information it shouldn't have */
                if (PAGE_SIZE > 4096)
index 3937144b2ae5a8242d5a906d1d684877fb9bf11d..16ef80df4b7f54eaccf20746391226767944af65 100644 (file)
@@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
                }
 
                /* don't allow them to later change with mprotect */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
        }
 
        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
                phys = dd->physaddr + ureg;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-               vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+               set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         phys >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
@@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
         * don't allow them to later change to readable with mprotect (for when
         * not initially mapped readable, as is normally the case)
         */
-       vma->vm_flags &= ~VM_MAYREAD;
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+       mod_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
 
        /* We used PAT if wc_cookie == 0 */
        if (!dd->wc_cookie)
@@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                goto bail;
        }
        /* don't allow them to later change to writable with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       clear_vm_flags(vma, VM_MAYWRITE);
 
        start = vma->vm_start;
 
@@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                 * Don't allow permission to later change to writable
                 * with mprotect.
                 */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
        } else
                goto bail;
        len = vma->vm_end - vma->vm_start;
@@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 
        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &qib_file_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        ret = 1;
 
 bail:
index 6e8c4fbb80834f11bb6bd2c7dbc9558040457082..6f9237c2a26bd72a94e173341e7bd08eb3d0b724 100644 (file)
@@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
        usnic_dbg("\n");
 
        us_ibdev = to_usdev(context->device);
-       vma->vm_flags |= VM_IO;
+       set_vm_flags(vma, VM_IO);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vfid = vma->vm_pgoff;
        usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
index 19176583dbde680e20d4da7ac11c2eb895f8e3a1..7f1b7b5dd3f47870f38276872b020a986cf772ae 100644 (file)
@@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
        }
 
        /* Map UAR to kernel space, VM_LOCKED? */
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
                               vma->vm_page_prot))
index 678b359717c4857d17d2bd8870fe9cda483f1a88..4d4d6b203d33208045d8ea5ede071a288fd1c851 100644 (file)
@@ -292,7 +292,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
                return ret;
        }
 
-       vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;
 
index 948152f1596b496d0286c9ccf02e7a8d02309be9..0ff3af255da84d5e7a435f02dbe66bd11885e2cd 100644 (file)
@@ -182,7 +182,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
-       vma->vm_flags           |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
 
        /*
         * Use common vm_area operations to track buffer refcount.
index 52312ce2ba0562b24fe4a90307a3d9110c34f6e8..eba8a33f348dbd0ea0ec6699141ac04681950d1c 100644 (file)
@@ -320,7 +320,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
        }
 
        vma->vm_ops = &videobuf_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
        vma->vm_private_data = map;
 
        dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
index f75e5eedeee05ee31aa8f12ef34343ce9756c03e..0c5816282370a119f43a03e99dbb2aeacba2376a 100644 (file)
@@ -634,8 +634,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
        map->count    = 1;
        map->q        = q;
        vma->vm_ops   = &videobuf_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+       /* using shared anonymous pages */
+       mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
        vma->vm_private_data = map;
        dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
                map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
index 9b2443720ab017c34873c53b29c7dceeb8406196..48d439ccd4149169039bed502bbc08e554acc5e4 100644 (file)
@@ -247,7 +247,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
        }
 
        vma->vm_ops          = &videobuf_vm_ops;
-       vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data = map;
 
        dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
index acaa44809c58a11edc1af14a55ab5e3c0cb35430..17562e4efcb2e5711e0c6950571d4c67d9f3fec1 100644 (file)
@@ -220,7 +220,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe , ctx->master);
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &cxl_mmap_vmops;
        return 0;
index ef28f3b37b93216bf9a7b50f296f21fcdff62261..ee10e9eb251caa2b72fc82bb86e73dfe448a223a 100644 (file)
@@ -2082,7 +2082,7 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
 {
        struct hl_ts_buff *ts_buff = buf->private;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
        return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
 }
 
index 92560414e84333ded0f7564eb8e1c2f137ed5f42..1e3992be0eb2db6fc49f0e60755364b89b3cc484 100644 (file)
@@ -4236,8 +4236,8 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
                                (dma_addr - HOST_PHYS_BASE), size);
index 65e6cae6100a469062175d7785f6df6713b69af9..d19c408b5f1a2e638dad8aa6c98245ce3ef6d42b 100644 (file)
@@ -5599,8 +5599,8 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
 #ifdef _HAS_DMA_MMAP_COHERENT
 
@@ -9894,8 +9894,8 @@ static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 
        address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
                        block_size, vma->vm_page_prot);
index 5ef9e3ca97a69acb1dc704d7d7e4173d531cd5c4..9bf66dcd3b642d715a05b7be2badcc48a965d60f 100644 (file)
@@ -2880,8 +2880,8 @@ static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
                                (dma_addr - HOST_PHYS_BASE), size);
index 9eb0d93b01c67a7f0b1d69c05edd66011f347681..e6f941248e93170823da57ace801b0eb51c91372 100644 (file)
@@ -180,7 +180,7 @@ static int check_mmap_afu_irq(struct ocxl_context *ctx,
        if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
                !(vma->vm_flags & VM_WRITE))
                return -EINVAL;
-       vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+       clear_vm_flags(vma, VM_MAYREAD | VM_MAYEXEC);
        return 0;
 }
 
@@ -204,7 +204,7 @@ int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
        if (rc)
                return rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
index 25c78df8055d3f228d5bf638094f3dac174b8246..9398246cac7974e83fa028f317bdd368b2efd31a 100644 (file)
@@ -134,7 +134,7 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
                (afu->config.global_mmio_size >> PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &global_mmio_vmops;
        vma->vm_private_data = afu;
index c61be3404c6f283e8e3408edc3d35dd064e5d859..9f9438b5b075a557c38637e2aa28ad68e49f8f8c 100644 (file)
@@ -96,13 +96,13 @@ static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
 
        /* Ensure userspace cannot acquire VM_WRITE + VM_SHARED later. */
        if (vma->vm_flags & VM_WRITE)
-               vma->vm_flags &= ~VM_MAYSHARE;
+               clear_vm_flags(vma, VM_MAYSHARE);
        else if (vma->vm_flags & VM_SHARED)
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
 
        /* Create write-combine mapping so all clients observe a wipe. */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTCOPY | VM_DONTDUMP);
        return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
 }
 
index 7ffcfc0bb58723fe373b00e4dabade697103f296..8b777286d3b2705ca61c405d7c3f13da5f328295 100644 (file)
@@ -101,8 +101,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
                                vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
-                        VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+                        VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = PAGE_SHARED;
        vma->vm_ops = &gru_vm_ops;
 
index b70a013139c74becfc22e01daf7b5a8c75fc4a44..d8f01f2ee770935679f0f0e4c70ee9ce55f1b9c6 100644 (file)
@@ -229,7 +229,7 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
        if (!qfr)
                return -ENOMEM;
 
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+       set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
        vma->vm_ops = &uacce_vm_ops;
        vma->vm_private_data = q;
        qfr->type = type;
index 21b7cb6e7e705d06f302ff069cc23f255924e77d..a096734daad02790620bf139bdd2a2fa2b207362 100644 (file)
@@ -389,7 +389,7 @@ static int dax_devmap(struct file *f, struct vm_area_struct *vma)
        /* completion area is mapped read-only for user */
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       clear_vm_flags(vma, VM_MAYWRITE);
 
        if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
                            len, vma->vm_page_prot))
index 631eda2d467e246466db77466953542554a33845..d386c25c2699ce785e813cbe7694abd7bb615834 100644 (file)
@@ -1167,7 +1167,7 @@ static int afu_mmap(struct file *file, struct vm_area_struct *vma)
            (ctx->psn_size >> PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxlflash_vmops;
        return 0;
index ce34a8ad53b4e9d99501e043be2d81c2476748f2..646c1651c6a6f26e4fa56b5532695f98102852b8 100644 (file)
@@ -1288,7 +1288,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
        }
 
        sfp->mmap_called = 1;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
 out:
index 84a84e0cdeef7bad977f20ce7652f13ad73c2035..75297ac3857f59bb983ef0e7c30517dae8d2cda5 100644 (file)
@@ -951,7 +951,7 @@ int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
                    buf->boff == offset) {
                        vm_mem = buf->priv;
                        ret = frame_mmap(isp, vm_mem->vaddr, vma);
-                       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+                       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
                        break;
                }
        }
@@ -1027,7 +1027,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
                 * Without VM_SHARED, remap_pfn_range() treats
                 * this kind of mapping as invalid.
                 */
-               vma->vm_flags |= VM_SHARED;
+               set_vm_flags(vma, VM_SHARED);
                ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT);
                mutex_unlock(&isp->mutex);
                return ret;
@@ -1071,7 +1071,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
                        goto error;
                }
                raw_virt_addr->data_bytes = origin_size;
-               vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+               set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
                mutex_unlock(&isp->mutex);
                return 0;
        }
index a5fd6d38d3c4186e5de4dbe79c94857ed5b276be..6703b26aa2d6ee141571037cda3519d841b21407 100644 (file)
@@ -1102,7 +1102,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
        vma->vm_private_data = bo;
 
        vma->vm_ops = &hmm_bo_vm_ops;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 
        /*
         * call hmm_bo_vm_open explicitly.
index 5d87efd9b95c6862a190649c12d7232d61b61888..2505e64d711936021367d600946835f774113903 100644 (file)
@@ -1476,8 +1476,8 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
        }
 
        vma->vm_ops = &meye_vm_ops;
-       vma->vm_flags &= ~VM_IO;        /* not I/O memory */
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       /* not I/O memory */
+       mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
        vma->vm_private_data = (void *) (offset / gbufsize);
        meye_vm_open(vma);
 
index 787edb3d47c23d0ed62c05082a45252fb554e1b7..196d1034f104e25988f49609643e5b01af567ae0 100644 (file)
@@ -779,7 +779,7 @@ static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma)
        ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
        if (ret)
                return ret;
-       vma->vm_flags |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
        vma->vm_private_data = sbuf;
        vma->vm_ops = &stk_v4l_vm_ops;
        sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
index 2940559c30860bf36ab4314aa234425610b82917..9fd64259904cf7c9d55db26c033dcf1080ae7c43 100644 (file)
@@ -1928,7 +1928,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &tcmu_vm_ops;
 
        vma->vm_private_data = udev;
index 43afbb7c5ab916d317f206fc4e2f302cb0a95e7f..08802744f3b77f932bf092c1465c62d7a1cc7084 100644 (file)
@@ -713,7 +713,7 @@ static const struct vm_operations_struct uio_logical_vm_ops = {
 
 static int uio_mmap_logical(struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &uio_logical_vm_ops;
        return 0;
 }
index 837f3e57f5809bf574d6443f418871bc2cc8fd39..d9aefa259883c8e725cad83c72bcee1570d1fd97 100644 (file)
@@ -279,8 +279,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
                }
        }
 
-       vma->vm_flags |= VM_IO;
-       vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &usbdev_vm_ops;
        vma->vm_private_data = usbm;
 
index 094e812e9e69223d3c7a866bb1f416ef73a9aeac..9b2d48a65fdf93f36ae1ade7717006002dfe8e2c 100644 (file)
@@ -1272,8 +1272,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
 
-       vma->vm_flags &= ~VM_MAYWRITE;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
        vma->vm_private_data = filp->private_data;
        mon_bin_vma_open(vma);
        return 0;
index e682bc7ee6c9994d7cb44892efc366941a269024..39dcce2e455b69afffbea99eb110e7d65316eb9c 100644 (file)
@@ -512,7 +512,7 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct vduse_iova_domain *domain = file->private_data;
 
-       vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTDUMP | VM_DONTEXPAND);
        vma->vm_private_data = domain;
        vma->vm_ops = &vduse_domain_mmap_ops;
 
index e030c2120183ef5c6bd4019ad50e5e6ac4f40a6c..ae2bd0ddf2537e2b8962d48a6006a40e1453ddf8 100644 (file)
@@ -1792,7 +1792,7 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
         * See remap_pfn_range(), called from vfio_pci_fault() but we can't
         * change vm_flags within the fault handler.  Set them now.
         */
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &vfio_pci_mmap_ops;
 
        return 0;
index 166044642fd5cc268c867bb78ba91c351b9e3b3c..9bc2997a6617ece157ded2b84616d89c77a387ef 100644 (file)
@@ -1305,7 +1305,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
        if (vma->vm_end - vma->vm_start != notify.size)
                return -ENOTSUPP;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &vhost_vdpa_vm_ops;
        return 0;
 }
index 7db03ed77c762fc490d1f8a39de9599ed386d915..a794a740af107521ce5d43ea093b9dfd9a72dd0c 100644 (file)
@@ -391,7 +391,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 #ifndef MMU
        /* this is uClinux (no MMU) specific code */
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_start = videomemory;
 
        return 0;
index c730253ab85cee86a796aa784179bfb13c3c4b7a..af0bfaa2d014475ada4e4cf62ce99222cf8daa7c 100644 (file)
@@ -232,9 +232,9 @@ static const struct address_space_operations fb_deferred_io_aops = {
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        if (!(info->flags & FBINFO_VIRTFB))
-               vma->vm_flags |= VM_IO;
+               set_vm_flags(vma, VM_IO);
        vma->vm_private_data = info;
        return 0;
 }
index a15729beb9d1bc7e6d038f68aef044ad6307b3a5..ee4a8958dc680616986c2c7a6b0622b7b6a4f469 100644 (file)
@@ -525,7 +525,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 
        vma->vm_private_data = vm_priv;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        vma->vm_ops = &gntalloc_vmops;
 
index 4d9a3050de6a3f6e3e2d41abfcefd41564dd3125..6d5bb1ebb661e91111830b7310b619f58a46031d 100644 (file)
@@ -1055,10 +1055,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
        vma->vm_ops = &gntdev_vmops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
 
        if (use_ptemod)
-               vma->vm_flags |= VM_DONTCOPY;
+               set_vm_flags(vma, VM_DONTCOPY);
 
        vma->vm_private_data = map;
        if (map->flags) {
index dd5bbb6e1b6b9d1033ae2bdbe60af5ca1de3e0de..037547918630286f54eae8cb3a2eec5c6efa4f96 100644 (file)
@@ -156,7 +156,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        vma_priv->file_priv = file_priv;
        vma_priv->users = 1;
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_IO | VM_DONTEXPAND);
        vma->vm_ops = &privcmd_buf_vm_ops;
        vma->vm_private_data = vma_priv;
 
index fae50a24630bd0e517c5512f327fd9d55d244221..5192293cadb3352f115643f827977d6d9820812e 100644 (file)
@@ -934,8 +934,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 {
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
-                        VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+                        VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;
 
index 5b2ff20ad32298e827b8cb68da24160e8b91ae41..af162ff232046ceb320d90bc2078cebc3243c24f 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -390,7 +390,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
 
 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
        vma->vm_ops = &aio_ring_vm_ops;
        return 0;
 }
index 61ccf7722fc3c446a4b90c63a88dddf721a8db04..874a17a1b8d9b9cfcd8cdc5f183c81c5db4377e7 100644 (file)
@@ -408,7 +408,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
                 * unpopulated ptes via cramfs_read_folio().
                 */
                int i;
-               vma->vm_flags |= VM_MIXEDMAP;
+               set_vm_flags(vma, VM_MIXEDMAP);
                for (i = 0; i < pages && !ret; i++) {
                        vm_fault_t vmf;
                        unsigned long off = i * PAGE_SIZE;
index fe8ac0e163f7ebf85e81c3b275e45b333b0efb17..8c90b08f7d591d8c0be8b890be958d90dea18b32 100644 (file)
@@ -431,7 +431,7 @@ static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_ops = &erofs_dax_vm_ops;
-       vma->vm_flags |= VM_HUGEPAGE;
+       set_vm_flags(vma, VM_HUGEPAGE);
        return 0;
 }
 #else
index a0b1f0337a6280fe7bf0ff653901cf5252dfbd25..3d041d1bd23e01bbb38ce1f31d8b2186ba1d710e 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -269,7 +269,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
-       vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+       init_vm_flags(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        err = insert_vm_struct(mm, vma);
@@ -833,7 +833,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
        }
 
        /* mprotect_fixup is overkill to remove the temporary stack flags */
-       vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+       clear_vm_flags(vma, VM_STACK_INCOMPLETE_SETUP);
 
        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
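[Editor's sketch — not part of the patch. The exec hunk above (and the hugetlbfs, mm/mmap.c and mm/nommu.c hunks further down) assigns the whole flag word on a VMA that is still being set up, which is presumably what init_vm_flags() is for. A minimal sketch, assuming a plain overwrite on a VMA not yet visible to other threads:]

static inline void init_vm_flags(struct vm_area_struct *vma,
				 unsigned long flags)
{
	/* Assumed: wholesale initialization, no locking needed yet. */
	vma->vm_flags = flags;
}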
index a7a597c727e638dff296d7d7b5b663c63a9d3051..8bf8a3b394f992df8071e0111c19f729b2ca4498 100644 (file)
@@ -801,7 +801,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
-               vma->vm_flags |= VM_HUGEPAGE;
+               set_vm_flags(vma, VM_HUGEPAGE);
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
index e23e802a80130af9d007e36e47b446105a65c410..599969edc869d5b28763ab82a50469935b8af786 100644 (file)
@@ -860,7 +860,7 @@ int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
 {
        file_accessed(file);
        vma->vm_ops = &fuse_dax_vm_ops;
-       vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+       set_vm_flags(vma, VM_MIXEDMAP | VM_HUGEPAGE);
        return 0;
 }
 
index df7772335dc0e41cfbebfca782d5f64cbc26cccc..db14497e2ca40c399233bfa9ce80bcc4cecc7de3 100644 (file)
@@ -132,7 +132,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
-       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+       set_vm_flags(vma, VM_HUGETLB | VM_DONTEXPAND);
        vma->vm_ops = &hugetlb_vm_ops;
 
        ret = seal_check_future_write(info->seals, vma);
@@ -813,7 +813,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
         * as input to create an allocation policy.
         */
        vma_init(&pseudo_vma, mm);
-       pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+       init_vm_flags(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;
 
        for (index = start; index < end; index++) {
index 732661aa26804677f82f1c54a18c91057cc315f5..5be499030c7c495ce4d04ec9cb03041f9d4dbc93 100644 (file)
@@ -390,8 +390,7 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
                     "orangefs_file_mmap: called on %pD\n", file);
 
        /* set the sequential readahead hint */
-       vma->vm_flags |= VM_SEQ_READ;
-       vma->vm_flags &= ~VM_RAND_READ;
+       mod_vm_flags(vma, VM_SEQ_READ, VM_RAND_READ);
 
        file_accessed(file);
        vma->vm_ops = &orangefs_file_vm_ops;
index 8a74cdcc9af00f7217f8a5ab3700537b5af6913d..ed04de0f5c4d51ee98a26c53fbeb141eade11fb1 100644 (file)
@@ -1294,7 +1294,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        mas_for_each(&mas, vma, ULONG_MAX) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               vma->vm_flags &= ~VM_SOFTDIRTY;
+                               clear_vm_flags(vma, VM_SOFTDIRTY);
                                vma_set_page_prot(vma);
                        }
 
index f2aa86c421f2d4daef6cae428912701306d07958..d733ec42bd6fc48aa7c75b88c02cd5dd7c132a6e 100644 (file)
@@ -582,8 +582,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
 
-       vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
-       vma->vm_flags |= VM_MIXEDMAP;
+       mod_vm_flags(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_ops = &vmcore_mmap_ops;
 
        len = 0;
index 98ac37e34e3d4bd4ac229e01a42b2e2a8a27685d..f46252544924acd69c6c452cef52f6793464ed69 100644 (file)
@@ -618,7 +618,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                for_each_vma(vmi, vma) {
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-                               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+                               clear_vm_flags(vma, __VM_UFFD_FLAGS);
                        }
                }
                mmap_write_unlock(mm);
@@ -652,7 +652,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
        octx = vma->vm_userfaultfd_ctx.ctx;
        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               clear_vm_flags(vma, __VM_UFFD_FLAGS);
                return 0;
        }
 
@@ -733,7 +733,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
        } else {
                /* Drop uffd context if remap feature not enabled */
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               clear_vm_flags(vma, __VM_UFFD_FLAGS);
        }
 }
 
@@ -895,7 +895,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                        prev = vma;
                }
 
-               vma->vm_flags = new_flags;
+               reset_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
        mmap_write_unlock(mm);
@@ -1463,7 +1463,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               reset_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx.ctx = ctx;
 
                if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1651,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               reset_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 
        skip:
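[Editor's sketch — not part of the patch. The userfaultfd hunks above (and the madvise, mlock and mprotect hunks below) replace whole-word assignments on an already-mapped VMA, for which reset_vm_flags() is used. A sketch of the assumed behaviour; the real helper may also take the per-VMA lock or preserve the WRITE_ONCE() semantics of the mlock code it replaces:]

static inline void reset_vm_flags(struct vm_area_struct *vma,
				  unsigned long flags)
{
	/* Assumed: replace the flag word; callers hold mmap_lock for write. */
	vma->vm_flags = flags;
}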
index e462d39c840e62911242e1e157756b81d8737c9d..f509a97da7f559cf2af9cdc2fa02b124207b4640 100644 (file)
@@ -1429,7 +1429,7 @@ xfs_file_mmap(
        file_accessed(file);
        vma->vm_ops = &xfs_file_vm_ops;
        if (IS_DAX(inode))
-               vma->vm_flags |= VM_HUGEPAGE;
+               set_vm_flags(vma, VM_HUGEPAGE);
        return 0;
 }
 
index 440013141534c0f8b79e49a4f630ea9bb9b79e01..ecb1cd991443db21e1afbedca60474adcc49591f 100644 (file)
@@ -3566,7 +3566,7 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
                 * VM_MAYWRITE as we still want them to be COW-writable.
                 */
                if (vma->vm_flags & VM_SHARED)
-                       vma->vm_flags &= ~(VM_MAYWRITE);
+                       clear_vm_flags(vma, VM_MAYWRITE);
        }
 
        return 0;
index 9e832acf4692574d5e7d774ce4b7b3b64610cbf2..d25894cb569b19fba0a0ead70f4c6d498d96a792 100644 (file)
@@ -269,7 +269,7 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma
                if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EPERM;
        } else {
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb,
@@ -290,7 +290,7 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma
                         */
                        return -EPERM;
        } else {
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
index 7b373a5e861f445ee5d73dad224fc5b38066ecfc..446e691d4da11d8a761540e9dd30301114415e99 100644 (file)
@@ -806,10 +806,10 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
-       vma->vm_flags &= ~VM_MAYEXEC;
+       clear_vm_flags(vma, VM_MAYEXEC);
        if (!(vma->vm_flags & VM_WRITE))
                /* disallow re-mapping with PROT_WRITE */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
 
        err = map->ops->map_mmap(map, vma);
        if (err)
index 884871427a94f06f01e2660d281a8d02dc5fcd79..50d8b5f6419d5ac85176abfe930b1f4b0809c963 100644 (file)
@@ -6395,7 +6395,7 @@ aux_unlock:
         * Since pinned accounting is per vm we cannot allow fork() to copy our
         * vma.
         */
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &perf_mmap_vmops;
 
        if (event->pmu->event_mapped)
index e5cd09fd8a05087cac8226ab5fa1fc693927b88e..27fc1e26e1e19252b7b0cc8e858a750fad7c81c8 100644 (file)
@@ -489,7 +489,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
                goto exit;
        }
        spin_unlock_irqrestore(&kcov->lock, flags);
-       vma->vm_flags |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
        for (off = 0; off < size; off += PAGE_SIZE) {
                page = vmalloc_to_page(kcov->area + off);
                res = vm_insert_page(vma, vma->vm_start + off, page);
index d7edc934c56d5e498461a078941e80dac4cf8345..be2d7c94afa730a68496b8ecbded9a899e93c658 100644 (file)
@@ -91,7 +91,7 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_ops = &relay_file_mmap_ops;
-       vma->vm_flags |= VM_DONTEXPAND;
+       set_vm_flags(vma, VM_DONTEXPAND);
        vma->vm_private_data = buf;
 
        return 0;
index c7105ec6d08c00b9501763165819ad678a4d579d..ff52af67d1edb1cb5ee8b2f079ea6cb4f316ff78 100644 (file)
@@ -182,7 +182,7 @@ success:
        /*
         * vm_flags is protected by the mmap_lock held in write mode.
         */
-       vma->vm_flags = new_flags;
+       reset_vm_flags(vma, new_flags);
        if (!vma->vm_file) {
                error = replace_anon_vma_name(vma, anon_name);
                if (error)
index f88c351aecd4177b84b4bc34d5c2ed438d76a6b0..a97f5b4b2acde92b5498f96a5b2e35ec462949bc 100644 (file)
@@ -1991,7 +1991,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               set_vm_flags(vma, VM_MIXEDMAP);
        }
        /* Defer page refcount checking till we're about to map that page. */
        return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
@@ -2049,7 +2049,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               set_vm_flags(vma, VM_MIXEDMAP);
        }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
@@ -2515,7 +2515,7 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
                vma->vm_pgoff = pfn;
        }
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
index 06aa9e204fac81085dc9ad3969d099fb2d32a5e7..4807e91aaa8b158952fd8c393509d8df0638f636 100644 (file)
@@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
         */
        if (newflags & VM_LOCKED)
                newflags |= VM_IO;
-       WRITE_ONCE(vma->vm_flags, newflags);
+       reset_vm_flags(vma, newflags);
 
        lru_add_drain();
        walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
@@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
 
        if (newflags & VM_IO) {
                newflags &= ~VM_IO;
-               WRITE_ONCE(vma->vm_flags, newflags);
+               reset_vm_flags(vma, newflags);
        }
 }
 
@@ -456,7 +456,7 @@ success:
 
        if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
                /* No work to do, and mlocking twice would be wrong */
-               vma->vm_flags = newflags;
+               reset_vm_flags(vma, newflags);
        } else {
                mlock_vma_pages_range(vma, start, end, newflags);
        }
index 4e1bdaa1e0b429bc5fe907dd82834e8a6d500674..f615f5762dd461a6c7d04943ad4a01fbfd42160e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2610,7 +2610,7 @@ cannot_expand:
 
        vma->vm_start = addr;
        vma->vm_end = end;
-       vma->vm_flags = vm_flags;
+       init_vm_flags(vma, vm_flags);
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
 
@@ -2739,7 +2739,7 @@ expanded:
         * then new mapped in-place (which must be aimed as
         * a completely new data area).
         */
-       vma->vm_flags |= VM_SOFTDIRTY;
+       set_vm_flags(vma, VM_SOFTDIRTY);
 
        vma_set_page_prot(vma);
 
@@ -2962,7 +2962,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
                        anon_vma_interval_tree_pre_update_vma(vma);
                }
                vma->vm_end = addr + len;
-               vma->vm_flags |= VM_SOFTDIRTY;
+               set_vm_flags(vma, VM_SOFTDIRTY);
                mas_store_prealloc(mas, vma);
 
                if (vma->anon_vma) {
@@ -2982,7 +2982,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = addr >> PAGE_SHIFT;
-       vma->vm_flags = flags;
+       init_vm_flags(vma, flags);
        vma->vm_page_prot = vm_get_page_prot(flags);
        mas_set_range(mas, vma->vm_start, addr + len - 1);
        if (mas_store_gfp(mas, vma, GFP_KERNEL))
@@ -2995,7 +2995,7 @@ out:
        mm->data_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
-       vma->vm_flags |= VM_SOFTDIRTY;
+       set_vm_flags(vma, VM_SOFTDIRTY);
        validate_mm(mm);
        return 0;
 
index 668bfaa6ed2aebdbc64182e7d2c8a007a2d4bae7..9faa022aa05bda7a70edb2d216b2572440c7d487 100644 (file)
@@ -630,7 +630,7 @@ success:
         * vm_flags and vm_page_prot are protected by the mmap_lock
         * held in write mode.
         */
-       vma->vm_flags = newflags;
+       reset_vm_flags(vma, newflags);
        /*
         * We want to check manually if we can change individual PTEs writable
         * if we can't do that automatically for all PTEs in a mapping. For
index 4e91bb5311c63f06f33ec12cd2e74e444d70e0f4..e7093def4760e2f67545b657352abc697fafd448 100644 (file)
@@ -661,7 +661,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
-               vma->vm_flags &= ~VM_ACCOUNT;
+               clear_vm_flags(vma, VM_ACCOUNT);
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
@@ -716,9 +716,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
-               vma->vm_flags |= VM_ACCOUNT;
+               set_vm_flags(vma, VM_ACCOUNT);
                if (split)
-                       find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
+                       set_vm_flags(find_vma(mm, vma->vm_end), VM_ACCOUNT);
        }
 
        return new_addr;
index 214c70e1d05942e8a0a9a2e4ece7a6f5086f9c62..b3154357ced52285efef952b615643009ff9f435 100644 (file)
@@ -173,7 +173,7 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
                mmap_write_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
-                       vma->vm_flags |= VM_USERMAP;
+                       set_vm_flags(vma, VM_USERMAP);
                mmap_write_unlock(current->mm);
        }
 
@@ -991,7 +991,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
 
        atomic_long_add(total, &mmap_pages_allocated);
 
-       region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+       set_vm_flags(vma, VM_MAPPED_COPY);
+       region->vm_flags = vma->vm_flags;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
@@ -1088,7 +1089,7 @@ unsigned long do_mmap(struct file *file,
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
 
-       vma->vm_flags = vm_flags;
+       init_vm_flags(vma, vm_flags);
        vma->vm_pgoff = pgoff;
 
        if (file) {
@@ -1152,7 +1153,7 @@ unsigned long do_mmap(struct file *file,
                        vma->vm_end = start + len;
 
                        if (pregion->vm_flags & VM_MAPPED_COPY)
-                               vma->vm_flags |= VM_MAPPED_COPY;
+                               set_vm_flags(vma, VM_MAPPED_COPY);
                        else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
@@ -1632,7 +1633,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        if (addr != (pfn << PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
index 04c3ac9448a1883d490729a60f493c9b2caa2ac5..334b85714bd7723fee50cafc183b60c01d3a2afb 100644 (file)
@@ -128,7 +128,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
        if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
                return -EAGAIN;
 
-       vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
+       set_vm_flags(vma, VM_LOCKED | VM_DONTDUMP);
        vma->vm_ops = &secretmem_vm_ops;
 
        return 0;
index c1d8b8a1aa3b4edc1d1d4ec712b6a5cffbc92ae2..534d226f72c883f5f90fe44f412bcaa96d7d4654 100644 (file)
@@ -2279,7 +2279,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
                return ret;
 
        /* arm64 - allow memory tagging on RAM-based files */
-       vma->vm_flags |= VM_MTE_ALLOWED;
+       set_vm_flags(vma, VM_MTE_ALLOWED);
 
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
index ccaa461998f3c37981df44b89a4ecdcd0c2f4f76..73a70f3803f1744154c0bf4162b70d6ef8420b5a 100644 (file)
@@ -3643,7 +3643,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
                size -= PAGE_SIZE;
        } while (size > 0);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        return 0;
 }
index 54836a6b81d6fa2059f312c753f62ee658bb6b18..c79ad6e045b3451172e5fdeffa9c72b4a14e8600 100644 (file)
@@ -1890,10 +1890,10 @@ int tcp_mmap(struct file *file, struct socket *sock,
 {
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
-       vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+       clear_vm_flags(vma, VM_MAYWRITE | VM_MAYEXEC);
 
        /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
-       vma->vm_flags |= VM_MIXEDMAP;
+       set_vm_flags(vma, VM_MIXEDMAP);
 
        vma->vm_ops = &tcp_vm_ops;
        return 0;
index a00d191394365b50fd59fcdda75ba8f1343ec1f9..7acb95e5019031c9fb56321576d391240b8513b2 100644 (file)
@@ -262,7 +262,7 @@ static int sel_mmap_handle_status(struct file *filp,
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
        /* disallow mprotect() turns it into writable */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       clear_vm_flags(vma, VM_MAYWRITE);
 
        return remap_pfn_range(vma, vma->vm_start,
                               page_to_pfn(status),
@@ -506,13 +506,13 @@ static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
 {
        if (vma->vm_flags & VM_SHARED) {
                /* do not allow mprotect to make mapping writable */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               clear_vm_flags(vma, VM_MAYWRITE);
 
                if (vma->vm_flags & VM_WRITE)
                        return -EACCES;
        }
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &sel_mmap_policy_ops;
 
        return 0;
index ac2efeb63a39637d0399c3273491bb3924c9ddde..52473e2acd0756a10084515039f7ada4bdc2d1ca 100644 (file)
@@ -2910,7 +2910,7 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
        }
        /* set VM_READ access as well to fix memset() routines that do
           reads before writes (to improve performance) */
-       area->vm_flags |= VM_READ;
+       set_vm_flags(area, VM_READ);
        if (substream == NULL)
                return -ENXIO;
        runtime = substream->runtime;
index 33769ca78cc8f9e47fb0348c79049c2170809eed..c3cdc98bdf9ccd2fd7940c5db6ba94177408afea 100644 (file)
@@ -3667,8 +3667,9 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_status;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       area->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+       mod_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP,
+                    VM_WRITE | VM_MAYWRITE);
+
        return 0;
 }
 
@@ -3704,7 +3705,7 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_control;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 
@@ -3820,7 +3821,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                             struct vm_area_struct *area)
 {
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
        if (!substream->ops->page &&
            !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
                return 0;
index fb5a4390443fe10509f2915c3151be3ba9163f36..fdd72d9bb46c369370674ed068edb98ec022763a 100644 (file)
@@ -404,7 +404,7 @@ static int mmp_pcm_mmap(struct snd_soc_component *component,
                        struct snd_pcm_substream *substream,
                        struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
                substream->dma_buffer.addr >> PAGE_SHIFT,
index e558931cce16e2ab5ab626184ebec4cef6e9fe2b..b51db622a69b5fd28d53f270afd9beadc2a03dbb 100644 (file)
@@ -224,9 +224,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_DONTDUMP;
+       set_vm_flags(area, VM_DONTDUMP);
        if (!read)
-               area->vm_flags |= VM_DONTEXPAND;
+               set_vm_flags(area, VM_DONTEXPAND);
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
index c29da0341bc5b3f1a002c1b9f1f92cc34b9c6462..3abe6d891f986350f967df02b312747031cc3c10 100644 (file)
@@ -61,7 +61,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm
        }
 
        area->vm_ops = &us428ctls_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
        area->vm_private_data = hw->private_data;
        return 0;
 }
index 767a227d54da491b533ab2bb76d1c6d5653df35a..22ce93b2fb24332ca203572968af4e17aa478e92 100644 (file)
@@ -706,7 +706,7 @@ static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, str
                return -ENODEV;
 
        area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
        area->vm_private_data = hw->private_data;
        return 0;
 }