gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        gate_vma.vm_start = 0xffff0000;
        gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
-       gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+       vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
        return 0;
 }
 arch_initcall(gate_vma_init);
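
For reference, a minimal sketch of the wrapper API these hunks convert to,
simplified from the helpers the series adds to include/linux/mm.h (the real
definitions write through ACCESS_PRIVATE(vma, __vm_flags), since vm_flags
itself becomes const so that any leftover direct modification fails to
build):

/* For a detached VMA not yet visible to other threads: no lock needed. */
static inline void vm_flags_init(struct vm_area_struct *vma,
                                 vm_flags_t flags)
{
        vma->vm_flags = flags;                  /* simplified */
}

/* Overwrite the flags of a VMA already linked into the mm. */
static inline void vm_flags_reset(struct vm_area_struct *vma,
                                  vm_flags_t flags)
{
        mmap_assert_write_locked(vma->vm_mm);
        vm_flags_init(vma, flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
                                vm_flags_t flags)
{
        mmap_assert_write_locked(vma->vm_mm);
        vma->vm_flags |= flags;                 /* simplified */
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
                                  vm_flags_t flags)
{
        mmap_assert_write_locked(vma->vm_mm);
        vma->vm_flags &= ~flags;                /* simplified */
}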
 
                vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
-               vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+               vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                mmap_write_lock(current->mm);
                if (insert_vm_struct(current->mm, vma)) {
                        vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-                       vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
-                                       VM_DONTEXPAND | VM_DONTDUMP;
+                       vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO |
+                                     VM_DONTEXPAND | VM_DONTDUMP);
                        mmap_write_lock(current->mm);
                        if (insert_vm_struct(current->mm, vma)) {
                                mmap_write_unlock(current->mm);
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
        gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
 
        return 0;
 
        struct vm_area_struct vma;
 
        vma.vm_mm = tlb->mm;
-       vma.vm_flags = 0;
+       vm_flags_init(&vma, 0);
        if (tlb->fullmm) {
                flush_tlb_mm(tlb->mm);
                return;
 
                return -EINVAL;
        }
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        /*
 
         * VM_NOHUGEPAGE and split them.
         */
        for_each_vma_range(vmi, vma, addr + len) {
-               vma->vm_flags |= VM_NOHUGEPAGE;
+               vm_flags_set(vma, VM_NOHUGEPAGE);
                walk_page_vma(vma, &subpage_walk_ops, NULL);
        }
 }
 
        pfn = paste_addr >> PAGE_SHIFT;
 
        /* flags, page_prot from cxl_mmap(), except we want cacheable */
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
 
        prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
 
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mem_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_cntl_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal1_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_signal2_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mss_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_psmap_mmap_vmops;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        vma->vm_ops = &spufs_mfc_mmap_vmops;
 
        VMA_ITERATOR(vmi, mm, 0);
 
        for_each_vma(vmi, vma) {
-               vma->vm_flags &= ~VM_HUGEPAGE;
-               vma->vm_flags |= VM_NOHUGEPAGE;
+               vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
                walk_page_vma(vma, &thp_split_walk_ops, NULL);
        }
        mm->def_flags |= VM_NOHUGEPAGE;
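
The s390 hunk above is the first user of vm_flags_mod(), which folds a set
and a clear of different bits into one update under the same lock assertion.
A sketch, simplified in the same way as above:

static inline void vm_flags_mod(struct vm_area_struct *vma,
                                vm_flags_t set, vm_flags_t clear)
{
        mmap_assert_write_locked(vma->vm_mm);
        vma->vm_flags = (vma->vm_flags | set) & ~clear; /* simplified */
}

so the old two-statement form

        vma->vm_flags &= ~VM_HUGEPAGE;
        vma->vm_flags |= VM_NOHUGEPAGE;

becomes vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE).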
 
        }
 
        if (vsyscall_mode == XONLY)
-               gate_vma.vm_flags = VM_EXEC;
+               vm_flags_init(&gate_vma, VM_EXEC);
 
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
 
                return ret;
 
        vma->vm_ops = &sgx_vm_ops;
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
        vma->vm_private_data = encl;
 
        return 0;
 
 
        vma->vm_ops = &sgx_vepc_vm_ops;
        /* Don't copy VMA in fork() */
-       vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+       vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
        vma->vm_private_data = vepc;
 
        return 0;
 
 
                ret = reserve_pfn_range(paddr, size, prot, 0);
                if (ret == 0 && vma)
-                       vma->vm_flags |= VM_PAT;
+                       vm_flags_set(vma, VM_PAT);
                return ret;
        }
 
        }
        free_pfn_range(paddr, size);
        if (vma)
-               vma->vm_flags &= ~VM_PAT;
+               vm_flags_clear(vma, VM_PAT);
 }
 
 /*
  */
 void untrack_pfn_moved(struct vm_area_struct *vma)
 {
-       vma->vm_flags &= ~VM_PAT;
+       vm_flags_clear(vma, VM_PAT);
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
 
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
        gate_vma.vm_page_prot = PAGE_READONLY;
 
        return 0;
 
                return -EROFS;
 
        /* changing from read to write with mprotect is not allowed */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_clear(vma, VM_MAYWRITE);
 
        pfrt_log_dev = to_pfrt_log_dev(file);
 
 
                       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
                return -EPERM;
        }
-       vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
 
        vma->vm_ops = &binder_vm_ops;
        vma->vm_private_data = proc;
 
        refcount_set(&vdata->refcnt, 1);
        vma->vm_private_data = vdata;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        if (vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;
 
                                return -EINVAL;
                }
 
-               vma->vm_flags |= VM_IO;
+               vm_flags_set(vma, VM_IO);
 
                return remap_pfn_range(vma, vma->vm_start,
                                       phys_base >> PAGE_SHIFT,
 
                return rc;
 
        vma->vm_ops = &dax_vm_ops;
-       vma->vm_flags |= VM_HUGEPAGE;
+       vm_flags_set(vma, VM_HUGEPAGE);
        return 0;
 }
 
 
        if (rc < 0)
                return rc;
 
-       vma->vm_flags |= VM_DONTCOPY;
+       vm_flags_set(vma, VM_DONTCOPY);
        pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
                                IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         */
        if (is_cow_mapping(vma->vm_flags) &&
            !(vma->vm_flags & VM_ACCESS_FLAGS))
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
 
        return drm_gem_ttm_mmap(obj, vma);
 }
 
 
        address = dev->adev->rmmio_remap.bus_addr;
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-                               VM_DONTDUMP | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+                               VM_DONTDUMP | VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 
        address = kfd_get_process_doorbells(pdd);
        if (!address)
                return -ENOMEM;
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-                               VM_DONTDUMP | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+                               VM_DONTDUMP | VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 
        pfn = __pa(page->kernel_address);
        pfn >>= PAGE_SHIFT;
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
-                      | VM_DONTDUMP | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+                      | VM_DONTDUMP | VM_PFNMAP);
 
        pr_debug("Mapping signal page\n");
        pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
 
                return -ENOMEM;
        }
 
-       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
-               | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+               | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
 
                        goto err_drm_gem_object_put;
                }
 
-               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+               vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }
 
         * the whole buffer.
         */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
 
        if (dma_obj->map_noncoherent) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        if (ret)
                return ret;
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        if (shmem->map_wc)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 
        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
-               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+               vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
 
        vma->vm_ops = &drm_vm_dma_ops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        drm_vm_open_locked(dev, vma);
        return 0;
                return -EINVAL;
 
        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
-               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+               vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
        default:
                return -EINVAL; /* This should never happen. */
        }
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        drm_vm_open_locked(dev, vma);
        return 0;
 
 {
        pgprot_t vm_page_prot;
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 
        vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 
        unsigned long vm_size;
        int ret;
 
-       vma->vm_flags &= ~VM_PFNMAP;
+       vm_flags_clear(vma, VM_PFNMAP);
        vma->vm_pgoff = 0;
 
        vm_size = vma->vm_end - vma->vm_start;
        if (obj->import_attach)
                return dma_buf_mmap(obj->dma_buf, vma, 0);
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 
        DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
                          exynos_gem->flags);
 
         */
        vma->vm_ops = &psbfb_vm_ops;
        vma->vm_private_data = (void *)fb;
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 
 
        buf = dev_priv->mmap_buffer;
        buf_priv = buf->dev_private;
 
-       vma->vm_flags |= VM_DONTCOPY;
+       vm_flags_set(vma, VM_DONTCOPY);
 
        buf_priv->currently_mapped = I810_BUF_MAPPED;
 
 
                        i915_gem_object_put(obj);
                        return -EINVAL;
                }
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        }
 
        anon = mmap_singleton(to_i915(dev));
                return PTR_ERR(anon);
        }
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
 
        /*
         * We keep the ref on mmo->obj, not vm_file, but we require
 
         * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
 
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
 
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP;
+       vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
 
        if (omap_obj->flags & OMAP_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_flags &= ~VM_PFNMAP;
+       vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
 
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
                 * and set the vm_pgoff (used as a fake buffer offset by DRM)
                 * to 0 as we want to map the whole buffer.
                 */
-               vma->vm_flags &= ~VM_PFNMAP;
+               vm_flags_clear(vma, VM_PFNMAP);
                vma->vm_pgoff = 0;
 
                err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);
 
-               vma->vm_flags |= VM_MIXEDMAP;
-               vma->vm_flags &= ~VM_PFNMAP;
+               vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
 
                vma->vm_page_prot = pgprot_writecombine(prot);
        }
 
 
        vma->vm_private_data = bo;
 
-       vma->vm_flags |= VM_PFNMAP;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_mmap_obj);
 
                return -EINVAL;
 
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        vma->vm_ops = &virtio_gpu_vram_vm_ops;
 
 
        /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
        if (!is_cow_mapping(vma->vm_flags))
-               vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+               vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
 
        ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
 
 
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
        vma->vm_pgoff = 0;
 
        /*
 
        if (vma_pages(vma) != 1)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
        vma->vm_ops = &cs_char_vm_ops;
        vma->vm_private_data = file->private_data;
 
 
                atomic_dec(&msc->user_count);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
        vma->vm_ops = &msc_mmap_ops;
        return ret;
 }
 
        pm_runtime_get_sync(&stm->dev);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &stm_mmap_vmops;
        vm_iomap_memory(vma, phys, size);
 
 
                        ret = -EPERM;
                        goto done;
                }
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
                addr = vma->vm_start;
                for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
                        memlen = uctxt->egrbufs.buffers[i].len;
                goto done;
        }
 
-       vma->vm_flags = flags;
+       vm_flags_reset(vma, flags);
        hfi1_cdbg(PROC,
                  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
                    ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
 
 
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_clear(vma, VM_MAYWRITE);
 
        if (!dev->mdev->clock_info)
                return -EOPNOTSUPP;
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
 
                /* Don't expose to user-space information it shouldn't have */
                if (PAGE_SIZE > 4096)
 
                }
 
                /* don't allow them to later change with mprotect */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        }
 
        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
                phys = dd->physaddr + ureg;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-               vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+               vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         phys >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
         * don't allow them to later change to readable with mprotect (for when
         * not initially mapped readable, as is normally the case)
         */
-       vma->vm_flags &= ~VM_MAYREAD;
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+       vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
 
        /* We used PAT if wc_cookie == 0 */
        if (!dd->wc_cookie)
                goto bail;
        }
        /* don't allow them to later change to writable with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_clear(vma, VM_MAYWRITE);
 
        start = vma->vm_start;
 
                 * Don't allow permission to later change to writable
                 * with mprotect.
                 */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        } else
                goto bail;
        len = vma->vm_end - vma->vm_start;
 
        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &qib_file_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        ret = 1;
 
 bail:
 
        usnic_dbg("\n");
 
        us_ibdev = to_usdev(context->device);
-       vma->vm_flags |= VM_IO;
+       vm_flags_set(vma, VM_IO);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vfid = vma->vm_pgoff;
        usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
 
        }
 
        /* Map UAR to kernel space, VM_LOCKED? */
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
                               vma->vm_page_prot))
 
                return ret;
        }
 
-       vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;
 
 
        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
-       vma->vm_flags           |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
 
        /*
         * Use common vm_area operations to track buffer refcount.
 
        }
 
        vma->vm_ops = &videobuf_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
        vma->vm_private_data = map;
 
        dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
 
        map->count    = 1;
        map->q        = q;
        vma->vm_ops   = &videobuf_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+       /* using shared anonymous pages */
+       vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
        vma->vm_private_data = map;
        dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
                map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
 
        }
 
        vma->vm_ops          = &videobuf_vm_ops;
-       vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data = map;
 
        dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
 
        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe , ctx->master);
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &cxl_mmap_vmops;
        return 0;
 
 {
        struct hl_ts_buff *ts_buff = buf->private;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
        return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
 }
 
 
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
                                (dma_addr - HOST_PHYS_BASE), size);
 
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
 #ifdef _HAS_DMA_MMAP_COHERENT
 
 
        address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
                        block_size, vma->vm_page_prot);
 
 {
        int rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-                       VM_DONTCOPY | VM_NORESERVE;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+                       VM_DONTCOPY | VM_NORESERVE);
 
        rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
                                (dma_addr - HOST_PHYS_BASE), size);
 
        if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
                !(vma->vm_flags & VM_WRITE))
                return -EINVAL;
-       vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+       vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
        return 0;
 }
 
        if (rc)
                return rc;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
 
                (afu->config.global_mmio_size >> PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &global_mmio_vmops;
        vma->vm_private_data = afu;
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
                /* Ensure userspace cannot acquire VM_WRITE later. */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        }
 
        /* Create write-combine mapping so all clients observe a wipe. */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP);
        return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
 }
 
 
                                vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
-                        VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+                        VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = PAGE_SHARED;
        vma->vm_ops = &gru_vm_ops;
 
 
        if (!qfr)
                return -ENOMEM;
 
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+       vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
        vma->vm_ops = &uacce_vm_ops;
        vma->vm_private_data = q;
        qfr->type = type;
 
        /* completion area is mapped read-only for user */
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_clear(vma, VM_MAYWRITE);
 
        if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
                            len, vma->vm_page_prot))
 
            (ctx->psn_size >> PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxlflash_vmops;
        return 0;
 
        }
 
        sfp->mmap_called = 1;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
 out:
 
        vma->vm_private_data = bo;
 
        vma->vm_ops = &hmm_bo_vm_ops;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 
        /*
         * call hmm_bo_vm_open explicitly.
 
        }
 
        vma->vm_ops = &meye_vm_ops;
-       vma->vm_flags &= ~VM_IO;        /* not I/O memory */
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       /* not I/O memory */
+       vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
        vma->vm_private_data = (void *) (offset / gbufsize);
        meye_vm_open(vma);
 
 
        ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
        if (ret)
                return ret;
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
        vma->vm_private_data = sbuf;
        vma->vm_ops = &stk_v4l_vm_ops;
        sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
 
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &tcmu_vm_ops;
 
        vma->vm_private_data = udev;
 
 
 static int uio_mmap_logical(struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &uio_logical_vm_ops;
        return 0;
 }
 
                }
        }
 
-       vma->vm_flags |= VM_IO;
-       vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &usbdev_vm_ops;
        vma->vm_private_data = usbm;
 
 
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
 
-       vma->vm_flags &= ~VM_MAYWRITE;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
        vma->vm_private_data = filp->private_data;
        mon_bin_vma_open(vma);
        return 0;
 
 {
        struct vduse_iova_domain *domain = file->private_data;
 
-       vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
        vma->vm_private_data = domain;
        vma->vm_ops = &vduse_domain_mmap_ops;
 
 
         * See remap_pfn_range(), called from vfio_pci_fault() but we can't
         * change vm_flags within the fault handler.  Set them now.
         */
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &vfio_pci_mmap_ops;
 
        return 0;
 
        if (vma->vm_end - vma->vm_start != notify.size)
                return -ENOTSUPP;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &vhost_vdpa_vm_ops;
        return 0;
 }
 
 #ifndef MMU
        /* this is uClinux (no MMU) specific code */
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_start = videomemory;
 
        return 0;
 
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        if (!(info->flags & FBINFO_VIRTFB))
-               vma->vm_flags |= VM_IO;
+               vm_flags_set(vma, VM_IO);
        vma->vm_private_data = info;
        return 0;
 }
 
 
        vma->vm_private_data = vm_priv;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        vma->vm_ops = &gntalloc_vmops;
 
 
 
        vma->vm_ops = &gntdev_vmops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
 
        if (use_ptemod)
-               vma->vm_flags |= VM_DONTCOPY;
+               vm_flags_set(vma, VM_DONTCOPY);
 
        vma->vm_private_data = map;
        if (map->flags) {
 
        vma_priv->file_priv = file_priv;
        vma_priv->users = 1;
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_IO | VM_DONTEXPAND);
        vma->vm_ops = &privcmd_buf_vm_ops;
        vma->vm_private_data = vma_priv;
 
 
 {
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
-                        VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+                        VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;
 
 
 
 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
        vma->vm_ops = &aio_ring_vm_ops;
        return 0;
 }
 
                 * unpopulated ptes via cramfs_read_folio().
                 */
                int i;
-               vma->vm_flags |= VM_MIXEDMAP;
+               vm_flags_set(vma, VM_MIXEDMAP);
                for (i = 0; i < pages && !ret; i++) {
                        vm_fault_t vmf;
                        unsigned long off = i * PAGE_SIZE;
 
                return -EINVAL;
 
        vma->vm_ops = &erofs_dax_vm_ops;
-       vma->vm_flags |= VM_HUGEPAGE;
+       vm_flags_set(vma, VM_HUGEPAGE);
        return 0;
 }
 #else
 
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
-       vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+       vm_flags_init(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        err = insert_vm_struct(mm, vma);
        }
 
        /* mprotect_fixup is overkill to remove the temporary stack flags */
-       vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+       vm_flags_clear(vma, VM_STACK_INCOMPLETE_SETUP);
 
        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
 
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
-               vma->vm_flags |= VM_HUGEPAGE;
+               vm_flags_set(vma, VM_HUGEPAGE);
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
 
 {
        file_accessed(file);
        vma->vm_ops = &fuse_dax_vm_ops;
-       vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+       vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
        return 0;
 }
 
 
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
-       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+       vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
        vma->vm_ops = &hugetlb_vm_ops;
 
        ret = seal_check_future_write(info->seals, vma);
         * as input to create an allocation policy.
         */
        vma_init(&pseudo_vma, mm);
-       pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+       vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;
 
        for (index = start; index < end; index++) {
 
                     "orangefs_file_mmap: called on %pD\n", file);
 
        /* set the sequential readahead hint */
-       vma->vm_flags |= VM_SEQ_READ;
-       vma->vm_flags &= ~VM_RAND_READ;
+       vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ);
 
        file_accessed(file);
        vma->vm_ops = &orangefs_file_vm_ops;
 
                        for_each_vma(vmi, vma) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               vma->vm_flags &= ~VM_SOFTDIRTY;
+                               vm_flags_clear(vma, VM_SOFTDIRTY);
                                vma_set_page_prot(vma);
                        }
 
 
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
 
-       vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
-       vma->vm_flags |= VM_MIXEDMAP;
+       vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_ops = &vmcore_mmap_ops;
 
        len = 0;
 
 {
        const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
 
-       vma->vm_flags = flags;
+       vm_flags_reset(vma, flags);
        /*
         * For shared mappings, we want to enable writenotify while
         * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
 
        file_accessed(file);
        vma->vm_ops = &xfs_file_vm_ops;
        if (IS_DAX(inode))
-               vma->vm_flags |= VM_HUGEPAGE;
+               vm_flags_set(vma, VM_HUGEPAGE);
        return 0;
 }
 
 
                 * VM_MAYWRITE as we still want them to be COW-writable.
                 */
                if (vma->vm_flags & VM_SHARED)
-                       vma->vm_flags &= ~(VM_MAYWRITE);
+                       vm_flags_clear(vma, VM_MAYWRITE);
        }
 
        return 0;
 
                if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EPERM;
        } else {
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb,
                         */
                        return -EPERM;
        } else {
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
 
        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
-       vma->vm_flags &= ~VM_MAYEXEC;
+       vm_flags_clear(vma, VM_MAYEXEC);
        if (!(vma->vm_flags & VM_WRITE))
                /* disallow re-mapping with PROT_WRITE */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
 
        err = map->ops->map_mmap(map, vma);
        if (err)
 
         * Since pinned accounting is per vm we cannot allow fork() to copy our
         * vma.
         */
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &perf_mmap_vmops;
 
        if (event->pmu->event_mapped)
 
                goto exit;
        }
        spin_unlock_irqrestore(&kcov->lock, flags);
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
        for (off = 0; off < size; off += PAGE_SIZE) {
                page = vmalloc_to_page(kcov->area + off);
                res = vm_insert_page(vma, vma->vm_start + off, page);
 
                return -EINVAL;
 
        vma->vm_ops = &relay_file_mmap_ops;
-       vma->vm_flags |= VM_DONTEXPAND;
+       vm_flags_set(vma, VM_DONTEXPAND);
        vma->vm_private_data = buf;
 
        return 0;
 
        /*
         * vm_flags is protected by the mmap_lock held in write mode.
         */
-       vma->vm_flags = new_flags;
+       vm_flags_reset(vma, new_flags);
        if (!vma->vm_file || vma_is_anon_shmem(vma)) {
                error = replace_anon_vma_name(vma, anon_name);
                if (error)
 
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               vm_flags_set(vma, VM_MIXEDMAP);
        }
        /* Defer page refcount checking till we're about to map that page. */
        return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               vm_flags_set(vma, VM_MIXEDMAP);
        }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
                vma->vm_pgoff = pfn;
        }
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
 
         */
        if (newflags & VM_LOCKED)
                newflags |= VM_IO;
-       WRITE_ONCE(vma->vm_flags, newflags);
+       vm_flags_reset_once(vma, newflags);
 
        lru_add_drain();
        walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
 
        if (newflags & VM_IO) {
                newflags &= ~VM_IO;
-               WRITE_ONCE(vma->vm_flags, newflags);
+       vm_flags_reset_once(vma, newflags);
        }
 }
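
The two mlock_vma_pages_range() stores above are the only sites in this
section that replace a WRITE_ONCE() rather than a plain assignment: the
flag word is read locklessly while the temporary VM_IO bit is set, which is
why the original code used WRITE_ONCE() in the first place. A plain
vm_flags_reset() would silently drop that guarantee, so these two sites use
vm_flags_reset_once(), which keeps the WRITE_ONCE() store. A sketch,
simplified as above:

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
                                       vm_flags_t flags)
{
        mmap_assert_write_locked(vma->vm_mm);
        WRITE_ONCE(vma->vm_flags, flags);       /* simplified */
}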
 
 
        if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
                /* No work to do, and mlocking twice would be wrong */
-               vma->vm_flags = newflags;
+               vm_flags_reset(vma, newflags);
        } else {
                mlock_vma_pages_range(vma, start, end, newflags);
        }
 
        vma_iter_set(&vmi, addr);
        vma->vm_start = addr;
        vma->vm_end = end;
-       vma->vm_flags = vm_flags;
+       vm_flags_init(vma, vm_flags);
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
 
         * then new mapped in-place (which must be aimed as
         * a completely new data area).
         */
-       vma->vm_flags |= VM_SOFTDIRTY;
+       vm_flags_set(vma, VM_SOFTDIRTY);
 
        vma_set_page_prot(vma);
 
                init_vma_prep(&vp, vma);
                vma_prepare(&vp);
                vma->vm_end = addr + len;
-               vma->vm_flags |= VM_SOFTDIRTY;
+               vm_flags_set(vma, VM_SOFTDIRTY);
                vma_iter_store(vmi, vma);
 
                vma_complete(&vp, vmi, mm);
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = addr >> PAGE_SHIFT;
-       vma->vm_flags = flags;
+       vm_flags_init(vma, flags);
        vma->vm_page_prot = vm_get_page_prot(flags);
        if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
                goto mas_store_fail;
        mm->data_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
-       vma->vm_flags |= VM_SOFTDIRTY;
+       vm_flags_set(vma, VM_SOFTDIRTY);
        validate_mm(mm);
        return 0;
 
 
         * vm_flags and vm_page_prot are protected by the mmap_lock
         * held in write mode.
         */
-       vma->vm_flags = newflags;
+       vm_flags_reset(vma, newflags);
        if (vma_wants_manual_pte_write_upgrade(vma))
                mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
        vma_set_page_prot(vma);
 
 
        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
-               vma->vm_flags &= ~VM_ACCOUNT;
+               vm_flags_clear(vma, VM_ACCOUNT);
                if (vma->vm_start < old_addr)
                        account_start = vma->vm_start;
                if (vma->vm_end > old_addr + old_len)
        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (account_start) {
                vma = vma_prev(&vmi);
-               vma->vm_flags |= VM_ACCOUNT;
+               vm_flags_set(vma, VM_ACCOUNT);
        }
 
        if (account_end) {
                vma = vma_next(&vmi);
-               vma->vm_flags |= VM_ACCOUNT;
+               vm_flags_set(vma, VM_ACCOUNT);
        }
 
        return new_addr;
 
                mmap_write_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
-                       vma->vm_flags |= VM_USERMAP;
+                       vm_flags_set(vma, VM_USERMAP);
                mmap_write_unlock(current->mm);
        }
 
 
        atomic_long_add(total, &mmap_pages_allocated);
 
-       region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+       vm_flags_set(vma, VM_MAPPED_COPY);
+       region->vm_flags = vma->vm_flags;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
 
-       vma->vm_flags = vm_flags;
+       vm_flags_init(vma, vm_flags);
        vma->vm_pgoff = pgoff;
 
        if (file) {
                        vma->vm_end = start + len;
 
                        if (pregion->vm_flags & VM_MAPPED_COPY)
-                               vma->vm_flags |= VM_MAPPED_COPY;
+                               vm_flags_set(vma, VM_MAPPED_COPY);
                        else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
        if (addr != (pfn << PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
        if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
                return -EAGAIN;
 
-       vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
+       vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP);
        vma->vm_ops = &secretmem_vm_ops;
 
        return 0;
 
                return ret;
 
        /* arm64 - allow memory tagging on RAM-based files */
-       vma->vm_flags |= VM_MTE_ALLOWED;
+       vm_flags_set(vma, VM_MTE_ALLOWED);
 
        file_accessed(file);
        /* This is anonymous shared memory if it is unlinked at the time of mmap */
 
                size -= PAGE_SIZE;
        } while (size > 0);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 
        return 0;
 }
 
 {
        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
-       vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+       vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
 
        /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
-       vma->vm_flags |= VM_MIXEDMAP;
+       vm_flags_set(vma, VM_MIXEDMAP);
 
        vma->vm_ops = &tcp_vm_ops;
        return 0;
 
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
        /* disallow mprotect() from making it writable */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vm_flags_clear(vma, VM_MAYWRITE);
 
        return remap_pfn_range(vma, vma->vm_start,
                               page_to_pfn(status),
 {
        if (vma->vm_flags & VM_SHARED) {
                /* do not allow mprotect to make mapping writable */
-               vma->vm_flags &= ~VM_MAYWRITE;
+               vm_flags_clear(vma, VM_MAYWRITE);
 
                if (vma->vm_flags & VM_WRITE)
                        return -EACCES;
        }
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &sel_mmap_policy_ops;
 
        return 0;
 
        }
        /* set VM_READ access as well to fix memset() routines that do
           reads before writes (to improve performance) */
-       area->vm_flags |= VM_READ;
+       vm_flags_set(area, VM_READ);
        if (substream == NULL)
                return -ENXIO;
        runtime = substream->runtime;
 
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_status;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       area->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+       vm_flags_mod(area, VM_DONTEXPAND | VM_DONTDUMP,
+                    VM_WRITE | VM_MAYWRITE);
+
        return 0;
 }
 
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_control;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 
 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                             struct vm_area_struct *area)
 {
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
        if (!substream->ops->page &&
            !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
                return 0;
 
                        struct snd_pcm_substream *substream,
                        struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
                substream->dma_buffer.addr >> PAGE_SHIFT,
 
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_DONTDUMP;
+       vm_flags_set(area, VM_DONTDUMP);
        if (!read)
-               area->vm_flags |= VM_DONTEXPAND;
+               vm_flags_set(area, VM_DONTEXPAND);
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
 
        }
 
        area->vm_ops = &us428ctls_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
        area->vm_private_data = hw->private_data;
        return 0;
 }
 
                return -ENODEV;
 
        area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
        area->vm_private_data = hw->private_data;
        return 0;
 }