unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks);
+
+/*
+ * Flags used by change_protection().  It is defined as a bitmap so that
+ * multiple flags can be passed in together like parameters; for now,
+ * however, all callers use only one of the flags at a time.
+ */
+/* Whether we should allow dirty bit accounting */
+#define  MM_CP_DIRTY_ACCT                  (1UL << 0)
+/* Whether this protection change is for NUMA hints */
+#define  MM_CP_PROT_NUMA                   (1UL << 1)
+
 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end, pgprot_t newprot,
-                             int dirty_accountable, int prot_numa);
+                             unsigned long cp_flags);
 extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
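
A quick illustration of the new calling convention introduced by the flags above (a sketch only, not part of the patch; combining both flags in one call is hypothetical, since the current callers each pass a single flag):

        /* e.g. request dirty accounting and NUMA hinting in one pass */
        change_protection(vma, start, end, newprot,
                          MM_CP_DIRTY_ACCT | MM_CP_PROT_NUMA);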
 
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa)
+               unsigned long cp_flags)
 {
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
        int target_node = NUMA_NO_NODE;
+       bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
+       bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 
        /*
         * Can be called with only the mmap_sem for reading by
 
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        pmd_t *pmd;
        unsigned long next;
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
-                                               newprot, prot_numa);
+                                                             newprot, cp_flags);
 
                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                             cp_flags);
                pages += this_pages;
 next:
                cond_resched();
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                p4d_t *p4d, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        pud_t *pud;
        unsigned long next;
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (pud++, addr = next, addr != end);
 
        return pages;
 
 static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        p4d_t *p4d;
        unsigned long next;
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                pages += change_pud_range(vma, p4d, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (p4d++, addr = next, addr != end);
 
        return pages;
 
 static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa)
+               unsigned long cp_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_p4d_range(vma, pgd, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (pgd++, addr = next, addr != end);
 
        /* Only flush the TLB if we actually modified any entries: */
 
 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
-                      int dirty_accountable, int prot_numa)
+                      unsigned long cp_flags)
 {
        unsigned long pages;
 
        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
-               pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+               pages = change_protection_range(vma, start, end, newprot,
+                                               cp_flags);
 
        return pages;
 }
        vma_set_page_prot(vma);
 
        change_protection(vma, start, end, vma->vm_page_prot,
-                         dirty_accountable, 0);
+                         dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
 
        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major