KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
                KSWAPD_SKIP_CONGESTION_WAIT,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_NUMA_BALANCING
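+               /* Activity counters for automatic NUMA balancing */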
+               NUMA_PTE_UPDATES,
+               NUMA_HINT_FAULTS,
+               NUMA_HINT_FAULTS_LOCAL,
+               NUMA_PAGE_MIGRATE,
+#endif
 #ifdef CONFIG_MIGRATION
                PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
 #endif
 
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
+#ifdef CONFIG_NUMA_BALANCING
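+/*
+ * NUMA balancing event counters; they compile away to no-ops when
+ * CONFIG_NUMA_BALANCING is not set.
+ */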
+#define count_vm_numa_event(x)     count_vm_event(x)
+#define count_vm_numa_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_numa_event(x) do {} while (0)
+#define count_vm_numa_events(x, y) do {} while (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
 #define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
 
        struct page *page = NULL;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        int target_nid;
+       int current_nid = -1;
 
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
        page = pmd_page(pmd);
        get_page(page);
        spin_unlock(&mm->page_table_lock);
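+       /* Count the hinting fault, and whether the page is already local */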
+       current_nid = page_to_nid(page);
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
        target_nid = mpol_misplaced(page, vma, haddr);
        if (target_nid == -1)
 
        set_pte_at(mm, addr, ptep, pte);
        update_mmu_cache(vma, addr, ptep);
 
+       count_vm_numa_event(NUMA_HINT_FAULTS);
        page = vm_normal_page(vma, addr, pte);
        if (!page) {
                pte_unmap_unlock(ptep, ptl);
 
        get_page(page);
        current_nid = page_to_nid(page);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
        target_nid = mpol_misplaced(page, vma, addr);
        pte_unmap_unlock(ptep, ptl);
        if (target_nid == -1) {
        unsigned long offset;
        spinlock_t *ptl;
        bool numa = false;
+       int local_nid = numa_node_id();
+       unsigned long nr_faults = 0;
+       unsigned long nr_faults_local = 0;
 
        spin_lock(&mm->page_table_lock);
        pmd = *pmdp;
                curr_nid = page_to_nid(page);
                task_numa_fault(curr_nid, 1);
 
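+               /* Batch the counters; they are flushed once after the scan */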
+               nr_faults++;
+               if (curr_nid == local_nid)
+                       nr_faults_local++;
+
                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        }
        pte_unmap_unlock(orig_pte, ptl);
 
+       count_vm_numa_events(NUMA_HINT_FAULTS, nr_faults);
+       count_vm_numa_events(NUMA_HINT_FAULTS_LOCAL, nr_faults_local);
        return 0;
 }
 #else
 
        BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
 
        nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
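+       /* Account the PTEs that were marked for NUMA hinting faults */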
+       if (nr_updated)
+               count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 
        return nr_updated;
 }
 
                if (nr_remaining) {
                        putback_lru_pages(&migratepages);
                        isolated = 0;
-               }
+               } else {
+                       count_vm_numa_event(NUMA_PAGE_MIGRATE);
+               }
        }
        BUG_ON(!list_empty(&migratepages));
 out:
 
 
        "pgrotated",
 
+#ifdef CONFIG_NUMA_BALANCING
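+       /* Automatic NUMA balancing counters, exported via /proc/vmstat */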
+       "numa_pte_updates",
+       "numa_hint_faults",
+       "numa_hint_faults_local",
+       "numa_pages_migrated",
+#endif
 #ifdef CONFIG_MIGRATION
        "pgmigrate_success",
        "pgmigrate_fail",