* sets it, so none of the operations on it need to be atomic.
  */
 
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NIDPID] | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 #define SECTIONS_PGOFF         ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 #define NODES_PGOFF            (SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF            (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_NIDPID_PGOFF      (ZONES_PGOFF - LAST_NIDPID_WIDTH)
+#define LAST_CPUPID_PGOFF      (ZONES_PGOFF - LAST_CPUPID_WIDTH)
 
 /*
  * Define the bit shifts to access each section.  For non-existent
 #define SECTIONS_PGSHIFT       (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 #define NODES_PGSHIFT          (NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT          (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_NIDPID_PGSHIFT    (LAST_NIDPID_PGOFF * (LAST_NIDPID_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT    (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
 #define ZONES_MASK             ((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK             ((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK          ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_NIDPID_MASK       ((1UL << LAST_NIDPID_WIDTH) - 1)
+#define LAST_CPUPID_MASK       ((1UL << LAST_CPUPID_WIDTH) - 1)
 #define ZONEID_MASK            ((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
-static inline int nid_pid_to_nidpid(int nid, int pid)
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
 {
-       return ((nid & LAST__NID_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
+       return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 }
 
-static inline int nidpid_to_pid(int nidpid)
+static inline int cpupid_to_pid(int cpupid)
 {
-       return nidpid & LAST__PID_MASK;
+       return cpupid & LAST__PID_MASK;
 }
 
-static inline int nidpid_to_nid(int nidpid)
+static inline int cpupid_to_cpu(int cpupid)
 {
-       return (nidpid >> LAST__PID_SHIFT) & LAST__NID_MASK;
+       return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 }
 
-static inline bool nidpid_pid_unset(int nidpid)
+static inline int cpupid_to_nid(int cpupid)
 {
-       return nidpid_to_pid(nidpid) == (-1 & LAST__PID_MASK);
+       return cpu_to_node(cpupid_to_cpu(cpupid));
 }
 
-static inline bool nidpid_nid_unset(int nidpid)
+static inline bool cpupid_pid_unset(int cpupid)
 {
-       return nidpid_to_nid(nidpid) == (-1 & LAST__NID_MASK);
+       return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 }
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-static inline int page_nidpid_xchg_last(struct page *page, int nid)
+static inline bool cpupid_cpu_unset(int cpupid)
 {
-       return xchg(&page->_last_nidpid, nid);
+       return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 }
 
-static inline int page_nidpid_last(struct page *page)
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-       return page->_last_nidpid;
+       return xchg(&page->_last_cpupid, cpupid);
 }
-static inline void page_nidpid_reset_last(struct page *page)
+
+static inline int page_cpupid_last(struct page *page)
+{
+       return page->_last_cpupid;
+}
+static inline void page_cpupid_reset_last(struct page *page)
 {
-       page->_last_nidpid = -1;
+       page->_last_cpupid = -1;
 }
 #else
-static inline int page_nidpid_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
 {
-       return (page->flags >> LAST_NIDPID_PGSHIFT) & LAST_NIDPID_MASK;
+       return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 }
 
-extern int page_nidpid_xchg_last(struct page *page, int nidpid);
+extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 
-static inline void page_nidpid_reset_last(struct page *page)
+static inline void page_cpupid_reset_last(struct page *page)
 {
-       int nidpid = (1 << LAST_NIDPID_SHIFT) - 1;
+       int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
 
-       page->flags &= ~(LAST_NIDPID_MASK << LAST_NIDPID_PGSHIFT);
-       page->flags |= (nidpid & LAST_NIDPID_MASK) << LAST_NIDPID_PGSHIFT;
+       page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+       page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 }
-#endif /* LAST_NIDPID_NOT_IN_PAGE_FLAGS */
-#else
-static inline int page_nidpid_xchg_last(struct page *page, int nidpid)
+#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+#else /* !CONFIG_NUMA_BALANCING */
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-       return page_to_nid(page);
+       return page_to_nid(page); /* XXX */
 }
 
-static inline int page_nidpid_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
 {
-       return page_to_nid(page);
+       return page_to_nid(page); /* XXX */
 }
 
-static inline int nidpid_to_nid(int nidpid)
+static inline int cpupid_to_nid(int cpupid)
 {
        return -1;
 }
 
-static inline int nidpid_to_pid(int nidpid)
+static inline int cpupid_to_pid(int cpupid)
 {
        return -1;
 }
 
-static inline int nid_pid_to_nidpid(int nid, int pid)
+static inline int cpupid_to_cpu(int cpupid)
 {
        return -1;
 }
 
-static inline bool nidpid_pid_unset(int nidpid)
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
+{
+       return -1;
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
 {
        return 1;
 }
 
-static inline void page_nidpid_reset_last(struct page *page)
+static inline void page_cpupid_reset_last(struct page *page)
 {
 }
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
 
 static inline struct zone *page_zone(const struct page *page)
 {
 
        void *shadow;
 #endif
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-       int _last_nidpid;
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+       int _last_cpupid;
 #endif
 }
 /*
 
  * lookup is necessary.
  *
  * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE |             ... | FLAGS |
- *      " plus space for last_nidpid: |       NODE     | ZONE | LAST_NIDPID ... | FLAGS |
+ *      " plus space for last_cpupid: |       NODE     | ZONE | LAST_CPUPID ... | FLAGS |
  * classic sparse with space for node:| SECTION | NODE | ZONE |             ... | FLAGS |
- *      " plus space for last_nidpid: | SECTION | NODE | ZONE | LAST_NIDPID ... | FLAGS |
+ *      " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
  * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
  */
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define LAST__PID_SHIFT 8
 #define LAST__PID_MASK  ((1 << LAST__PID_SHIFT)-1)
 
-#define LAST__NID_SHIFT NODES_SHIFT
-#define LAST__NID_MASK  ((1 << LAST__NID_SHIFT)-1)
+#define LAST__CPU_SHIFT NR_CPUS_BITS
+#define LAST__CPU_MASK  ((1 << LAST__CPU_SHIFT)-1)
 
-#define LAST_NIDPID_SHIFT (LAST__PID_SHIFT+LAST__NID_SHIFT)
+#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
 #else
-#define LAST_NIDPID_SHIFT 0
+#define LAST_CPUPID_SHIFT 0
 #endif
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NIDPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define LAST_NIDPID_WIDTH LAST_NIDPID_SHIFT
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
-#define LAST_NIDPID_WIDTH 0
+#define LAST_CPUPID_WIDTH 0
 #endif
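
To see when the field fits, here is a standalone sketch with illustrative numbers only (SECTIONS_WIDTH 0 under sparsemem vmemmap, ZONES_WIDTH 2, NODES_SHIFT 6, NR_CPUS_BITS 9 for a 512-CPU config, and roughly 22 page flags; the real values depend on the kernel configuration):

#include <stdio.h>

int main(void)
{
        int sections_width = 0, zones_width = 2, nodes_shift = 6;
        int last_cpupid_shift = 8 + 9;          /* LAST__PID_SHIFT + NR_CPUS_BITS */
        int bits_per_long = 64, nr_pageflags = 22;
        int need = sections_width + zones_width + nodes_shift + last_cpupid_shift;
        int have = bits_per_long - nr_pageflags;

        printf("need %d bits, have %d: %s\n", need, have,
               need <= have ? "last_cpupid kept in page->flags"
                            : "LAST_CPUPID_NOT_IN_PAGE_FLAGS, struct page grows");
        return 0;
}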
 
 /*
 #define NODE_NOT_IN_PAGE_FLAGS
 #endif
 
-#if defined(CONFIG_NUMA_BALANCING) && LAST_NIDPID_WIDTH == 0
-#define LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
 
 #include <linux/mmzone.h>
 #include <linux/kbuild.h>
 #include <linux/page_cgroup.h>
+#include <linux/log2.h>
 
 void foo(void)
 {
        DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
        DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
        DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS);
+#ifdef CONFIG_SMP
+       DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
+#endif
        /* End of constants */
 }
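
ilog2() evaluates to floor(log2(n)) for a constant argument; a quick standalone model of what the DEFINE above produces for power-of-two CONFIG_NR_CPUS values:

#include <assert.h>

static int ilog2_model(unsigned long n)         /* floor(log2(n)), n > 0 */
{
        int bits = -1;

        while (n) {
                n >>= 1;
                bits++;
        }
        return bits;
}

int main(void)
{
        assert(ilog2_model(64) == 6);   /* CONFIG_NR_CPUS=64  -> 6-bit cpu field */
        assert(ilog2_model(256) == 8);  /* CONFIG_NR_CPUS=256 -> 8-bit cpu field */
        return 0;
}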
 
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
+void task_numa_fault(int last_cpupid, int node, int pages, bool migrated)
 {
        struct task_struct *p = current;
        int priv;
         * First accesses are treated as private, otherwise consider accesses
         * to be private if the accessing pid has not changed
         */
-       if (!nidpid_pid_unset(last_nidpid))
-               priv = ((p->pid & LAST__PID_MASK) == nidpid_to_pid(last_nidpid));
+       if (!cpupid_pid_unset(last_cpupid))
+               priv = ((p->pid & LAST__PID_MASK) == cpupid_to_pid(last_cpupid));
        else
                priv = 1;
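
A standalone sketch of this private/shared test (the helper name is illustrative): only the low LAST__PID_SHIFT bits of the pid are recorded, so pids that collide modulo 256 are occasionally misclassified as private, which the heuristic tolerates.

#include <assert.h>
#include <stdbool.h>

#define PID_MASK ((1 << 8) - 1)         /* assumes LAST__PID_SHIFT == 8 */

static bool fault_is_private(int current_pid, int last_pid_bits)
{
        return (current_pid & PID_MASK) == last_pid_bits;
}

int main(void)
{
        assert(fault_is_private(1000, 1000 & PID_MASK));        /* same task: private */
        assert(!fault_is_private(1000, 1001 & PID_MASK));       /* other task: shared */
        assert(fault_is_private(1000, 1256 & PID_MASK));        /* pid alias: counted private */
        return 0;
}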
 
 
        struct page *page;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        int page_nid = -1, this_nid = numa_node_id();
-       int target_nid, last_nidpid = -1;
+       int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
 
        page = pmd_page(pmd);
        BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
-       last_nidpid = page_nidpid_last(page);
+       last_cpupid = page_cpupid_last(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
        if (page_nid == this_nid)
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
                page_unlock_anon_vma_read(anon_vma);
 
        if (page_nid != -1)
-               task_numa_fault(last_nidpid, page_nid, HPAGE_PMD_NR, migrated);
+               task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated);
 
        return 0;
 }
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
-               page_nidpid_xchg_last(page_tail, page_nidpid_last(page));
+               page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
 
 
 #include "internal.h"
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nidpid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
        struct page *page = NULL;
        spinlock_t *ptl;
        int page_nid = -1;
-       int last_nidpid;
+       int last_cpupid;
        int target_nid;
        bool migrated = false;
 
        }
        BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
-       last_nidpid = page_nidpid_last(page);
+       last_cpupid = page_cpupid_last(page);
        page_nid = page_to_nid(page);
        target_nid = numa_migrate_prep(page, vma, addr, page_nid);
        pte_unmap_unlock(ptep, ptl);
 
 out:
        if (page_nid != -1)
-               task_numa_fault(last_nidpid, page_nid, 1, migrated);
+               task_numa_fault(last_cpupid, page_nid, 1, migrated);
        return 0;
 }
 
        unsigned long offset;
        spinlock_t *ptl;
        bool numa = false;
-       int last_nidpid;
+       int last_cpupid;
 
        spin_lock(&mm->page_table_lock);
        pmd = *pmdp;
                if (unlikely(!page))
                        continue;
 
-               last_nidpid = page_nidpid_last(page);
+               last_cpupid = page_cpupid_last(page);
                page_nid = page_to_nid(page);
                target_nid = numa_migrate_prep(page, vma, addr, page_nid);
                pte_unmap_unlock(pte, ptl);
                }
 
                if (page_nid != -1)
-                       task_numa_fault(last_nidpid, page_nid, 1, migrated);
+                       task_numa_fault(last_cpupid, page_nid, 1, migrated);
 
                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        }
 
        struct zone *zone;
        int curnid = page_to_nid(page);
        unsigned long pgoff;
+       int thiscpu = raw_smp_processor_id();
+       int thisnid = cpu_to_node(thiscpu);
        int polnid = -1;
        int ret = -1;
 
 
        /* Migrate the page towards the node whose CPU is referencing it */
        if (pol->flags & MPOL_F_MORON) {
-               int last_nidpid;
-               int this_nidpid;
+               int last_cpupid;
+               int this_cpupid;
 
-               polnid = numa_node_id();
-               this_nidpid = nid_pid_to_nidpid(polnid, current->pid);
+               polnid = thisnid;
+               this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
 
                /*
                 * Multi-stage node selection is used in conjunction
                 * it less likely we act on an unlikely task<->page
                 * relation.
                 */
-               last_nidpid = page_nidpid_xchg_last(page, this_nidpid);
-               if (!nidpid_pid_unset(last_nidpid) && nidpid_to_nid(last_nidpid) != polnid)
+               last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+               if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
                        goto out;
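
A standalone sketch of the two-stage filter (names are illustrative, not the kernel API): the xchg records every hinting fault, but migration toward the faulting node only proceeds when no previous fault is recorded or the previous fault came from the same node, so a single stray access does not trigger a migration.

#include <assert.h>
#include <stdbool.h>

static bool passes_two_stage_filter(bool prev_recorded, int prev_nid, int this_nid)
{
        if (prev_recorded && prev_nid != this_nid)
                return false;           /* record only; act if the next fault agrees */
        return true;
}

int main(void)
{
        assert(passes_two_stage_filter(false, -1, 1));  /* first recorded fault */
        assert(!passes_two_stage_filter(true, 0, 1));   /* nodes disagree: wait */
        assert(passes_two_stage_filter(true, 1, 1));    /* two faults from node 1 */
        return 0;
}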
 
 #ifdef CONFIG_NUMA_BALANCING
                 * This way a short and temporary process migration will
                 * not cause excessive memory migration.
                 */
-               if (polnid != current->numa_preferred_nid &&
+               if (thisnid != current->numa_preferred_nid &&
                                !current->numa_migrate_seq)
                        goto out;
 #endif
 
                                          __GFP_NOWARN) &
                                         ~GFP_IOFS, 0);
        if (newpage)
-               page_nidpid_xchg_last(newpage, page_nidpid_last(page));
+               page_cpupid_xchg_last(newpage, page_cpupid_last(page));
 
        return newpage;
 }
        if (!new_page)
                goto out_fail;
 
-       page_nidpid_xchg_last(new_page, page_nidpid_last(page));
+       page_cpupid_xchg_last(new_page, page_cpupid_last(page));
 
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated) {
 
        unsigned long or_mask, add_mask;
 
        shift = 8 * sizeof(unsigned long);
-       width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NIDPID_SHIFT;
+       width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
-               "Section %d Node %d Zone %d Lastnidpid %d Flags %d\n",
+               "Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
                SECTIONS_WIDTH,
                NODES_WIDTH,
                ZONES_WIDTH,
-               LAST_NIDPID_WIDTH,
+               LAST_CPUPID_WIDTH,
                NR_PAGEFLAGS);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
-               "Section %d Node %d Zone %d Lastnidpid %d\n",
+               "Section %d Node %d Zone %d Lastcpupid %d\n",
                SECTIONS_SHIFT,
                NODES_SHIFT,
                ZONES_SHIFT,
-               LAST_NIDPID_SHIFT);
+               LAST_CPUPID_SHIFT);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
-               "Section %lu Node %lu Zone %lu Lastnidpid %lu\n",
+               "Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
                (unsigned long)SECTIONS_PGSHIFT,
                (unsigned long)NODES_PGSHIFT,
                (unsigned long)ZONES_PGSHIFT,
-               (unsigned long)LAST_NIDPID_PGSHIFT);
+               (unsigned long)LAST_CPUPID_PGSHIFT);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
                "Node/Zone ID: %lu -> %lu\n",
                (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
                "Node not in page flags");
 #endif
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
-               "Last nidpid not in page flags");
+               "Last cpupid not in page flags");
 #endif
 
        if (SECTIONS_WIDTH) {
 
                INIT_LIST_HEAD(&lruvec->lists[lru]);
 }
 
-#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NIDPID_NOT_IN_PAGE_FLAGS)
-int page_nidpid_xchg_last(struct page *page, int nidpid)
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
        unsigned long old_flags, flags;
-       int last_nidpid;
+       int last_cpupid;
 
        do {
                old_flags = flags = page->flags;
-               last_nidpid = page_nidpid_last(page);
+               last_cpupid = page_cpupid_last(page);
 
-               flags &= ~(LAST_NIDPID_MASK << LAST_NIDPID_PGSHIFT);
-               flags |= (nidpid & LAST_NIDPID_MASK) << LAST_NIDPID_PGSHIFT;
+               flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+               flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
        } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
 
-       return last_nidpid;
+       return last_cpupid;
 }
 #endif
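
A standalone model of the retry loop above, written with C11 atomics (the field placement and widths are illustrative and a 64-bit unsigned long is assumed): the last_cpupid bits are swapped out of the flags word without losing concurrent updates to the other page-flag bits.

#include <assert.h>
#include <stdatomic.h>

#define CPUPID_SHIFT 25
#define CPUPID_MASK  ((1UL << 17) - 1)

static unsigned long xchg_last_cpupid(_Atomic unsigned long *flags, unsigned long cpupid)
{
        unsigned long old = atomic_load(flags), new;

        do {
                new = old & ~(CPUPID_MASK << CPUPID_SHIFT);
                new |= (cpupid & CPUPID_MASK) << CPUPID_SHIFT;
        } while (!atomic_compare_exchange_weak(flags, &old, new));

        return (old >> CPUPID_SHIFT) & CPUPID_MASK;
}

int main(void)
{
        _Atomic unsigned long flags = 0x3UL | (42UL << CPUPID_SHIFT);

        assert(xchg_last_cpupid(&flags, 7) == 42);      /* returns the previous cpupid */
        assert((atomic_load(&flags) & 0x3UL) == 0x3UL); /* low flag bits preserved */
        return 0;
}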
 
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+               int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
-       bool all_same_nidpid = true;
-       int last_nid = -1;
+       bool all_same_cpupid = true;
+       int last_cpu = -1;
        int last_pid = -1;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 
                                page = vm_normal_page(vma, addr, oldpte);
                                if (page) {
-                                       int nidpid = page_nidpid_last(page);
-                                       int this_nid = nidpid_to_nid(nidpid);
-                                       int this_pid = nidpid_to_pid(nidpid);
+                                       int cpupid = page_cpupid_last(page);
+                                       int this_cpu = cpupid_to_cpu(cpupid);
+                                       int this_pid = cpupid_to_pid(cpupid);
 
-                                       if (last_nid == -1)
-                                               last_nid = this_nid;
+                                       if (last_cpu == -1)
+                                               last_cpu = this_cpu;
                                        if (last_pid == -1)
                                                last_pid = this_pid;
-                                       if (last_nid != this_nid ||
+                                       if (last_cpu != this_cpu ||
                                            last_pid != this_pid) {
-                                               all_same_nidpid = false;
+                                               all_same_cpupid = false;
                                        }
 
                                        if (!pte_numa(oldpte)) {
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
-       *ret_all_same_nidpid = all_same_nidpid;
+       *ret_all_same_cpupid = all_same_cpupid;
        return pages;
 }
 
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
-       bool all_same_nidpid;
+       bool all_same_cpupid;
 
        pmd = pmd_offset(pud, addr);
        do {
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-                                dirty_accountable, prot_numa, &all_same_nidpid);
+                                dirty_accountable, prot_numa, &all_same_cpupid);
                pages += this_pages;
 
                /*
                 * node. This allows a regular PMD to be handled as one fault
                 * and effectively batches the taking of the PTL
                 */
-               if (prot_numa && this_pages && all_same_nidpid)
+               if (prot_numa && this_pages && all_same_cpupid)
                        change_pmd_protnuma(vma->vm_mm, addr, pmd);
        } while (pmd++, addr = next, addr != end);
 
 
                bad_page(page);
                return 1;
        }
-       page_nidpid_reset_last(page);
+       page_cpupid_reset_last(page);
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
                mminit_verify_page_links(page, zone, nid, pfn);
                init_page_count(page);
                page_mapcount_reset(page);
-               page_nidpid_reset_last(page);
+               page_cpupid_reset_last(page);
                SetPageReserved(page);
                /*
                 * Mark the block movable so that blocks are reserved for