return ret;
 }
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock

 {
        spin_unlock(ptl);
 }
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
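For context, the split-lock side of these helpers is elided above; only
do_pte_unlock() is visible. A minimal sketch of the pair, assuming the
lock side must use the lockdep-nested acquire because adjust_pte() is
called while another PTE lock of the same lock class is already held:

    static inline void do_pte_lock(spinlock_t *ptl)
    {
            /* Nested acquire: a same-class PTE lock is already held. */
            spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
    }

    static inline void do_pte_unlock(spinlock_t *ptl)
    {
            spin_unlock(ptl);
    }

In the !USE_SPLIT_PTE_PTLOCKS case both helpers compile to nothing,
since the caller already runs under the shared mm->page_table_lock.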
 
 {
        spinlock_t *ptl = NULL;
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
        ptl = __pte_lockptr(page);
        spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
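spin_lock_nest_lock() is a lockdep annotation: it asserts that
&mm->page_table_lock is already held and records ptl as nesting inside
it, so taking one per-page PTE lock after another under the same outer
lock does not trip lockdep's recursive-locking check. The pattern, with
the surrounding lines assumed for illustration:

    spin_lock(&mm->page_table_lock);        /* outer, mm-wide lock */

    ptl = __pte_lockptr(page);              /* per-page lock in struct page */
    spin_lock_nest_lock(ptl, &mm->page_table_lock);
    /* ... operate on the pagetable page ... */
    spin_unlock(ptl);

    spin_unlock(&mm->page_table_lock);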
 
                        __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                       if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
                                __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
                        xen_mc_issue(PARAVIRT_LAZY_MMU);

                if (!PageHighMem(page)) {
                        xen_mc_batch();
 
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                       if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
                                __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
                        __set_pfn_prot(pfn, PAGE_KERNEL);
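The two Xen hunks above are mirror images of one multicall pattern:
open a batch, flip the linear mapping's protection, pin or unpin the L1
table as an extra hypercall only when split PTE locks are in use, then
issue the batch in lazy-MMU mode (Xen requires pagetable pages to be
mapped read-only for as long as they are pinned). A commented sketch of
the unpin side, with the elided surrounding lines reconstructed as an
assumption:

    if (!PageHighMem(page)) {
            xen_mc_batch();                 /* queue hypercalls */

            /* Split locks pin each L1 table individually; drop that pin. */
            if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
                    __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

            /* Once unpinned, the page may be mapped writable again. */
            __set_pfn_prot(pfn, PAGE_KERNEL);

            xen_mc_issue(PARAVIRT_LAZY_MMU);        /* flush the batch */
    }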
 
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
 } while (0)
 #define pte_lock_deinit(page)  ((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)   ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else  /* !USE_SPLIT_PTLOCKS */
+#else  /* !USE_SPLIT_PTE_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)    do {} while (0)
 #define pte_lock_deinit(page)  do {} while (0)
 #define pte_lockptr(mm, pmd)   ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
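Either branch of pte_lockptr() yields a usable spinlock_t pointer, so
callers never care which configuration is active; the (void)(mm) and
(void)(pmd) casts merely consume the argument the branch does not need.
A sketch of the common calling pattern (illustrative, not a quote of
mm.h):

    spinlock_t *ptl = pte_lockptr(mm, pmd); /* per-page or mm-wide */
    pte_t *pte = pte_offset_map(pmd, address);

    spin_lock(ptl);
    /* ... read or update *pte ... */
    spin_unlock(ptl);
    pte_unmap(pte);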
 
 
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS      (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS  (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 
 /*
  * Each physical page in the system has a struct page associated with
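The rename is purely mechanical; the gate's value is unchanged. As a
worked example, assuming the usual mm/Kconfig default of
CONFIG_SPLIT_PTLOCK_CPUS=4: a kernel built with NR_CPUS=64 gets
USE_SPLIT_PTE_PTLOCKS == 1 and embeds a lock in each pagetable page's
struct page, while an NR_CPUS=2 build evaluates to 0 and falls back to
the single mm->page_table_lock.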

                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
                spinlock_t ptl;
 #endif
        struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */

        NR_MM_COUNTERS
 };
 
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
        int events;     /* for synchronization threshold */
        int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
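With split PTE locks on an MMU kernel, RSS accounting is split the same
way: each thread batches deltas in its task_rss_stat and bumps events,
and only when events crosses a threshold are the deltas folded into the
shared atomic_long_t counters of mm_rss_stat. A minimal sketch of that
fold, assuming a helper shaped like the one mm/memory.c carries in this
era (the name and the threshold value are assumptions):

    #define TASK_RSS_EVENTS_THRESH  64      /* assumed flush threshold */

    /* Fold this task's cached deltas into the shared mm counters. */
    static void sync_mm_rss(struct mm_struct *mm)
    {
            int i;

            for (i = 0; i < NR_MM_COUNTERS; i++) {
                    if (current->rss_stat.count[i]) {
                            atomic_long_add(current->rss_stat.count[i],
                                            &mm->rss_stat.count[i]);
                            current->rss_stat.count[i] = 0;
                    }
            }
            current->rss_stat.events = 0;
    }

Readers of the shared counters may therefore lag by a bounded number of
pages per thread, which is the price paid to keep hot PTE paths off the
contended atomics.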