"Node %d Inactive(anon): %8lu kB\n"
                       "Node %d Active(file):   %8lu kB\n"
                       "Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                       "Node %d Unevictable:    %8lu kB\n"
                       "Node %d Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                       "Node %d HighTotal:      %8lu kB\n"
                       "Node %d HighFree:       %8lu kB\n"
                       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
                       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
                       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
                       nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                       nid, K(i.totalhigh),
                       nid, K(i.freehigh),
 
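Note on the hunk above: with CONFIG_UNEVICTABLE_LRU gone, the Unevictable and
Mlocked rows appear in every build's per-node meminfo. Illustrative excerpt
(made-up values) of /sys/devices/system/node/node0/meminfo after this change:

Node 0 Unevictable:            0 kB
Node 0 Mlocked:                0 kB
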
                "Inactive(anon): %8lu kB\n"
                "Active(file):   %8lu kB\n"
                "Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                "Unevictable:    %8lu kB\n"
                "Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                "HighTotal:      %8lu kB\n"
                "HighFree:       %8lu kB\n"
                K(pages[LRU_INACTIVE_ANON]),
                K(pages[LRU_ACTIVE_FILE]),
                K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
                K(pages[LRU_UNEVICTABLE]),
                K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                K(i.totalhigh),
                K(i.freehigh),
 
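Why each hunk above drops two #ifdef blocks at once: the seq_printf()-style
call spreads one logical record across a format-string list and an argument
list, and the two must gain or lose entries together. A minimal userspace
sketch of the same pairing (illustrative values, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long unevictable = 0, mlocked = 0;	/* illustrative */

	/* Format fragments and arguments pair up positionally; removing
	 * a fragment without its arguments (or vice versa) would shift
	 * every later %lu onto the wrong value. */
	printf("Unevictable:    %8lu kB\n"
	       "Mlocked:        %8lu kB\n",
	       unevictable, mlocked);
	return 0;
}
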
        u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
        u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
        u |= kpf_copy_bit(k, KPF_MLOCKED,       PG_mlocked);
-#endif
 
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 
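For context on the fs/proc/page.c hunk: kpf_copy_bit() moves one flag bit from
the kernel's page->flags layout to the stable bit positions exported through
/proc/kpageflags. A userspace analogue, assuming the helper keeps its usual
shift-mask-shift shape:

#include <stdint.h>
#include <assert.h>

/* Assumed shape of kpf_copy_bit(): take bit `kbit` of the kernel
 * flags word and emit it at bit `ubit` of the exported value. */
static inline uint64_t kpf_copy_bit(uint64_t kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

int main(void)
{
	/* A set source bit lands at the destination position... */
	assert(kpf_copy_bit(1ull << 3, 20, 3) == 1ull << 20);
	/* ...and a clear one contributes nothing. */
	assert(kpf_copy_bit(0, 20, 3) == 0);
	return 0;
}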
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
-#ifdef CONFIG_UNEVICTABLE_LRU
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
-#else
-       NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-       NR_MLOCK = NR_ACTIVE_FILE,
-#endif
        NR_ANON_PAGES,  /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                           only modified from process context */
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
        LRU_UNEVICTABLE,
-#else
-       LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
        NR_LRU_LISTS
 };
 
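The deleted `NR_UNEVICTABLE = NR_ACTIVE_FILE` and `LRU_UNEVICTABLE =
LRU_ACTIVE_FILE` fallbacks existed only so dead code kept compiling; the
aliases made two names index the same counter slot, which is safe only while
one of them is never reached. A tiny demo of the hazard this cleanup retires:

#include <stdio.h>

/* Mirrors the removed fallback: UNEVICTABLE aliases ACTIVE_FILE. */
enum counter { ACTIVE_FILE, UNEVICTABLE = ACTIVE_FILE, NR_COUNTERS };

int main(void)
{
	long stat[NR_COUNTERS] = { 42 };

	/* Both names read slot 0 - fine for dead code, wrong the moment
	 * an UNEVICTABLE user slips past the #ifdefs. */
	printf("%ld %ld\n", stat[ACTIVE_FILE], stat[UNEVICTABLE]);
	return 0;
}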
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
        return (l == LRU_UNEVICTABLE);
-#else
-       return 0;
-#endif
 }
 
 enum zone_watermarks {
 
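Resulting helper, reconstructed from the hunk above; every configuration now
compiles the real test:

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);
}
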
        PG_reclaim,             /* To be reclaimed asap */
        PG_buddy,               /* Page is free, on buddy lists */
        PG_swapbacked,          /* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
        PG_unevictable,         /* Page is "unevictable"  */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
        PG_mlocked,             /* Page is vma mlocked */
 #endif
        SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
        TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-       SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-       __CLEARPAGEFLAG_NOOP(Unevictable)
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
 
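The PAGEFLAG()/__CLEARPAGEFLAG()/TESTCLEARFLAG() lines above now
unconditionally generate the PageUnevictable, SetPageUnevictable,
ClearPageUnevictable, __ClearPageUnevictable and TestClearPageUnevictable
accessors. A simplified userspace analogue of what those generated functions
do (atomicity deliberately omitted):

#include <stdio.h>

#define PG_unevictable 0UL	/* illustrative bit number */

static int PageUnevictable(const unsigned long *flags)
{
	return (*flags >> PG_unevictable) & 1;
}

static void SetPageUnevictable(unsigned long *flags)
{
	*flags |= 1UL << PG_unevictable;
}

static int TestClearPageUnevictable(unsigned long *flags)
{
	int old = PageUnevictable(flags);

	*flags &= ~(1UL << PG_unevictable);
	return old;
}

int main(void)
{
	unsigned long flags = 0;

	SetPageUnevictable(&flags);
	printf("%d %d\n", TestClearPageUnevictable(&flags),
	       PageUnevictable(&flags));	/* prints "1 0" */
	return 0;
}
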
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE       (1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE       0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED           (1 << PG_mlocked)
 #else
         1 << PG_private | 1 << PG_private_2 | \
         1 << PG_buddy   | 1 << PG_writeback | 1 << PG_reserved | \
         1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
-        __PG_UNEVICTABLE | __PG_MLOCKED)
+        1 << PG_unevictable | __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
 
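One subtlety in the PAGE_FLAGS_CHECK_AT_FREE change: with __PG_UNEVICTABLE
gone, `1 << PG_unevictable` joins the OR chain directly, which is safe because
`<<` binds tighter than `|` in C. A two-line check of that precedence (flag
values are illustrative):

#include <assert.h>

enum { PG_a = 1, PG_b = 3 };	/* illustrative flag bits */

int main(void)
{
	/* << binds tighter than |, so each term is a finished bit mask
	 * before the ORs combine them: 0x2 | 0x8 == 0xa. */
	assert((1 << PG_a | 1 << PG_b) == ((1 << PG_a) | (1 << PG_b)));
	assert((1 << PG_a | 1 << PG_b) == 0xa);
	return 0;
}
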
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
-#endif
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
        }
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
        set_bit(AS_UNEVICTABLE, &mapping->flags);
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-       return 0;
-}
-#endif
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
 
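The pagemap.h hunks make the AS_UNEVICTABLE helpers unconditional; they are
plain bitops on mapping->flags, used for things like SHM_LOCKed segments. A
userspace sketch of their behavior (non-atomic stand-ins for
set_bit()/test_bit(); the real helpers use the kernel bitops):

#include <stdio.h>

#define AS_UNEVICTABLE 3	/* really __GFP_BITS_SHIFT + 3 */

struct address_space { unsigned long flags; };

static void mapping_set_unevictable(struct address_space *mapping)
{
	mapping->flags |= 1UL << AS_UNEVICTABLE;
}

static int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return (mapping->flags >> AS_UNEVICTABLE) & 1;
	return !!mapping;	/* NULL mapping: not unevictable */
}

int main(void)
{
	struct address_space m = { 0 };

	mapping_set_unevictable(&m);
	printf("%d %d\n", mapping_unevictable(&m),
	       mapping_unevictable(NULL));	/* prints "1 0" */
	return 0;
}
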
  */
 int page_mkclean(struct page *);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-       return 0;       /* a.k.a. SWAP_SUCCESS */
-}
-#endif
 
 #else  /* !CONFIG_MMU */
 
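On the stub deleted above: its `return 0` was SWAP_SUCCESS by value, as the
removed "a.k.a. SWAP_SUCCESS" comment itself notes, i.e. "no other vma holds
the page mlocked", so munlock callers proceeded as if the unlock succeeded. A
trivial model, assuming only what that comment states:

#include <assert.h>

#define SWAP_SUCCESS 0	/* per the deleted "a.k.a. SWAP_SUCCESS" comment */

/* Model of the deleted !CONFIG_UNEVICTABLE_LRU stub. */
static int try_to_munlock_stub(void)
{
	return SWAP_SUCCESS;
}

int main(void)
{
	assert(try_to_munlock_stub() == SWAP_SUCCESS);
	return 0;
}
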
 
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
 extern void scan_mapping_unevictable_pages(struct address_space *);
 
                                        void __user *, size_t *, loff_t *);
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
-                                               struct vm_area_struct *vma)
-{
-       return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
-       return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif
 
 extern int kswapd_run(int nid);
 
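The swap.h hunk removes the `return 1` fallback for page_evictable(), so the
real test in vmscan.c - which, roughly, refuses pages whose mapping is marked
unevictable and pages that are mlocked - now runs on every build. A
hypothetical userspace model of that predicate (field names are stand-ins,
not kernel structures):

#include <stdbool.h>
#include <assert.h>

struct page_model {
	bool mapping_unevictable;	/* e.g. SHM_LOCKed shm segment */
	bool mlocked;			/* PageMlocked() or VM_LOCKED vma */
};

static int page_evictable_model(const struct page_model *p)
{
	if (p->mapping_unevictable)
		return 0;
	if (p->mlocked)
		return 0;
	return 1;
}

int main(void)
{
	struct page_model ok = { false, false }, locked = { false, true };

	assert(page_evictable_model(&ok) == 1);
	assert(page_evictable_model(&locked) == 0);
	return 0;
}
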
 
 #ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
                UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
-#endif
                NR_VM_EVENT_ITEMS
 };
 
 
                .extra2         = &one,
        },
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "scan_unevictable_pages",
                .mode           = 0644,
                .proc_handler   = &scan_unevictable_handler,
        },
-#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
 
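The scan_unevictable_pages sysctl above is now registered on every build.
Illustrative userspace use (equivalent to
`echo 1 > /proc/sys/vm/scan_unevictable_pages`), assuming the file exists on
the running kernel:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/scan_unevictable_pages", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* request a rescan of unevictable pages */
	return fclose(f) ? 1 : 0;
}
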
        def_bool y
        depends on !ARCH_NO_VIRT_TO_BUS
 
-config UNEVICTABLE_LRU
-       bool "Add LRU list to track non-evictable pages"
-       default y
-       help
-         Keeps unevictable pages off of the active and inactive pageout
-         lists, so kswapd will not waste CPU time or have its balancing
-         algorithms thrown off by scanning these pages.  Selecting this
-         will use one page flag and increase the code size a little,
-         say Y unless you know what you are doing.
-
-         See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
        bool
        default y if MMU=y
 
 config HAVE_MLOCKED_PAGE_BIT
        bool
-       default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+       default y if HAVE_MLOCK=y
 
 config MMU_NOTIFIER
        bool
 
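Resulting mm/Kconfig entry, reconstructed from the hunk above: the mlocked
page bit now keys off HAVE_MLOCK alone.

config HAVE_MLOCKED_PAGE_BIT
	bool
	default y if HAVE_MLOCK=y
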
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
        if (TestClearPageUnevictable(old))
                SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
 
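Resulting mm/internal.h helper, reconstructed from the hunk above; page
migration now always carries the unevictable flag over to the new page:

static inline void unevictable_migrate_page(struct page *new,
					    struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
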
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
        return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
-{
-       if (mlock && (vma->vm_flags & VM_LOCKED))
-               return make_pages_present(start, end);
-       return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-       return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specified address range
 
 
        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
                " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
 
        return ret;
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
        else
                return try_to_unmap_file(page, 1, 0);
 }
-#endif
 
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
        int lru;
        put_page(page);         /* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-       int lru;
-       VM_BUG_ON(PageLRU(page));
-
-       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-       lru_cache_add_lru(page, lru);
-       put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif
 
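On the deleted putback_lru_page() fallback above: it picked its target list as
`!!TestClearPageActive(page) + page_is_file_cache(page)`, relying on
LRU_ACTIVE == 1 and on page_is_file_cache() returning LRU_FILE (2) for file
pages and 0 otherwise, as it did in this era. A standalone model of that
index arithmetic:

#include <assert.h>

enum { LRU_BASE, LRU_ACTIVE = 1, LRU_FILE = 2 };
enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
};

static int lru_index(int active, int file)
{
	return LRU_BASE + (active ? LRU_ACTIVE : 0) + (file ? LRU_FILE : 0);
}

int main(void)
{
	assert(lru_index(0, 0) == LRU_INACTIVE_ANON);
	assert(lru_index(1, 0) == LRU_ACTIVE_ANON);
	assert(lru_index(0, 1) == LRU_INACTIVE_FILE);
	assert(lru_index(1, 1) == LRU_ACTIVE_FILE);
	return 0;
}
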
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
        "nr_unevictable",
        "nr_mlock",
-#endif
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
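
With the vmstat_text entries above unconditional, /proc/vmstat always exposes
the unevictable counters. Illustrative excerpt (made-up values, showing only
the names visible in this hunk):

unevictable_pgs_culled 0
unevictable_pgs_scanned 0
unevictable_pgs_rescued 0
unevictable_pgs_stranded 0
unevictable_pgs_mlockfreed 0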