* thread doing COW.
                 */
                ptep_clear_flush_notify(vma, address, page_table);
-               set_pte_at(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
                SetPageSwapBacked(new_page);
-               lru_cache_add_active_anon(new_page);
+               lru_cache_add_active_or_unevictable(new_page, vma);
                page_add_new_anon_rmap(new_page, vma, address);
 
+               /* TODO: is this safe?  do_anonymous_page() does it this way. */
+               set_pte_at(mm, address, page_table, entry);
+               update_mmu_cache(vma, address, entry);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
                goto release;
        inc_mm_counter(mm, anon_rss);
        SetPageSwapBacked(page);
-       lru_cache_add_active_anon(page);
+       lru_cache_add_active_or_unevictable(page, vma);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
                if (anon) {
-                        inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter(mm, anon_rss);
                        SetPageSwapBacked(page);
-                        lru_cache_add_active_anon(page);
-                        page_add_new_anon_rmap(page, vma, address);
+                       lru_cache_add_active_or_unevictable(page, vma);
+                       page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(page);
                                get_page(dirty_page);
                        }
                }
+               /* TODO: is this safe?  do_anonymous_page() does it this way. */
+               set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
 
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
        spin_unlock_irq(&zone->lru_lock);
 }
 
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page:  the page to be added to the LRU
+ * @vma:   the VMA in which @page is mapped, used to determine evictability
+ *
+ * Place @page on the active or unevictable LRU list, depending on
+ * page_evictable().  Note that if the page is not evictable, it goes
+ * directly onto its zone's unevictable list; it does NOT use a
+ * per-cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+                                       struct vm_area_struct *vma)
+{
+       if (page_evictable(page, vma))
+               lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+       else
+               add_page_to_unevictable_list(page);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been