*/
 static inline void free_page_mlock(struct page *page)
 {
-       if (unlikely(TestClearPageMlocked(page))) {
-               unsigned long flags;
-
-               local_irq_save(flags);
-               __dec_zone_page_state(page, NR_MLOCK);
-               __count_vm_event(UNEVICTABLE_MLOCKFREED);
-               local_irq_restore(flags);
-       }
+       __ClearPageMlocked(page);
+       __dec_zone_page_state(page, NR_MLOCK);
+       __count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
 
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
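
The rewrite above drops the TestClearPageMlocked() atomic and the
local_irq_save()/local_irq_restore() pair: free_page_mlock() now relies
on its callers to disable interrupts first, which is what makes the
non-atomic __ClearPageMlocked() and __dec_zone_page_state() variants
safe. The hunk above appears to be against mm/internal.h; the remaining
hunks patch its callers in mm/page_alloc.c. A minimal sketch of the
resulting contract (the helper body is from the hunk above; the caller
skeleton is hypothetical and only mirrors the hunks below):

	/* helper: only legal with interrupts already disabled */
	static inline void free_page_mlock(struct page *page)
	{
		__ClearPageMlocked(page);		/* non-atomic clear */
		__dec_zone_page_state(page, NR_MLOCK);	/* __-variant, IRQs off */
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
	}

	/* hypothetical caller skeleton following the pattern below */
	static void example_free_path(struct page *page)
	{
		unsigned long flags;
		int clearMlocked = PageMlocked(page);	/* sampled with IRQs on */

		local_irq_save(flags);
		if (unlikely(clearMlocked))
			free_page_mlock(page);		/* IRQs now off: safe */
		/* ... the actual free and its accounting ... */
		local_irq_restore(flags);
	}

Testing PageMlocked() before disabling interrupts keeps the mlock
bookkeeping out of the critical section in the common case where the
page was never mlocked.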
 
 
@@ ... @@
 static inline int free_pages_check(struct page *page)
 {
-       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (page_count(page) != 0)  |
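
With the call removed from free_pages_check(), clearing the mlocked
state becomes the job of the two call sites patched below; each samples
PageMlocked() up front and calls free_page_mlock() only inside its
existing IRQ-disabled section.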
@@ ... @@
        unsigned long flags;
        int i;
        int bad = 0;
+       int clearMlocked = PageMlocked(page);
 
        for (i = 0 ; i < (1 << order) ; ++i)
                bad += free_pages_check(page + i);
@@ ... @@
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
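
Stitching the two __free_pages_ok() hunks together, the function after
the patch reads roughly as follows (a reconstruction for readability,
not verbatim source; the debug checks and arch_free_page() call are
elided):

	static void __free_pages_ok(struct page *page, unsigned int order)
	{
		unsigned long flags;
		int i;
		int bad = 0;
		int clearMlocked = PageMlocked(page);	/* before any flag checks */

		for (i = 0 ; i < (1 << order) ; ++i)
			bad += free_pages_check(page + i);
		if (bad)
			return;

		/* ... debug checks and arch_free_page() elided ... */
		kernel_map_pages(page, 1 << order, 0);

		local_irq_save(flags);
		if (unlikely(clearMlocked))
			free_page_mlock(page);	/* IRQs off: __-variants safe */
		__count_vm_events(PGFREE, 1 << order);
		free_one_page(page_zone(page), page, order,
						get_pageblock_migratetype(page));
		local_irq_restore(flags);
	}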
@@ ... @@
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
+       int clearMlocked = PageMlocked(page);
 
        if (PageAnon(page))
                page->mapping = NULL;
 
@@ ... @@
        pcp = &zone_pcp(zone, get_cpu())->pcp;
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_event(PGFREE);
+
        if (cold)
                list_add_tail(&page->lru, &pcp->list);
        else
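
Likewise for the per-CPU fast path, free_hot_cold_page() ends up looking
roughly like this (again a reconstruction; the else branch and the tail
of the function are filled in from the surrounding kernel code of that
era and should be read as assumptions, not part of this patch):

	static void free_hot_cold_page(struct page *page, int cold)
	{
		struct zone *zone = page_zone(page);
		struct per_cpu_pages *pcp;
		unsigned long flags;
		int clearMlocked = PageMlocked(page);	/* as in __free_pages_ok() */

		if (PageAnon(page))
			page->mapping = NULL;
		if (free_pages_check(page))
			return;

		/* ... debug checks and arch_free_page() elided ... */

		pcp = &zone_pcp(zone, get_cpu())->pcp;
		local_irq_save(flags);
		if (unlikely(clearMlocked))
			free_page_mlock(page);
		__count_vm_event(PGFREE);

		if (cold)
			list_add_tail(&page->lru, &pcp->list);	/* cold: reuse last */
		else
			list_add(&page->lru, &pcp->list);	/* hot: reuse first */
		/* ... pcp->count bookkeeping and local_irq_restore() ... */
	}

In both paths the PageMlocked() test is hoisted out of the
interrupt-disabled region, so the unmlocked common case adds no work
while interrupts are off.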