unsigned int munlock_vma_page(struct page *page)
 {
        int nr_pages;
-       pg_data_t *pgdat = page_pgdat(page);
 
        /* For try_to_munlock() and to serialize with page migration */
        BUG_ON(!PageLocked(page));
-
        VM_BUG_ON_PAGE(PageTail(page), page);
 
-       /*
-        * Serialize with any parallel __split_huge_page_refcount() which
-        * might otherwise copy PageMlocked to part of the tail pages before
-        * we clear it in the head page. It also stabilizes thp_nr_pages().
-        */
-       spin_lock_irq(&pgdat->lru_lock);
-
        if (!TestClearPageMlocked(page)) {
                /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
-               nr_pages = 1;
-               goto unlock_out;
+               return 0;
        }
 
        nr_pages = thp_nr_pages(page);
-       __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+       /*
+        * We are no longer under an IRQ-disabling lru_lock here, so the
+        * zone stat update must use the IRQ-safe mod_zone_page_state()
+        * rather than the non-atomic __mod_zone_page_state().
+        */
+       mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
-       if (__munlock_isolate_lru_page(page, true)) {
-               spin_unlock_irq(&pgdat->lru_lock);
+       /*
+        * isolate_lru_page() handles its own LRU locking; on success (0)
+        * finish the munlock on the isolated page, otherwise just account
+        * the isolation failure as before.
+        */
+       if (!isolate_lru_page(page))
                __munlock_isolated_page(page);
-               goto out;
-       }
-       __munlock_isolation_failed(page);
-
-unlock_out:
-       spin_unlock_irq(&pgdat->lru_lock);
+       else
+               __munlock_isolation_failed(page);
 
-out:
        return nr_pages - 1;
 }