        if (HPageTemporary(page)) {
                remove_hugetlb_page(h, page, false);
+               spin_unlock(&hugetlb_lock);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
                remove_hugetlb_page(h, page, true);
+               spin_unlock(&hugetlb_lock);
                update_and_free_page(h, page);
        } else {
                arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
+               spin_unlock(&hugetlb_lock);
        }
-       spin_unlock(&hugetlb_lock);
 }
 
 /*
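In the freeing path above, the single spin_unlock() at the end of the function is replaced by an unlock in each branch, so update_and_free_page() is always reached with hugetlb_lock already dropped; the branch that merely re-enqueues the page unlocks last, after enqueue_huge_page(). All bookkeeping that needs the lock (the remove_hugetlb_page() step) happens first, and only the slow teardown runs lock-free. Reduced to a minimal userspace sketch with pthreads (pool_lock, struct obj and expensive_free() are hypothetical stand-ins for hugetlb_lock, struct page and update_and_free_page(); this is not the kernel code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	int temporary;			/* stand-in for HPageTemporary() */
	void *payload;
};

/* Stand-in for update_and_free_page(): slow, must not hold pool_lock. */
static void expensive_free(struct obj *o)
{
	free(o->payload);
	free(o);
}

static void release_obj(struct obj *o)
{
	pthread_mutex_lock(&pool_lock);
	if (o->temporary) {
		/* locked bookkeeping (the remove_hugetlb_page() step) */
		pthread_mutex_unlock(&pool_lock);	/* drop before slow free */
		expensive_free(o);
	} else {
		/* fast path: object stays in the pool; keep the hold short */
		pthread_mutex_unlock(&pool_lock);
	}
}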
                                list_entry(h->hugepage_freelists[node].next,
                                          struct page, lru);
                        remove_hugetlb_page(h, page, acct_surplus);
+                       /*
+                        * unlock/lock around update_and_free_page is temporary
+                        * and will be removed with subsequent patch.
+                        */
+                       spin_unlock(&hugetlb_lock);
                        update_and_free_page(h, page);
+                       spin_lock(&hugetlb_lock);
                        ret = 1;
                        break;
                }
        }
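This hunk is in free_pool_huge_page(), which frees one page from inside a loop, so it cannot simply unlock and return: it drops the lock around update_and_free_page() and re-takes it, and the patch's own comment flags the unlock/relock as temporary. It is safe here only because the loop breaks immediately after relocking, so no iterator is touched after the lock was dropped. A hedged userspace sketch of the same shape (freelist, struct node and expensive_free() are made-up stand-ins, not kernel API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct node { struct node *next; };
static struct node *freelist;		/* protected by pool_lock */

static void expensive_free(struct node *n)	/* slow; lock must be dropped */
{
	free(n);
}

static int free_one(void)
{
	int ret = 0;

	pthread_mutex_lock(&pool_lock);
	if (freelist) {
		struct node *n = freelist;

		freelist = n->next;		/* unlink under the lock */
		pthread_mutex_unlock(&pool_lock);
		expensive_free(n);		/* slow work, lock dropped */
		pthread_mutex_lock(&pool_lock);	/* re-take; no stale state used */
		ret = 1;
	}
	pthread_mutex_unlock(&pool_lock);
	return ret;
}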
                remove_hugetlb_page(h, head, false);
                h->max_huge_pages--;
+               spin_unlock(&hugetlb_lock);
                update_and_free_page(h, head);
-               rc = 0;
+               return 0;
        }
 out:
        spin_unlock(&hugetlb_lock);
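In dissolve_free_huge_page() the early unlock forces a second change: the old code set rc = 0 and fell through to the out: label, which unlocks; now that the lock is already dropped before update_and_free_page(), falling through would unlock twice, so the success path returns 0 directly. A minimal sketch of the hazard, again with hypothetical names (pool_lock, dissolve(), expensive_free()):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj { int refcount; };

static void expensive_free(struct obj *o)
{
	free(o);
}

static int dissolve(struct obj *o)
{
	int rc = -EBUSY;

	pthread_mutex_lock(&pool_lock);
	if (o->refcount == 0) {
		pthread_mutex_unlock(&pool_lock);
		expensive_free(o);
		return 0;	/* NOT "rc = 0": falling to out would unlock twice */
	}
	/* out: */
	pthread_mutex_unlock(&pool_lock);
	return rc;
}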
                                                nodemask_t *nodes_allowed)
 {
        int i;
+       struct page *page, *next;
+       LIST_HEAD(page_list);
 
        if (hstate_is_gigantic(h))
                return;
 
+       /*
+        * Collect pages to be freed on a list, and free after dropping lock
+        */
        for_each_node_mask(i, *nodes_allowed) {
-               struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
                list_for_each_entry_safe(page, next, freel, lru) {
                        if (count >= h->nr_huge_pages)
-                               return;
+                               goto out;
                        if (PageHighMem(page))
                                continue;
                        remove_hugetlb_page(h, page, false);
-                       update_and_free_page(h, page);
+                       list_add(&page->lru, &page_list);
                }
        }
+
+out:
+       spin_unlock(&hugetlb_lock);
+       list_for_each_entry_safe(page, next, &page_list, lru) {
+               update_and_free_page(h, page);
+               cond_resched();
+       }
+       spin_lock(&hugetlb_lock);
 }
 #else
 static inline void try_to_free_low(struct hstate *h, unsigned long count,
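try_to_free_low() shows the preferred long-term shape rather than an unlock/relock per page: under the lock it only unlinks victims onto a local page_list, then drops the lock once and does all the expensive frees, with cond_resched() between them. A compact userspace sketch of that two-phase pattern (shrink_pool(), pool_lock, freelist and nr_objs are invented for illustration; sched_yield() stands in for cond_resched()):

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct node { struct node *next; };
static struct node *freelist;		/* protected by pool_lock */
static unsigned long nr_objs;		/* protected by pool_lock */

/* Caller holds pool_lock on entry and on return, like try_to_free_low(). */
static void shrink_pool(unsigned long count)
{
	struct node *batch = NULL, *n;

	/* Phase 1: only unlink victims while the lock is held; no slow work. */
	while (freelist && nr_objs > count) {
		n = freelist;
		freelist = n->next;
		n->next = batch;
		batch = n;
		nr_objs--;
	}

	/* Phase 2: one unlock, then all the slow frees. */
	pthread_mutex_unlock(&pool_lock);
	while ((n = batch) != NULL) {
		batch = n->next;
		free(n);		/* the expensive_free() step */
		sched_yield();		/* stand-in for cond_resched() */
	}
	pthread_mutex_lock(&pool_lock);
}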