 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/cpuset.h>
+#include <linux/mutex.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+/*
+ * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ */
+static DEFINE_SPINLOCK(hugetlb_lock);
 
 static void clear_huge_page(struct page *page, unsigned long addr)
 {
        }
 }
 
-/*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
- */
-static DEFINE_SPINLOCK(hugetlb_lock);
-
 static void enqueue_huge_page(struct page *page)
 {
        int nid = page_to_nid(page);
        pte_t *ptep;
        pte_t entry;
        int ret;
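+       /*
+        * Note: a single, function-scoped static, so every hugepage
+        * fault in the system serializes on this one mutex.
+        */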
+       static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 
        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;
 
+       /*
+        * Serialize hugepage allocation and instantiation, so that we don't
+        * get spurious allocation failures if two CPUs race to instantiate
+        * the same page in the page cache.
+        */
+       mutex_lock(&hugetlb_instantiation_mutex);
        entry = *ptep;
-       if (pte_none(entry))
-               return hugetlb_no_page(mm, vma, address, ptep, write_access);
+       if (pte_none(entry)) {
+               ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+               mutex_unlock(&hugetlb_instantiation_mutex);
+               return ret;
+       }
 
        ret = VM_FAULT_MINOR;
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
+       mutex_unlock(&hugetlb_instantiation_mutex);
 
        return ret;
 }
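
For illustration, a minimal userspace sketch of the racing faults the new
mutex serializes. It is not part of the patch: MAP_HUGETLB postdates this
change (contemporary testing went through hugetlbfs-backed mappings), and
the 2MB page size is an assumption. Two threads touch the same shared
hugepage at once; without the serialization above, both can find the PTE
empty, both enter hugetlb_no_page(), and the loser's extra allocation can
fail spuriously even though one page would have satisfied both faults:

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed hugepage size */

static void *fault_in(void *mem)
{
	/* The first write faults the page in via hugetlb_fault(). */
	*(volatile char *)mem = 1;
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	void *mem = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (mem == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Both threads race to instantiate the same hugepage. */
	pthread_create(&t1, NULL, fault_in, mem);
	pthread_create(&t2, NULL, fault_in, mem);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("both threads faulted in the same hugepage");
	return EXIT_SUCCESS;
}

Build with cc -pthread; the hugepage pool must be non-empty (e.g. write a
count to /proc/sys/vm/nr_hugepages) for the mapping and faults to succeed.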