 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static int set_max_huge_pages(struct hstate *h, unsigned long count,
+static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
                              nodemask_t *nodes_allowed)
 {
        unsigned long min_count, ret;
 
        spin_lock(&hugetlb_lock);
 
+       /*
+        * Check for a node specific request.
+        * Changing node specific huge page count may require a corresponding
+        * change to the global count.  In any case, the passed node mask
+        * (nodes_allowed) will restrict alloc/free to the specified node.
+        */
+       if (nid != NUMA_NO_NODE) {
+               unsigned long old_count = count;
+
+               count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+               /*
+                * The user may have specified a large count value that caused
+                * the above calculation to overflow.  In this case, they
+                * wanted to allocate as many huge pages as possible.  Set
+                * count to the largest possible value to align with their
+                * intention.
+                */
+               if (count < old_count)
+                       count = ULONG_MAX;
+       }
+
        /*
         * Gigantic pages runtime allocation depend on the capability for large
         * page range allocation.
                }
        } else if (nodes_allowed) {
                /*
-                * per node hstate attribute: adjust count to global,
-                * but restrict alloc/free to the specified node.
+                * Node specific request.  count adjustment happens in
+                * set_max_huge_pages() after acquiring hugetlb_lock.
                 */
-               count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
                init_nodemask_of_node(nodes_allowed, nid);
-       } else
-               nodes_allowed = &node_states[N_MEMORY];
+       } else {
+               /*
+                * Node specific request, but we could not allocate the few
+                * words required for a node mask.  We are unlikely to hit
+                * this condition.  Since we cannot pass down the appropriate
+                * node mask, just return ENOMEM.
+                */
+               err = -ENOMEM;
+               goto out;
+       }
 
-       err = set_max_huge_pages(h, count, nodes_allowed);
+       err = set_max_huge_pages(h, count, nid, nodes_allowed);
 
 out:
        if (nodes_allowed != &node_states[N_MEMORY])
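
For illustration only, and not part of the patch: a minimal stand-alone
user-space sketch of the per-node to global count adjustment and the
overflow clamp added in set_max_huge_pages() above.  The helper name
adjust_node_count_to_global() and the sample numbers are hypothetical;
only the arithmetic mirrors the new code.

#include <limits.h>
#include <stdio.h>

/*
 * Turn a per-node huge page request into a global target: keep every
 * other node's pages and substitute the requested value for this
 * node's share.  If the addition wraps, the caller asked for as many
 * pages as possible, so clamp to the largest representable value.
 */
static unsigned long adjust_node_count_to_global(unsigned long requested,
                                                 unsigned long global_pages,
                                                 unsigned long node_pages)
{
        unsigned long count = requested + (global_pages - node_pages);

        if (count < requested)  /* overflowed */
                count = ULONG_MAX;
        return count;
}

int main(void)
{
        /* 10 pages system wide, 2 on this node, 8 requested here -> 16 */
        printf("%lu\n", adjust_node_count_to_global(8, 10, 2));
        /* an "unlimited" request wraps and is clamped to ULONG_MAX */
        printf("%lu\n", adjust_node_count_to_global(ULONG_MAX, 10, 2));
        return 0;
}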