static unsigned long __initdata default_hstate_max_huge_pages;
 static bool __initdata parsed_valid_hugepagesz = true;
 static bool __initdata parsed_default_hugepagesz;
+static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
 
 /*
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
        return ERR_PTR(-ENOSPC);
 }
 
-int alloc_bootmem_huge_page(struct hstate *h)
+int alloc_bootmem_huge_page(struct hstate *h, int nid)
        __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
-int __alloc_bootmem_huge_page(struct hstate *h)
+int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 {
-       struct huge_bootmem_page *m;
+       struct huge_bootmem_page *m = NULL; /* initialize for clang */
        int nr_nodes, node;
 
+       if (nid >= nr_online_nodes)
+               return 0;
+       /* do node-specific allocation */
+       if (nid != NUMA_NO_NODE) {
+               m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
+                               0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+               if (!m)
+                       return 0;
+               goto found;
+       }
+       /* allocate from next node when distributing huge pages */
        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
-               void *addr;
-
-               addr = memblock_alloc_try_nid_raw(
+               m = memblock_alloc_try_nid_raw(
                                huge_page_size(h), huge_page_size(h),
                                0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
-               if (addr) {
-                       /*
-                        * Use the beginning of the huge page to store the
-                        * huge_bootmem_page struct (until gather_bootmem
-                        * puts them into the mem_map).
-                        */
-                       m = addr;
-                       goto found;
-               }
+               /*
+                * Use the beginning of the huge page to store the
+                * huge_bootmem_page struct (until gather_bootmem
+                * puts them into the mem_map).
+                */
+               if (!m)
+                       return 0;
+               goto found;
        }
-       return 0;
 
 found:
-       BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
        /* Put them into a private list first because mem_map is not up yet */
        INIT_LIST_HEAD(&m->list);
        list_add(&m->list, &huge_boot_pages);
                cond_resched();
        }
 }
+static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
+{
+       unsigned long i;
+       char buf[32];
+
+       for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
+               if (hstate_is_gigantic(h)) {
+                       if (!alloc_bootmem_huge_page(h, nid))
+                               break;
+               } else {
+                       struct page *page;
+                       gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+
+                       page = alloc_fresh_huge_page(h, gfp_mask, nid,
+                                       &node_states[N_MEMORY], NULL);
+                       if (!page)
+                               break;
+                       put_page(page); /* free it into the hugepage allocator */
+               }
+               cond_resched();
+       }
+       if (i == h->max_huge_pages_node[nid])
+               return;
+
+       string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
+       pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
+               h->max_huge_pages_node[nid], buf, nid, i);
+       h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
+       h->max_huge_pages_node[nid] = i;
+}
 
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
        unsigned long i;
        nodemask_t *node_alloc_noretry;
+       bool node_specific_alloc = false;
 
+       /* skip gigantic hugepages allocation if hugetlb_cma enabled */
+       if (hstate_is_gigantic(h) && hugetlb_cma_size) {
+               pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
+               return;
+       }
+
+       /* do node-specific allocation first, if any was requested */
+       for (i = 0; i < nr_online_nodes; i++) {
+               if (h->max_huge_pages_node[i] > 0) {
+                       hugetlb_hstate_alloc_pages_onenode(h, i);
+                       node_specific_alloc = true;
+               }
+       }
+
+       if (node_specific_alloc)
+               return;
+
+       /* below falls back to balanced allocation across all nodes */
        if (!hstate_is_gigantic(h)) {
                /*
                 * Bit mask controlling how hard we retry per-node allocations.
 
        for (i = 0; i < h->max_huge_pages; ++i) {
                if (hstate_is_gigantic(h)) {
-                       if (hugetlb_cma_size) {
-                               pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
-                               goto free;
-                       }
-                       if (!alloc_bootmem_huge_page(h))
+                       if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
                                break;
                } else if (!alloc_pool_huge_page(h,
                                         &node_states[N_MEMORY],
                        h->max_huge_pages, buf, i);
                h->max_huge_pages = i;
        }
-free:
        kfree(node_alloc_noretry);
 }
 
                        }
                        default_hstate.max_huge_pages =
                                default_hstate_max_huge_pages;
+
+                       for (i = 0; i < nr_online_nodes; i++)
+                               default_hstate.max_huge_pages_node[i] =
+                                       default_hugepages_in_node[i];
                }
        }
 
        parsed_hstate = h;
 }
 
+bool __init __weak hugetlb_node_alloc_supported(void)
+{
+       return true;
+}
 /*
  * hugepages command line processing
  * hugepages normally follows a valid hugepagsz or default_hugepagsz
 {
        unsigned long *mhp;
        static unsigned long *last_mhp;
+       int node = NUMA_NO_NODE;
+       int count;
+       unsigned long tmp;
+       char *p = s;
 
        if (!parsed_valid_hugepagesz) {
                pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
                return 0;
        }
 
-       if (sscanf(s, "%lu", mhp) <= 0)
-               *mhp = 0;
+       while (*p) {
+               count = 0;
+               if (sscanf(p, "%lu%n", &tmp, &count) != 1)
+                       goto invalid;
+               /* Parameter is in "<node>:<pages>[,...]" node format */
+               if (p[count] == ':') {
+                       if (!hugetlb_node_alloc_supported()) {
+                               pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
+                               return 0;
+                       }
+                       node = tmp;
+                       p += count + 1;
+                       if (node < 0 || node >= nr_online_nodes)
+                               goto invalid;
+                       /* Parse hugepages */
+                       if (sscanf(p, "%lu%n", &tmp, &count) != 1)
+                               goto invalid;
+                       if (!hugetlb_max_hstate)
+                               default_hugepages_in_node[node] = tmp;
+                       else
+                               parsed_hstate->max_huge_pages_node[node] = tmp;
+                       *mhp += tmp;
+                       /* Go on to parse the next node */
+                       if (p[count] == ',')
+                               p += count + 1;
+                       else
+                               break;
+               } else {
+                       if (p != s)
+                               goto invalid;
+                       *mhp = tmp;
+                       break;
+               }
+       }
 
        /*
         * Global state is always initialized later in hugetlb_init.
        last_mhp = mhp;
 
        return 1;
+
+invalid:
+       pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
+       return 0;
 }
 __setup("hugepages=", hugepages_setup);
 
 static int __init default_hugepagesz_setup(char *s)
 {
        unsigned long size;
+       int i;
 
        parsed_valid_hugepagesz = false;
        if (parsed_default_hugepagesz) {
         */
        if (default_hstate_max_huge_pages) {
                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+               for (i = 0; i < nr_online_nodes; i++)
+                       default_hstate.max_huge_pages_node[i] =
+                               default_hugepages_in_node[i];
                if (hstate_is_gigantic(&default_hstate))
                        hugetlb_hstate_alloc_pages(&default_hstate);
                default_hstate_max_huge_pages = 0;