#ifdef CONFIG_CMA
 static struct cma *hugetlb_cma[MAX_NUMNODES];
+static bool hugetlb_cma_page(struct page *page, unsigned int order)
+{
+       return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
+                               1 << order);
+}
+#else
+static bool hugetlb_cma_page(struct page *page, unsigned int order)
+{
+       return false;
+}
 #endif
 static unsigned long hugetlb_cma_size __initdata;
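
The helper above answers one question: do this page and the 1 << order pages
behind it sit inside the per-node hugetlb CMA area?  cma_pages_valid() is the
range check added to mm/cma.c for this purpose, and the !CONFIG_CMA stub
returns false so callers need no ifdefs.  A minimal sketch of the check being
relied on (hypothetical helper and field names, not the mm/cma.c code):

    /* Illustrative only: true when [pfn, pfn + count) lies inside the area. */
    static bool cma_contains_sketch(unsigned long base_pfn, unsigned long area_pages,
                                    unsigned long pfn, unsigned long count)
    {
            return pfn >= base_pfn && pfn + count <= base_pfn + area_pages;
    }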
 
        atomic_set(compound_pincount_ptr(page), 0);
 
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+               p->mapping = NULL;
                clear_compound_head(p);
                set_page_refcounted(p);
        }
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
        }
-       if (hstate_is_gigantic(h)) {
+
+       /*
+        * Non-gigantic pages demoted from CMA allocated gigantic pages
+        * need to be given back to CMA in free_gigantic_page.
+        */
+       if (hstate_is_gigantic(h) ||
+           hugetlb_cma_page(page, huge_page_order(h))) {
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
        } else {
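
With the new condition, a page demoted out of a CMA-backed gigantic page
(e.g. a 2MB page carved from a 1GB CMA allocation) takes the same free path
as a gigantic page: free_gigantic_page() offers the range to cma_release()
first and only falls back to free_contig_range(), so the memory returns to
the hugetlb CMA pool rather than the buddy allocator.  Roughly, paraphrasing
the existing CONFIG_CMA branch of free_gigantic_page(), shown only for
context:

    static void free_gigantic_page(struct page *page, unsigned int order)
    {
    #ifdef CONFIG_CMA
            /* cma_release() returns false for pages outside the CMA area. */
            if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
                    return;
    #endif
            free_contig_range(page_to_pfn(page), 1 << order);
    }
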
                 * h->demote_order is initially 0.
                 * - We can not demote gigantic pages if runtime freeing
                 *   is not supported, so skip this.
+                * - If CMA allocation is possible, we can not demote
+                *   HUGETLB_PAGE_ORDER or smaller size pages.
                 */
                if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                        continue;
+               if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
+                       continue;
                for_each_hstate(h2) {
                        if (h2 == h)
                                continue;
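
The new hugetlb_cma_size check is the other half of the CMA story: the CMA
reservations below are made in HUGETLB_PAGE_ORDER units, so demotion to
anything at or below that size is ruled out whenever hugetlb_cma= is in use,
and those hstates simply keep demote_order == 0.  As an illustration, with
4K base pages and a 2MB HUGETLB_PAGE_ORDER (typical x86_64 values):

    /*
     * hstate    h->order    demote_order after hugetlb_init_hstates()
     * 1GB       18          9   (may demote to 2MB)
     * 2MB       9           0   (skipped: h->order <= HUGETLB_PAGE_ORDER)
     */
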
        if (!demote_hstate)
                return -EINVAL;
        demote_order = demote_hstate->order;
+       if (demote_order < HUGETLB_PAGE_ORDER)
+               return -EINVAL;
 
        /* demote order must be smaller than hstate order */
        h = kobj_to_hstate(kobj, &nid);
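
Together with the existing "demote order must be smaller than hstate order"
check, a demote_size written through sysfs now has to satisfy
HUGETLB_PAGE_ORDER <= demote_order < h->order.  For illustration only, with
the sysfs path of the demote interface added in this series and sizes that
assume 4K base pages:

    /*
     * # accepted: 1GB pages may demote to 2MB pages
     * echo 2M > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
     *
     * # rejected with -EINVAL on an arch whose 64K hstate sits below
     * # HUGETLB_PAGE_ORDER
     * echo 64K > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
     */
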
        if (hugetlb_cma_size < (PAGE_SIZE << order)) {
                pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
                        (PAGE_SIZE << order) / SZ_1M);
+               hugetlb_cma_size = 0;
                return;
        }
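
Zeroing hugetlb_cma_size on this early-exit path matters because the same
variable now doubles as the "CMA may back gigantic pages" flag tested in
hugetlb_init_hstates().  A worked example, assuming 4K base pages and 1GB
gigantic pages (order 18):

    /*
     * PAGE_SIZE << order = 4K << 18 = 1GB, so:
     *   hugetlb_cma=512M  -> refused, "at least 1024 MiB" warning,
     *                        hugetlb_cma_size reset to 0
     *   hugetlb_cma=2G    -> accepted, room for two 1GB pages
     */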
 
                size = round_up(size, PAGE_SIZE << order);
 
                snprintf(name, sizeof(name), "hugetlb%d", nid);
-               res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
+               /*
+                * Note that 'order per bit' is based on smallest size that
+                * may be returned to CMA allocator in the case of
+                * huge page demotion.
+                */
+               res = cma_declare_contiguous_nid(0, size, 0,
+                                               PAGE_SIZE << HUGETLB_PAGE_ORDER,
                                                 0, false, name,
                                                 &hugetlb_cma[nid], nid);
                if (res) {
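
The declared alignment drops from the gigantic page size (PAGE_SIZE << order)
to PAGE_SIZE << HUGETLB_PAGE_ORDER, the smallest unit that demotion may later
hand back to this area; the order-per-bit argument itself stays 0 here.
Gigantic alignment is still requested per allocation, since cma_alloc() takes
the needed order at call time.  Roughly, paraphrasing the CONFIG_CMA branch
of the existing alloc_gigantic_page():

    if (hugetlb_cma[nid]) {
            /* nr_pages is pages_per_huge_page(h); alignment is asked for here */
            page = cma_alloc(hugetlb_cma[nid], nr_pages,
                             huge_page_order(h), true);
            if (page)
                    return page;
    }
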
                if (reserved >= hugetlb_cma_size)
                        break;
        }
+
+       if (!reserved)
+               /*
+                * hugetlb_cma_size is used to determine if allocations from
+                * cma are possible.  Set to zero if no cma regions are set up.
+                */
+               hugetlb_cma_size = 0;
 }
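
As with the too-small case earlier, clearing hugetlb_cma_size when nothing was
actually reserved keeps the later checks honest: hugetlb_cma_reserve() runs
from early arch setup, well before hugetlb_init(), so by the time
hugetlb_init_hstates() decides which hstates may be demoted the variable only
reads as nonzero if a CMA area really exists.  Rough ordering, using the
x86_64 call site as the example (other architectures call it elsewhere):

    /*
     * setup_arch()
     *   hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT)   <- may zero hugetlb_cma_size
     * ...
     * hugetlb_init()                                  <- subsys_initcall
     *   hugetlb_init_hstates()                        <- tests hugetlb_cma_size
     */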
 
 void __init hugetlb_cma_check(void)