 #endif
 
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
+int page_group_by_mobility_disabled __read_mostly;
+
 static inline int get_pageblock_migratetype(struct page *page)
 {
+       if (unlikely(page_group_by_mobility_disabled))
+               return MIGRATE_UNMOVABLE;
+
        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 }
 
@@ ... @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
+       if (unlikely(page_group_by_mobility_disabled))
+               return MIGRATE_UNMOVABLE;
+
        /* Cluster high-order atomic allocations together */
        if (unlikely(order > 0) &&
                        (!(gfp_flags & __GFP_WAIT) || in_interrupt()))
@@ ... @@ void build_all_zonelists(void)
         /* cpuset refresh routine should be here */
        }
        vm_total_pages = nr_free_pagecache_pages();
-       printk("Built %i zonelists in %s order.  Total pages: %ld\n",
+       /*
+        * Disable grouping by mobility if the number of pages in the
+        * system is too low to allow the mechanism to work. It would be
+        * more accurate, but expensive to check per-zone. This check is
+        * made on memory-hotadd so a system can start with mobility
+        * disabled and enable it later
+        */
+       if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
+               page_group_by_mobility_disabled = 1;
+       else
+               page_group_by_mobility_disabled = 0;
+
+       printk("Built %i zonelists in %s order, mobility grouping %s.  "
+               "Total pages: %ld\n",
                        num_online_nodes(),
                        zonelist_order_name[current_zonelist_order],
+                       page_group_by_mobility_disabled ? "off" : "on",
                        vm_total_pages);
 #ifdef CONFIG_NUMA
        printk("Policy zone: %s\n", zone_names[policy_zone]);