#endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+extern void resize_hpt_for_hotplug(unsigned long new_mem_size);
 extern int create_section_mapping(unsigned long start, unsigned long end);
 extern int remove_section_mapping(unsigned long start, unsigned long end);
 #ifdef CONFIG_NUMA
 
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+void resize_hpt_for_hotplug(unsigned long new_mem_size)
+{
+       unsigned int target_hpt_shift;
+
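+       /* Nothing to do if this platform has no way to resize the HPT */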
+       if (!mmu_hash_ops.resize_hpt)
+               return;
+
+       target_hpt_shift = htab_shift_for_mem_size(new_mem_size);
+
+       /*
+        * To avoid lots of HPT resizes if memory size is fluctuating
+        * across a boundary, we deliberately have some hysteresis
+        * here: we immediately increase the HPT size if the target
+        * shift exceeds the current shift, but we won't attempt to
+        * reduce unless the target shift is at least 2 below the
+        * current shift.
+        */
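+       /*
+        * For example (illustrative numbers only): with ppc64_pft_size
+        * at 26, a target shift of 27 grows the HPT immediately, a
+        * target of 25 leaves it unchanged, and a shrink is only
+        * attempted once the target drops to 24 or below.
+        */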
+       if ((target_hpt_shift > ppc64_pft_size)
+           || (target_hpt_shift < (ppc64_pft_size - 1))) {
+               int rc;
+
+               rc = mmu_hash_ops.resize_hpt(target_hpt_shift);
+               if (rc)
+                       printk(KERN_WARNING
+                              "Unable to resize hash page table to target order %d: %d\n",
+                              target_hpt_shift, rc);
+       }
+}
+
 int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_bolt_mapping(start, end, __pa(start),
 
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;
 
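+       /* Make sure the HPT is large enough to cover the hot-added memory */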
+       resize_hpt_for_hotplug(memblock_phys_mem_size());
+
        pgdata = NODE_DATA(nid);
 
        start = (unsigned long)__va(start);
         */
        vm_unmap_aliases();
 
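+       /* The HPT may now be bigger than needed; shrink it if so */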
+       resize_hpt_for_hotplug(memblock_phys_mem_size());
+
        return ret;
 }
 #endif