*/
 static struct mask_info socket_info;
 static struct mask_info book_info;
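+/* drawers form a new, topmost physical topology level above books */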
+static struct mask_info drawer_info;
 
 DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 }
 
 static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
+                                         struct mask_info *drawer,
                                          struct mask_info *book,
                                          struct mask_info *socket,
                                          int one_socket_per_cpu)
 		if (lcpu < 0)
 			continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        topo = &per_cpu(cpu_topology, lcpu + i);
+                       topo->drawer_id = drawer->id;
                        topo->book_id = book->id;
                        topo->core_id = rcore;
                        topo->thread_id = lcpu + i;
+                       cpumask_set_cpu(lcpu + i, &drawer->mask);
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);
                        if (one_socket_per_cpu)
 	info = &book_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
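+	/* also reset the masks of the new drawer level */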
+       info = &drawer_info;
+       while (info) {
+               cpumask_clear(&info->mask);
+               info = info->next;
+       }
 }
 
 static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
 {
        struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
+       struct mask_info *drawer = &drawer_info;
        union topology_entry *tle, *end;
 
        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
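+		/* nl == 3: a drawer container entry in the topology list */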
+               case 3:
+                       drawer = drawer->next;
+                       drawer->id = tle->container.id;
+                       break;
 		case 2:
 			book = book->next;
 			book->id = tle->container.id;
 			break;
 		case 1:
 			socket = socket->next;
 			socket->id = tle->container.id;
 			break;
                case 0:
-                       add_cpus_to_mask(&tle->cpu, book, socket, 0);
+                       add_cpus_to_mask(&tle->cpu, drawer, book, socket, 0);
                        break;
                default:
                        clear_masks();

 static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
 {
        struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
+       struct mask_info *drawer = &drawer_info;
        union topology_entry *tle, *end;
 
        tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
 		switch (tle->nl) {
 		case 1:
 			book = book->next;
 			book->id = tle->container.id;
                        break;
                case 0:
-                       socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
+                       socket = add_cpus_to_mask(&tle->cpu, drawer, book, socket, 1);
                        break;
                default:
                        clear_masks();
                topo->thread_mask = cpu_thread_map(cpu);
                topo->core_mask = cpu_group_map(&socket_info, cpu);
                topo->book_mask = cpu_group_map(&book_info, cpu);
+               topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
                if (!MACHINE_HAS_TOPOLOGY) {
                        topo->thread_id = cpu;
                        topo->core_id = cpu;
                        topo->socket_id = cpu;
                        topo->book_id = cpu;
+                       topo->drawer_id = cpu;
                }
        }
        numa_update_cpu_topology();
 
 static void store_topology(struct sysinfo_15_1_x *info)
 {
-       if (topology_max_mnest >= 3)
-               stsi(info, 15, 1, 3);
-       else
-               stsi(info, 15, 1, 2);
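+	/* one stsi call; mnest is capped at 4 now that drawers add a level */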
+       stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
 int arch_update_cpu_topology(void)

 static const struct cpumask *cpu_book_mask(int cpu)
 {
 	return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static const struct cpumask *cpu_drawer_mask(int cpu)
+{
+       return &per_cpu(cpu_topology, cpu).drawer_mask;
+}
+
 static int __init early_parse_topology(char *p)
 {
        return kstrtobool(p, &topology_enabled);
 }

 static struct sched_domain_topology_level s390_topology[] = {
 	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
        { cpu_book_mask, SD_INIT_NAME(BOOK) },
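+	/* new DRAWER scheduling domain between BOOK and DIE */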
+       { cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
 };
        printk(KERN_CONT " / %d\n", info->mnest);
        alloc_masks(info, &socket_info, 1);
        alloc_masks(info, &book_info, 2);
+       alloc_masks(info, &drawer_info, 3);
        set_sched_topology(s390_topology);
        return 0;
 }
 
 #define DIST_CORE      1
 #define DIST_MC        2
 #define DIST_BOOK      3
-#define DIST_MAX       4
+#define DIST_DRAWER    4
+#define DIST_MAX       5
 
 /* Node distance reported to common code */
 #define EMU_NODE_DIST  10
 #define NODE_ID_FREE   -1
 
 /* Different levels of toptree */
-enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY};
+enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY};
 
 /* The two toptree IDs */
 enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
  * Return node of core
  */
 static struct toptree *core_node(struct toptree *core)
+{
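+	/* core -> mc -> book -> drawer -> node */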
+       return core->parent->parent->parent->parent;
+}
+
+/*
+ * Return drawer of core
+ */
+static struct toptree *core_drawer(struct toptree *core)
 {
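+	/* core -> mc -> book -> drawer */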
        return core->parent->parent->parent;
 }
  */
 static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
 {
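+	/* coarsest level first: different drawers is the largest distance */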
+       if (core_drawer(core1)->id != core_drawer(core2)->id)
+               return DIST_DRAWER;
        if (core_book(core1)->id != core_book(core2)->id)
                return DIST_BOOK;
        if (core_mc(core1)->id != core_mc(core2)->id)
        struct toptree *core;
 
        /* Always try to move perfectly fitting structures first */
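+	/* drawers are the largest units and are therefore tried first */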
+       move_level_to_numa(numa, phys, DRAWER, true);
+       move_level_to_numa(numa, phys, DRAWER, false);
        move_level_to_numa(numa, phys, BOOK, true);
        move_level_to_numa(numa, phys, BOOK, false);
        move_level_to_numa(numa, phys, MC, true);
  */
 static struct toptree *toptree_from_topology(void)
 {
-       struct toptree *phys, *node, *book, *mc, *core;
+       struct toptree *phys, *node, *drawer, *book, *mc, *core;
        struct cpu_topology_s390 *top;
        int cpu;
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);

 	for_each_online_cpu(cpu) {
                top = &per_cpu(cpu_topology, cpu);
                node = toptree_get_child(phys, 0);
-               book = toptree_get_child(node, top->book_id);
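+		/* the drawer level slots in between node and book */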
+               drawer = toptree_get_child(node, top->drawer_id);
+               book = toptree_get_child(drawer, top->book_id);
                mc = toptree_get_child(book, top->socket_id);
                core = toptree_get_child(mc, top->core_id);
-               if (!book || !mc || !core)
+               if (!drawer || !book || !mc || !core)
                        panic("NUMA emulation could not allocate memory");
                cpumask_set_cpu(cpu, &core->mask);
                toptree_update_mask(mc);
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);
+               cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask);
                cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
                top->node_id = core_node(core)->id;
        }