extern u16 __read_mostly tlb_lld_1g[NR_INFO];
 
 /*
- *  CPU type and hardware bug flags. Kept separately for each CPU.
- *  Members of this structure are referenced in head_32.S, so think twice
- *  before touching them. [mj]
+ * CPU type and hardware bug flags. Kept separately for each CPU.
  */
 
+struct cpuinfo_topology {
+       // Real APIC ID read from the local APIC
+       u32                     apicid;
+       // The initial APIC ID provided by CPUID
+       u32                     initial_apicid;
+};
+
 struct cpuinfo_x86 {
        __u8                    x86;            /* CPU family */
        __u8                    x86_vendor;     /* CPU vendor */
        };
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
+       struct cpuinfo_topology topo;
        /* in KB - valid for CPUs which support this call: */
        unsigned int            x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        u64                     ppin;
        /* cpuid returned max cores value: */
        u16                     x86_max_cores;
-       u16                     apicid;
-       u16                     initial_apicid;
        u16                     x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
 
 
        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
-       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+       c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
-       c->phys_proc_id = c->initial_apicid >> bits;
+       c->phys_proc_id = c->topo.initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
 }
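The two statements above are a plain bit-field split of the initial APIC ID: the low x86_coreid_bits bits index the core within the socket, and the remaining high bits give the socket. A minimal user-space sketch of the same arithmetic, with made-up inputs (0x1A and the 3-bit core field are illustration only, not from this patch):

#include <stdio.h>

int main(void)
{
	unsigned int initial_apicid = 0x1A;	/* hypothetical CPUID value */
	unsigned int bits = 3;			/* hypothetical x86_coreid_bits */

	/* same arithmetic as cpu_core_id / phys_proc_id above */
	unsigned int core_id = initial_apicid & ((1u << bits) - 1);	/* 0x1A & 0x7 = 2 */
	unsigned int pkg_id  = initial_apicid >> bits;			/* 0x1A >> 3  = 3 */

	printf("core %u in package %u\n", core_id, pkg_id);
	return 0;
}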
 #ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
-       unsigned apicid = c->apicid;
+       unsigned apicid = c->topo.apicid;
 
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                 * through CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
-               int ht_nodeid = c->initial_apicid;
+               int ht_nodeid = c->topo.initial_apicid;
 
                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                set_cpu_cap(c, X86_FEATURE_FSRS);
 
        /* Get the APIC ID from the local APIC instead of the initial APIC ID from CPUID */
-       c->apicid = read_apic_id();
+       c->topo.apicid = read_apic_id();
 
        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
 
                 * LLC is at the core complex level.
                 * Core complex ID is ApicId[3] for these processors.
                 */
-               per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+               per_cpu(cpu_llc_id, cpu) = c->topo.apicid >> 3;
        } else {
                /*
                 * LLC ID is calculated from the number of threads sharing the
                if (num_sharing_cache) {
                        int bits = get_count_order(num_sharing_cache);
 
-                       per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
+                       per_cpu(cpu_llc_id, cpu) = c->topo.apicid >> bits;
                }
        }
 }
         * LLC is at the core complex level.
         * Core complex ID is ApicId[3] for these processors.
         */
-       per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+       per_cpu(cpu_llc_id, cpu) = c->topo.apicid >> 3;
 }
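Both the AMD and the Hygon variant above use apicid >> 3 because the LLC is per core complex and the low three APIC ID bits enumerate the threads within one complex. A user-space sketch of the resulting grouping (the eight-IDs-per-complex layout is taken from the comment, not verified here):

#include <stdio.h>

int main(void)
{
	/* APIC IDs 0..7 map to LLC domain 0, 8..15 to domain 1, and so on */
	for (unsigned int apicid = 0; apicid < 16; apicid++)
		printf("apicid %2u -> llc_id %u\n", apicid, apicid >> 3);
	return 0;
}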
 
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
                                new_l2 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
-                               l2_id = c->apicid & ~((1 << index_msb) - 1);
+                               l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
                                break;
                        case 3:
                                new_l3 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
-                               l3_id = c->apicid & ~((1 << index_msb) - 1);
+                               l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
                                break;
                        default:
                                break;
                unsigned int apicid, nshared, first, last;
 
                nshared = base->eax.split.num_threads_sharing + 1;
-               apicid = cpu_data(cpu).apicid;
+               apicid = cpu_data(cpu).topo.apicid;
                first = apicid - (apicid % nshared);
                last = first + nshared - 1;
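Here first rounds the APIC ID down to the start of its sharing group: with hypothetical values nshared = 4 and apicid = 6, first = 6 - (6 % 4) = 4 and last = 7, so the range checks below admit exactly the APIC IDs 4..7 that share this cache.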
 
                        if (!this_cpu_ci->info_list)
                                continue;
 
-                       apicid = cpu_data(i).apicid;
+                       apicid = cpu_data(i).topo.apicid;
                        if ((apicid < first) || (apicid > last))
                                continue;
 
                        this_leaf = this_cpu_ci->info_list + index;
 
                        for_each_online_cpu(sibling) {
-                               apicid = cpu_data(sibling).apicid;
+                               apicid = cpu_data(sibling).topo.apicid;
                                if ((apicid < first) || (apicid > last))
                                        continue;
                                cpumask_set_cpu(sibling,
        index_msb = get_count_order(num_threads_sharing);
 
        for_each_online_cpu(i)
-               if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
+               if (cpu_data(i).topo.apicid >> index_msb == c->topo.apicid >> index_msb) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
 
                        if (i == cpu || !sib_cpu_ci->info_list)
 
        num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
        index_msb = get_count_order(num_threads_sharing);
-       id4_regs->id = c->apicid >> index_msb;
+       id4_regs->id = c->topo.apicid >> index_msb;
 }
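get_count_order(n) returns ceil(log2(n)), so shifting or masking an APIC ID by it groups IDs into power-of-two blocks just large enough for n sharing threads; this is the common pattern behind the l2_id/l3_id masks and the id shift above. A user-space sketch with a hypothetical stand-in for get_count_order():

#include <stdio.h>

/* hypothetical user-space stand-in for the kernel's get_count_order(): ceil(log2(n)) */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int num_sharing = 6;			/* hypothetical threads per cache */
	int index_msb = count_order(num_sharing);	/* 3: next power of two is 8 */
	unsigned int apicid = 0x1D;

	/* both forms name the same cache domain, one normalized, one not */
	printf("id by shift: %u\n", apicid >> index_msb);			/* 3 */
	printf("id by mask : %#x\n", apicid & ~((1u << index_msb) - 1));	/* 0x18 */
	return 0;
}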
 
 int populate_cache_leaves(unsigned int cpu)
 
                return;
 
        index_msb = get_count_order(smp_num_siblings);
-       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+       c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
 
        smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
 
        core_bits = get_count_order(c->x86_max_cores);
 
-       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+       c->cpu_core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);
 #endif
 }
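A worked example with hypothetical numbers, assuming the default phys_pkg_id() implementation (a plain right shift of the APIC ID): 8 siblings per package spread over 4 cores gives index_msb = 3, so phys_proc_id = initial_apicid >> 3; smp_num_siblings then drops to 8/4 = 2, index_msb is recomputed (in lines elided here) to 1, core_bits = 2, and cpu_core_id = (initial_apicid >> 1) & 0x3. For initial_apicid = 13 that yields package 1, core 2.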
        get_cpu_address_sizes(c);
 
        if (c->cpuid_level >= 0x00000001) {
-               c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+               c->topo.initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_SMP
-               c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+               c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 # else
-               c->apicid = c->initial_apicid;
+               c->topo.apicid = c->topo.initial_apicid;
 # endif
 #endif
-               c->phys_proc_id = c->initial_apicid;
+               c->phys_proc_id = c->topo.initial_apicid;
        }
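The initial APIC ID read here is CPUID leaf 1, EBX bits 31:24, which is also why it is inherently limited to 8 bits on this path. A user-space equivalent using the compiler's cpuid.h helper (illustration only, not kernel code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EBX[31:24] of leaf 1: the same field the kernel code above extracts */
	printf("initial APIC ID: %u\n", (ebx >> 24) & 0xFF);
	return 0;
}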
 
        get_model_name(c); /* Default name */
 
        apicid = apic->cpu_present_to_apicid(cpu);
 
-       if (apicid != c->apicid) {
+       if (apicid != c->topo.apicid) {
                pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
-                      cpu, apicid, c->initial_apicid);
+                      cpu, apicid, c->topo.initial_apicid);
        }
        BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
        BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
        apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
-       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+       c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 #endif
 
        /*
 
                 * when running on host.
                 */
                if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
-                       c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+                       c->phys_proc_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;
 
                cacheinfo_hygon_init_llc_id(c, cpu);
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 
        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
-       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+       c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
-       c->phys_proc_id = c->initial_apicid >> bits;
+       c->phys_proc_id = c->topo.initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
 }
 #ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
-       unsigned int apicid = c->apicid;
+       unsigned int apicid = c->topo.apicid;
 
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                 * through CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
-               int ht_nodeid = c->initial_apicid;
+               int ht_nodeid = c->topo.initial_apicid;
 
                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
        /* Get the APIC ID from the local APIC instead of the initial APIC ID from CPUID */
-       c->apicid = read_apic_id();
+       c->topo.apicid = read_apic_id();
 
        /*
         * XXX someone from Hygon needs to confirm this DTRT
 
        m.socketid = -1;
 
        for_each_possible_cpu(cpu) {
-               if (cpu_data(cpu).initial_apicid == lapic_id) {
+               if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
                        m.extcpu = cpu;
                        m.socketid = cpu_data(m.extcpu).phys_proc_id;
                        break;
 
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
-       m->apicid = cpu_data(m->extcpu).initial_apicid;
+       m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
        m->ppin = cpu_data(m->extcpu).ppin;
        m->microcode = boot_cpu_data.microcode;
 
                   cpumask_weight(topology_core_cpumask(cpu)));
        seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
        seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-       seq_printf(m, "apicid\t\t: %d\n", c->apicid);
-       seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+       seq_printf(m, "apicid\t\t: %d\n", c->topo.apicid);
+       seq_printf(m, "initial apicid\t: %d\n", c->topo.initial_apicid);
 #endif
 }
 
 
        /*
         * initial apic id, which also represents 32-bit extended x2apic id.
         */
-       c->initial_apicid = edx;
+       c->topo.initial_apicid = edx;
        smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
 #endif
        return 0;
         * Populate HT related information from sub-leaf level 0.
         */
        cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
-       c->initial_apicid = edx;
+       c->topo.initial_apicid = edx;
        core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
        smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
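Sub-leaf 0 of the extended topology leaf describes the SMT level: EDX carries the full 32-bit x2APIC ID (unlike the 8-bit leaf 1 field), EBX[15:0] the number of logical processors at the level, and EAX[4:0] the shift width up to the next level; those are the fields LEVEL_MAX_SIBLINGS() and BITS_SHIFT_NEXT_LEVEL() extract. A user-space sketch against leaf 0xB (leaf 0x1F shares the layout):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0xB, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("x2APIC ID          : %u\n", edx);		/* full 32-bit ID */
	printf("threads at SMT lvl : %u\n", ebx & 0xFFFF);	/* LEVEL_MAX_SIBLINGS() */
	printf("shift to next level: %u\n", eax & 0x1F);	/* BITS_SHIFT_NEXT_LEVEL() */
	return 0;
}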
        die_select_mask = (~(-1 << die_plus_mask_width)) >>
                                core_plus_mask_width;
 
-       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
+       c->cpu_core_id = apic->phys_pkg_id(c->topo.initial_apicid,
                                ht_mask_width) & core_select_mask;
 
        if (die_level_present) {
-               c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
+               c->cpu_die_id = apic->phys_pkg_id(c->topo.initial_apicid,
                                        core_plus_mask_width) & die_select_mask;
        }
 
-       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
+       c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid,
                                pkg_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
-       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+       c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);
        __max_die_per_package = (die_level_siblings / core_level_siblings);
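The mask arithmetic packs the die bits down after the core bits are shifted out: with hypothetical widths core_plus_mask_width = 3 and die_plus_mask_width = 5, die_select_mask = (~(-1 << 5)) >> 3 = 0x1f >> 3 = 0x3, so cpu_die_id = (initial_apicid >> 3) & 0x3 selects APIC ID bits 4:3 as the die within the package. The divisions work the same way: 16 core-level siblings over 2 SMT siblings gives x86_max_cores = 8.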
 
 static int xen_cpu_present_to_apicid(int cpu)
 {
        if (cpu_present(cpu))
-               return cpu_data(cpu).apicid;
+               return cpu_data(cpu).topo.apicid;
        else
                return BAD_APICID;
 }
 
        if (first_cpu_of_numa_node >= nr_cpu_ids)
                return -1;
 #ifdef CONFIG_X86_64
-       return cpu_data(first_cpu_of_numa_node).apicid;
+       return cpu_data(first_cpu_of_numa_node).topo.apicid;
 #else
        return first_cpu_of_numa_node;
 #endif
 
        if (cpu_online(cpu))
                remove_cpu(cpu);
 
-       lapicid = cpu_data(cpu).apicid;
+       lapicid = cpu_data(cpu).topo.apicid;
        dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
        ret = hcall_sos_remove_cpu(lapicid);
        if (ret < 0) {