The physical ID of the die. This information is retrieved via CPUID.
 
-  - cpuinfo_x86.phys_proc_id:
+  - cpuinfo_x86.topo.pkg_id:
 
     The physical ID of the package. This information is retrieved via CPUID
     and deduced from the APIC IDs of the cores in the package.
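
As a concrete illustration of that deduction: once the number of APIC-ID
bits that enumerate the SMT and core levels inside a package is known,
the package ID is a plain right shift. A minimal standalone sketch; the
APIC ID value and the 4-bit sub-package width are made up here, while
the kernel obtains the real width from CPUID:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t apicid = 0x35;        /* example APIC ID, 0b110101 */
            unsigned int sub_pkg_bits = 4; /* assumed SMT+core width    */

            uint32_t pkg_id  = apicid >> sub_pkg_bits;              /* 3 */
            uint32_t core_id = apicid & ((1u << sub_pkg_bits) - 1); /* 5 */

            assert(pkg_id == 0x3 && core_id == 0x5);
            return 0;
    }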
 
        u32                     apicid;
        // The initial APIC ID provided by CPUID
        u32                     initial_apicid;
+
+       // Physical package ID
+       u32                     pkg_id;
 };
 
 struct cpuinfo_x86 {
        u16                     x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
-       /* Physical processor id: */
-       u16                     phys_proc_id;
        /* Logical processor id: */
        u16                     logical_proc_id;
        /* Core id: */
 
 extern const struct cpumask *cpu_clustergroup_mask(int cpu);
 
 #define topology_logical_package_id(cpu)       (cpu_data(cpu).logical_proc_id)
-#define topology_physical_package_id(cpu)      (cpu_data(cpu).phys_proc_id)
+#define topology_physical_package_id(cpu)      (cpu_data(cpu).topo.pkg_id)
 #define topology_logical_die_id(cpu)           (cpu_data(cpu).logical_die_id)
 #define topology_die_id(cpu)                   (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
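
For reference, a hedged sketch of how topology consumers are meant to go
through these accessors instead of touching cpuinfo_x86 fields directly;
the reporting function itself is hypothetical:

    /* Hypothetical consumer: report where a CPU sits in the topology,
     * using only the accessor macros defined above. */
    static void example_report_topology(unsigned int cpu)
    {
            pr_info("CPU%u: pkg %d (logical %d), die %d, core %d\n",
                    cpu,
                    topology_physical_package_id(cpu),
                    topology_logical_package_id(cpu),
                    topology_die_id(cpu),
                    topology_core_id(cpu));
    }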
 
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
  * @setup_percpu_clockev:      set up the per cpu clock event device
  * @early_percpu_clock_init:   early init of the per cpu clock event device
- * @fixup_cpu_id:              fixup function for cpuinfo_x86::phys_proc_id
+ * @fixup_cpu_id:              fixup function for cpuinfo_x86::topo.pkg_id
  * @parallel_bringup:          Parallel bringup control
  */
 struct x86_cpuinit_ops {
 
                nodes = ((val >> 3) & 7) + 1;
        }
 
-       c->phys_proc_id = node / nodes;
+       c->topo.pkg_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
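
A worked example of the node-to-package arithmetic above, with a
fabricated register value: nodes is the number of NUMA nodes per
package, so integer division maps a node number back to its package:

    #include <assert.h>

    int main(void)
    {
            unsigned int val = 0x08;                   /* fabricated value */
            unsigned int nodes = ((val >> 3) & 7) + 1; /* -> 2 nodes/pkg   */
            unsigned int node = 5;

            assert(nodes == 2);
            assert(node / nodes == 2);  /* node 5 sits in package 2 */
            return 0;
    }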
 
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
-       c->phys_proc_id = c->topo.initial_apicid >> bits;
+       c->topo.pkg_id = c->topo.initial_apicid >> bits;
        /* use socket ID also for last level cache */
-       per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+       per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->topo.pkg_id;
 }
 
 u32 amd_get_nodes_per_socket(void)
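
The same mask-and-shift split with concrete numbers. Assuming bits = 3
(illustrative; the real width comes from CPUID on AMD parts), the low
bits index the core within the socket and the remainder is the socket:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int bits = 3;           /* assumed core-id width */
            uint32_t initial_apicid = 0x1a;  /* 0b11010               */

            uint32_t core_id = initial_apicid & ((1u << bits) - 1);
            uint32_t pkg_id  = initial_apicid >> bits;

            assert(core_id == 2);            /* 0b010 */
            assert(pkg_id  == 3);            /* 0b11  */
            return 0;
    }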
 
         * turns means that the only possibility is SMT (as indicated in
         * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
         * that SMT shares all caches, we can unconditionally set cpu_llc_id to
-        * c->phys_proc_id.
+        * c->topo.pkg_id.
         */
        if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
-               per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+               per_cpu(cpu_llc_id, cpu) = c->topo.pkg_id;
 #endif
 
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
                return;
 
        index_msb = get_count_order(smp_num_siblings);
-       c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
+       c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
 
        smp_num_siblings = smp_num_siblings / c->x86_max_cores;
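
In the detect_ht() hunk above the shift width is derived from the
sibling count via get_count_order(). A standalone stand-in for that
helper (the kernel's lives in include/linux/bitops.h) to make the
round-up-to-power-of-two behaviour concrete:

    #include <assert.h>

    /* Stand-in for get_count_order(): number of APIC-ID bits needed to
     * enumerate 'count' siblings, i.e. log2 of the next power of two
     * at or above count. */
    static int count_order(unsigned int count)
    {
            int order = 0;

            while ((1u << order) < count)
                    order++;
            return order;
    }

    int main(void)
    {
            assert(count_order(1) == 0);
            assert(count_order(2) == 1); /* 2 siblings -> 1 bit  */
            assert(count_order(6) == 3); /* 6 siblings -> 3 bits */
            return 0;
    }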
 
                c->topo.apicid = c->topo.initial_apicid;
 # endif
 #endif
-               c->phys_proc_id = c->topo.initial_apicid;
+               c->topo.pkg_id = c->topo.initial_apicid;
        }
 
        get_model_name(c); /* Default name */
                pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
                       cpu, apicid, c->topo.initial_apicid);
        }
-       BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
+       BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
        BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
 #else
        c->logical_proc_id = 0;
 
                 * when running on host.
                 */
                if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
-                       c->phys_proc_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;
+                       c->topo.pkg_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;
 
                cacheinfo_hygon_init_llc_id(c, cpu);
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
-       c->phys_proc_id = c->topo.initial_apicid >> bits;
+       c->topo.pkg_id = c->topo.initial_apicid >> bits;
        /* use socket ID also for last level cache */
-       per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+       per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->topo.pkg_id;
 }
 
 static void srat_detect_node(struct cpuinfo_x86 *c)
 
        for_each_possible_cpu(cpu) {
                if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
                        m.extcpu = cpu;
-                       m.socketid = cpu_data(m.extcpu).phys_proc_id;
+                       m.socketid = cpu_data(m.extcpu).topo.pkg_id;
                        break;
                }
        }
 
        m->time = __ktime_get_real_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
-       m->socketid = cpu_data(m->extcpu).phys_proc_id;
+       m->socketid = cpu_data(m->extcpu).topo.pkg_id;
        m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
        m->ppin = cpu_data(m->extcpu).ppin;
 
                              unsigned int cpu)
 {
 #ifdef CONFIG_SMP
-       seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+       seq_printf(m, "physical id\t: %d\n", c->topo.pkg_id);
        seq_printf(m, "siblings\t: %d\n",
                   cpumask_weight(topology_core_cpumask(cpu)));
        seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 
                                        core_plus_mask_width) & die_select_mask;
        }
 
-       c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid,
-                               pkg_mask_width);
+       c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, pkg_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
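
detect_extended_topology() obtains pkg_mask_width from CPUID leaf 0xB. A
userspace sketch of the same walk using the compiler's cpuid.h; it
assumes the CPU implements the extended topology leaf and omits error
handling:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            unsigned int level, shift = 0, x2apic_id = 0;

            for (level = 0; ; level++) {
                    __cpuid_count(0x0b, level, eax, ebx, ecx, edx);
                    if (!(ebx & 0xffff))    /* subleaf invalid: done */
                            break;
                    shift = eax & 0x1f;     /* bits to strip at this level */
                    x2apic_id = edx;
            }
            printf("pkg_id = %u\n", x2apic_id >> shift);
            return 0;
    }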
 
        for_each_possible_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-               if (c->initialized && c->phys_proc_id == phys_pkg)
+               if (c->initialized && c->topo.pkg_id == phys_pkg)
                        return c->logical_proc_id;
        }
        return -1;
  */
 static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 {
-       int cpu, proc_id = cpu_data(cur_cpu).phys_proc_id;
+       int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;
 
        for_each_possible_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);
 
                if (c->initialized && c->cpu_die_id == die_id &&
-                   c->phys_proc_id == proc_id)
+                   c->topo.pkg_id == proc_id)
                        return c->logical_die_id;
        }
        return -1;
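
Both helpers above linearly scan cpu_data() to translate the sparse,
firmware-visible IDs into the kernel's dense logical numbering. A
hypothetical caller; the per-package array is invented for this sketch:

    /* Hypothetical: index dense per-package state by a sparse physical
     * package number. example_pkg_state[] does not exist in the tree. */
    static int example_mark_pkg(unsigned int phys_pkg)
    {
            int logical = topology_phys_to_logical_pkg(phys_pkg);

            if (logical < 0)
                    return -ENODEV; /* package not onlined yet */
            example_pkg_state[logical].present = true;
            return 0;
    }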
 
        *c = boot_cpu_data;
        c->cpu_index = id;
-       topology_update_package_map(c->phys_proc_id, id);
+       topology_update_package_map(c->topo.pkg_id, id);
        topology_update_die_map(c->cpu_die_id, id);
        c->initialized = true;
 }
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
-               if (c->phys_proc_id == o->phys_proc_id &&
+               if (c->topo.pkg_id == o->topo.pkg_id &&
                    c->cpu_die_id == o->cpu_die_id &&
                    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
                        if (c->cpu_core_id == o->cpu_core_id)
                                return topology_sane(c, o, "smt");
                }
 
-       } else if (c->phys_proc_id == o->phys_proc_id &&
+       } else if (c->topo.pkg_id == o->topo.pkg_id &&
                   c->cpu_die_id == o->cpu_die_id &&
                   c->cpu_core_id == o->cpu_core_id) {
                return topology_sane(c, o, "smt");
 
 static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (c->phys_proc_id == o->phys_proc_id &&
+       if (c->topo.pkg_id == o->topo.pkg_id &&
            c->cpu_die_id == o->cpu_die_id)
                return true;
        return false;
  */
 static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (c->phys_proc_id == o->phys_proc_id)
+       if (c->topo.pkg_id == o->topo.pkg_id)
                return true;
        return false;
 }
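
These predicates drive set_cpu_sibling_map(), which links CPUs into the
scheduler's topology cpumasks. A simplified sketch of that linking step,
not the exact kernel loop:

    /* Simplified: for every CPU already set up, join the package-level
     * masks when match_pkg() says the two CPUs share a socket. */
    static void example_link_pkg_siblings(unsigned int cpu)
    {
            struct cpuinfo_x86 *c = &cpu_data(cpu);
            int i;

            for_each_cpu(i, cpu_sibling_setup_mask) {
                    struct cpuinfo_x86 *o = &cpu_data(i);

                    if (match_pkg(c, o)) {
                            cpumask_set_cpu(i, topology_core_cpumask(cpu));
                            cpumask_set_cpu(cpu, topology_core_cpumask(i));
                    }
            }
    }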
 
        int max_core_id, min_core_id;
        struct lpfc_vector_map_info *cpup;
        struct lpfc_vector_map_info *new_cpup;
-#ifdef CONFIG_X86
-       struct cpuinfo_x86 *cpuinfo;
-#endif
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        struct lpfc_hdwq_stat *c_stat;
 #endif
        for_each_present_cpu(cpu) {
                cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
-               cpuinfo = &cpu_data(cpu);
-               cpup->phys_id = cpuinfo->phys_proc_id;
-               cpup->core_id = cpuinfo->cpu_core_id;
+               cpup->phys_id = topology_physical_package_id(cpu);
+               cpup->core_id = topology_core_id(cpu);
                if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
                        cpup->flag |= LPFC_CPU_MAP_HYPER;