unsigned long n;
 };
 
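+/*
+ * Accessors for the CPU core & VP(E) ID fields of struct cpuinfo_mips.
+ * Using these rather than accessing the fields directly keeps callers
+ * independent of which fields exist in the current kernel configuration.
+ */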
+static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo)
+{
+       return cpuinfo->core;
+}
+
+static inline void cpu_set_core(struct cpuinfo_mips *cpuinfo,
+                               unsigned int core)
+{
+       cpuinfo->core = core;
+}
+
+static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo)
+{
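+       /* Return the VPE ID where supported, otherwise 0 */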
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
-# define cpu_vpe_id(cpuinfo)   ((cpuinfo)->vpe_id)
-#else
-# define cpu_vpe_id(cpuinfo)   ({ (void)cpuinfo; 0; })
+       return cpuinfo->vpe_id;
 #endif
+       return 0;
+}
+
+static inline void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo,
+                                 unsigned int vpe)
+{
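+       /* Update the VPE ID where supported; no-op otherwise */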
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+       cpuinfo->vpe_id = vpe;
+#endif
+}
 
 static inline unsigned long cpu_asid_inc(void)
 {
 
  */
 static inline unsigned int mips_cm_vp_id(unsigned int cpu)
 {
-       unsigned int core = cpu_data[cpu].core;
+       unsigned int core = cpu_core(&cpu_data[cpu]);
        unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
 
        return (core * mips_cm_max_vp_width()) + vp;
 
 
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
-#define topology_core_id(cpu)                  (cpu_data[cpu].core)
+#define topology_core_id(cpu)                  (cpu_core(&cpu_data[cpu]))
 #define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
 #define topology_sibling_cpumask(cpu)          (&cpu_sibling_map[cpu])
 #endif
 
 
 #ifndef CONFIG_MIPS_CPS
        if (cpu_has_mips_r2_r6) {
-               c->core = get_ebase_cpunum();
+               unsigned int core;
+
+               core = get_ebase_cpunum();
                if (cpu_has_mipsmt)
-                       c->core >>= fls(core_nvpes()) - 1;
+                       core >>= fls(core_nvpes()) - 1;
+               cpu_set_core(c, core);
        }
 #endif
 }
 
                 * CM 2.5 & older, so have to ensure other VP(E)s don't
                 * race with us.
                 */
-               curr_core = current_cpu_data.core;
+               curr_core = cpu_core(&current_cpu_data);
                spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
                                  per_cpu(cm_core_lock_flags, curr_core));
 
        unsigned int curr_core;
 
        if (mips_cm_revision() < CM_REV_CM3) {
-               curr_core = current_cpu_data.core;
+               curr_core = cpu_core(&current_cpu_data);
                spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
                                       per_cpu(cm_core_lock_flags, curr_core));
        } else {
 
                return;
 
        preempt_disable();
-       curr_core = current_cpu_data.core;
+       curr_core = cpu_core(&current_cpu_data);
        spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
                          per_cpu(cpc_core_lock_flags, curr_core));
        write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
                /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
                return;
 
-       curr_core = current_cpu_data.core;
+       curr_core = cpu_core(&current_cpu_data);
        spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
                               per_cpu(cpc_core_lock_flags, curr_core));
        preempt_enable();
 
 int cps_pm_enter_state(enum cps_pm_state state)
 {
        unsigned cpu = smp_processor_id();
-       unsigned core = current_cpu_data.core;
+       unsigned core = cpu_core(&current_cpu_data);
        unsigned online, left;
        cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
        u32 *core_ready_count, *nc_core_ready_count;
                * defined by the interAptiv & proAptiv SUMs as ensuring that the
                *  operation resulting from the preceding store is complete.
                */
-               uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+               uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
                uasm_i_sw(&p, t0, 0, r_pcohctl);
                uasm_i_lw(&p, t0, 0, r_pcohctl);
 
 static int cps_pm_online_cpu(unsigned int cpu)
 {
        enum cps_pm_state state;
-       unsigned core = cpu_data[cpu].core;
+       unsigned core = cpu_core(&cpu_data[cpu]);
        void *entry_fn, *core_rc;
 
        for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
 
        seq_printf(m, "kscratch registers\t: %d\n",
                      hweight8(cpu_data[n].kscratch_mask));
        seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
-       seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+       seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
 
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
        if (cpu_has_mipsmt)
-               seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+               seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
        else if (cpu_has_vp)
-               seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id);
+               seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
 #endif
 
        sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
 
                break;
        case CPU_BMIPS5000:
                write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
-               current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3;
+               cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
                break;
        }
 }
 
                        smp_num_siblings = core_vpes;
 
                for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
-                       cpu_data[nvpes + v].core = c;
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
-                       cpu_data[nvpes + v].vpe_id = v;
-#endif
+                       cpu_set_core(&cpu_data[nvpes + v], c);
+                       cpu_set_vpe_id(&cpu_data[nvpes + v], v);
                }
 
                nvpes += core_vpes;
                        cpu_has_dc_aliases ? "dcache aliasing" : "");
 
                for_each_present_cpu(c) {
-                       if (cpu_data[c].core)
+                       if (cpu_core(&cpu_data[c]))
                                set_cpu_present(c, false);
                }
        }
        }
 
        /* Mark this CPU as booted */
-       atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+       atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
                   1 << cpu_vpe_id(&current_cpu_data));
 
        return;
 
 static void remote_vpe_boot(void *dummy)
 {
-       unsigned core = current_cpu_data.core;
+       unsigned core = cpu_core(&current_cpu_data);
        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 
        mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
 {
-       unsigned core = cpu_data[cpu].core;
+       unsigned core = cpu_core(&cpu_data[cpu]);
        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
        struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
                mips_cm_unlock_other();
        }
 
-       if (core != current_cpu_data.core) {
+       if (core != cpu_core(&current_cpu_data)) {
                /* Boot a VPE on another powered up core */
                for (remote = 0; remote < NR_CPUS; remote++) {
-                       if (cpu_data[remote].core != core)
+                       if (cpu_core(&cpu_data[remote]) != core)
                                continue;
                        if (cpu_online(remote))
                                break;
        if (!cps_pm_support_state(CPS_PM_POWER_GATED))
                return -EINVAL;
 
-       core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+       core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
        smp_mb__after_atomic();
        set_cpu_online(cpu, false);
        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
-       core = cpu_data[cpu].core;
+       core = cpu_core(&cpu_data[cpu]);
        cpu_death = CPU_DEATH_POWER;
 
        pr_debug("CPU%d going offline\n", cpu);
 
        if (cpu_has_mipsmt || cpu_has_vp) {
+               core = cpu_core(&cpu_data[cpu]);
+
                /* Look for another online VPE within the core */
                for_each_online_cpu(cpu_death_sibling) {
-                       if (cpu_data[cpu_death_sibling].core != core)
+                       if (cpu_core(&cpu_data[cpu_death_sibling]) != core)
                                continue;
 
                        /*
 
 static void cps_cpu_die(unsigned int cpu)
 {
-       unsigned core = cpu_data[cpu].core;
+       unsigned core = cpu_core(&cpu_data[cpu]);
        unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        ktime_t fail_time;
        unsigned stat;
 
        if (tc != 0)
                smvp_copy_vpe_config();
 
-       cpu_data[ncpu].vpe_id = tc;
+       cpu_set_vpe_id(&cpu_data[ncpu], tc);
 
        return ncpu;
 }
 
        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
-                                   cpu_data[cpu].core == cpu_data[i].core) {
+                           cpu_core(&cpu_data[cpu]) == cpu_core(&cpu_data[i])) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpu_data[i].package == cpu_data[k].package &&
-                           cpu_data[i].core == cpu_data[k].core)
+                           cpu_core(&cpu_data[i]) == cpu_core(&cpu_data[k]))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
 
        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
-                       core = cpu_data[cpu].core;
+                       core = cpu_core(&cpu_data[cpu]);
 
-                       if (core == current_cpu_data.core)
+                       if (core == cpu_core(&current_cpu_data))
                                continue;
 
                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
 
                loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);
 
        per_cpu(cpu_state, cpu) = CPU_ONLINE;
-       cpu_data[cpu].core =
-               cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+       cpu_set_core(&cpu_data[cpu],
+                    cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
        cpu_data[cpu].package =
                cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 
        ipi_status0_regs_init();
        ipi_en0_regs_init();
        ipi_mailbox_buf_init();
-       cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+       cpu_set_core(&cpu_data[0],
+                    cpu_logical_map(0) % loongson_sysconf.cores_per_package);
        cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
 }
 
 
 static int loongson3_disable_clock(unsigned int cpu)
 {
-       uint64_t core_id = cpu_data[cpu].core;
+       uint64_t core_id = cpu_core(&cpu_data[cpu]);
        uint64_t package_id = cpu_data[cpu].package;
 
        if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
 
 static int loongson3_enable_clock(unsigned int cpu)
 {
-       uint64_t core_id = cpu_data[cpu].core;
+       uint64_t core_id = cpu_core(&cpu_data[cpu]);
        uint64_t package_id = cpu_data[cpu].package;
 
        if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
 
        int hwtid;
 
        hwtid = hard_smp_processor_id();
-       current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+       cpu_set_core(&current_cpu_data, hwtid / NLM_THREADS_PER_CORE);
        current_cpu_data.package = nlm_nodeid();
        nlm_percpu_init(hwtid);
        nlm_smp_irq_init(hwtid);
 
 #ifdef CONFIG_MIPS_MT_SMP
 static int cpu_has_mipsmt_pertccounters;
 #define WHAT           (MIPS_PERFCTRL_MT_EN_VPE | \
-                        M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
+                        M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
 #define vpe_id()       (cpu_has_mipsmt_pertccounters ? \
-                       0 : cpu_data[smp_processor_id()].vpe_id)
+                       0 : cpu_vpe_id(&current_cpu_data))
 
 /*
  * The number of bits to shift to convert between counters per core and
 
         * TODO: don't treat core 0 specially, just prevent the final core
         * TODO: remap interrupt affinity temporarily
         */
-       if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
+       if (!cpu_core(&cpu_data[dev->cpu]) && (index > STATE_NC_WAIT))
                index = STATE_NC_WAIT;
 
        /* Select the appropriate cps_pm_state */