extern struct lppaca lppaca[];
 
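+/*
+ * Accessor for the per-cpu lppaca structs.  Keeping every user behind
+ * this macro means the underlying storage can be changed later without
+ * having to touch each call site again.
+ */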
+#define lppaca_of(cpu) (lppaca[cpu])
+
 /*
  * SLB shadow buffer structure as defined in the PAPR.  The save_area
  * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
 
 
        for_each_possible_cpu(cpu) {
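+               /* legacy iSeries exposes an emulated time base via the
+                * lppaca rather than a raw PURR */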
                if (firmware_has_feature(FW_FEATURE_ISERIES))
-                       sum_purr += lppaca[cpu].emulated_time_base;
+                       sum_purr += lppaca_of(cpu).emulated_time_base;
                else {
                        struct cpu_usage *cu;
 
                   ppp_data.active_system_procs);
 
        /* pool-related entries are appropriate for shared configs */
-       if (lppaca[0].shared_proc) {
+       if (lppaca_of(0).shared_proc) {
                unsigned long pool_idle_time, pool_procs;
 
                seq_printf(m, "pool=%d\n", ppp_data.pool_num);
                return;
 
        for_each_possible_cpu(cpu) {
-               cmo_faults += lppaca[cpu].cmo_faults;
-               cmo_fault_time += lppaca[cpu].cmo_fault_time;
+               cmo_faults += lppaca_of(cpu).cmo_faults;
+               cmo_fault_time += lppaca_of(cpu).cmo_fault_time;
        }
 
        seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
        unsigned long dispatch_dispersions = 0;
 
        for_each_possible_cpu(cpu) {
-               dispatches += lppaca[cpu].yield_count;
-               dispatch_dispersions += lppaca[cpu].dispersion_count;
+               dispatches += lppaca_of(cpu).yield_count;
+               dispatch_dispersions += lppaca_of(cpu).dispersion_count;
        }
 
        seq_printf(m, "dispatches=%lu\n", dispatches);
        seq_printf(m, "partition_potential_processors=%d\n",
                   partition_potential_processors);
 
-       seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
+       seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc);
 
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
 
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
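+       /*
+        * The holder's yield_count is maintained by the hypervisor and is
+        * odd while the holder's virtual cpu is preempted; if it is even
+        * the holder is running and yielding to it gains nothing.
+        */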
-       yield_count = lppaca[holder_cpu].yield_count;
+       yield_count = lppaca_of(holder_cpu).yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       yield_count = lppaca[holder_cpu].yield_count;
+       yield_count = lppaca_of(holder_cpu).yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
 
        pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
        for (i = 0; i < NR_CPUS; i++) {
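+               /* skip virtual processors not owned by this partition */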
-               if (lppaca[i].dyn_proc_status >= 2)
+               if (lppaca_of(i).dyn_proc_status >= 2)
                        continue;
 
                snprintf(p, 32 - (p - buf), "@%d", i);
 
                dt_prop_str(dt, "device_type", device_type_cpu);
 
-               index = lppaca[i].dyn_hv_phys_proc_index;
+               index = lppaca_of(i).dyn_hv_phys_proc_index;
                d = &xIoHriProcessorVpd[index];
 
                dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
 
        BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
        /* Verify that our partition has a processor nr */
-       if (lppaca[nr].dyn_proc_status >= 2)
+       if (lppaca_of(nr).dyn_proc_status >= 2)
                return;
 
        /* The processor is currently spinning, waiting
 
        }
 
        /* set our initial buffer indices */
-       dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;
+       dtl->last_idx = lppaca_of(dtl->cpu).dtl_idx = 0;
 
        /* ensure that our updates to the lppaca fields have occurred before
         * we actually enable the logging */
        smp_wmb();
 
        /* enable event logging */
-       lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;
+       lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
 
        return 0;
 }
 {
        int hwcpu = get_hard_smp_processor_id(dtl->cpu);
 
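+       /* stop the hypervisor logging new entries before the buffer
+        * is unregistered below */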
-       lppaca[dtl->cpu].dtl_enable_mask = 0x0;
+       lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
 
        unregister_dtl(hwcpu, __pa(dtl->buf));
 
        /* actual number of entries read */
        n_read = 0;
 
-       cur_idx = lppaca[dtl->cpu].dtl_idx;
+       cur_idx = lppaca_of(dtl->cpu).dtl_idx;
        last_idx = dtl->last_idx;
 
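+       /* if the hypervisor's write index has moved more than a whole
+        * buffer past our read cursor, older entries have been
+        * overwritten and are lost */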
        if (cur_idx - last_idx > dtl->buf_entries) {
 
        long ret;
 
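+       /* tell the hypervisor to save and restore the VMX registers
+        * for this virtual processor */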
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
-               lppaca[cpu].vmxregs_in_use = 1;
+               lppaca_of(cpu).vmxregs_in_use = 1;
 
-       addr = __pa(&lppaca[cpu]);
+       addr = __pa(&lppaca_of(cpu));
        ret = register_vpa(hwcpu, addr);
 
        if (ret) {