#define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()        __get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending()        __this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
 
 static inline void ack_bad_irq(unsigned int irq)
 {
 
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
        batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
        if (batch->index)
                __flush_tlb_pending(batch);
 
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
        if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
                return;
 
 static inline unsigned char xics_pop_cppr(void)
 {
-       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
        if (WARN_ON(os_cppr->index < 1))
                return LOWEST_PRIORITY;
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
        /* we only really want to set the priority when there's
         * just one cppr value on the stack
 
 static inline unsigned char xics_cppr_top(void)
 {
-       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
        
        return os_cppr->stack[os_cppr->index];
 }
 
 
        may_hard_irq_enable();
 
-       __get_cpu_var(irq_stat).doorbell_irqs++;
+       __this_cpu_inc(irq_stat.doorbell_irqs);
 
        smp_ipi_demux();
 
 
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-       struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+       struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
        *slot = bp;
 
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-       struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+       struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
        if (*slot != bp) {
                WARN_ONCE(1, "Can't find the breakpoint");
         */
        rcu_read_lock();
 
-       bp = __get_cpu_var(bp_per_reg);
+       bp = __this_cpu_read(bp_per_reg);
        if (!bp)
                goto out;
        info = counter_arch_bp(bp);
 
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
-       pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
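+       /* raw_cpu_read() mirrors the old __raw variant and avoids a spurious preemption warning */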
+       pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 
        if (largealloc)
                pool = &(tbl->large_pool);
 
 static inline notrace int decrementer_check_overflow(void)
 {
        u64 now = get_tb_or_rtc();
-       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
  
        return now >= *next_tb;
 }
 
        /* And finally process it */
        if (unlikely(irq == NO_IRQ))
-               __get_cpu_var(irq_stat).spurious_irqs++;
+               __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);
 
 
 {
        struct thread_info *thread_info, *exception_thread_info;
        struct thread_info *backup_current_thread_info =
-               &__get_cpu_var(kgdb_thread_info);
+               this_cpu_ptr(&kgdb_thread_info);
 
        if (user_mode(regs))
                return 0;
 
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = p;
+       __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_msr = regs->msr;
 }
 
                                ret = 1;
                                goto no_kprobe;
                        }
-                       p = __get_cpu_var(current_kprobe);
+                       p = __this_cpu_read(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
 
                    uint64_t nip, uint64_t addr)
 {
        uint64_t srr1;
-       int index = __get_cpu_var(mce_nest_count)++;
-       struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
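+       /* __this_cpu_inc_return() yields the new value; -1 keeps the old post-increment semantics */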
+       int index = __this_cpu_inc_return(mce_nest_count) - 1;
+       struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
        /*
         * Return if we don't have enough space to log mce event.
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-       int index = __get_cpu_var(mce_nest_count) - 1;
+       int index = __this_cpu_read(mce_nest_count) - 1;
        struct machine_check_event *mc_evt;
        int ret = 0;
 
 
        /* Check if we have MCE info to process. */
        if (index < MAX_MC_EVT) {
-               mc_evt = &__get_cpu_var(mce_event[index]);
+               mc_evt = this_cpu_ptr(&mce_event[index]);
                /* Copy the event structure and release the original */
                if (mce)
                        *mce = *mc_evt;
        }
        /* Decrement the count to free the slot. */
        if (release)
-               __get_cpu_var(mce_nest_count)--;
+               __this_cpu_dec(mce_nest_count);
 
        return ret;
 }
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
 
-       index = __get_cpu_var(mce_queue_count)++;
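+       /* as above, -1 compensates for __this_cpu_inc_return() returning the new count */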
+       index = __this_cpu_inc_return(mce_queue_count) - 1;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
-               __get_cpu_var(mce_queue_count)--;
+               __this_cpu_dec(mce_queue_count);
                return;
        }
-       __get_cpu_var(mce_event_queue[index]) = evt;
+       memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
 
        /* Queue irq work to process this event later. */
        irq_work_queue(&mce_event_process_work);
         * For now just print it to console.
         * TODO: log this error event to FSP or nvram.
         */
-       while (__get_cpu_var(mce_queue_count) > 0) {
-               index = __get_cpu_var(mce_queue_count) - 1;
+       while (__this_cpu_read(mce_queue_count) > 0) {
+               index = __this_cpu_read(mce_queue_count) - 1;
                machine_check_print_event_info(
-                               &__get_cpu_var(mce_event_queue[index]));
-               __get_cpu_var(mce_queue_count)--;
+                               this_cpu_ptr(&mce_event_queue[index]));
+               __this_cpu_dec(mce_queue_count);
        }
 }
 
 
 
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
-       __get_cpu_var(current_brk) = *brk;
+       memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
 
        if (cpu_has_feature(CPU_FTR_DAWR))
                set_dawr(brk);
  * schedule DABR
  */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-       if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
                __set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-               struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+               struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       batch = &__get_cpu_var(ppc64_tlb_batch);
+       batch = this_cpu_ptr(&ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
 #ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-               batch = &__get_cpu_var(ppc64_tlb_batch);
+               batch = this_cpu_ptr(&ppc64_tlb_batch);
                batch->active = 1;
        }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 
 irqreturn_t smp_ipi_demux(void)
 {
-       struct cpu_messages *info = &__get_cpu_var(ipi_message);
+       struct cpu_messages *info = this_cpu_ptr(&ipi_message);
        unsigned int all;
 
        mb();   /* order any irq clear */
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
-       __get_cpu_var(cpu_state) = CPU_DEAD;
+       __this_cpu_write(cpu_state, CPU_DEAD);
        smp_wmb();
-       while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+       while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
 }
 
 
        ppc_set_pmu_inuse(1);
 
        /* Only need to enable them once */
-       if (__get_cpu_var(pmcs_enabled))
+       if (__this_cpu_read(pmcs_enabled))
                return;
 
-       __get_cpu_var(pmcs_enabled) = 1;
+       __this_cpu_write(pmcs_enabled, 1);
 
        if (ppc_md.enable_pmcs)
                ppc_md.enable_pmcs();
 
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()    __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending()                __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending()       __get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag()    __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending()                __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending()       __this_cpu_write(irq_work_pending, 0)
 
 #endif /* 32 vs 64 bit */
 
 static void __timer_interrupt(void)
 {
        struct pt_regs *regs = get_irq_regs();
-       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-       struct clock_event_device *evt = &__get_cpu_var(decrementers);
+       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+       struct clock_event_device *evt = this_cpu_ptr(&decrementers);
        u64 now;
 
        trace_timer_interrupt_entry(regs);
                *next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
-               __get_cpu_var(irq_stat).timer_irqs_event++;
+               __this_cpu_inc(irq_stat.timer_irqs_event);
        } else {
                now = *next_tb - now;
                if (now <= DECREMENTER_MAX)
                /* We may have raced with new irq work */
                if (test_irq_work_pending())
                        set_dec(1);
-               __get_cpu_var(irq_stat).timer_irqs_others++;
+               __this_cpu_inc(irq_stat.timer_irqs_others);
        }
 
 #ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-               struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+               struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
 #endif
 void timer_interrupt(struct pt_regs * regs)
 {
        struct pt_regs *old_regs;
-       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
-       __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+       __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
        set_dec(evt);
 
        /* We may have raced with new irq work */
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
-       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
        *next_tb = get_tb_or_rtc();
        __timer_interrupt();
 
 {
        long handled = 0;
 
-       __get_cpu_var(irq_stat).mce_exceptions++;
+       __this_cpu_inc(irq_stat.mce_exceptions);
 
        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
 
 long hmi_exception_realmode(struct pt_regs *regs)
 {
-       __get_cpu_var(irq_stat).hmi_exceptions++;
+       __this_cpu_inc(irq_stat.hmi_exceptions);
 
        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);
        enum ctx_state prev_state = exception_enter();
        int recover = 0;
 
-       __get_cpu_var(irq_stat).mce_exceptions++;
+       __this_cpu_inc(irq_stat.mce_exceptions);
 
        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
-       __get_cpu_var(irq_stat).pmu_irqs++;
+       __this_cpu_inc(irq_stat.pmu_irqs);
 
        perf_irq(regs);
 }
 
        unsigned long sid;
        int ret = -1;
 
-       sid = ++(__get_cpu_var(pcpu_last_used_sid));
+       sid = __this_cpu_inc_return(pcpu_last_used_sid);
        if (sid < NUM_TIDS) {
-               __get_cpu_var(pcpu_sids).entry[sid] = entry;
+               __this_cpu_write(pcpu_sids.entry[sid], entry);
                entry->val = sid;
-               entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+               entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
                ret = sid;
        }
 
 static inline int local_sid_lookup(struct id *entry)
 {
        if (entry && entry->val != 0 &&
-           __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
-           entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+           __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
+           entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
                return entry->val;
        return -1;
 }
 /* Invalidate all id mappings on local core -- call with preempt disabled */
 static inline void local_sid_destroy_all(void)
 {
-       __get_cpu_var(pcpu_last_used_sid) = 0;
-       memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+       __this_cpu_write(pcpu_last_used_sid, 0);
+       memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
 }
 
 static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
 
        mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
        if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-           __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
+           __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
                kvmppc_e500_tlbil_all(vcpu_e500);
-               __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
+               __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
        }
 }
 
 
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
 
        else {
                int i;
                struct ppc64_tlb_batch *batch =
-                       &__get_cpu_var(ppc64_tlb_batch);
+                       this_cpu_ptr(&ppc64_tlb_batch);
 
                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vpn[i], batch->pte[i],
 
 
        ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
 
-       index = __get_cpu_var(next_tlbcam_idx);
+       index = this_cpu_read(next_tlbcam_idx);
 
        /* Just round-robin the entries and wrap when we hit the end */
        if (unlikely(index == ncams - 1))
-               __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+               __this_cpu_write(next_tlbcam_idx, tlbcam_index);
        else
-               __get_cpu_var(next_tlbcam_idx)++;
+               __this_cpu_inc(next_tlbcam_idx);
 
        return index;
 }
 
 {
        struct hugepd_freelist **batchp;
 
-       batchp = &get_cpu_var(hugepd_freelist_cur);
+       batchp = this_cpu_ptr(&hugepd_freelist_cur);
 
        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
 
 
 static void power_pmu_bhrb_enable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        if (!ppmu->bhrb_nr)
                return;
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        if (!ppmu->bhrb_nr)
                return;
        if (!ppmu)
                return;
        local_irq_save(flags);
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        if (!cpuhw->disabled) {
                /*
                return;
        local_irq_save(flags);
 
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;
 
         * Add the event to the list (if there is room)
         * and check whether the total set is still feasible.
         */
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
        n0 = cpuhw->n_events;
        if (n0 >= ppmu->n_counter)
                goto out;
 
        power_pmu_read(event);
 
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
        for (i = 0; i < cpuhw->n_events; ++i) {
                if (event == cpuhw->event[i]) {
                        while (++i < cpuhw->n_events) {
  */
 static void power_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
  */
 static void power_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
 
        if (!ppmu)
                return -EAGAIN;
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
        n = cpuhw->n_events;
        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
                return -EAGAIN;
 
                if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
                        struct cpu_hw_events *cpuhw;
-                       cpuhw = &__get_cpu_var(cpu_hw_events);
+                       cpuhw = this_cpu_ptr(&cpu_hw_events);
                        power_pmu_bhrb_read(cpuhw);
                        data.br_stack = &cpuhw->bhrb_stack;
                }
 static void perf_event_interrupt(struct pt_regs *regs)
 {
        int i, j;
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        unsigned long val[8];
        int found, active;
 
        unsigned long flags;
 
        local_irq_save(flags);
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
 
        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;
        unsigned long flags;
 
        local_irq_save(flags);
-       cpuhw = &__get_cpu_var(cpu_hw_events);
+       cpuhw = this_cpu_ptr(&cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;
 
 static void perf_event_interrupt(struct pt_regs *regs)
 {
        int i;
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int found = 0;
 
 
 static void iic_eoi(struct irq_data *d)
 {
-       struct iic *iic = &__get_cpu_var(cpu_iic);
+       struct iic *iic = this_cpu_ptr(&cpu_iic);
        out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
        BUG_ON(iic->eoi_ptr < 0);
 }
        struct iic *iic;
        unsigned int virq;
 
-       iic = &__get_cpu_var(cpu_iic);
+       iic = this_cpu_ptr(&cpu_iic);
        *(unsigned long *) &pending =
                in_be64((u64 __iomem *) &iic->regs->pending_destr);
        if (!(pending.flags & CBE_IIC_IRQ_VALID))
 
 void iic_setup_cpu(void)
 {
-       out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+       out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
 
 
        local_irq_save(flags);
 
-       depth = &__get_cpu_var(opal_trace_depth);
+       depth = this_cpu_ptr(&opal_trace_depth);
 
        if (*depth)
                goto out;
 
        local_irq_save(flags);
 
-       depth = &__get_cpu_var(opal_trace_depth);
+       depth = this_cpu_ptr(&opal_trace_depth);
 
        if (*depth)
                goto out;
 
 
 static unsigned int ps3_get_irq(void)
 {
-       struct ps3_private *pd = &__get_cpu_var(ps3_private);
+       struct ps3_private *pd = this_cpu_ptr(&ps3_private);
        u64 x = (pd->bmp.status & pd->bmp.mask);
        unsigned int plug;
 
 
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-       struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+       struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;
 
 
        if (opcode > MAX_HCALL_OPCODE)
                return;
 
-       h = &__get_cpu_var(hcall_stats)[opcode / 4];
+       h = this_cpu_ptr(&hcall_stats[opcode / 4]);
        h->tb_start = mftb();
        h->purr_start = mfspr(SPRN_PURR);
 }
        if (opcode > MAX_HCALL_OPCODE)
                return;
 
-       h = &__get_cpu_var(hcall_stats)[opcode / 4];
+       h = this_cpu_ptr(&hcall_stats[opcode / 4]);
        h->num_calls++;
        h->tb_total += mftb() - h->tb_start;
        h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
 
 
        local_irq_save(flags);  /* to protect tcep and the page behind it */
 
-       tcep = __get_cpu_var(tce_page);
+       tcep = __this_cpu_read(tce_page);
 
        /* This is safe to do since interrupts are off when we're called
         * from iommu_alloc{,_sg}()
                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                            direction, attrs);
                }
-               __get_cpu_var(tce_page) = tcep;
+               __this_cpu_write(tce_page, tcep);
        }
 
        rpn = __pa(uaddr) >> TCE_SHIFT;
        long l, limit;
 
        local_irq_disable();    /* to protect tcep and the page behind it */
-       tcep = __get_cpu_var(tce_page);
+       tcep = __this_cpu_read(tce_page);
 
        if (!tcep) {
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
                        local_irq_enable();
                        return -ENOMEM;
                }
-               __get_cpu_var(tce_page) = tcep;
+               __this_cpu_write(tce_page, tcep);
        }
 
        proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
 
        unsigned long vpn;
        unsigned long i, pix, rc;
        unsigned long flags = 0;
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long param[9];
        unsigned long hash, index, shift, hidx, slot;
 
        local_irq_save(flags);
 
-       depth = &__get_cpu_var(hcall_trace_depth);
+       depth = this_cpu_ptr(&hcall_trace_depth);
 
        if (*depth)
                goto out;
 
        local_irq_save(flags);
 
-       depth = &__get_cpu_var(hcall_trace_depth);
+       depth = this_cpu_ptr(&hcall_trace_depth);
 
        if (*depth)
                goto out;
 
        /* If it isn't an extended log we can use the per cpu 64bit buffer */
        h = (struct rtas_error_log *)&savep[1];
        if (!rtas_error_extended(h)) {
-               memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
-               errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+               memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
+               errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
        } else {
                int len, error_log_length;
 
 
 
 void xics_teardown_cpu(void)
 {
-       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
        /*
         * we have to reset the cppr index to 0 because we're