        return ~asid_version_mask(cpu) + 1;
 }
 
-#define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
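+/* Per-CPU ASID accessors: read and update mm->context.asid[cpu]. */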
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+       return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+                                  struct mm_struct *mm, u64 ctx)
+{
+       mm->context.asid[cpu] = ctx;
+}
+
 #define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
 #define cpu_asid(cpu, mm) \
        (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
        int i;
 
        for_each_possible_cpu(i)
-               cpu_context(i, mm) = 0;
+               set_cpu_context(i, mm, 0);
 
        mm->context.bd_emupage_allocmap = NULL;
        spin_lock_init(&mm->context.bd_emupage_lock);
                htw_start();
        } else {
                /* will get a new context next time */
-               cpu_context(cpu, mm) = 0;
+               set_cpu_context(cpu, mm, 0);
        }
 
        local_irq_restore(flags);
 
 
                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-                               cpu_context(cpu, mm) = 0;
+                               set_cpu_context(cpu, mm, 0);
                }
        }
        drop_mmu_context(mm);
                         * mm has been completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-                               cpu_context(cpu, mm) = !exec;
+                               set_cpu_context(cpu, mm, !exec);
                }
        }
        local_flush_tlb_range(vma, start, end);
                         * by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
-                               cpu_context(cpu, vma->vm_mm) = 1;
+                               set_cpu_context(cpu, vma->vm_mm, 1);
                }
        }
        local_flush_tlb_page(vma, page);
 
                get_new_mmu_context(kern_mm);
                for_each_possible_cpu(i)
                        if (i != cpu)
-                               cpu_context(i, kern_mm) = 0;
+                               set_cpu_context(i, kern_mm, 0);
                preempt_enable();
        }
        kvm_write_c0_guest_entryhi(cop0, entryhi);
                if (i == cpu)
                        continue;
                if (user)
-                       cpu_context(i, user_mm) = 0;
-               cpu_context(i, kern_mm) = 0;
+                       set_cpu_context(i, user_mm, 0);
+               set_cpu_context(i, kern_mm, 0);
        }
 
        preempt_enable();
 
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
-                       cpu_context(i, kern_mm) = 0;
-                       cpu_context(i, user_mm) = 0;
+                       set_cpu_context(i, kern_mm, 0);
+                       set_cpu_context(i, user_mm, 0);
                }
 
                /* Generate new ASID for current mode */
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
-                               cpu_context(i, user_mm) = 0;
+                               set_cpu_context(i, user_mm, 0);
                        vcpu->arch.last_user_gasid = gasid;
                }
        }
 
                local_flush_tlb_all();  /* start new asid cycle */
        }
 
-       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+       set_cpu_context(cpu, mm, asid);
+       asid_cache(cpu) = asid;
 }
 
 void check_mmu_context(struct mm_struct *mm)