Makes the code future-proof against the impending change to mm->cpu_vm_mask.
It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
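
For reference, a minimal sketch (not part of the diff) of the conversion
pattern applied throughout: the old ops take a cpumask_t by value, the new
ones take a pointer, and the mm_cpumask() accessor hides the impending
layout change of mm->cpu_vm_mask.

        #include <linux/cpumask.h>
        #include <linux/mm_types.h>
        #include <linux/smp.h>

        /* Illustrative helper only; the calls are the real kernel APIs.
         * Caller must have preemption disabled for smp_processor_id(). */
        static inline void mark_mm_used_here(struct mm_struct *mm)
        {
                /* old: cpu_set(smp_processor_id(), mm->cpu_vm_mask); */
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));

                /* old: cpus_equal(mm->cpu_vm_mask,
                 *                 cpumask_of_cpu(smp_processor_id())) */
                if (cpumask_equal(mm_cpumask(mm),
                                  cpumask_of(smp_processor_id())))
                        ; /* this CPU is the mm's only user */
        }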
                             struct task_struct *tsk)
 {
        /* Mark this context as having been used on the new CPU */
-       cpu_set(smp_processor_id(), next->cpu_vm_mask);
+       cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 
        /* 32-bit keeps track of the current PGDIR in the thread struct */
 #ifdef CONFIG_PPC32
 
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
-       cpumask_t tmp;
+       const struct cpumask *tmp;
        int rc, user_region = 0, local = 0;
        int psize, ssize;
 
                return 1;
 
        /* Check CPU locality */
-       tmp = cpumask_of_cpu(smp_processor_id());
-       if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+       tmp = cpumask_of(smp_processor_id());
+       if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
                local = 1;
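
The locality test above recurs throughout the patch. A hypothetical helper
(mm_is_local() is not part of this patch) makes the idiom explicit:
cpumask_of() returns a const pointer to a pre-built single-CPU mask, so
nothing is copied to the stack.

        /* Hypothetical helper, for illustration only: true when the
         * current CPU is the only one that has ever run this mm. */
        static inline int mm_is_local(struct mm_struct *mm)
        {
                return cpumask_equal(mm_cpumask(mm),
                                     cpumask_of(smp_processor_id()));
        }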
 
 #ifdef CONFIG_HUGETLB_PAGE
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
-       cpumask_t mask;
        unsigned long flags;
        int local = 0;
        int ssize;
        local_irq_save(flags);
 
        /* Is that local to this CPU ? */
-       mask = cpumask_of_cpu(smp_processor_id());
-       if (cpus_equal(mm->cpu_vm_mask, mask))
+       if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                local = 1;
 
        /* Hash it in */
 
                mm->context.id = MMU_NO_CONTEXT;
 
                /* Mark it stale on all CPUs that used this mm */
-               for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+               for_each_cpu(cpu, mm_cpumask(mm))
                        __set_bit(id, stale_map[cpu]);
                return id;
        }
 
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
        /* This is safe since tlb_gather_mmu has disabled preemption */
-        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
        if (atomic_read(&tlb->mm->mm_users) < 2 ||
-           cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+           cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
                pgtable_free(pgf);
                return;
        }
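
Dropping the on-stack cpumask_t copy is the point of this hunk: with
NR_CPUS = 4096 (an assumed configuration, for illustration) a cpumask_t is
4096/8 = 512 bytes copied on every call, while cpumask_of() is a pointer
into a shared table. A sketch of the two patterns, assuming the deprecated
cpumask_of_cpu() is still available, as it was when this patch was written:

        static void stack_cost_demo(void)
        {
                /* Old: the whole mask lands on the stack (512 bytes here). */
                cpumask_t copy = cpumask_of_cpu(smp_processor_id());

                /* New: a const pointer; nothing is copied. */
                const struct cpumask *ptr = cpumask_of(smp_processor_id());

                (void)copy;
                (void)ptr;
        }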
 
  */
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
-       cpumask_t tmp;
+       const struct cpumask *tmp;
        int i, local = 0;
 
        i = batch->index;
-       tmp = cpumask_of_cpu(smp_processor_id());
-       if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
+       tmp = cpumask_of(smp_processor_id());
+       if (cpumask_equal(mm_cpumask(batch->mm), tmp))
                local = 1;
        if (i == 1)
                flush_hash_page(batch->vaddr[0], batch->pte[0],
 
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-       if (!cpus_empty(cpu_mask)) {
+       if (!cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                struct tlb_flush_param p = { .pid = pid };
-               smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+       /* Ignores smp_processor_id() even if set in the mask. */
+               smp_call_function_many(mm_cpumask(mm),
+                                      do_flush_tlb_mm_ipi, &p, 1);
        }
        _tlbil_pid(pid);
  no_context:
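
Both IPI sites converted here (this hunk and the flush_tlb_page() hunk
below) rely on the same property, noted in the added comments:
smp_call_function_many() never runs the function on the calling CPU, even
when that CPU's bit is set in the mask, which is what lets the old
copy-then-cpu_clear() dance go away. Condensed into a hypothetical helper
(struct tlb_flush_param, do_flush_tlb_mm_ipi() and _tlbil_pid() are the
helpers already visible in this hunk):

        /* Sketch only; caller holds preempt_disable(), as above. */
        static void flush_pid_ipi_sketch(struct mm_struct *mm, int pid)
        {
                struct tlb_flush_param p = { .pid = pid };

                /* Remote CPUs: the calling CPU is skipped even if set. */
                smp_call_function_many(mm_cpumask(mm),
                                       do_flush_tlb_mm_ipi, &p, 1);
                /* Local CPU: flush directly rather than via IPI. */
                _tlbil_pid(pid);
        }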
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-       cpumask_t cpu_mask;
+       const struct cpumask *cpu_mask;
        unsigned int pid;
 
        preempt_disable();
        pid = vma ? vma->vm_mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
-       cpu_mask = vma->vm_mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-       if (!cpus_empty(cpu_mask)) {
+       cpu_mask = mm_cpumask(vma->vm_mm);
+       if (!cpumask_equal(cpu_mask, cpumask_of(smp_processor_id()))) {
                /* If broadcast tlbivax is supported, use it */
                if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
                        int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
                        goto bail;
                } else {
                        struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
-                       smp_call_function_mask(cpu_mask,
+                       /* Ignores smp_processor_id() even if set in cpu_mask */
+                       smp_call_function_many(cpu_mask,
                                               do_flush_tlb_page_ipi, &p, 1);
                }
        }
 
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
 
        /* Global TLBIE broadcast required with SPEs. */
-       __cpus_setall(&mm->cpu_vm_mask, nr);
+       bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
 }
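
The nr computation above is inherited from the old code, not new in this
patch: filling every possible CPU bit, plus one guard bit on NR_CPUS == 1
builds, is evidently meant to keep the mask from ever looking like "just
this CPU", so the locality checks earlier in the patch fail and an mm that
has run on an SPE always takes the broadcast-tlbie path. A hedged sketch
of the property relied on (spe_needs_global_flush() is hypothetical):

        /* After mm_needs_global_tlbie() the mm's mask is a strict
         * superset of any single-CPU mask, so this always returns 1
         * and the flush paths above never treat the mm as local. */
        static int spe_needs_global_flush(struct mm_struct *mm)
        {
                return !cpumask_equal(mm_cpumask(mm),
                                      cpumask_of(smp_processor_id()));
        }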
 
 void spu_associate_mm(struct spu *spu, struct mm_struct *mm)