alpha_mv.smp_callin();
 
        /* All kernel threads share the same mm context.  */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* inform the notifiers about the new cpu */
 
        setup_processor();
 
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 
         * reference and switch to it.
         */
        cpu = smp_processor_id();
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
 
        /*
 
 
        /* Attach the new idle task to the global mm. */
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
 
        preempt_disable();
 
        );
 
        /*  Set the memory struct  */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        cpu = smp_processor_id();
 
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
 
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
        /* Set up and load the per-CPU TSS and LDT */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
 
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        enter_lazy_tlb(mm, current);
 
        if (!cpu_data[cpu].asid_cache)
                cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
 
 
        set_cpu_online(cpunum, true);
 
        /* Initialise the idle task for this CPU */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
        unsigned int cpu = smp_processor_id();
        int i, base;
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        smp_store_cpu_info(cpu);
 
        get_cpu_id(id);
        if (machine_has_cpu_mhz)
                update_cpu_mhz(NULL);
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
        set_except_vector(18, handle_dbe);
        flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        cpu_cache_init();
 }
 
        struct mm_struct *mm = &init_mm;
 
        enable_mmu();
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
 #ifdef CONFIG_MMU
 
                             : "memory" /* paranoid */);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 
        current_thread_info()->new_child = 0;
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* inform the notifiers about the new cpu */
 
        show_leds(cpuid);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        local_ops->cache_all();
 
                             : "memory" /* paranoid */);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 
                thread_info_offsets_are_bolixed_pete();
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* NOTE: Other cpus have this done as they are started
 
        /* Attach to the address space of init_task.  On SMP we
         * do this in smp.c:smp_callin for other cpus.
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 }
 
        __this_cpu_write(current_asid, min_asid);
 
        /* Set up this thread as another owner of the init_mm */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
 
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        me->active_mm = &init_mm;
        BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);
        /*
         * Set up and load the per-CPU TSS and LDT
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        curr->active_mm = &init_mm;
        BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);
 
        /* All kernel threads share the same mm context. */
 
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        enter_lazy_tlb(mm, current);
 
         * and because the mmu_notifier_unregister function also drops
         * mm_count we need to take an extra count here.
         */
-       atomic_inc(&p->mm->mm_count);
+       mmgrab(p->mm);
        mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
        mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
 }
 
                mm->i915 = to_i915(obj->base.dev);
 
                mm->mm = current->mm;
-               atomic_inc(&current->mm->mm_count);
+               mmgrab(current->mm);
 
                mm->mn = NULL;
 
 
        if (fd) {
                fd->rec_cpu_num = -1; /* no cpu affinity by default */
                fd->mm = current->mm;
-               atomic_inc(&fd->mm->mm_count);
+               mmgrab(fd->mm);
                fp->private_data = fd;
        } else {
                fp->private_data = NULL;
 
 
                if (!IS_ERR_OR_NULL(mm)) {
                        /* ensure this mm_struct can't be freed */
-                       atomic_inc(&mm->mm_count);
+                       mmgrab(mm);
                        /* but do not pin its memory */
                        mmput(mm);
                }
                if (p) {
                        if (atomic_read(&p->mm->mm_users) > 1) {
                                mm = p->mm;
-                               atomic_inc(&mm->mm_count);
+                               mmgrab(mm);
                        }
                        task_unlock(p);
                }
 
        ctx->released = false;
        ctx->mm = current->mm;
        /* prevent the mm struct from being freed */
-       atomic_inc(&ctx->mm->mm_count);
+       mmgrab(ctx->mm);
 
        file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
                                  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
 
  */
 extern struct mm_struct * mm_alloc(void);
 
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+       atomic_inc(&mm->mm_count);
+}
+
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
 static inline void mmdrop(struct mm_struct *mm)
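As a rough usage sketch (illustration only, not part of the patch): the hypothetical helper below pins a task's mm with the new mmgrab(), following the task_lock() pattern used in the oom-killer hunk above. The mm_struct then stays allocated until the caller releases it with mmdrop(); actually dereferencing user memory would still require an mm_users reference taken via mmget_not_zero()/mmput().

/*
 * Illustrative sketch only: example_pin_mm() is a hypothetical helper,
 * not part of this patch.  It takes an mm_count reference so the
 * mm_struct cannot be freed out from under the caller; the address
 * space itself may still be torn down, so accessing user memory would
 * need an mm_users reference (mmget_not_zero()/mmput()) instead.
 */
static struct mm_struct *example_pin_mm(struct task_struct *tsk)
{
        struct mm_struct *mm;

        task_lock(tsk);
        mm = tsk->mm;
        if (mm)
                mmgrab(mm);     /* pin the mm_struct allocation */
        task_unlock(tsk);

        return mm;              /* caller releases with mmdrop(mm) */
}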
 
                __set_current_state(TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
 
 
 static inline void futex_get_mm(union futex_key *key)
 {
-       atomic_inc(&key->private.mm->mm_count);
+       mmgrab(key->private.mm);
        /*
         * Ensure futex_get_mm() implies a full barrier such that
         * get_futex_key() implies a full barrier. This is relied upon
 
 
        if (!mm) {
                next->active_mm = oldmm;
-               atomic_inc(&oldmm->mm_count);
+               mmgrab(oldmm);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm_irqs_off(oldmm, mm, next);
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        enter_lazy_tlb(&init_mm, current);
 
        /*
 
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);
 
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);
 
 
        spin_unlock(&ksm_mmlist_lock);
 
        set_bit(MMF_VM_MERGEABLE, &mm->flags);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
 
        if (needs_wakeup)
                wake_up_interruptible(&ksm_thread_wait);
 
        task_lock(tsk);
        active_mm = tsk->active_mm;
        if (active_mm != mm) {
-               atomic_inc(&mm->mm_count);
+               mmgrab(mm);
                tsk->active_mm = mm;
        }
        tsk->mm = mm;
 
                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
 
        /*
         * Serialize the update against mmu_notifier_unregister. A
 
 
        /* oom_mm is bound to the signal struct life time. */
        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-               atomic_inc(&tsk->signal->oom_mm->mm_count);
+               mmgrab(tsk->signal->oom_mm);
 
        /*
         * Make sure that the task is woken up from uninterruptible sleep
 
        /* Get a reference to safely compare mm after task_unlock(victim) */
        mm = victim->mm;
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        /*
         * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
         * the OOM victim from depleting the memory reserves from the user
 
                return ERR_PTR(-ENOMEM);
 
        spin_lock_init(&kvm->mmu_lock);
-       atomic_inc(&current->mm->mm_count);
+       mmgrab(current->mm);
        kvm->mm = current->mm;
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);