__field( unsigned int,  lvl             )
                __field( unsigned int,  numa_node       )
                __field( unsigned int,  num_children    )
-               __field( u32,           childmask       )
+               __field( u32,           groupmask       )
        ),
 
        TP_fast_assign(
                __entry->lvl            = child->parent->level;
                __entry->numa_node      = child->parent->numa_node;
                __entry->num_children   = child->parent->num_children;
-               __entry->childmask      = child->childmask;
+               __entry->groupmask      = child->groupmask;
        ),
 
-       TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
-                 __entry->child,  __entry->childmask, __entry->parent,
+       TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+                 __entry->child,  __entry->groupmask, __entry->parent,
                  __entry->lvl, __entry->numa_node, __entry->num_children)
 );
 
                __field( unsigned int,  lvl             )
                __field( unsigned int,  numa_node       )
                __field( unsigned int,  num_children    )
-               __field( u32,           childmask       )
+               __field( u32,           groupmask       )
        ),
 
        TP_fast_assign(
                __entry->lvl            = tmc->tmgroup->level;
                __entry->numa_node      = tmc->tmgroup->numa_node;
                __entry->num_children   = tmc->tmgroup->num_children;
-               __entry->childmask      = tmc->childmask;
+               __entry->groupmask      = tmc->groupmask;
        ),
 
-       TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
-                 __entry->cpu,  __entry->childmask, __entry->parent,
+       TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+                 __entry->cpu,  __entry->groupmask, __entry->parent,
                  __entry->lvl, __entry->numa_node, __entry->num_children)
 );
 
 
  *                     outcome is a CPU which might wake up a little early.
  * @evt:               Pointer to tmigr_event which needs to be queued (of idle
  *                     child group)
- * @childmask:         childmask of child group
+ * @childmask:         groupmask of child group
  * @remote:            Is set, when the new timer path is executed in
  *                     tmigr_handle_remote_cpu()
  * @basej:             timer base in jiffies
 
                child = group;
                group = group->parent;
-               data->childmask = child->childmask;
+               data->childmask = child->groupmask;
        } while (group);
 }
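
For orientation only (not part of the patch): the loop above walks the hierarchy bottom-up, and after each step the just-finished group's own groupmask becomes the childmask used one level up. A minimal standalone sketch of that pattern, with purely illustrative names, could look like this:

	#include <stdio.h>

	/* Illustrative stand-in for a hierarchy node carrying its bit in the parent. */
	struct demo_group {
		struct demo_group	*parent;
		unsigned char		groupmask;	/* bit this group owns in its parent */
	};

	static void demo_walk_up(struct demo_group *group, unsigned char childmask)
	{
		do {
			struct demo_group *child = group;

			/* ... act on @group as seen through @childmask ... */
			printf("group=%p childmask=%#x\n", (void *)group, childmask);

			group = group->parent;
			/* one level up, the finished group's own bit is the new childmask */
			childmask = child->groupmask;
		} while (group);
	}

	int main(void)
	{
		struct demo_group top = { .parent = NULL, .groupmask = 0 };
		struct demo_group mid = { .parent = &top, .groupmask = 0x1 };

		demo_walk_up(&mid, 0x1);	/* start at @mid with a CPU-level mask */
		return 0;
	}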
 
 {
        struct tmigr_walk data;
 
-       data.childmask = tmc->childmask;
+       data.childmask = tmc->groupmask;
 
        trace_tmigr_cpu_active(tmc);
 
        if (tmigr_is_not_available(tmc))
                return;
 
-       data.childmask = tmc->childmask;
+       data.childmask = tmc->groupmask;
        data.firstexp = KTIME_MAX;
 
        /*
         * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
         * return when nothing has to be done.
         */
-       if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+       if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
                /*
                 * If this CPU was an idle migrator, make sure to clear its wakeup
                 * value so it won't chase timers that have already expired elsewhere.
                return ret;
 
        data.now = get_jiffies_update(&jif);
-       data.childmask = tmc->childmask;
+       data.childmask = tmc->groupmask;
        data.firstexp = KTIME_MAX;
        data.tmc_active = !tmc->idle;
        data.check = false;
        struct tmigr_walk data = { .nextexp = nextexp,
                                   .firstexp = KTIME_MAX,
                                   .evt = &tmc->cpuevt,
-                                  .childmask = tmc->childmask };
+                                  .childmask = tmc->groupmask };
 
        /*
         * If nextexp is KTIME_MAX, the CPU event will be ignored because the
        if (WARN_ON_ONCE(tmc->idle))
                return nextevt;
 
-       if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
+       if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
                return KTIME_MAX;
 
        do {
        raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
 
        child->parent = parent;
-       child->childmask = BIT(parent->num_children++);
+       child->groupmask = BIT(parent->num_children++);
 
        raw_spin_unlock(&parent->lock);
        raw_spin_unlock_irq(&child->lock);
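
A side note on the assignment above: each child connected to a parent simply claims the next free bit, so all masks within one parent stay unique. A self-contained sketch of that scheme (illustrative names only, not taken from the patch):

	#include <stdio.h>

	#define BIT(nr)	(1U << (nr))

	struct demo_parent {
		unsigned int		num_children;
	};

	struct demo_child {
		struct demo_parent	*parent;
		unsigned int		groupmask;	/* this child's bit inside @parent */
	};

	static void demo_connect(struct demo_child *c, struct demo_parent *p)
	{
		c->parent = p;
		/* first child gets 0x1, second 0x2, third 0x4, ... */
		c->groupmask = BIT(p->num_children++);
	}

	int main(void)
	{
		struct demo_parent p = { 0 };
		struct demo_child a, b;

		demo_connect(&a, &p);
		demo_connect(&b, &p);
		printf("a=%#x b=%#x\n", a.groupmask, b.groupmask);	/* a=0x1 b=0x2 */
		return 0;
	}
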
         *   the new childmask and parent to subsequent walkers through this
         *   @child. Therefore propagate active state unconditionally.
         */
-       data.childmask = child->childmask;
+       data.childmask = child->groupmask;
 
        /*
         * There is only one new level per time (which is protected by
                        raw_spin_lock_irq(&group->lock);
 
                        tmc->tmgroup = group;
-                       tmc->childmask = BIT(group->num_children++);
+                       tmc->groupmask = BIT(group->num_children++);
 
                        raw_spin_unlock_irq(&group->lock);
 
        if (ret < 0)
                return ret;
 
-       if (tmc->childmask == 0)
+       if (tmc->groupmask == 0)
                return -EINVAL;
 
        return ret;
 
  * @num_children:      Counter of group children to make sure the group is only
  *                     filled with TMIGR_CHILDREN_PER_GROUP; Required for setup
  *                     only
- * @childmask:         childmask of the group in the parent group; is set
- *                     during setup and will never change; can be read
- *                     lockless
+ * @groupmask:         mask of the group in the parent group; is set during
+ *                     setup and will never change; can be read lockless
  * @list:              List head that is added to the per level
  *                     tmigr_level_list; is required during setup when a
  *                     new group needs to be connected to the existing
        unsigned int            level;
        int                     numa_node;
        unsigned int            num_children;
-       u8                      childmask;
+       u8                      groupmask;
        struct list_head        list;
 };
 
  *                     hierarchy
  * @remote:            Is set when timers of the CPU are expired remotely
  * @tmgroup:           Pointer to the parent group
- * @childmask:         childmask of tmigr_cpu in the parent group
+ * @groupmask:         mask of tmigr_cpu in the parent group
  * @wakeup:            Stores the first timer when the timer migration
  *                     hierarchy is completely idle and remote expiry was done;
  *                     is returned to timer code in the idle path and is only
        bool                    idle;
        bool                    remote;
        struct tmigr_group      *tmgroup;
-       u8                      childmask;
+       u8                      groupmask;
        u64                     wakeup;
        struct tmigr_event      cpuevt;
 };
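
To illustrate how such a per-CPU (or per-group) mask is consumed, here is a minimal sketch, not part of the patch and with invented names, of comparing a mask against a group's split state as described below:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for the split group state described below. */
	struct demo_state {
		unsigned char	active;		/* one bit per active child */
		unsigned char	migrator;	/* bit of the child acting as migrator */
	};

	/* A child is the migrator when its own bit matches the migrator field. */
	static bool demo_is_migrator(const struct demo_state *s, unsigned char groupmask)
	{
		return s->migrator == groupmask;
	}

	int main(void)
	{
		struct demo_state s = { .active = 0x3, .migrator = 0x1 };

		printf("child 0x1 is migrator: %d\n", demo_is_migrator(&s, 0x1));	/* 1 */
		printf("child 0x2 is migrator: %d\n", demo_is_migrator(&s, 0x2));	/* 0 */
		return 0;
	}
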
        u32 state;
        /**
         * struct - split state of tmigr_group
-        * @active:     Contains each childmask bit of the active children
-        * @migrator:   Contains childmask of the child which is migrator
+        * @active:     Contains the mask bit of each active child
+        * @migrator:   Contains the mask of the child which is the migrator
         * @seq:        Sequence counter needs to be increased when an update
         *              to the tmigr_state is done. It prevents a race when
         *              updates in the child groups are propagated in changed