}
 }
 
-/* Must be called with resv->lock held. Calling this with count_only == true
- * will count the number of pages to be added but will not modify the linked
- * list. If regions_needed != NULL and count_only == true, then regions_needed
- * will indicate the number of file_regions needed in the cache to carry out to
- * add the regions for this range.
+/*
+ * Must be called with resv->lock held.
+ *
+ * Calling this with regions_needed != NULL will count the number of pages
+ * to be added but will not modify the linked list. In that case, regions_needed
+ * will indicate the number of file_regions needed in the cache to carry out
+ * the addition of regions for this range.
  */
 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                     struct hugetlb_cgroup *h_cg,
-                                    struct hstate *h, long *regions_needed,
-                                    bool count_only)
+                                    struct hstate *h, long *regions_needed)
 {
        long add = 0;
        struct list_head *head = &resv->regions;
                 */
                if (rg->from > last_accounted_offset) {
                        add += rg->from - last_accounted_offset;
-                       if (!count_only) {
+                       if (!regions_needed) {
                                nrg = get_file_region_entry_from_cache(
                                        resv, last_accounted_offset, rg->from);
                                record_hugetlb_cgroup_uncharge_info(h_cg, h,
                                                                    resv, nrg);
                                list_add(&nrg->link, rg->link.prev);
                                coalesce_file_region(resv, nrg);
-                       } else if (regions_needed)
+                       } else
                                *regions_needed += 1;
                }
 
         */
        if (last_accounted_offset < t) {
                add += t - last_accounted_offset;
-               if (!count_only) {
+               if (!regions_needed) {
                        nrg = get_file_region_entry_from_cache(
                                resv, last_accounted_offset, t);
                        record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                        list_add(&nrg->link, rg->link.prev);
                        coalesce_file_region(resv, nrg);
-               } else if (regions_needed)
+               } else
                        *regions_needed += 1;
        }
 
 retry:
 
        /* Count how many regions are actually needed to execute this add. */
-       add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed,
-                                true);
+       add_reservation_in_range(resv, f, t, NULL, NULL,
+                                &actual_regions_needed);
 
        /*
         * Check for sufficient descriptors in the cache to accommodate
                goto retry;
        }
 
-       add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false);
+       add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
 
        resv->adds_in_progress -= in_regions_needed;
 
 
        spin_lock(&resv->lock);
 
-       /* Count how many hugepages in this range are NOT respresented. */
+       /* Count how many hugepages in this range are NOT represented. */
        chg = add_reservation_in_range(resv, f, t, NULL, NULL,
-                                      out_regions_needed, true);
+                                      out_regions_needed);
 
        if (*out_regions_needed == 0)
                *out_regions_needed = 1;