 static u64             ftrace_update_time;
 unsigned long          ftrace_update_tot_cnt;
+unsigned long          ftrace_number_of_pages;
+unsigned long          ftrace_number_of_groups;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
                goto again;
        }
 
+       ftrace_number_of_pages += 1 << order;
+       ftrace_number_of_groups++;
+
        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
        pg->size = cnt;
 
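(As an aside, the accounting above pairs each allocation, one "group", with 1 << order pages, and the record count comes from dividing the allocation size by the per-entry size. A minimal standalone sketch of that arithmetic, not part of the patch, with illustrative stand-ins for PAGE_SIZE and for ENTRY_SIZE, which in ftrace.c is the size of one dyn_ftrace record:)

#include <stdio.h>

int main(void)
{
	/* Illustrative values; the kernel uses PAGE_SIZE and sizeof(struct dyn_ftrace). */
	unsigned long page_size = 4096;
	unsigned long entry_size = 16;		/* assumed dyn_ftrace record size */
	unsigned long nr_pages = 0, nr_groups = 0;
	int order = 3;				/* one order-3 group: 8 contiguous pages */

	nr_pages += 1UL << order;		/* same accounting as ftrace_number_of_pages */
	nr_groups++;				/* one group per allocation, as ftrace_number_of_groups */

	printf("order %d holds %lu records; totals: %lu pages in %lu groups\n",
	       order, (page_size << order) / entry_size, nr_pages, nr_groups);
	return 0;
}
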
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
+               ftrace_number_of_pages -= 1 << order;
+               ftrace_number_of_groups--;
        }
        pr_info("ftrace: FAILED to allocate memory for functions\n");
        return NULL;
                free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
+               ftrace_number_of_pages -= 1 << order;
+               ftrace_number_of_groups--;
        }
 }
 
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                        free_pages((unsigned long)pg->records, order);
+                       ftrace_number_of_pages -= 1 << order;
+                       ftrace_number_of_groups--;
                        kfree(pg);
                        pg = container_of(last_pg, struct ftrace_page, next);
                        if (!(*last_pg))
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
+       pr_info("ftrace: allocated %ld pages with %ld groups\n",
+               ftrace_number_of_pages, ftrace_number_of_groups);
+
        set_ftrace_early_filters();
 
        return;
 
 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
-       unsigned long *p = filp->private_data;
-       char buf[64]; /* Not too big for a shallow stack */
+       ssize_t ret;
+       char *buf;
        int r;
 
-       r = scnprintf(buf, 63, "%ld", *p);
-       buf[r++] = '\n';
+       /* 256 should be plenty to hold the amount needed */
+       buf = kmalloc(256, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
+                     ftrace_update_tot_cnt,
+                     ftrace_number_of_pages,
+                     ftrace_number_of_groups);
+
+       ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       kfree(buf);
+       return ret;
 }
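(For illustration only, not part of the patch: a userspace sketch that reads the reworked dyn_ftrace_total_info file, whose single line now carries the update count plus the new "pages:" and "groups:" totals. It assumes tracefs is mounted at /sys/kernel/tracing; older setups may expose it under /sys/kernel/debug/tracing instead.)

#include <stdio.h>
#include <stdlib.h>

/* Assumed tracefs mount point. */
#define DYN_INFO_PATH	"/sys/kernel/tracing/dyn_ftrace_total_info"

int main(void)
{
	char line[256];
	FILE *fp = fopen(DYN_INFO_PATH, "r");

	if (!fp) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* One line, e.g. "<functions> pages:<pages> groups: <groups>" */
	if (fgets(line, sizeof(line), fp))
		fputs(line, stdout);

	fclose(fp);
	return EXIT_SUCCESS;
}
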
 
 static const struct file_operations tracing_dyn_info_fops = {
 
 #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
-                       &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
+                       NULL, &tracing_dyn_info_fops);
 #endif
 
        create_trace_instances(d_tracer);
 
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
 void ftrace_init_trace_array(struct trace_array *tr);
 #else
 static inline void ftrace_init_trace_array(struct trace_array *tr) { }