{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
 -      struct hlist_node *n;
        unsigned long key;
  
-       key = hash_long(ip, ftrace_profile_bits);
+       key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];
  
        if (hlist_empty(hhd))
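For context, a minimal sketch of the bucket selection this hunk moves to; the constant value and the helper name below are assumptions (the usual definitions from kernel/trace/ftrace.c of this era), not part of the patch:

#include <linux/hash.h>
#include <linux/list.h>

/* Sketch: a compile-time constant replaces the old runtime ftrace_profile_bits. */
#define FTRACE_PROFILE_HASH_BITS  10                               /* assumed value */
#define FTRACE_PROFILE_HASH_SIZE  (1 << FTRACE_PROFILE_HASH_BITS)  /* 1024 buckets */

static struct hlist_head profile_hash[FTRACE_PROFILE_HASH_SIZE];

/* hash_long() folds a function address into 0..FTRACE_PROFILE_HASH_SIZE-1 */
static struct hlist_head *profile_bucket(unsigned long ip)
{
        return &profile_hash[hash_long(ip, FTRACE_PROFILE_HASH_BITS)];
}
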
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
 -              hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
 +              hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
-                       if (bits > 0)
-                               key = hash_long(entry->ip, bits);
-                       else
-                               key = 0;
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
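
The recurring change in these hunks is the hlist iterator conversion: the extra struct hlist_node cursor argument was dropped, so only the temporary used for safe traversal remains. A self-contained sketch, assuming illustrative struct, field, and function names that are not from the patch:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        unsigned long ip;
        struct hlist_node hlist;
};

/* Empty one bucket, freeing entries as we go. */
static void drain_bucket(struct hlist_head *hhd)
{
        struct item *entry;
        struct hlist_node *tn;

        /*
         * Old form: hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
         * The typed cursor 'entry' is now the position itself, so the
         * separate hlist_node cursor ('tp') is gone; only the lookahead
         * temporary 'tn' is still needed for safe removal.
         */
        hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                hlist_del(&entry->hlist);
                kfree(entry);
        }
}
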
  __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
  {
+       struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
 -      struct hlist_node *n, *tmp;
+       struct ftrace_func_probe *p;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct list_head free_list;
+       struct ftrace_hash *hash;
 +      struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
        int i, len = 0;
 
                return;
  
        WARN_ON_ONCE(!irqs_disabled());
-       if (!current_trace->allocated_snapshot) {
 -      if (WARN_ON_ONCE(!tr->allocated_snapshot))
++      if (!tr->allocated_snapshot) {
 +              /* Only the nop tracer should hit this when disabling */
-               WARN_ON_ONCE(current_trace != &nop_trace);
++              WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
 +      }
  
        arch_spin_lock(&ftrace_max_lock);
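
Read back without the merge markers, the resolved guard in this function (the ftrace_max_lock use suggests update_max_tr()) is intended to be, as a sketch:

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

That is, swapping buffers only makes sense once the snapshot buffer has actually been allocated; otherwise only the nop tracer should reach this path while tracing is being disabled.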