  # of make so .config is not included in this case either (for *config).
  
  no-dot-config-targets := clean mrproper distclean \
 -                       cscope TAGS tags help %docs check% \
 +                       cscope TAGS tags help %docs check% coccicheck \
                         include/linux/version.h headers_% \
-                        kernelversion
 -                       kernelrelease kernelversion %src-pkg
++                       kernelversion %src-pkg
  
  config-targets := 0
  mixed-targets  := 0
 
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;
  
+       /* Pinned counter cpu profiling */
+       if (!tsk) {
+ 
+               if (enable)
+                       per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+               else
+                       per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+               return;
+       }
+ 
        /* Pinned counter task profiling */
-       if (tsk) {
-               if (cpu >= 0) {
-                       toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-                       return;
-               }
  
+       if (!enable)
+               list_del(&bp->hw.bp_list);
+ 
+       if (cpu >= 0) {
+               toggle_bp_task_slot(bp, cpu, enable, type, weight);
+       } else {
                for_each_online_cpu(cpu)
-                       toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-               return;
+                       toggle_bp_task_slot(bp, cpu, enable, type, weight);
        }
  
-       /* Pinned counter cpu profiling */
        if (enable)
-               per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-       else
-               per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+               list_add_tail(&bp->hw.bp_list, &bp_task_head);
  }
  
 +/*
 + * Function to perform processor-specific cleanup during unregistration
 + */
 +__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
 +{
 +      /*
 +       * A weak stub function here for those archs that don't define
 +       * it inside arch/.../kernel/hw_breakpoint.c
 +       */
 +}
 +
  /*
   * Contraints to check before allowing this new breakpoint counter:
   *
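
A minimal userspace sketch of the accounting split that the toggle_bp_slot() hunk above establishes (an assumption for illustration, not kernel code; nr_cpu_bp_pinned/nr_task_bp_pinned are simplified stand-ins for the kernel's per_cpu counters, and the per_task flag stands in for "the breakpoint is bound to a task"): a CPU-bound breakpoint only touches the per-CPU pinned count, while a task-bound one updates per-task slot counts on one CPU or on all of them.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* simplified stand-ins for the kernel's per_cpu counters */
static int nr_cpu_bp_pinned[NR_CPUS];   /* CPU-bound pinned breakpoints  */
static int nr_task_bp_pinned[NR_CPUS];  /* task-bound pinned breakpoints */

struct bp {
	int cpu;        /* -1 means "all online CPUs"            */
	bool per_task;  /* stand-in for "bound to a task"        */
};

static void toggle_bp_slot(struct bp *bp, bool enable, int weight)
{
	int cpu, delta = enable ? weight : -weight;

	/* Pinned counter cpu profiling: only the per-CPU count changes */
	if (!bp->per_task) {
		nr_cpu_bp_pinned[bp->cpu] += delta;
		return;
	}

	/* Pinned counter task profiling: one CPU or every CPU */
	if (bp->cpu >= 0) {
		nr_task_bp_pinned[bp->cpu] += delta;
	} else {
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			nr_task_bp_pinned[cpu] += delta;
	}
	/*
	 * The real hunk additionally links/unlinks bp on the global
	 * bp_task_head list (list_add_tail/list_del), so task-bound
	 * breakpoints can be walked by the constraint checks.
	 */
}

int main(void)
{
	struct bp cpu_bp  = { .cpu = 2,  .per_task = false };
	struct bp task_bp = { .cpu = -1, .per_task = true  };

	toggle_bp_slot(&cpu_bp, true, 1);
	toggle_bp_slot(&task_bp, true, 1);
	printf("cpu2 pinned=%d, task pinned on cpu0=%d\n",
	       nr_cpu_bp_pinned[2], nr_task_bp_pinned[0]);
	return 0;
}
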
 
  endif
  obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
  obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
- obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
  obj-$(CONFIG_EVENT_TRACING) += power-traces.o
 +ifeq ($(CONFIG_TRACING),y)
 +obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 +endif
  
  libftrace-y := ftrace.o
 
  int register_tracer(struct tracer *type);
  void unregister_tracer(struct tracer *type);
  int is_tracing_stopped(void);
 +enum trace_file_type {
 +      TRACE_FILE_LAT_FMT      = 1,
 +      TRACE_FILE_ANNOTATE     = 2,
 +};
 +
 +extern cpumask_var_t __read_mostly tracing_buffer_mask;
 +
 +#define for_each_tracing_cpu(cpu)     \
 +      for_each_cpu(cpu, tracing_buffer_mask)
  
- extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
- 
  extern unsigned long nsecs_to_usecs(unsigned long nsecs);
  
  extern unsigned long tracing_thresh;
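
A small userspace sketch (an assumption for illustration, not kernel code) of the pattern behind the for_each_tracing_cpu() macro added to trace.h above: wrap a generic per-CPU iterator so callers only visit CPUs present in a mask, here a toy bitmask standing in for cpumask_var_t tracing_buffer_mask.

#include <stdio.h>

#define NR_CPUS 8

/* toy bitmask standing in for cpumask_var_t tracing_buffer_mask */
static unsigned long tracing_buffer_mask = (1UL << 0) | (1UL << 2);

/*
 * Analogous to the header's
 *   #define for_each_tracing_cpu(cpu) for_each_cpu(cpu, tracing_buffer_mask)
 * but expanded over a plain bitmask.
 */
#define for_each_tracing_cpu(cpu)				\
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++)		\
		if (tracing_buffer_mask & (1UL << (cpu)))

int main(void)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		printf("tracing buffer present on cpu %d\n", cpu);
	return 0;
}
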