{
        struct event_constraint *c;
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       struct perf_event *e;
        int i, wmin, wmax, num = 0;
        struct hw_perf_event *hwc;
 
                num = perf_assign_events(cpuc->event_list, n, wmin,
                                         wmax, assign);
 
+       /*
+        * Mark the event as committed, so we do not put_constraint()
+        * in case new events are added and fail scheduling.
+        */
+       if (!num && assign) {
+               for (i = 0; i < n; i++) {
+                       e = cpuc->event_list[i];
+                       e->hw.flags |= PERF_X86_EVENT_COMMITTED;
+               }
+       }
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
+                       e = cpuc->event_list[i];
+                       /*
+                        * do not put_constraint() on committed events,
+                        * because they are good to go
+                        */
+                       if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
+                               continue;
+
                        if (x86_pmu.put_event_constraints)
-                               x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
+                               x86_pmu.put_event_constraints(cpuc, e);
                }
        }
        return num ? -EINVAL : 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
 
+       /*
+        * event is descheduled
+        */
+       event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
+
        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
 
        int     flags;
 };
 /*
- * struct event_constraint flags
+ * struct hw_perf_event.flags values
  */
 #define PERF_X86_EVENT_PEBS_LDLAT      0x1 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST         0x2 /* st data address sampling */
 #define PERF_X86_EVENT_PEBS_ST_HSW     0x4 /* haswell style st data sampling */
+#define PERF_X86_EVENT_COMMITTED       0x8 /* event passed commit_txn */
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
 
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if ((event->hw.config & c->cmask) == c->code) {
-                               /* hw.flags zeroed at initialization */
                                event->hw.flags |= c->flags;
                                return c;
                        }
 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
 {
-       event->hw.flags = 0;
        intel_put_shared_regs_event_constraints(cpuc, event);
 }