/* Fixed counter 0 (bit 32) plus GP counters 0 and 1 (bits 1:0). */
static struct event_constraint fixed0_counter0_1_constraint =
                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
 
+/* GP counters 1-7 only (mask 0xfe); excludes GP counter 0 and all fixed counters. */
+static struct event_constraint counters_1_7_constraint =
+                       INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
+
 static struct event_constraint *
 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
        return c;
 }
 
+/*
+ * Event constraints for the big-core PMU (called from
+ * mtl_get_event_constraints() for hybrid_big).
+ *
+ * Start from the SPR constraints, then keep Retire Latency sampling
+ * (precise_ip with PERF_SAMPLE_WEIGHT_TYPE) off fixed counter 0,
+ * which does not report the Retire Latency field.
+ */
+static struct event_constraint *
+rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+{
+       struct event_constraint *c;
+
+       c = spr_get_event_constraints(cpuc, idx, event);
+
+       /* The Retire Latency is not supported by the fixed counter 0. */
+       if (event->attr.precise_ip &&
+           (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
+           constraint_match(&fixed0_constraint, event->hw.config)) {
+               /*
+                * The Instruction PDIR is only available
+                * on the fixed counter 0. Error out for this case.
+                */
+               if (event->attr.precise_ip == 3)
+                       return &emptyconstraint;
+               /* Otherwise steer the event onto GP counters 1-7. */
+               return &counters_1_7_constraint;
+       }
+
+       return c;
+}
+
 static struct event_constraint *
 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
 
        if (pmu->cpu_type == hybrid_big)
-               return spr_get_event_constraints(cpuc, idx, event);
+               return rwc_get_event_constraints(cpuc, idx, event);
        if (pmu->cpu_type == hybrid_small)
                return cmt_get_event_constraints(cpuc, idx, event);
 
        if (is_hybrid())
                intel_pmu_check_hybrid_pmus((u64)fixed_mask);
 
+       if (x86_pmu.intel_cap.pebs_timing_info)
+               x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
+
        intel_aux_output_init();
 
        return 0;
 
 
 #define PEBS_LATENCY_MASK                      0xffff
 #define PEBS_CACHE_LATENCY_OFFSET              32
+#define PEBS_RETIRE_LATENCY_OFFSET             32
 
 /*
  * With adaptive PEBS the layout depends on what fields are configured.
        set_linear_ip(regs, basic->ip);
        regs->flags = PERF_EFLAGS_EXACT;
 
+       if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY))
+               data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
+
        /*
         * The record for MEMINFO is in front of GP
         * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
 
                u64     pebs_baseline:1;
                u64     perf_metrics:1;
                u64     pebs_output_pt_available:1;
+               u64     pebs_timing_info:1;
                u64     anythread_deprecated:1;
        };
        u64     capabilities;
 #define PMU_FL_PAIR            0x40 /* merge counters for large incr. events */
 #define PMU_FL_INSTR_LATENCY   0x80 /* Support Instruction Latency in PEBS Memory Info Record */
 #define PMU_FL_MEM_LOADS_AUX   0x100 /* Require an auxiliary event for the complete memory info */
+#define PMU_FL_RETIRE_LATENCY  0x200 /* Support Retire Latency in PEBS */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr