perf/x86/amd/uncore: Set ThreadMask and SliceMask for L3 Cache perf events
author     Natarajan, Janakarajan <Janakarajan.Natarajan@amd.com>
           Thu, 27 Sep 2018 15:51:55 +0000 (15:51 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 4 Nov 2018 13:52:40 +0000 (14:52 +0100)
[ Upstream commit d7cbbe49a9304520181fb8c9272d1327deec8453 ]

In Family 17h, some L3 Cache Performance events require the ThreadMask
and SliceMask to be set. For other events, these fields do not affect
the count either way.

Set ThreadMask and SliceMask to 0xFF and 0xF respectively.

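For illustration, a minimal user-space sketch (not part of the patch) of the
value the driver now programs: the event select stays in the low bits while
SliceMask and ThreadMask are ORed into bits 51:48 and 63:56 of the L3 PMC
control register, matching the AMD64_L3_* macros added in
arch/x86/include/asm/perf_event.h. The event select 0x01 is only a placeholder.

  /*
   * Not part of the patch: mirrors what amd_uncore_event_init() now does
   * unconditionally for amd_l3 events on Family 17h.
   */
  #include <inttypes.h>
  #include <stdio.h>

  #define L3_SLICE_SHIFT   48   /* SliceMask lives in bits 51:48  */
  #define L3_THREAD_SHIFT  56   /* ThreadMask lives in bits 63:56 */

  int main(void)
  {
      uint64_t config = 0x01;                  /* placeholder event select */

      config |= 0xFULL  << L3_SLICE_SHIFT;     /* SliceMask  = 0xF  */
      config |= 0xFFULL << L3_THREAD_SHIFT;    /* ThreadMask = 0xFF */

      printf("config = 0x%016" PRIx64 "\n", config);
      return 0;
  }
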
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suravee <Suravee.Suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/Message-ID:
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/events/amd/uncore.c
arch/x86/include/asm/perf_event.h

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index f5cbbba992833af251e750a1d64a120e9f46368d..4e1d7483b78c64a3c0272107ecdda70301c3f51f 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -35,6 +35,7 @@
 
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;
 
 static HLIST_HEAD(uncore_unused_list);
 
@@ -208,6 +209,13 @@ static int amd_uncore_event_init(struct perf_event *event)
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
+       /*
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+       if (l3_mask)
+               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
        if (event->cpu < 0)
                return -EINVAL;
 
@@ -542,6 +550,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
+               l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
@@ -549,6 +558,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
+               l3_mask                   = false;
        }
 
        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 12f54082f4c8ec35fbad10a0270d0846a6b0e8c5..78241b736f2a04aa4ccac261727ecd8798042cda 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
 #define INTEL_ARCH_EVENT_MASK  \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define AMD64_L3_SLICE_SHIFT                           48
+#define AMD64_L3_SLICE_MASK                            \
+       ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT                          56
+#define AMD64_L3_THREAD_MASK                           \
+       ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
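
Usage sketch, not from the patch: with the masks applied by the kernel, a
caller only supplies the event select and unit mask. The snippet below opens
one amd_l3 counter via perf_event_open(2); the sysfs type lookup and the
event=0x01/umask=0x80 encoding are illustrative assumptions, so consult the
Family 17h PPR for real L3 event codes. Note the driver rejects
event->cpu < 0, hence the counter is bound to CPU 0 in system-wide mode.

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <inttypes.h>
  #include <stdio.h>
  #include <string.h>

  static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                              int cpu, int group_fd, unsigned long flags)
  {
      return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
      struct perf_event_attr attr;
      unsigned int type;
      uint64_t count;
      FILE *f;
      int fd;

      /* Dynamic PMU type of the amd_l3 PMU registered by this driver. */
      f = fopen("/sys/bus/event_source/devices/amd_l3/type", "r");
      if (!f || fscanf(f, "%u", &type) != 1)
          return 1;
      fclose(f);

      memset(&attr, 0, sizeof(attr));
      attr.size   = sizeof(attr);
      attr.type   = type;
      attr.config = 0x01 | (0x80ULL << 8);  /* event=0x01, umask=0x80 (placeholder) */

      /* System-wide (pid == -1) on CPU 0; no Slice/Thread bits needed here. */
      fd = perf_event_open(&attr, -1, 0, -1, 0);
      if (fd < 0)
          return 1;

      sleep(1);
      if (read(fd, &count, sizeof(count)) == sizeof(count))
          printf("amd_l3 count: %" PRIu64 "\n", count);
      close(fd);
      return 0;
  }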