/* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. */
        {
-               struct cpu_topology *topology = cpu_topology__new();
-               bool smton = smt_on(topology);
+               bool smton = smt_on();
                bool corewide = core_wide(/*system_wide=*/false,
-                                         /*user_requested_cpus=*/false,
-                                         topology);
+                                         /*user_requested_cpus=*/false);
 
-               cpu_topology__delete(topology);
                expr__ctx_clear(ctx);
                TEST_ASSERT_VAL("find ids",
                                expr__find_ids("EVENT1 if #smt_on else EVENT2",
 
        return true;
 }
 
+const struct cpu_topology *online_topology(void)
+{
+       static const struct cpu_topology *topology;
+
+       if (!topology) {
+               topology = cpu_topology__new();
+               if (!topology) {
+                       pr_err("Error creating CPU topology\n");
+                       abort();
+               }
+       }
+       return topology;
+}
+
 struct cpu_topology *cpu_topology__new(void)
 {
        struct cpu_topology *tp = NULL;
 
        struct hybrid_topology_node     nodes[];
 };
 
+/*
+ * The topology for online CPUs, lazily created.
+ */
+const struct cpu_topology *online_topology(void);
+
 struct cpu_topology *cpu_topology__new(void);
 void cpu_topology__delete(struct cpu_topology *tp);
 /* Determine from the core list whether SMT was enabled. */
 
 
 double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
 {
-       static struct cpu_topology *topology;
+       const struct cpu_topology *topology;
        double result = NAN;
 
        if (!strcmp("#num_cpus", literal)) {
         * these strings gives an indication of the number of packages, dies,
         * etc.
         */
-       if (!topology) {
-               topology = cpu_topology__new();
-               if (!topology) {
-                       pr_err("Error creating CPU topology");
-                       goto out;
-               }
-       }
        if (!strcasecmp("#smt_on", literal)) {
-               result = smt_on(topology) ? 1.0 : 0.0;
+               result = smt_on() ? 1.0 : 0.0;
                goto out;
        }
        if (!strcmp("#core_wide", literal)) {
-               result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list, topology)
+               result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
                        ? 1.0 : 0.0;
                goto out;
        }
        if (!strcmp("#num_packages", literal)) {
+               topology = online_topology();
                result = topology->package_cpus_lists;
                goto out;
        }
        if (!strcmp("#num_dies", literal)) {
+               topology = online_topology();
                result = topology->die_cpus_lists;
                goto out;
        }
        if (!strcmp("#num_cores", literal)) {
+               topology = online_topology();
                result = topology->core_cpus_lists;
                goto out;
        }
 
 #include "cputopo.h"
 #include "smt.h"
 
-bool smt_on(const struct cpu_topology *topology)
+bool smt_on(void)
 {
        static bool cached;
        static bool cached_result;
        if (sysfs__read_int("devices/system/cpu/smt/active", &fs_value) >= 0)
                cached_result = (fs_value == 1);
        else
-               cached_result = cpu_topology__smt_on(topology);
+               cached_result = cpu_topology__smt_on(online_topology());
 
        cached = true;
        return cached_result;
 }
 
-bool core_wide(bool system_wide, const char *user_requested_cpu_list,
-              const struct cpu_topology *topology)
+bool core_wide(bool system_wide, const char *user_requested_cpu_list)
 {
        /* If not everything running on a core is being recorded then we can't use core_wide. */
        if (!system_wide)
                return false;
 
        /* Cheap case that SMT is disabled and therefore we're inherently core_wide. */
-       if (!smt_on(topology))
+       if (!smt_on())
                return true;
 
-       return cpu_topology__core_wide(topology, user_requested_cpu_list);
+       return cpu_topology__core_wide(online_topology(), user_requested_cpu_list);
 }
 
 #ifndef __SMT_H
 #define __SMT_H 1
 
-struct cpu_topology;
-
-/* Returns true if SMT (aka hyperthreading) is enabled. */
-bool smt_on(const struct cpu_topology *topology);
+/*
+ * Returns true if SMT (aka hyperthreading) is enabled. Determined via sysfs or
+ * the online topology.
+ */
+bool smt_on(void);
 
 /*
  * Returns true when system wide and all SMT threads for a core are in the
  * user_requested_cpus map.
  */
-bool core_wide(bool system_wide, const char *user_requested_cpu_list,
-              const struct cpu_topology *topology);
+bool core_wide(bool system_wide, const char *user_requested_cpu_list);
 
 #endif /* __SMT_H */