else
                bpf_map__set_max_entries(skel->maps.stacks, 1);
 
-       if (target__has_cpu(target))
+       if (target__has_cpu(target)) {
+               skel->rodata->has_cpu = 1;
                ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
-       if (target__has_task(target))
+       }
+       if (target__has_task(target)) {
+               skel->rodata->has_task = 1;
                ntasks = perf_thread_map__nr(evlist->core.threads);
-       if (con->filters->nr_types)
+       }
+       if (con->filters->nr_types) {
+               skel->rodata->has_type = 1;
                ntypes = con->filters->nr_types;
-       if (con->filters->nr_cgrps)
+       }
+       if (con->filters->nr_cgrps) {
+               skel->rodata->has_cgroup = 1;
                ncgrps = con->filters->nr_cgrps;
+       }
 
        /* resolve lock name filters to addr */
        if (con->filters->nr_syms) {
                        con->filters->addrs = addrs;
                }
                naddrs = con->filters->nr_addrs;
+               skel->rodata->has_addr = 1;
        }
 
        bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
        bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
        bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
        bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
        bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
 
+       skel->rodata->stack_skip = con->stack_skip;
+       skel->rodata->aggr_mode = con->aggr_mode;
+       skel->rodata->needs_callstack = con->save_callstack;
+       skel->rodata->lock_owner = con->owner;
+
+       if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) {
+               if (cgroup_is_v2("perf_event"))
+                       skel->rodata->use_cgroup_v2 = 1;
+       }
+
        if (lock_contention_bpf__load(skel) < 0) {
                pr_err("Failed to load lock-contention BPF skeleton\n");
                return -1;
        }

        if (target__has_cpu(target)) {
                u32 cpu;
                u8 val = 1;
 
-               skel->bss->has_cpu = 1;
                fd = bpf_map__fd(skel->maps.cpu_filter);
 
                for (i = 0; i < ncpus; i++) {
                        cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
                        bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
                }
        }

        if (target__has_task(target)) {
                u32 pid;
                u8 val = 1;
 
-               skel->bss->has_task = 1;
                fd = bpf_map__fd(skel->maps.task_filter);
 
                for (i = 0; i < ntasks; i++) {
                        pid = perf_thread_map__pid(evlist->core.threads, i);
                        bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
                }
        }

        if (target__none(target) && evlist->workload.pid > 0) {
                u32 pid = evlist->workload.pid;
                u8 val = 1;
 
-               skel->bss->has_task = 1;
                fd = bpf_map__fd(skel->maps.task_filter);
                bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
        }

        if (con->filters->nr_types) {
                u8 val = 1;
 
-               skel->bss->has_type = 1;
                fd = bpf_map__fd(skel->maps.type_filter);
 
                for (i = 0; i < con->filters->nr_types; i++)
                        bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
        }

        if (con->filters->nr_addrs) {
                u8 val = 1;
 
-               skel->bss->has_addr = 1;
                fd = bpf_map__fd(skel->maps.addr_filter);
 
                for (i = 0; i < con->filters->nr_addrs; i++)
                        bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
        }

        if (con->filters->nr_cgrps) {
                u8 val = 1;
 
-               skel->bss->has_cgroup = 1;
                fd = bpf_map__fd(skel->maps.cgroup_filter);
 
                for (i = 0; i < con->filters->nr_cgrps; i++)
                        bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
        }
 
-       /* these don't work well if in the rodata section */
-       skel->bss->stack_skip = con->stack_skip;
-       skel->bss->aggr_mode = con->aggr_mode;
-       skel->bss->needs_callstack = con->save_callstack;
-       skel->bss->lock_owner = con->owner;
-
-       if (con->aggr_mode == LOCK_AGGR_CGROUP) {
-               if (cgroup_is_v2("perf_event"))
-                       skel->bss->use_cgroup_v2 = 1;
-
+       if (con->aggr_mode == LOCK_AGGR_CGROUP)
                read_all_cgroups(&con->cgroups);
-       }
 
        bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
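
For context on why this change is possible: libbpf places "const volatile" globals in the skeleton's .rodata map. User space may write them through skel->rodata only in the window between lock_contention_bpf__open() and lock_contention_bpf__load(); once loaded, the map is frozen read-only and the verifier treats each value as a known constant, so a filter branch that is disabled gets dead-code-eliminated rather than checked at run time. That is also why the old "don't work well if in the rodata section" comment can be dropped once the stores are moved ahead of the load. Below is a minimal sketch of what the BPF-side counterpart looks like under this scheme; the field and map names mirror the ones used above, but the body is illustrative, not the actual lock_contention.bpf.c:

	/* Sketch of the BPF side; illustrative only, not the real source. */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	/*
	 * Written from user space via skel->rodata between open and load;
	 * read-only constants afterwards, so the verifier can prune any
	 * branch guarded by a value that is still 0.
	 */
	const volatile int has_cpu;
	const volatile int stack_skip;

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1);	/* resized via bpf_map__set_max_entries() */
		__type(key, u32);
		__type(value, u8);
	} cpu_filter SEC(".maps");

	SEC("tp_btf/contention_begin")
	int contention_begin(u64 *ctx)
	{
		if (has_cpu) {	/* compiled out when the constant stays 0 */
			u32 cpu = bpf_get_smp_processor_id();

			if (!bpf_map_lookup_elem(&cpu_filter, &cpu))
				return 0;	/* not a CPU we filter on */
		}
		/* ... record the contention event ... */
		return 0;
	}

	char LICENSE[] SEC("license") = "Dual BSD/GPL";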