From: chris hyser Date: Thu, 7 Apr 2016 20:55:56 +0000 (-0700) Subject: sparc64: Set up core sibling list correctly for T7. X-Git-Tag: v4.1.12-92~163^2~1 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c5c766357091b621fb0b27ae9d26448854a6d830;p=users%2Fjedix%2Flinux-maple.git sparc64: Set up core sibling list correctly for T7. The important definition of core sibling is that some level of cache is shared. The prior SPARC notion of socket was defined as highest level of shared cache. On T7 platforms, the MD record now describes the CPUs that share the physical socket and this is no longer tied to shared cache. This patch correctly separates these two concepts. Before: [root@ca-sparc30 topology]# cat core_siblings_list 32-63,128-223 After: [root@ca-sparc30 topology]# cat core_siblings_list 32-63 OraBug 22748950 Signed-off-by: Chris Hyser --- diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index ddc39405d17b..8f70ec3a2977 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -28,9 +28,10 @@ typedef struct { unsigned short ecache_line_size; unsigned short l3_cache_line_size; - unsigned short sock_id; + unsigned short sock_id; /* physical package */ unsigned short core_id; - int proc_id; + unsigned short max_cache_id; /* groupings of highest shared cache */ + unsigned short proc_id; /* strand (aka HW thread) id */ } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 713c04d60f94..84a8b875d8e8 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -861,13 +861,19 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node, cpu_data(*id).core_id = core_id; } -static void __mark_sock_id(struct mdesc_handle *hp, u64 node, - int sock_id) +static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node, + int max_cache_id) { const u64 *id = mdesc_get_property(hp, node, "id", 
NULL); - if (*id < num_possible_cpus()) - cpu_data(*id).sock_id = sock_id; + if (*id < num_possible_cpus()) { + cpu_data(*id).max_cache_id = max_cache_id; + + /* On systems without explicit socket descriptions socket + * is max_cache_id + */ + cpu_data(*id).sock_id = max_cache_id; + } } static void mark_core_ids(struct mdesc_handle *hp, u64 mp, @@ -876,10 +882,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp, find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); } -static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, - int sock_id) +static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp, + int max_cache_id) { - find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); + find_back_node_value(hp, mp, "cpu", __mark_max_cache_id, + max_cache_id, 10); } static void set_core_ids(struct mdesc_handle *hp) @@ -910,14 +917,15 @@ static void set_core_ids(struct mdesc_handle *hp) } } -static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) +static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, + int level) { u64 mp; int idx = 1; int fnd = 0; - /* Identify unique sockets by looking for cpus backpointed to by - * shared level n caches. + /* Identify unique highest level of shared cache by looking for cpus + * backpointed to by shared level N caches. */ mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *cur_lvl; @@ -926,7 +934,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) if (*cur_lvl != level) continue; - mark_sock_ids(hp, mp, idx); + mark_max_cache_ids(hp, mp, idx); idx++; fnd = 1; } @@ -961,15 +969,17 @@ static void set_sock_ids(struct mdesc_handle *hp) { u64 mp; + /* Find the highest level of shared cache which pre-T7 is also + * the socket. + */ + if (!set_max_cache_ids_by_cache(hp, 3)) + set_max_cache_ids_by_cache(hp, 2); + /* If machine description exposes sockets data use it. - * Otherwise fallback to use shared L3 or L2 caches. 
*/ mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); if (mp != MDESC_NODE_NULL) - return set_sock_ids_by_socket(hp, mp); - - if (!set_sock_ids_by_cache(hp, 3)) - set_sock_ids_by_cache(hp, 2); + set_sock_ids_by_socket(hp, mp); } static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) @@ -1183,7 +1193,6 @@ static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, } } - c->sock_id = -1; c->core_id = 0; c->proc_id = -1; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index d2dfa92ce889..6ad6c3660ab7 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1254,7 +1254,8 @@ void smp_fill_in_sib_core_maps(void) } for_each_present_cpu(j) { - if (cpu_data(i).sock_id == cpu_data(j).sock_id) + if (cpu_data(i).max_cache_id == + cpu_data(j).max_cache_id) cpumask_set_cpu(j, &cpu_core_sib_map[i]); } }