        int llc_id;
        cpumask_t thread_sibling;
        cpumask_t core_sibling;
-       cpumask_t llc_siblings;
+       cpumask_t llc_sibling;
 };
 
 extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_core_id(cpu)          (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
-#define topology_llc_cpumask(cpu)      (&cpu_topology[cpu].llc_siblings)
+#define topology_llc_cpumask(cpu)      (&cpu_topology[cpu].llc_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 
        const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;
 
        if (cpu_topology[cpu].llc_id != -1) {
-               if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
-                       core_mask = &cpu_topology[cpu].llc_siblings;
+               if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+                       core_mask = &cpu_topology[cpu].llc_sibling;
        }
 
        return core_mask;
                cpu_topo = &cpu_topology[cpu];
 
                if (cpuid_topo->llc_id == cpu_topo->llc_id) {
-                       cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
-                       cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
+                       cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+                       cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }
 
                if (cpuid_topo->package_id != cpu_topo->package_id)
 {
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];
 
-       cpumask_clear(&cpu_topo->llc_siblings);
-       cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);
+       cpumask_clear(&cpu_topo->llc_sibling);
+       cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
 
        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
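
For reference, a minimal sketch (not part of this patch) of how a caller could walk a CPU's last-level-cache siblings through the topology_llc_cpumask() accessor defined above; the helper name llc_sibling_example() is hypothetical and only illustrates the renamed llc_sibling mask:

	/* Hypothetical example: iterate the CPUs that share a last-level
	 * cache with @cpu via the topology_llc_cpumask() accessor. */
	static void llc_sibling_example(unsigned int cpu)
	{
		unsigned int sibling;

		for_each_cpu(sibling, topology_llc_cpumask(cpu))
			pr_info("CPU%u shares LLC with CPU%u\n", cpu, sibling);
	}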