LoongArch: For all possible CPUs setup logical-physical CPU mapping
author		Huacai Chen <chenhuacai@loongson.cn>
		Tue, 12 Nov 2024 08:35:36 +0000 (16:35 +0800)
committer	Huacai Chen <chenhuacai@loongson.cn>
		Tue, 12 Nov 2024 08:35:36 +0000 (16:35 +0800)
In order to support ACPI-based physical CPU hotplug, cpu_logical_map()
is supposed to work for all "possible" CPUs. Some drivers want to use
cpu_logical_map() for all "possible" CPUs, but currently the
logical-physical mapping is only set up for "present" CPUs. This
missing mapping also means that cpu_to_node() cannot work for
hot-added CPUs.
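
For illustration, the kind of consumer this patch is meant to support
looks like the hedged sketch below (the helper name is hypothetical and
not part of this patch); it is only correct if cpu_logical_map() and
cpu_to_node() give valid answers for every possible CPU, including CPUs
that have not been hot-added yet:

  #include <linux/cpumask.h>
  #include <linux/printk.h>
  #include <linux/smp.h>
  #include <linux/topology.h>

  /* Hypothetical driver helper: walk every possible CPU, including
   * not-yet-present ones, and use its physical ID and NUMA node. */
  static void example_scan_possible_cpus(void)
  {
          unsigned int cpu;

          for_each_possible_cpu(cpu)
                  pr_info("cpu%u: physical id %d, node %d\n", cpu,
                          (int)cpu_logical_map(cpu), cpu_to_node(cpu));
  }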

All "possible" CPUs are listed in MADT, and the "present" subset is
marked as ACPI_MADT_ENABLED. To setup logical-physical CPU mapping for
all possible CPUs and keep present CPUs continuous in cpu_present_mask,
we parse MADT twice. The first pass handles CPUs with ACPI_MADT_ENABLED
and the second pass handles CPUs without ACPI_MADT_ENABLED.
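
In outline, the enumeration in the patch below behaves like this hedged
sketch (simplified: the boot-CPU special case, the NR_CPUS limit check
and error handling are left out, and for_each_madt_core_pic() /
madt_enabled() are made-up placeholders for the MADT walk):

  /* Pass 1: enabled CPUs take the lowest free logical IDs, so
   * cpu_present_mask stays contiguous starting at CPU 0. */
  for_each_madt_core_pic(physid) {
          if (!madt_enabled(physid))
                  continue;
          cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
          num_processors++;
          set_cpu_present(cpu, true);
          set_cpu_possible(cpu, true);
          __cpu_number_map[physid] = cpu;
          __cpu_logical_map[cpu] = physid;
  }

  /* Pass 2: disabled CPUs take the remaining logical IDs and only
   * become "possible", ready for a later ACPI hot-add. */
  for_each_madt_core_pic(physid) {
          if (madt_enabled(physid))
                  continue;
          cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
          disabled_cpus++;
          set_cpu_possible(cpu, true);
          __cpu_number_map[physid] = cpu;
          __cpu_logical_map[cpu] = physid;
  }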

The global flag cpu_enumerated is removed because acpi_map_cpu() now
calls cpu_number_map() rather than set_processor_mask().
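
With the mapping established at boot, the hot-add path only needs a
lookup plus bookkeeping, roughly (condensed from the acpi_map_cpu()
hunk below):

  cpu = cpu_number_map(physid);   /* mapping already set up at boot */
  if (cpu < 0 || cpu >= nr_cpu_ids)
          return -ERANGE;
  num_processors++;
  set_cpu_present(cpu, true);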

Reported-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/kernel/acpi.c
arch/loongarch/kernel/smp.c

index f1a74b80f22c547870eb68a13692369cd354658a..382a09a7152c30597f18efbfed80a4405399894e 100644 (file)
@@ -58,48 +58,48 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
                return ioremap_cache(phys, size);
 }
 
-static int cpu_enumerated = 0;
-
 #ifdef CONFIG_SMP
-static int set_processor_mask(u32 id, u32 flags)
+static int set_processor_mask(u32 id, u32 pass)
 {
-       int nr_cpus;
-       int cpu, cpuid = id;
-
-       if (!cpu_enumerated)
-               nr_cpus = NR_CPUS;
-       else
-               nr_cpus = nr_cpu_ids;
+       int cpu = -1, cpuid = id;
 
-       if (num_processors >= nr_cpus) {
+       if (num_processors >= NR_CPUS) {
                pr_warn(PREFIX "nr_cpus limit of %i reached."
-                       " processor 0x%x ignored.\n", nr_cpus, cpuid);
+                       " processor 0x%x ignored.\n", NR_CPUS, cpuid);
 
                return -ENODEV;
 
        }
+
        if (cpuid == loongson_sysconf.boot_cpu_id)
                cpu = 0;
-       else
-               cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
-
-       if (!cpu_enumerated)
-               set_cpu_possible(cpu, true);
 
-       if (flags & ACPI_MADT_ENABLED) {
+       switch (pass) {
+       case 1: /* Pass 1 handle enabled processors */
+               if (cpu < 0)
+                       cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
                num_processors++;
                set_cpu_present(cpu, true);
-               __cpu_number_map[cpuid] = cpu;
-               __cpu_logical_map[cpu] = cpuid;
-       } else
+               break;
+       case 2: /* Pass 2 handle disabled processors */
+               if (cpu < 0)
+                       cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
                disabled_cpus++;
+               break;
+       default:
+               return cpu;
+       }
+
+       set_cpu_possible(cpu, true);
+       __cpu_number_map[cpuid] = cpu;
+       __cpu_logical_map[cpu] = cpuid;
 
        return cpu;
 }
 #endif
 
 static int __init
-acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
 {
        struct acpi_madt_core_pic *processor = NULL;
 
@@ -110,12 +110,29 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
        acpi_table_print_madt_entry(&header->common);
 #ifdef CONFIG_SMP
        acpi_core_pic[processor->core_id] = *processor;
-       set_processor_mask(processor->core_id, processor->flags);
+       if (processor->flags & ACPI_MADT_ENABLED)
+               set_processor_mask(processor->core_id, 1);
 #endif
 
        return 0;
 }
 
+static int __init
+acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+       struct acpi_madt_core_pic *processor = NULL;
+
+       processor = (struct acpi_madt_core_pic *)header;
+       if (BAD_MADT_ENTRY(processor, end))
+               return -EINVAL;
+
+#ifdef CONFIG_SMP
+       if (!(processor->flags & ACPI_MADT_ENABLED))
+               set_processor_mask(processor->core_id, 2);
+#endif
+
+       return 0;
+}
 static int __init
 acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
 {
@@ -143,12 +160,14 @@ static void __init acpi_process_madt(void)
        }
 #endif
        acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
-                       acpi_parse_processor, MAX_CORE_PIC);
+                       acpi_parse_p1_processor, MAX_CORE_PIC);
+
+       acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+                       acpi_parse_p2_processor, MAX_CORE_PIC);
 
        acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
                        acpi_parse_eio_master, MAX_IO_PICS);
 
-       cpu_enumerated = 1;
        loongson_sysconf.nr_cpus = num_processors;
 }
 
@@ -310,6 +329,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
        int nid;
 
        nid = acpi_get_node(handle);
+
+       if (nid != NUMA_NO_NODE)
+               nid = early_cpu_to_node(cpu);
+
        if (nid != NUMA_NO_NODE) {
                set_cpuid_to_node(physid, nid);
                node_set(nid, numa_nodes_parsed);
@@ -324,12 +347,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
 {
        int cpu;
 
-       cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
-       if (cpu < 0) {
+       cpu = cpu_number_map(physid);
+       if (cpu < 0 || cpu >= nr_cpu_ids) {
                pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
-               return cpu;
+               return -ERANGE;
        }
 
+       num_processors++;
+       set_cpu_present(cpu, true);
        acpi_map_cpu2node(handle, cpu, physid);
 
        *pcpu = cpu;
index 9afc2d8b341419df404be792a1d756cc962049b3..c0b498467ffa9f3980c689b1427dd87df275ba61 100644 (file)
@@ -331,11 +331,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
        int i = 0;
 
        parse_acpi_topology();
+       cpu_data[0].global_id = cpu_logical_map(0);
 
        for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
                set_cpu_present(i, true);
                csr_mail_send(0, __cpu_logical_map[i], 0);
-               cpu_data[i].global_id = __cpu_logical_map[i];
        }
 
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -380,6 +380,7 @@ void loongson_init_secondary(void)
                     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
        cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
                     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+       cpu_data[cpu].global_id = cpu_logical_map(cpu);
 }
 
 void loongson_smp_finish(void)