Currently, cpu/cpuX represents the maximum number of possible
cpus in a domain. Those cpu sysfs directories also do not
change as we add/remove cpus via the ldom manager.

Update sysfs so that it represents the number of present cpus
in the domain. As a result, cpu sysfs is also updated
dynamically upon cpu add/removal.
Orabug: 21775890
Orabug: 25216469
Before the fix:
[root@ca-sparc76 ~]# ldm list
NAME STATE FLAGS CONS VCPU MEMORY UTIL NORM UPTIME
primary active -n-cv- UART 32 32G 0.2% 0.2% 11m
[root@ca-sparc76 ~]# getconf _NPROCESSORS_CONF
512
[root@ca-sparc76 ~]# ldm set-vcpu 64 primary
[root@ca-sparc76 ~]# ldm list
NAME STATE FLAGS CONS VCPU MEMORY UTIL NORM UPTIME
primary active -n-cv- UART 64 32G 0.0% 0.0% 12m
[root@ca-sparc76 ~]# getconf _NPROCESSORS_CONF
512
-------------------------------------------------------------------------
After the fix:
[root@ca-sparc76 ~]# getconf _NPROCESSORS_CONF
32
[root@ca-sparc76 ~]# ldm set-vcpu 64 primary
[root@ca-sparc76 ~]# ldm list
NAME STATE FLAGS CONS VCPU MEMORY UTIL NORM UPTIME
primary active -n-cv- UART 64 32G 0.0% 0.0% 12m
[root@ca-sparc76 ~]# getconf _NPROCESSORS_CONF
64
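
For reference, a minimal userspace sketch that mirrors the getconf checks
above (assumption: glibc's sysconf(_SC_NPROCESSORS_CONF) is backed by the
cpu/cpuN sysfs directories, just as getconf is in the example, so it tracks
present cpus once this change is applied):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Number of configured cpus, i.e. cpu/cpuN sysfs entries. */
	printf("configured cpus: %ld\n", sysconf(_SC_NPROCESSORS_CONF));
	/* Number of cpus currently online. */
	printf("online cpus:     %ld\n", sysconf(_SC_NPROCESSORS_ONLN));
	return 0;
}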
Signed-off-by: Atish Patra <atish.patra@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Tai <thomas.tai@oracle.com>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
+void arch_unregister_cpu(int cpu);
#endif
#endif /* !(__ASSEMBLY__) */
#endif /* !(CONFIG_SMP) */
+void arch_register_cpu(int cpu);
+
#endif /* !(_SPARC64_SMP_H) */
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/stop_machine.h>
+#include <linux/cpu.h>
#include <asm/hypervisor.h>
#include <asm/ldc.h>
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
- mdesc_populate_present_mask(mask);
mdesc_fill_in_cpu_data(mask);
for_each_cpu(cpu, mask) {
int err;
dprintk("ds-%llu: Starting cpu %d...\n", ds->id, cpu);
+
+ cpu_maps_update_begin();
+ set_cpu_present(cpu, true);
+ arch_register_cpu(cpu);
+ cpu_maps_update_done();
+
err = cpu_up(cpu);
if (err) {
u32 res = DR_CPU_RES_FAILURE;
dr_cpu_mark(resp, cpu, ncpus,
DR_CPU_RES_FAILURE,
DR_CPU_STAT_CONFIGURED);
+
+ cpu_maps_update_begin();
+ set_cpu_present(cpu, false);
+ arch_unregister_cpu(cpu);
+ cpu_maps_update_done();
}
ds_cap_send(handle, resp, resp_len);
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
kobject_put(cache_kobjs[cpu]);
}
+
+void arch_unregister_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ unregister_cpu(c);
+}
#endif
static int sysfs_cpu_notify(struct notifier_block *self,
#endif
}
+/* This function should only be called from the cpu_maps_update_begin
+ * or cpu_notifier_register_begin context.
+ */
+void arch_register_cpu(int cpu)
+{
+ int node = cpu_to_node(cpu);
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ if (!node_online(node))
+ panic("corresponding node [%d] for cpu [%d] is not online.\n",
+ node, cpu);
+
+ c->hotpluggable = 1;
+ register_cpu(c, cpu);
+ if (cpu_online(cpu))
+ register_cpu_online(cpu);
+}
+
static int __init topology_init(void)
{
int cpu;
check_mmu_stats();
cpu_notifier_register_begin();
-
- for_each_possible_cpu(cpu) {
- struct cpu *c = &per_cpu(cpu_devices, cpu);
-
- c->hotpluggable = 1;
- register_cpu(c, cpu);
- if (cpu_online(cpu))
- register_cpu_online(cpu);
+ for_each_present_cpu(cpu) {
+ arch_register_cpu(cpu);
}
-
__register_cpu_notifier(&sysfs_cpu_nb);
cpu_notifier_register_done();