void xen_vcpu_restore(void)
{
int cpu;
+ bool vcpuops = true;
+ const struct cpumask *mask;
- for_each_possible_cpu(cpu) {
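+ /* PV guests iterate over every possible vcpu; HVM guests only over the online ones. */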
+ mask = xen_pv_domain() ? cpu_possible_mask : cpu_online_mask;
+
+ /* Only Xen 4.5 and higher supports this. */
+ if (HYPERVISOR_vcpu_op(VCPUOP_is_up, smp_processor_id(), NULL) == -ENOSYS)
+ vcpuops = false;
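+ /* In that case, skip VCPUOP_down/up below and only redo the vcpu_info placement. */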
+
+ for_each_cpu(cpu, mask) {
bool other_cpu = (cpu != smp_processor_id());
- bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
+ bool is_up = false;
- if (other_cpu && is_up &&
+ if (vcpuops)
+ is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
+
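+ /* A remote vcpu that is up must be taken down while its vcpu_info is re-registered, and brought back up afterwards. */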
+ if (vcpuops && other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
BUG();
xen_setup_runstate_info(cpu);

if (have_vcpu_info_placement)
xen_vcpu_setup(cpu);
- if (other_cpu && is_up &&
+ if (vcpuops && other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
BUG();
}
}

/* When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
 * online but xen_hvm_init_shared_info is run at resume time too and
 * in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
/* Leave it to be NULL. */
- if (cpu >= MAX_VIRT_CPUS)
- continue;
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ if (cpu >= MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu, cpu) = NULL; /* Triggers xen_vcpu_setup. */
+ else
+ per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
}
static void xen_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
- int cpu;
xen_hvm_init_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
- if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
- for_each_online_cpu(cpu) {
- xen_setup_runstate_info(cpu);
- }
- }
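+ /* xen_vcpu_restore also re-registers the runstate area for every online vcpu, replacing the loop above. */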
+ xen_vcpu_restore();
#endif
}
void xen_setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
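+ /* Skip registration on HVM when the hypervisor lacks a pvclock that is safe for HVM guests. */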
+ if (xen_hvm_domain() && !xen_feature(XENFEAT_hvm_safe_pvclock))
+ return;
+
area.addr.v = &per_cpu(xen_runstate, cpu);
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,