#define hypercall_compat32(longmode) (false)
#endif
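+/*
+ * Translate a guest linear (virtual) address to a guest physical address
+ * via the KVM_TRANSLATE ioctl. If @len is non-NULL it is set to the number
+ * of bytes remaining in the page containing @gva; the caller clamps it as
+ * needed. Fails if the translation is invalid, or if a writable mapping
+ * was requested and the page is not writable.
+ */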
-static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
-                      bool is_write)
+static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
+                           size_t *len, bool is_write)
{
-    uint8_t *buf = (uint8_t *)_buf;
-    int ret;
-
-    while (sz) {
-        struct kvm_translation tr = {
-            .linear_address = gva,
-        };
+    struct kvm_translation tr = {
+        .linear_address = gva,
+    };
+
+    if (len) {
+        *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK);
+    }

-        size_t len = TARGET_PAGE_SIZE - (tr.linear_address & ~TARGET_PAGE_MASK);
-        if (len > sz) {
-            len = sz;
-        }
+    if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid ||
+        (is_write && !tr.writeable)) {
+        return false;
+    }
+    *gpa = tr.physical_address;
+    return true;
+}
+
+static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
+                      bool is_write)
+{
+    uint8_t *buf = (uint8_t *)_buf;
+    uint64_t gpa;
+    size_t len;

-        ret = kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr);
-        if (ret || !tr.valid || (is_write && !tr.writeable)) {
+    while (sz) {
+        if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) {
            return -EFAULT;
        }
+        if (len > sz) {
+            len = sz;
+        }

-        cpu_physical_memory_rw(tr.physical_address, buf, len, is_write);
+        cpu_physical_memory_rw(gpa, buf, len, is_write);
        buf += len;
        sz -= len;
@@ ... @@
    env->xen_vcpu_info_gpa = INVALID_GPA;
    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+    env->xen_vcpu_time_info_gpa = INVALID_GPA;

    return 0;
}

@@ ... @@
    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
                          env->xen_vcpu_info_gpa);
}
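+/* Runs on the target vCPU: record the time info GPA and pass it to KVM. */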
+static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_time_info_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                          env->xen_vcpu_time_info_gpa);
+}
+
static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
{
X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_info_gpa = INVALID_GPA;
    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+    env->xen_vcpu_time_info_gpa = INVALID_GPA;

    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                          INVALID_GPA);
}
@@ ... @@ static int xen_set_shared_info(uint64_t gfn)
return 0;
}
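+/*
+ * VCPUOP_register_vcpu_time_memory_area: the guest asks for its
+ * vcpu_time_info to be kept up to date at a given guest virtual address.
+ * Copy the request from guest memory, translate the address once, and
+ * hand the resulting GPA to the target vCPU.
+ */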
+static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
+                                          uint64_t arg)
+{
+    struct vcpu_register_time_memory_area tma;
+    uint64_t gpa;
+    size_t len;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(tma) == 8);
+    qemu_build_assert(sizeof(struct vcpu_time_info) == 32);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &tma, sizeof(tma))) {
+        return -EFAULT;
+    }
+
+    /*
+     * Xen actually uses the GVA and does the translation through the guest
+     * page tables each time. But Linux/KVM uses the GPA, on the assumption
+     * that guests only ever use *global* addresses (kernel virtual addresses)
+     * for it. If Linux is changed to redo the GVA→GPA translation each time,
+     * it will offer a new vCPU attribute for that, and we'll use it instead.
+     */
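+    /* The 32-byte vcpu_time_info must fit entirely within one page. */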
+    if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) ||
+        len < sizeof(struct vcpu_time_info)) {
+        return -EFAULT;
+    }
+
+    async_run_on_cpu(target, do_set_vcpu_time_info_gpa,
+                     RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, int vcpu_id, uint64_t arg)
{
    CPUState *dest = qemu_get_cpu(vcpu_id);
    CPUState *cs = CPU(cpu);
    int err;

switch (cmd) {
+    case VCPUOP_register_vcpu_time_memory_area:
+        err = vcpuop_register_vcpu_time_info(cs, dest, arg);
+        break;
case VCPUOP_register_vcpu_info:
err = vcpuop_register_vcpu_info(cs, dest, arg);
break;
}
}
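+    /* Tell KVM about the time info area, if one has been registered. */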
+    gpa = env->xen_vcpu_time_info_gpa;
+    if (gpa != INVALID_GPA) {
+        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                                    gpa);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
return 0;
}