From: Joao Martins
Date: Thu, 6 Dec 2018 15:18:20 +0000 (-0500)
Subject: i386/xen: redirect gnttab callers with XEN_EMULATE
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=d34b7cabdfa359a6ab2388e59b0aa528058d55f8;p=users%2Fdwmw2%2Fqemu.git

i386/xen: redirect gnttab callers with XEN_EMULATE

This adjusts some of the APIs in xen-backend and xen-pvdev to use
XenBackendOps on platforms that wish to emulate some of those
operations. This means that instead of a system call, we do the work
ourselves in an equivalent QEMU implementation, because QEMU has full
visibility of the grant table frames. Hence it is pointless (and less
performant) to ask /dev/xen/gntdev to do the mapping when we can just
fetch the hva from the gpa (or the @frame pointed to by the gref)
directly.

Also, make sure the physical address is properly typed: gnt->frame is
a 32-bit type and can therefore lose its higher bits when shifted by
12, hence the casts to hwaddr.

Signed-off-by: Joao Martins
Signed-off-by: Boris Ostrovsky
---

diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index 49a725e8c7..d4974f189d 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -790,7 +790,7 @@ static void xen_device_frontend_destroy(XenDevice *xendev)
     }
 }
 
-void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
+static void xen_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
                                    Error **errp)
 {
     if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) {
@@ -798,7 +798,7 @@ void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
     }
 }
 
-void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
+static void *xen_map_grant_refs(XenDevice *xendev, uint32_t *refs,
                                 unsigned int nr_refs, int prot,
                                 Error **errp)
 {
@@ -814,7 +814,7 @@ void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
     return map;
 }
 
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+static void xen_unmap_grant_refs(XenDevice *xendev, void *map,
                                  unsigned int nr_refs, Error **errp)
 {
     if (xengnttab_unmap(xendev->xgth, map, nr_refs)) {
@@ -868,7 +868,7 @@ done:
     g_free(refs);
 }
 
-void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
+static void xen_copy_grant_refs(XenDevice *xendev, bool to_domain,
                                 XenDeviceGrantCopySegment segs[],
                                 unsigned int nr_segs, Error **errp)
 {
@@ -922,6 +922,39 @@ done:
     g_free(xengnttab_segs);
 }
 
+struct XenBackendOps xen_gnt_ops = {
+    .set_max_grefs = xen_set_max_grant_refs,
+    .map_grefs = xen_map_grant_refs,
+    .unmap_grefs = xen_unmap_grant_refs,
+    .copy_grefs = xen_copy_grant_refs,
+};
+
+void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
+                                   Error **errp)
+{
+    xen_gnt_ops.set_max_grefs(xendev, nr_refs, errp);
+}
+
+void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
+                                unsigned int nr_refs, int prot,
+                                Error **errp)
+{
+    return xen_gnt_ops.map_grefs(xendev, refs, nr_refs, prot, errp);
+}
+
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+                                 unsigned int nr_refs, Error **errp)
+{
+    xen_gnt_ops.unmap_grefs(xendev, map, nr_refs, errp);
+}
+
+void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
+                                XenDeviceGrantCopySegment segs[],
+                                unsigned int nr_segs, Error **errp)
+{
+    xen_gnt_ops.copy_grefs(xendev, to_domain, segs, nr_segs, errp);
+}
+
 struct XenEventChannel {
     evtchn_port_t local_port;
     XenEventHandler handler;
diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c
index dcc2e1ac72..322c1065d3 100644
--- a/hw/xen/xen-legacy-backend.c
+++ b/hw/xen/xen-legacy-backend.c
@@ -111,8 +111,8 @@ int xen_be_set_state(struct XenLegacyDevice *xendev, enum xenbus_state state)
     return 0;
 }
 
-void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
-                               unsigned int nr_refs)
+static void xen_set_max_grant_refs(struct XenLegacyDevice *xendev,
+                                   unsigned int nr_refs)
 {
     assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
 
@@ -122,8 +122,8 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
     }
 }
 
-void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
-                            unsigned int nr_refs, int prot)
+static void *xen_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
+                                unsigned int nr_refs, int prot)
 {
     void *ptr;
 
@@ -140,8 +140,8 @@ void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
     return ptr;
 }
 
-void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
-                             unsigned int nr_refs)
+static void xen_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
+                                 unsigned int nr_refs)
 {
     assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
 
@@ -200,10 +200,10 @@ static int compat_copy_grant_refs(struct XenLegacyDevice *xendev,
     return 0;
 }
 
-int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
-                           bool to_domain,
-                           XenGrantCopySegment segs[],
-                           unsigned int nr_segs)
+static int xen_copy_grant_refs(struct XenLegacyDevice *xendev,
+                               bool to_domain,
+                               XenGrantCopySegment segs[],
+                               unsigned int nr_segs)
 {
     xengnttab_grant_copy_segment_t *xengnttab_segs;
     unsigned int i;
@@ -261,6 +261,39 @@ int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
     return rc;
 }
 
+struct XenLegacyBackendOps xen_legacy_gnt_ops = {
+    .set_max_grefs = xen_set_max_grant_refs,
+    .map_grefs = xen_map_grant_refs,
+    .unmap_grefs = xen_unmap_grant_refs,
+    .copy_grefs = xen_copy_grant_refs,
+};
+
+void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
+                               unsigned int nr_refs)
+{
+    xen_legacy_gnt_ops.set_max_grefs(xendev, nr_refs);
+}
+
+void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
+                            unsigned int nr_refs, int prot)
+{
+    return xen_legacy_gnt_ops.map_grefs(xendev, refs, nr_refs, prot);
+}
+
+void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
+                             unsigned int nr_refs)
+{
+    xen_legacy_gnt_ops.unmap_grefs(xendev, ptr, nr_refs);
+}
+
+int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
+                           bool to_domain,
+                           XenGrantCopySegment segs[],
+                           unsigned int nr_segs)
+{
+    return xen_legacy_gnt_ops.copy_grefs(xendev, to_domain, segs, nr_segs);
+}
+
 /*
  * get xen backend device, allocate a new one if it doesn't exist.
 */
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
index 3183f10e3c..befd9fcb56 100644
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -134,4 +134,17 @@ void xen_device_unbind_event_channel(XenDevice *xendev,
                                      XenEventChannel *channel,
                                      Error **errp);
 
+struct XenBackendOps {
+    void (*set_max_grefs)(struct XenDevice *xendev, unsigned int nr_refs,
+                          Error **errp);
+    void * (*map_grefs)(struct XenDevice *xendev, uint32_t *refs,
+                        unsigned int nr_refs, int prot, Error **errp);
+    void (*unmap_grefs)(struct XenDevice *xendev, void *ptr,
+                        unsigned int nr_refs, Error **errp);
+    void (*copy_grefs)(struct XenDevice *xendev, bool to_domain,
+                       XenDeviceGrantCopySegment segs[], unsigned int nr_segs,
+                       Error **errp);
+};
+
+extern struct XenBackendOps xen_gnt_ops;
 #endif /* HW_XEN_BUS_H */
diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h
index 4e82ac963d..12a05e8fca 100644
--- a/include/hw/xen/xen-legacy-backend.h
+++ b/include/hw/xen/xen-legacy-backend.h
@@ -83,6 +83,18 @@ static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
     return xen_be_unmap_grant_refs(xendev, ptr, 1);
 }
 
+struct XenLegacyBackendOps {
+    void (*set_max_grefs)(struct XenLegacyDevice *xendev, unsigned int nr_refs);
+    void * (*map_grefs)(struct XenLegacyDevice *xendev, uint32_t *refs,
+                        unsigned int nr_refs, int prot);
+    void (*unmap_grefs)(struct XenLegacyDevice *xendev, void *ptr,
+                        unsigned int nr_refs);
+    int (*copy_grefs)(struct XenLegacyDevice *xendev, bool to_domain,
+                      XenGrantCopySegment segs[], unsigned int nr_segs);
+};
+
+extern struct XenLegacyBackendOps xen_legacy_gnt_ops;
+
 /* actual backend drivers */
 extern struct XenDevOps xen_console_ops;      /* xen_console.c */
 extern struct XenDevOps xen_kbdmouse_ops;     /* xen_framebuffer.c */
diff --git a/target/i386/trace-events b/target/i386/trace-events
index 4b1ef0f71a..41c48b1f3b 100644
--- a/target/i386/trace-events
+++ b/target/i386/trace-events
@@ -25,3 +25,6 @@ kvm_xen_evtchn_set(int flags, unsigned int port, int port_type) "flags 0x%x port
 kvm_xen_evtchn_send(int cpu, int dest, unsigned int port) "cpu %d notify_cpu %d port %u"
 kvm_xen_set_domid(unsigned int domid) "assigned with domid %u"
 kvm_xen_add_to_physmap(unsigned int space, unsigned int idx, uint64_t gfn) "add frame space %u idx %u gfn 0x%" PRIx64
+kvm_xen_copy_grant_refs(char *dev, uint32_t ref, uint16_t offset, void *virt, int prot) "gntcopy dev %s gnt %u:%u virt %p prot %d"
+kvm_xen_map_grant_refs(unsigned int ref, int prot, unsigned int nr_refs, uint32_t gpa, void *hva) "gntmap gnt %u prot %d nr_refs %u -> gfn 0x%x hva %p"
+kvm_xen_unmap_grant_refs(void *hva, unsigned int nr_refs) "gntunmap hva %p nr_refs %d"
diff --git a/target/i386/xen.c b/target/i386/xen.c
index 726c0dc28d..2cc1f1cbe4 100644
--- a/target/i386/xen.c
+++ b/target/i386/xen.c
@@ -53,6 +53,10 @@
 #define xen_special_pfn(x) \
     (X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES + (x))
 
+/* Grant v1 references per 4K page */
+#define GPP_V1 (TARGET_PAGE_SIZE / sizeof(struct grant_entry_v1))
+#define shared_entry(gt, ref) (&((gt)[(ref) / GPP_V1][(ref) % GPP_V1]))
+
 /*
  * Unhandled hypercalls error:
  *
@@ -131,6 +135,182 @@ static void arch_init_hypercall_page(CPUState *cs, void *addr)
     }
 }
 
+static void *gref_to_gnt(uint32_t ref)
+{
+    CPUState *cs = qemu_get_cpu(0);
+    XenState *xen = cs ? cs->xen_state : NULL;
+    XenGrantTable *gnttab = xen ? &xen->gnttab : NULL;
+    struct grant_entry_v1 *gnt = NULL;
+
+    if (!gnttab) {
+        return NULL;
+    }
+
+    gnt = shared_entry(gnttab->frames_v1, ref);
+    return gnt;
+}
+
+static void *gref_to_hva(uint32_t ref)
+{
+    struct grant_entry_v1 *gnt = gref_to_gnt(ref);
+    void *addr = gnt ? gpa_to_hva((hwaddr)gnt->frame << PAGE_SHIFT) : NULL;
+    return addr;
+}
+
+static void kvm_xen_set_max_grant_refs(unsigned int nr_refs)
+{
+}
+
+static void *kvm_xen_map_grant_refs(uint32_t *refs, unsigned int nr_refs,
+                                    int prot)
+{
+    struct grant_entry_v1 *gnt = NULL;
+    void *addr;
+
+    gnt = gref_to_gnt(*refs);
+    addr = gpa_to_hva((hwaddr)gnt->frame << PAGE_SHIFT);
+
+    trace_kvm_xen_map_grant_refs(*refs, prot, nr_refs, gnt->frame, addr);
+    return addr;
+}
+
+static void kvm_xen_unmap_grant_refs(void *ptr, unsigned int nr_refs)
+{
+    trace_kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static void xen_dev_set_max_grant_refs(struct XenDevice *xendev,
+                                       unsigned int nr_refs, Error **errp)
+{
+    kvm_xen_set_max_grant_refs(nr_refs);
+}
+
+static void *xen_dev_map_grant_refs(struct XenDevice *xendev, uint32_t *refs,
+                                    unsigned int nr_refs, int prot,
+                                    Error **errp)
+{
+    return kvm_xen_map_grant_refs(refs, nr_refs, prot);
+}
+
+static void xen_dev_unmap_grant_refs(struct XenDevice *xendev, void *ptr,
+                                     unsigned int nr_refs, Error **errp)
+{
+    kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static void xen_dev_copy_grant_refs(struct XenDevice *xendev,
+                                    bool to_domain,
+                                    XenDeviceGrantCopySegment segs[],
+                                    unsigned int nr_segs, Error **errp)
+{
+    int prot = to_domain ? PROT_WRITE : PROT_READ;
+    unsigned int i;
+
+    for (i = 0; i < nr_segs; i++) {
+        XenDeviceGrantCopySegment *seg = &segs[i];
+        uint32_t ref;
+        uint16_t offset;
+        void *page, *virt;
+
+        if (to_domain) {
+            ref = seg->dest.foreign.ref;
+            offset = seg->dest.foreign.offset;
+            virt = seg->source.virt;
+        } else {
+            ref = seg->source.foreign.ref;
+            offset = seg->source.foreign.offset;
+            virt = seg->dest.virt;
+        }
+
+        page = gref_to_hva(ref);
+        if (!page) {
+            return;
+        }
+
+        if (to_domain) {
+            memcpy(page + offset, virt, seg->len);
+        } else {
+            memcpy(virt, page + offset, seg->len);
+        }
+
+        trace_kvm_xen_copy_grant_refs(xendev->name, ref, offset, virt, prot);
+    }
+}
+
+static struct XenBackendOps xen_dev_ops = {
+    .set_max_grefs = xen_dev_set_max_grant_refs,
+    .map_grefs = xen_dev_map_grant_refs,
+    .unmap_grefs = xen_dev_unmap_grant_refs,
+    .copy_grefs = xen_dev_copy_grant_refs,
+};
+
+static void xen_legacy_dev_set_max_grant_refs(struct XenLegacyDevice *xendev,
+                                              unsigned int nr_refs)
+{
+    kvm_xen_set_max_grant_refs(nr_refs);
+}
+
+static void *xen_legacy_dev_map_grant_refs(struct XenLegacyDevice *xendev,
+                                           uint32_t *refs, unsigned int nr_refs,
+                                           int prot)
+{
+    return kvm_xen_map_grant_refs(refs, nr_refs, prot);
+}
+
+static void xen_legacy_dev_unmap_grant_refs(struct XenLegacyDevice *xendev,
+                                            void *ptr, unsigned int nr_refs)
+{
+    kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static int xen_legacy_dev_copy_grant_refs(struct XenLegacyDevice *xendev,
+                                          bool to_domain,
+                                          XenGrantCopySegment segs[],
+                                          unsigned int nr_segs)
+{
+    int prot = to_domain ? PROT_WRITE : PROT_READ;
+    unsigned int i;
+
+    for (i = 0; i < nr_segs; i++) {
+        XenGrantCopySegment *seg = &segs[i];
+        uint32_t ref;
+        uint16_t offset;
+        void *page, *virt;
+
+        if (to_domain) {
+            ref = seg->dest.foreign.ref;
+            offset = seg->dest.foreign.offset;
+            virt = seg->source.virt;
+        } else {
+            ref = seg->source.foreign.ref;
+            offset = seg->source.foreign.offset;
+            virt = seg->dest.virt;
+        }
+
+        page = gref_to_hva(ref);
+        if (!page) {
+            return -EINVAL;
+        }
+
+        if (to_domain) {
+            memcpy(page + offset, virt, seg->len);
+        } else {
+            memcpy(virt, page + offset, seg->len);
+        }
+
+        trace_kvm_xen_copy_grant_refs(xendev->name, ref, offset, virt, prot);
+    }
+
+    return 0;
+}
+
+static struct XenLegacyBackendOps xen_legacy_dev_ops = {
+    .set_max_grefs = xen_legacy_dev_set_max_grant_refs,
+    .map_grefs = xen_legacy_dev_map_grant_refs,
+    .unmap_grefs = xen_legacy_dev_unmap_grant_refs,
+    .copy_grefs = xen_legacy_dev_copy_grant_refs,
+};
+
 int kvm_xen_set_hypercall_page(CPUState *env)
 {
     struct kvm_xen_hvm_config cfg;
@@ -178,6 +358,9 @@ void kvm_xen_init(XenState *xen)
     qemu_add_exit_notifier(&xen->exit);
 
     kvm_xen_evtchn_init(xen);
+
+    xen_legacy_gnt_ops = xen_legacy_dev_ops;
+    xen_gnt_ops = xen_dev_ops;
 }
 
 void kvm_xen_machine_init(void)
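
As an aside on the hwaddr casts mentioned above, here is a minimal
standalone C sketch (not part of the patch) of the truncation that a
32-bit frame number suffers when shifted by 12 before being widened;
it assumes hwaddr is a 64-bit type, as in QEMU, and 4K pages:

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t hwaddr;   /* assumption: matches QEMU's 64-bit hwaddr */
#define PAGE_SHIFT 12      /* 4K pages, as in the grant table code */

int main(void)
{
    uint32_t frame = 0x100000;  /* frame >= 2^20, i.e. a gpa at or above 4GiB */

    /* Shift performed in 32 bits: the result wraps to 0 before widening. */
    hwaddr truncated = frame << PAGE_SHIFT;

    /* Widen first, as the patch does with (hwaddr)gnt->frame << PAGE_SHIFT. */
    hwaddr correct = (hwaddr)frame << PAGE_SHIFT;

    printf("truncated 0x%" PRIx64 " correct 0x%" PRIx64 "\n",
           truncated, correct);
    return 0;
}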