}
}
-void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
+static void xen_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
Error **errp)
{
if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) {
}
}
-void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
+static void *xen_map_grant_refs(XenDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot,
Error **errp)
{
return map;
}
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+static void xen_unmap_grant_refs(XenDevice *xendev, void *map,
unsigned int nr_refs, Error **errp)
{
if (xengnttab_unmap(xendev->xgth, map, nr_refs)) {
g_free(refs);
}
-void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
+static void xen_copy_grant_refs(XenDevice *xendev, bool to_domain,
XenDeviceGrantCopySegment segs[],
unsigned int nr_segs, Error **errp)
{
g_free(xengnttab_segs);
}
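+/*
+ * Default grant-table operations, backed by libxengnttab.  Kept in a
+ * mutable ops table so an alternative implementation (e.g. the KVM-based
+ * Xen emulation) can replace it at initialization time.
+ */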
+struct XenBackendOps xen_gnt_ops = {
+ .set_max_grefs = xen_set_max_grant_refs,
+ .map_grefs = xen_map_grant_refs,
+ .unmap_grefs = xen_unmap_grant_refs,
+ .copy_grefs = xen_copy_grant_refs,
+};
+
+void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
+ Error **errp)
+{
+ xen_gnt_ops.set_max_grefs(xendev, nr_refs, errp);
+}
+
+void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot,
+ Error **errp)
+{
+ return xen_gnt_ops.map_grefs(xendev, refs, nr_refs, prot, errp);
+}
+
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+ unsigned int nr_refs, Error **errp)
+{
+ xen_gnt_ops.unmap_grefs(xendev, map, nr_refs, errp);
+}
+
+void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
+ XenDeviceGrantCopySegment segs[],
+ unsigned int nr_segs, Error **errp)
+{
+ xen_gnt_ops.copy_grefs(xendev, to_domain, segs, nr_segs, errp);
+}
+
struct XenEventChannel {
evtchn_port_t local_port;
XenEventHandler handler;
return 0;
}
-void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
- unsigned int nr_refs)
+static void xen_set_max_grant_refs(struct XenLegacyDevice *xendev,
+ unsigned int nr_refs)
{
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
}
}
-void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
- unsigned int nr_refs, int prot)
+static void *xen_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot)
{
void *ptr;
return ptr;
}
-void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
- unsigned int nr_refs)
+static void xen_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
+ unsigned int nr_refs)
{
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
return 0;
}
-int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
- bool to_domain,
- XenGrantCopySegment segs[],
- unsigned int nr_segs)
+static int xen_copy_grant_refs(struct XenLegacyDevice *xendev,
+ bool to_domain,
+ XenGrantCopySegment segs[],
+ unsigned int nr_segs)
{
xengnttab_grant_copy_segment_t *xengnttab_segs;
unsigned int i;
return rc;
}
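+/* Default grant-table ops for legacy backends; replaceable at init time. */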
+struct XenLegacyBackendOps xen_legacy_gnt_ops = {
+ .set_max_grefs = xen_set_max_grant_refs,
+ .map_grefs = xen_map_grant_refs,
+ .unmap_grefs = xen_unmap_grant_refs,
+ .copy_grefs = xen_copy_grant_refs,
+};
+
+void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
+ unsigned int nr_refs)
+{
+ xen_legacy_gnt_ops.set_max_grefs(xendev, nr_refs);
+}
+
+void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot)
+{
+ return xen_legacy_gnt_ops.map_grefs(xendev, refs, nr_refs, prot);
+}
+
+void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
+ unsigned int nr_refs)
+{
+ xen_legacy_gnt_ops.unmap_grefs(xendev, ptr, nr_refs);
+}
+
+int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
+ bool to_domain,
+ XenGrantCopySegment segs[],
+ unsigned int nr_segs)
+{
+ return xen_legacy_gnt_ops.copy_grefs(xendev, to_domain, segs, nr_segs);
+}
+
/*
* get xen backend device, allocate a new one if it doesn't exist.
*/
XenEventChannel *channel,
Error **errp);
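+/*
+ * Grant-table operations dispatched by xen_device_*_grant_refs().  The
+ * active table is xen_gnt_ops; an alternative backend may overwrite it
+ * before devices start mapping grants.
+ */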
+struct XenBackendOps {
+ void (*set_max_grefs)(struct XenDevice *xendev, unsigned int nr_refs,
+ Error **errp);
+ void * (*map_grefs)(struct XenDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot, Error **errp);
+ void (*unmap_grefs)(struct XenDevice *xendev, void *ptr,
+ unsigned int nr_refs, Error **errp);
+ void (*copy_grefs)(struct XenDevice *xendev, bool to_domain,
+ XenDeviceGrantCopySegment segs[], unsigned int nr_segs,
+ Error **errp);
+};
+
+extern struct XenBackendOps xen_gnt_ops;
#endif /* HW_XEN_BUS_H */
return xen_be_unmap_grant_refs(xendev, ptr, 1);
}
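+/* Grant-table operations dispatched by xen_be_*_grant_refs(). */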
+struct XenLegacyBackendOps {
+ void (*set_max_grefs)(struct XenLegacyDevice *xendev, unsigned int nr_refs);
+ void * (*map_grefs)(struct XenLegacyDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot);
+ void (*unmap_grefs)(struct XenLegacyDevice *xendev, void *ptr,
+ unsigned int nr_refs);
+ int (*copy_grefs)(struct XenLegacyDevice *xendev, bool to_domain,
+ XenGrantCopySegment segs[], unsigned int nr_segs);
+};
+
+extern struct XenLegacyBackendOps xen_legacy_gnt_ops;
+
/* actual backend drivers */
extern struct XenDevOps xen_console_ops; /* xen_console.c */
extern struct XenDevOps xen_kbdmouse_ops; /* xen_framebuffer.c */
kvm_xen_evtchn_send(int cpu, int dest, unsigned int port) "cpu %d notify_cpu %d port %u"
kvm_xen_set_domid(unsigned int domid) "assigned with domid %u"
kvm_xen_add_to_physmap(unsigned int space, unsigned int idx, uint64_t gfn) "add frame space %u idx %u gfn 0x%" PRIx64
+kvm_xen_copy_grant_refs(const char *dev, uint32_t ref, uint16_t offset, void *virt, int prot) "gntcopy dev %s gnt %u:%u virt %p prot %d"
+kvm_xen_map_grant_refs(unsigned int ref, int prot, unsigned int nr_refs, uint32_t gfn, void *hva) "gntmap gnt %u prot %d nr_refs %u -> gfn 0x%x hva %p"
+kvm_xen_unmap_grant_refs(void *hva, unsigned int nr_refs) "gntunmap hva %p nr_refs %u"
#define xen_special_pfn(x) \
(X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES + (x))
+/* Grant v1 references per 4K page */
+#define GPP_V1 (TARGET_PAGE_SIZE / sizeof(struct grant_entry_v1))
+#define shared_entry(gt, ref) (&((gt)[(ref) / GPP_V1][(ref) % GPP_V1]))
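+/* e.g. with 4 KiB pages and 8-byte v1 entries, GPP_V1 is 512, so grant
+ * ref 513 resolves to frames_v1[1], slot 1. */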
+
/*
* Unhandled hypercalls error:
*
}
}
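+/*
+ * Resolve a grant reference to its shared grant_entry_v1 within the
+ * guest's grant-table frames, held in vCPU 0's XenState.
+ */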
+static void *gref_to_gnt(uint32_t ref)
+{
+ CPUState *cs = qemu_get_cpu(0);
+ XenState *xen = cs ? cs->xen_state : NULL;
+ XenGrantTable *gnttab = xen ? &xen->gnttab : NULL;
+ struct grant_entry_v1 *gnt = NULL;
+
+ if (!gnttab) {
+ return NULL;
+ }
+
+ gnt = shared_entry(gnttab->frames_v1, ref);
+ return gnt;
+}
+
+static void *gref_to_hva(uint32_t ref)
+{
+ struct grant_entry_v1 *gnt = gref_to_gnt(ref);
+ void *addr = gnt ? gpa_to_hva((hwaddr)gnt->frame << PAGE_SHIFT) : NULL;
+ return addr;
+}
+
+static void kvm_xen_set_max_grant_refs(unsigned int nr_refs)
+{
+    /* Nothing to do: grant refs are resolved straight from guest memory,
+     * so there is no gnttab device handle to configure. */
+}
+
+static void *kvm_xen_map_grant_refs(uint32_t *refs, unsigned int nr_refs,
+ int prot)
+{
+ struct grant_entry_v1 *gnt = NULL;
+ void *addr;
+
+    /* Only the first grant ref is translated to a host address here. */
+    gnt = gref_to_gnt(*refs);
+    if (!gnt) {
+        return NULL;
+    }
+    addr = gpa_to_hva((hwaddr)gnt->frame << PAGE_SHIFT);
+
+ trace_kvm_xen_map_grant_refs(*refs, prot, nr_refs, gnt->frame, addr);
+ return addr;
+}
+
+static void kvm_xen_unmap_grant_refs(void *ptr, unsigned int nr_refs)
+{
+    /* Nothing to unmap: the mapping aliases guest RAM directly. */
+    trace_kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static void xen_dev_set_max_grant_refs(struct XenDevice *xendev,
+ unsigned int nr_refs, Error **errp)
+{
+ kvm_xen_set_max_grant_refs(nr_refs);
+}
+
+static void *xen_dev_map_grant_refs(struct XenDevice *xendev, uint32_t *refs,
+ unsigned int nr_refs, int prot,
+ Error **errp)
+{
+ return kvm_xen_map_grant_refs(refs, nr_refs, prot);
+}
+
+static void xen_dev_unmap_grant_refs(struct XenDevice *xendev, void *ptr,
+ unsigned int nr_refs, Error **errp)
+{
+ kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static void xen_dev_copy_grant_refs(struct XenDevice *xendev,
+ bool to_domain,
+ XenDeviceGrantCopySegment segs[],
+ unsigned int nr_segs, Error **errp)
+{
+ int prot = to_domain ? PROT_WRITE : PROT_READ;
+ unsigned int i;
+
+ for (i = 0; i < nr_segs; i++) {
+ XenDeviceGrantCopySegment *seg = &segs[i];
+ uint32_t ref;
+ uint16_t offset;
+ void *page, *virt;
+
+ if (to_domain) {
+ ref = seg->dest.foreign.ref;
+ offset = seg->dest.foreign.offset;
+ virt = seg->source.virt;
+ } else {
+ ref = seg->source.foreign.ref;
+ offset = seg->source.foreign.offset;
+ virt = seg->dest.virt;
+ }
+
+        page = gref_to_hva(ref);
+        if (!page) {
+            error_setg(errp, "failed to resolve grant ref %u", ref);
+            return;
+        }
+
+ if (to_domain) {
+ memcpy(page + offset, virt, seg->len);
+ } else {
+ memcpy(virt, page + offset, seg->len);
+ }
+
+ trace_kvm_xen_copy_grant_refs(xendev->name, ref, offset, virt, prot);
+ }
+}
+
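+/* Replaces the default xen_gnt_ops once KVM-based Xen support is set up. */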
+static struct XenBackendOps xen_dev_ops = {
+ .set_max_grefs = xen_dev_set_max_grant_refs,
+ .map_grefs = xen_dev_map_grant_refs,
+ .unmap_grefs = xen_dev_unmap_grant_refs,
+ .copy_grefs = xen_dev_copy_grant_refs,
+};
+
+static void xen_legacy_dev_set_max_grant_refs(struct XenLegacyDevice *xendev,
+ unsigned int nr_refs)
+{
+ kvm_xen_set_max_grant_refs(nr_refs);
+}
+
+static void *xen_legacy_dev_map_grant_refs(struct XenLegacyDevice *xendev,
+ uint32_t *refs, unsigned int nr_refs,
+ int prot)
+{
+ return kvm_xen_map_grant_refs(refs, nr_refs, prot);
+}
+
+static void xen_legacy_dev_unmap_grant_refs(struct XenLegacyDevice *xendev,
+ void *ptr, unsigned int nr_refs)
+{
+ kvm_xen_unmap_grant_refs(ptr, nr_refs);
+}
+
+static int xen_legacy_dev_copy_grant_refs(struct XenLegacyDevice *xendev,
+ bool to_domain,
+ XenGrantCopySegment segs[],
+ unsigned int nr_segs)
+{
+ int prot = to_domain ? PROT_WRITE : PROT_READ;
+ unsigned int i;
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+ uint32_t ref;
+ uint16_t offset;
+ void *page, *virt;
+
+ if (to_domain) {
+ ref = seg->dest.foreign.ref;
+ offset = seg->dest.foreign.offset;
+ virt = seg->source.virt;
+ } else {
+ ref = seg->source.foreign.ref;
+ offset = seg->source.foreign.offset;
+ virt = seg->dest.virt;
+ }
+
+ page = gref_to_hva(ref);
+ if (!page) {
+ return -EINVAL;
+ }
+
+ if (to_domain) {
+ memcpy(page + offset, virt, seg->len);
+ } else {
+ memcpy(virt, page + offset, seg->len);
+ }
+
+ trace_kvm_xen_copy_grant_refs(xendev->name, ref, offset, virt, prot);
+ }
+
+ return 0;
+}
+
+static struct XenLegacyBackendOps xen_legacy_dev_ops = {
+ .set_max_grefs = xen_legacy_dev_set_max_grant_refs,
+ .map_grefs = xen_legacy_dev_map_grant_refs,
+ .unmap_grefs = xen_legacy_dev_unmap_grant_refs,
+ .copy_grefs = xen_legacy_dev_copy_grant_refs,
+};
+
int kvm_xen_set_hypercall_page(CPUState *env)
{
struct kvm_xen_hvm_config cfg;
qemu_add_exit_notifier(&xen->exit);
kvm_xen_evtchn_init(xen);
+
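+    /* Route grant-table accesses through the KVM Xen emulation ops. */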
+ xen_legacy_gnt_ops = xen_legacy_dev_ops;
+ xen_gnt_ops = xen_dev_ops;
}
void kvm_xen_machine_init(void)