From 18d34eabd73a3fbe90efb551259d4def0c12a2d5 Mon Sep 17 00:00:00 2001
From: Joao Martins
Date: Fri, 12 May 2017 09:46:40 +0100
Subject: [PATCH] xen-netback: use gref mappings for Rx requests

First look up the frontend gref mapping table to see whether the
requested gref is already mapped with the right permissions. If so,
use the mapped page instead of a grant copy.

Measured with pktgen (pkt_size 64, burst 1), this achieves 2.04 Mpps
with already-mapped grants versus roughly half of that with grant
copy.

Fundamentally it works in the same way as grant copy; it just avoids
asking Xen to copy the page, and hence opens room for other
improvements. For example, with mapped grefs contention on queue->wq
increases, as kthread_guest_rx goes to sleep more often. We could
alternatively copy the skb in xenvif_start_xmit() instead of going
through the RX kthread, but that would only be beneficial if the
guest used *only* mapped grants (through copying or recycling
mechanisms); otherwise it would add the cost of a grant copy
hypercall per packet.

Signed-off-by: Joao Martins
Reviewed-by: Shannon Nelson
Acked-by: Konrad Rzeszutek Wilk
Orabug: 26107942
---
 drivers/net/xen-netback/rx.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index 5e2497b146a5..f458a23f7c43 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -135,7 +135,8 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 	unsigned int i;
 	int notify;
 
-	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
+	if (queue->rx_copy.num)
+		gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
 
 	for (i = 0; i < queue->rx_copy.num; i++) {
 		struct gnttab_copy *op;
@@ -165,12 +166,18 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
+			       struct xenvif_grant *grant,
 			       struct xen_netif_rx_request *req,
 			       unsigned int offset, void *data, size_t len)
 {
+	struct xen_page_foreign *foreign;
 	struct gnttab_copy *op;
 	struct page *page;
-	struct xen_page_foreign *foreign;
+
+	if (likely(grant && !(grant->flags & GNTMAP_readonly))) {
+		memcpy(page_address(grant->page) + offset, data, len);
+		return;
+	}
 
 	if (queue->rx_copy.num == COPY_BATCH_SIZE)
 		xenvif_rx_copy_flush(queue);
@@ -329,21 +336,27 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
 				struct xen_netif_rx_request *req,
 				struct xen_netif_rx_response *rsp)
 {
+	struct xenvif_grant *grant;
 	unsigned int offset = 0;
 	unsigned int flags;
 
+	grant = xenvif_get_grant(queue, req->gref);
+
 	do {
 		size_t len;
 		void *data;
 
 		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
-		xenvif_rx_copy_add(queue, req, offset, data, len);
+		xenvif_rx_copy_add(queue, grant, req, offset, data, len);
 
 		offset += len;
 		pkt->remaining_len -= len;
 
 	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
 
+	if (grant)
+		xenvif_put_grant(queue, grant);
+
 	if (pkt->remaining_len > 0)
 		flags = XEN_NETRXF_more_data;
 	else
-- 
2.50.1
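
[Note, not part of the patch: a minimal standalone C sketch of the fast
path this patch adds to xenvif_rx_copy_add(). All names below
(fake_grant, fake_queue, queue_grant_copy) are simplified userspace
stand-ins for the kernel's xenvif_grant/xenvif_queue and the gnttab
batching machinery; they exist only to illustrate the "memcpy into the
already-mapped writable page, else queue a batched grant copy" split.]

/*
 * Userspace sketch, not kernel code. Compile with: cc -o rxpath rxpath.c
 */
#include <stdio.h>
#include <string.h>

#define GNTMAP_readonly 0x1   /* stand-in for the Xen mapping flag */
#define COPY_BATCH_SIZE 64

struct fake_grant {            /* stand-in for struct xenvif_grant */
	unsigned int flags;
	unsigned char page[4096]; /* stand-in for the mapped grant page */
};

struct fake_queue {            /* stand-in for struct xenvif_queue */
	unsigned int rx_copy_num;
};

static void queue_grant_copy(struct fake_queue *queue,
			     const void *data, size_t len)
{
	/* The real code builds a struct gnttab_copy here and flushes
	 * the batch via gnttab_batch_copy() once it is full. */
	if (queue->rx_copy_num == COPY_BATCH_SIZE)
		queue->rx_copy_num = 0;	/* "flush" the batch */
	queue->rx_copy_num++;
	printf("queued grant copy of %zu bytes (batch now %u)\n",
	       len, queue->rx_copy_num);
}

static void rx_copy_add(struct fake_queue *queue, struct fake_grant *grant,
			unsigned int offset, const void *data, size_t len)
{
	/* Fast path: gref already mapped and writable, so a plain
	 * memcpy suffices and no hypercall is needed for this chunk. */
	if (grant && !(grant->flags & GNTMAP_readonly)) {
		memcpy(grant->page + offset, data, len);
		return;
	}
	/* Slow path: fall back to the batched grant-copy machinery. */
	queue_grant_copy(queue, data, len);
}

int main(void)
{
	struct fake_queue queue = { 0 };
	struct fake_grant writable = { .flags = 0 };
	struct fake_grant readonly = { .flags = GNTMAP_readonly };
	const char payload[] = "packet data";

	rx_copy_add(&queue, &writable, 0, payload, sizeof(payload)); /* memcpy */
	rx_copy_add(&queue, &readonly, 0, payload, sizeof(payload)); /* copy op */
	rx_copy_add(&queue, NULL, 0, payload, sizeof(payload));      /* copy op */
	return 0;
}

The GNTMAP_readonly check mirrors the patch: a read-only mapping cannot
back an Rx (backend-to-guest) write, so such grefs still take the grant
copy hypercall path.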