return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
- struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct net_device *dev,
struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
skb->protocol = eth_type_trans(skb, dev);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- u64_stats_update_end(&rx->statss);
-
return skb;
}
*(slot_addr) ^= offset;
}
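+/* Buffer flipping uses the two halves of a page as alternating packet
+ * buffers. It is only possible with 4K pages and only if a full frame
+ * (MTU plus pad and Ethernet header) fits in half a page.
+ */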
+static bool gve_rx_can_flip_buffers(struct net_device *netdev)
+{
+ return PAGE_SIZE == 4096
+ ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
+}
+
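+/* Returns 1 if no SKB is using the page and it can be recycled, 0 if an
+ * SKB still holds a reference, or a negative value if the refcount is
+ * broken.
+ */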
+static int gve_rx_can_recycle_buffer(struct page *page)
+{
+ int pagecount = page_count(page);
+
+ /* This page is not being used by any SKBs - reuse */
+ if (pagecount == 1)
+ return 1;
+ /* This page is still being used by an SKB - we can't reuse */
+ else if (pagecount >= 2)
+ return 0;
+ WARN(pagecount < 1, "Pagecount should never be < 1");
+ return -1;
+}
+
+static struct sk_buff *
+gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
+ struct gve_rx_slot_page_info *page_info, u16 len,
+ struct napi_struct *napi,
+ union gve_rx_data_slot *data_slot)
+{
+ struct sk_buff *skb;
+
+ skb = gve_rx_add_frags(napi, page_info, len);
+ if (!skb)
+ return NULL;
+
+ /* Optimistically stop the kernel from freeing the page by increasing
+ * the page bias. We will check the refcount in refill to determine if
+ * we need to alloc a new page.
+ */
+ get_page(page_info->page);
+
+ return skb;
+}
+
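+/* QPL mode: the device can only receive into pages from its registered
+ * queue page list, so either attach the buffer as a frag and flip the
+ * slot to the other half of the page, or copy the packet out so the
+ * buffer can be reposted as-is.
+ */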
+static struct sk_buff *
+gve_rx_qpl(struct device *dev, struct net_device *netdev,
+ struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
+ u16 len, struct napi_struct *napi,
+ union gve_rx_data_slot *data_slot)
+{
+ struct sk_buff *skb;
+
+ /* If raw_addressing mode is not enabled, gvnic can only receive into
+ * registered segments. If the buffer can't be recycled, our only
+ * choice is to copy the data out of it so that we can return it to the
+ * device.
+ */
+ if (page_info->can_flip) {
+ skb = gve_rx_add_frags(napi, page_info, len);
+ /* No point in recycling if we didn't get the skb */
+ if (skb) {
+ /* Make sure that the page isn't freed. */
+ get_page(page_info->page);
+ gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
+ }
+ } else {
+ skb = gve_rx_copy(netdev, napi, page_info, len);
+ if (skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ }
+ return skb;
+}
+
static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
netdev_features_t feat, u32 idx)
{
union gve_rx_data_slot *data_slot;
struct sk_buff *skb = NULL;
dma_addr_t page_bus;
- int pagecount;
u16 len;
/* drop this packet */
dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (PAGE_SIZE == 4096) {
- if (len <= priv->rx_copybreak) {
- /* Just copy small packets */
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copybreak_pkt++;
- u64_stats_update_end(&rx->statss);
- goto have_skb;
- }
- if (rx->data.raw_addressing) {
- skb = gve_rx_add_frags(napi, page_info, len);
- goto have_skb;
- }
- if (unlikely(!gve_can_recycle_pages(dev))) {
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
- goto have_skb;
- }
- pagecount = page_count(page_info->page);
- if (pagecount == 1) {
- /* No part of this page is used by any SKBs; we attach
- * the page fragment to a new SKB and pass it up the
- * stack.
- */
- skb = gve_rx_add_frags(napi, page_info, len);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
+ if (len <= priv->rx_copybreak) {
+ /* Just copy small packets */
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
+ } else {
+ u8 can_flip = gve_rx_can_flip_buffers(dev);
+ int recycle = 0;
+
+ if (can_flip) {
+ recycle = gve_rx_can_recycle_buffer(page_info->page);
+ if (recycle < 0) {
+ if (!rx->data.raw_addressing)
+ gve_schedule_reset(priv);
return false;
}
- /* Make sure the kernel stack can't release the page */
- get_page(page_info->page);
- /* "flip" to other packet buffer on this page */
- gve_rx_flip_buff(page_info, &rx->data.data_ring[idx].qpl_offset);
- } else if (pagecount >= 2) {
- /* We have previously passed the other half of this
- * page up the stack, but it has not yet been freed.
- */
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
+ }
+
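+ /* Record whether this slot may flip to the other half of its page
+ * at refill time: a frame must fit in half a page and no SKB may
+ * still be using the page.
+ */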
+ page_info->can_flip = can_flip && recycle;
+ if (rx->data.raw_addressing) {
+ skb = gve_rx_raw_addressing(&priv->pdev->dev, dev,
+ page_info, len, napi,
+ data_slot);
} else {
- WARN(pagecount < 1, "Pagecount should never be < 1");
- return false;
+ skb = gve_rx_qpl(&priv->pdev->dev, dev, rx,
+ page_info, len, napi, data_slot);
}
- } else {
- if (rx->data.raw_addressing)
- skb = gve_rx_add_frags(napi, page_info, len);
- else
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
}
-have_skb:
- /* We didn't manage to allocate an skb but we haven't had any
- * reset worthy failures.
- */
if (!skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_skb_alloc_fail++;
while (fill_cnt - rx->cnt < refill_target) {
struct gve_rx_slot_page_info *page_info;
- struct device *dev = &priv->pdev->dev;
- union gve_rx_data_slot *data_slot;
u32 idx = fill_cnt & rx->mask;
page_info = &rx->data.page_info[idx];
- data_slot = &rx->data.data_ring[idx];
- gve_rx_free_buffer(dev, page_info, data_slot);
- page_info->page = NULL;
- if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- break;
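+ /* Prefer to reuse the page already posted in this slot; only
+ * allocate a fresh page when an SKB still holds a reference to it.
+ */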
+ if (page_info->can_flip) {
+ /* The other half of the page is still free: it was free when
+ * the descriptor was processed (that is why can_flip was set)
+ * and nothing has used it since. Flip to it.
+ */
+ union gve_rx_data_slot *data_slot =
+ &rx->data.data_ring[idx];
+
+ gve_rx_flip_buff(page_info, &data_slot->addr);
+ page_info->can_flip = 0;
+ } else {
+ /* It is possible that the networking stack has already
+ * finished processing all outstanding packets in the buffer
+ * and the page can be reused as-is. Flipping is unnecessary
+ * here: if the networking stack still owns half the page it
+ * is impossible to tell which half, so either the whole page
+ * is free or it needs to be replaced.
+ */
+ int recycle = gve_rx_can_recycle_buffer(page_info->page);
+
+ if (recycle < 0) {
+ if (!rx->data.raw_addressing)
+ gve_schedule_reset(priv);
+ return false;
+ }
+ if (!recycle) {
+ /* We can't reuse the buffer - alloc a new one */
+ union gve_rx_data_slot *data_slot =
+ &rx->data.data_ring[idx];
+ struct device *dev = &priv->pdev->dev;
+
+ gve_rx_free_buffer(dev, page_info, data_slot);
+ page_info->page = NULL;
+ if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ break;
+ }
}
fill_cnt++;
}