ixgbe: avoid premature Rx buffer reuse
author    Björn Töpel <bjorn.topel@intel.com>
          Tue, 25 Aug 2020 17:27:35 +0000 (19:27 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 30 Dec 2020 10:25:45 +0000 (11:25 +0100)
[ Upstream commit a06316dc87bdc000f7f39a315476957af2ba0f05 ]

The page recycle code incorrectly relied on a page fragment not being
freed inside xdp_do_redirect(). This assumption can lead to page
fragments that are still in use by the stack/XDP redirect path being
reused and overwritten.

To avoid this, store the page count prior to invoking xdp_do_redirect().
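
For illustration, here is a minimal, self-contained sketch of the
pattern (hypothetical names: struct buf, redirect_frame() and rx_path()
stand in for the real page and ixgbe structures; this is not the driver
code itself):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct buf {
          atomic_int refcount;  /* stands in for the page reference count */
          int pagecnt_bias;     /* references the driver holds itself */
  };

  /* Stand-in for xdp_do_redirect(): may hand the fragment to another
   * context, which can drop a reference at any point afterwards. */
  static void redirect_frame(struct buf *b)
  {
          (void)b;
  }

  /* Reuse is safe only if the snapshot taken before the redirect shows
   * the driver as the sole owner; a reference freed concurrently inside
   * the redirect path can no longer make the buffer look exclusive. */
  static bool can_reuse(const struct buf *b, int refcount_snapshot)
  {
          return (refcount_snapshot - b->pagecnt_bias) <= 1;
  }

  static void rx_path(struct buf *b)
  {
          /* The fix: read the count BEFORE the call that may free it. */
          int snapshot = atomic_load(&b->refcount);

          redirect_frame(b);

          if (can_reuse(b, snapshot))
                  printf("reuse buffer\n");   /* hand back to the ring */
          else
                  printf("release buffer\n"); /* give back to allocator */
  }

  int main(void)
  {
          struct buf b = { .refcount = 2, .pagecnt_bias = 1 };

          rx_path(&b);
          return 0;
  }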

Fixes: 6453073987ba ("ixgbe: add initial support for xdp redirect")
Reported-and-analyzed-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4243ff4ec4b1d28303182b380a09821a4d4e4a57..faee77fa0804411df6ba104c9f16d62d5ea27d22 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1943,7 +1943,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+                                   int rx_buffer_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
@@ -1954,7 +1955,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
        /* The last offset is a bit aggressive in that we assume the
@@ -2019,11 +2020,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
                                                   union ixgbe_adv_rx_desc *rx_desc,
                                                   struct sk_buff **skb,
-                                                  const unsigned int size)
+                                                  const unsigned int size,
+                                                  int *rx_buffer_pgcnt)
 {
        struct ixgbe_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetchw(rx_buffer->page);
        *skb = rx_buffer->skb;
 
@@ -2053,9 +2061,10 @@ skip_sync:
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *rx_buffer,
-                               struct sk_buff *skb)
+                               struct sk_buff *skb,
+                               int rx_buffer_pgcnt)
 {
-       if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+       if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                /* hand second half of page back to the ring */
                ixgbe_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -2299,6 +2308,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *rx_buffer;
                struct sk_buff *skb;
+               int rx_buffer_pgcnt;
                unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -2318,7 +2328,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                dma_rmb();
 
-               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size,
+                                               &rx_buffer_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
@@ -2360,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        break;
                }
 
-               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
                cleaned_count++;
 
                /* place incomplete frames back on ring for completion */