octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx()
author    Aleksandr Mishin <amishin@t-argos.ru>
          Thu, 17 Oct 2024 10:06:51 +0000 (13:06 +0300)
committer Andrew Lunn <andrew@lunn.ch>
          Sat, 19 Oct 2024 21:20:07 +0000 (16:20 -0500)
build_skb() returns NULL in case of a memory allocation failure, so handle
it inside __octep_oq_process_rx() to avoid a NULL pointer dereference.

__octep_oq_process_rx() is called during NAPI polling by the driver. If
skb allocation fails, keep on pulling packets out of the Rx DMA queue: we
shouldn't break the polling immediately and thus falsely indicate to
octep_napi_poll() that the Rx pressure is going down. As there is no
associated skb in this case, don't process the packets and don't push them
up the network stack; they are simply skipped.
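
As context, here is a minimal sketch (not octeon_ep's actual poll routine;
example_process_rx() and example_enable_irq() are hypothetical stand-ins)
of the generic NAPI contract the paragraph above relies on: poll() reports
how much work it did, and NAPI re-enables interrupts only when that is
less than the budget, so bailing out of the Rx loop early would misreport
the queue state.

#include <linux/netdevice.h>

/* Hypothetical stand-ins for the driver's own Rx processing and IRQ control. */
static int example_process_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* a real driver drains up to 'budget' packets here */
}

static void example_enable_irq(struct napi_struct *napi)
{
}

/* Generic NAPI poll skeleton: returning less than 'budget' tells NAPI the
 * Rx pressure went down and lets it re-arm interrupts, which is why a
 * failed skb allocation must not cut the Rx loop short.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_process_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irq(napi);

	return work_done;
}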

A helper function is implemented to unmap/flush all the fragment buffers
used by the dropped packet. The 'alloc_failures' counter is incremented to
record the skb allocation failure in the driver statistics.
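
To make the fragment walk in octep_oq_drop_rx() (added below) concrete,
here is a small stand-alone sketch of the arithmetic; the buffer sizes are
made up purely for illustration, the real values come from the queue
configuration.

#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only; not the driver's real ones. */
	int buffer_size = 2048;			/* bytes per extra fragment buffer */
	int max_single_buffer_size = 2040;	/* payload that fits in the first buffer */
	int pkt_len = 5000;			/* example dropped packet length */
	int data_len, extra_frags = 0;

	/* Mirrors the loop in octep_oq_drop_rx(): one step per extra fragment
	 * descriptor that must still be consumed from the Rx ring.
	 */
	for (data_len = pkt_len - max_single_buffer_size; data_len > 0;
	     data_len -= buffer_size)
		extra_frags++;

	printf("extra fragment descriptors to skip: %d\n", extra_frags);
	return 0;
}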

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support")
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c

index a889c1510518f33e761c0cbc14caa3e6481bc396..8af75cb37c3ee8e6a0e0d85fde4f9a9dc14442fd 100644
@@ -360,6 +360,27 @@ static void octep_oq_next_pkt(struct octep_oq *oq,
                *read_idx = 0;
 }
 
+/**
+ * octep_oq_drop_rx() - Free the resources associated with a packet.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ */
+static void octep_oq_drop_rx(struct octep_oq *oq,
+                            struct octep_rx_buffer *buff_info,
+                            u32 *read_idx, u32 *desc_used)
+{
+       int data_len = buff_info->len - oq->max_single_buffer_size;
+
+       while (data_len > 0) {
+               octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
+               data_len -= oq->buffer_size;
+       }
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
@@ -419,6 +440,12 @@ static int __octep_oq_process_rx(struct octep_device *oct,
                octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
 
                skb = build_skb((void *)resp_hw, PAGE_SIZE);
+               if (!skb) {
+                       octep_oq_drop_rx(oq, buff_info,
+                                        &read_idx, &desc_used);
+                       oq->stats.alloc_failures++;
+                       continue;
+               }
                skb_reserve(skb, data_offset);
 
                rx_bytes += buff_info->len;