www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
ixgbe: Let the Rx buffer allocation clear status bits instead of cleanup
author: Joe Jin <joe.jin@oracle.com>
Thu, 17 May 2012 14:55:03 +0000 (22:55 +0800)
committer: Joe Jin <joe.jin@oracle.com>
Thu, 17 May 2012 14:55:03 +0000 (22:55 +0800)
This change makes it so that we always clear the status/error bits in the
Rx descriptor in the allocation path instead of the cleanup path.  The
advantage to this is that we spend less time modifying data.  As such we
can modify the data once and then let it go cold in the cache instead of
writing it, reading it, and then writing it again.

(cherry picked from commit f990b79bc80ca7a23b8a6c33241c439072d0b85b)
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Joe Jin <joe.jin@oracle.com>
drivers/net/ixgbe/ixgbe_main.c

index 86586ab0a62b72cff09d376449fdc8dbc8cc5f11..5a747f77223507bc289895f258c38b9fe2007bff 100644 (file)
@@ -1098,8 +1098,75 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
        writel(val, rx_ring->tail);
 }
 
+/* ixgbe_alloc_mapped_skb - make sure @bi holds an skb that is DMA-mapped
+ * @rx_ring: Rx ring the buffer belongs to
+ * @bi: Rx buffer-info slot to populate
+ *
+ * Allocates an skb for the slot if it has none, then maps its data area
+ * for device writes.  Returns true when bi->dma holds a valid mapping,
+ * false on allocation or mapping failure (alloc_rx_buff_failed counter
+ * is bumped in either failure case).
+ */
+static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
+                                  struct ixgbe_rx_buffer *bi)
+{
+       struct sk_buff *skb = bi->skb;
+       dma_addr_t dma = bi->dma;
+
+       /* slot already has a live mapping - nothing to do */
+       if (dma)
+               return true;
+
+       if (likely(!skb)) {
+               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                               rx_ring->rx_buf_len);
+               bi->skb = skb;
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       return false;
+               }
+
+               /* initialize skb for ring */
+               skb_record_rx_queue(skb, rx_ring->queue_index);
+       }
+
+       dma = dma_map_single(rx_ring->dev, skb->data,
+                            rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+       /* NOTE(review): on mapping failure bi->skb is kept (not freed),
+        * presumably so the next allocation pass can retry the mapping
+        * without reallocating - confirm against the cleanup path. */
+       if (dma_mapping_error(rx_ring->dev, dma)) {
+               rx_ring->rx_stats.alloc_rx_buff_failed++;
+               return false;
+       }
+
+       bi->dma = dma;
+       return true;
+}
+
+/* ixgbe_alloc_mapped_page - make sure @bi holds a DMA-mapped half page
+ * @rx_ring: Rx ring the buffer belongs to
+ * @bi: Rx buffer-info slot to populate
+ *
+ * Allocates a page for the slot if it has none, then maps one half of
+ * it (PAGE_SIZE / 2) for device writes.  The XOR on page_offset flips
+ * between the two halves so a page can be reused for two buffers.
+ * Returns true when bi->page_dma holds a valid mapping, false on
+ * allocation or mapping failure (alloc_rx_page_failed is bumped).
+ */
+static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
+                                   struct ixgbe_rx_buffer *bi)
+{
+       struct page *page = bi->page;
+       dma_addr_t page_dma = bi->page_dma;
+       /* toggle to the other half of the page for this mapping */
+       unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+       /* slot already has a live mapping - nothing to do */
+       if (page_dma)
+               return true;
+
+       if (!page) {
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+               bi->page = page;
+               if (unlikely(!page)) {
+                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                       return false;
+               }
+       }
+
+       page_dma = dma_map_page(rx_ring->dev, page,
+                               page_offset, PAGE_SIZE / 2,
+                               DMA_FROM_DEVICE);
+
+       /* NOTE(review): on mapping failure bi->page is kept and
+        * bi->page_offset is left untouched, so the next pass retries
+        * the same half - confirm this matches the cleanup path. */
+       if (dma_mapping_error(rx_ring->dev, page_dma)) {
+               rx_ring->rx_stats.alloc_rx_page_failed++;
+               return false;
+       }
+
+       bi->page_dma = page_dma;
+       bi->page_offset = page_offset;
+       return true;
+}
+
 /**
- * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
@@ -1107,82 +1174,49 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
-       struct sk_buff *skb;
        u16 i = rx_ring->next_to_use;
 
-       /* do nothing if no valid netdev defined */
-       if (!rx_ring->netdev)
+       /* nothing to do or no valid netdev defined */
+       if (!cleaned_count || !rx_ring->netdev)
                return;
 
-       while (cleaned_count--) {
-               rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
-               bi = &rx_ring->rx_buffer_info[i];
-               skb = bi->skb;
+       rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+       bi = &rx_ring->rx_buffer_info[i];
+       i -= rx_ring->count;
 
-               if (!skb) {
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       rx_ring->rx_buf_len);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               goto no_buffers;
-                       }
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       bi->skb = skb;
-               }
+       while (cleaned_count--) {
+               if (!ixgbe_alloc_mapped_skb(rx_ring, bi))
+                       break;
 
-               if (!bi->dma) {
-                       bi->dma = dma_map_single(rx_ring->dev,
-                                                skb->data,
-                                                rx_ring->rx_buf_len,
-                                                DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               bi->dma = 0;
-                               goto no_buffers;
-                       }
-               }
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info. */
 
                if (ring_is_ps_enabled(rx_ring)) {
-                       if (!bi->page) {
-                               bi->page = netdev_alloc_page(rx_ring->netdev);
-                               if (!bi->page) {
-                                       rx_ring->rx_stats.alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                       }
+                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 
-                       if (!bi->page_dma) {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= PAGE_SIZE / 2;
-                               bi->page_dma = dma_map_page(rx_ring->dev,
-                                                           bi->page,
-                                                           bi->page_offset,
-                                                           PAGE_SIZE / 2,
-                                                           DMA_FROM_DEVICE);
-                               if (dma_mapping_error(rx_ring->dev,
-                                                     bi->page_dma)) {
-                                       rx_ring->rx_stats.alloc_rx_page_failed++;
-                                       bi->page_dma = 0;
-                                       goto no_buffers;
-                               }
-                       }
+                       if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+                               break;
 
-                       /* Refresh the desc even if buffer_addrs didn't change
-                        * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-                       rx_desc->read.hdr_addr = 0;
                }
 
+               rx_desc++;
+               bi++;
                i++;
-               if (i == rx_ring->count)
-                       i = 0;
+               if (unlikely(!i)) {
+                       rx_desc = IXGBE_RX_DESC_ADV(rx_ring, 0);
+                       bi = rx_ring->rx_buffer_info;
+                       i -= rx_ring->count;
+               }
+
+               /* clear the hdr_addr for the next_to_use descriptor */
+               rx_desc->read.hdr_addr = 0;
        }
 
-no_buffers:
+       i += rx_ring->count;
+
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                ixgbe_release_rx_desc(rx_ring, i);
@@ -1590,8 +1624,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                budget--;
 next_desc:
-               rx_desc->wb.upper.status_error = 0;
-
                if (!budget)
                        break;