struct sk_buff *skb;
                entry = vp->dirty_rx % RX_RING_SIZE;
                if (vp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
+                       skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
                        if (skb == NULL) {
                                static unsigned long last_jif;
                                if (time_after(jiffies, last_jif + 10 * HZ)) {
                                break;                  /* Bad news!  */
                        }
 
-                       skb_reserve(skb, NET_IP_ALIGN);
                        vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                        vp->rx_skbuff[entry] = skb;
                }
 
                        pr_debug("%s: rx slot %d status 0x%x len %d\n",
                               dev->name, rx_tail, status, len);
 
-               new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+               new_skb = netdev_alloc_skb_ip_align(dev, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }
 
-               skb_reserve(new_skb, NET_IP_ALIGN);
-
                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);
 
                struct sk_buff *skb;
                dma_addr_t mapping;
 
-               skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;
 
-               skb_reserve(skb, NET_IP_ALIGN);
-
                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;
 
                /* Malloc up new buffer, compatible with net-2e. */
                /* Omit the four octet CRC from the length. */
 
-               skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(dev, pkt_size);
                if (likely(skb)) {
-                       skb_reserve (skb, NET_IP_ALIGN);        /* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
                        wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
 
 
                        packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
                                        RRS_PKT_SIZE_MASK) - 4; /* CRC */
-                       skb = netdev_alloc_skb(netdev,
-                                              packet_size + NET_IP_ALIGN);
+                       skb = netdev_alloc_skb_ip_align(netdev, packet_size);
                        if (skb == NULL) {
                                dev_warn(&pdev->dev, "%s: Memory squeeze, "
                                        "deferring packet.\n", netdev->name);
                                goto skip_pkt;
                        }
-                       skb_reserve(skb, NET_IP_ALIGN);
                        skb->dev = netdev;
                        memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
                        skb_put(skb, packet_size);
 
 
                rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-               skb = netdev_alloc_skb(adapter->netdev,
-                                      adapter->rx_buffer_len + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                                               adapter->rx_buffer_len);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->netdev->stats.rx_dropped++;
                        break;
                }
 
-               /*
-                * Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->alloced = 1;
                buffer_info->skb = skb;
                buffer_info->length = (u16) adapter->rx_buffer_len;
 
                if (rxd->status.ok && rxd->status.pkt_size >= 60) {
                        int rx_size = (int)(rxd->status.pkt_size - 4);
                        /* alloc new buffer */
-                       skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+                       skb = netdev_alloc_skb_ip_align(netdev, rx_size);
                        if (NULL == skb) {
                                printk(KERN_WARNING
                                        "%s: Mem squeeze, deferring packet.\n",
                                        netdev->name);
                                netdev->stats.rx_dropped++;
                                break;
                        }
-                       skb_reserve(skb, NET_IP_ALIGN);
                        skb->dev = netdev;
                        memcpy(skb->data, rxd->packet, rx_size);
                        skb_put(skb, rx_size);
 
                if (len < copybreak) {
                        struct sk_buff *nskb;
 
-                       nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+                       nskb = netdev_alloc_skb_ip_align(dev, len);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                priv->stats.rx_dropped++;
                                continue;
                        }
 
-                       /* since we're copying the data, we can align
-                        * them properly */
-                       skb_reserve(nskb, NET_IP_ALIGN);
                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
 
        if ((adapter->cap == 0x400) && !vtm)
                vlanf = 0;
 
-       skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (!skb) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                return;
        }
 
-       skb_reserve(skb, NET_IP_ALIGN);
-
        skb_fill_rx_data(adapter, skb, rxcp);
 
        if (do_pkt_csum(rxcp, adapter->rx_csum))
 
                return NULL;
        }
 
-       skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
+       skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
        if (likely(skb)) {
-               skb_reserve(skb, 2);
                skb_put(desc->skb, desc->datalen);
                desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
                desc->skb->ip_summed = CHECKSUM_NONE;
 
        priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
        for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
-               skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+               skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
                if (unlikely(!skb)) {
                        res = -ENOMEM;
                        goto fail_desc;
                }
-               skb_reserve(skb, 2);
                desc->skb = skb;
                desc->data_mapping = dma_map_single(&dev->dev, skb->data,
                                                    CPMAC_SKB_SIZE,
 
                        entry = np->old_rx % RX_RING_SIZE;
                        /* Dropped packets don't need to re-allocate */
                        if (np->rx_skbuff[entry] == NULL) {
-                               skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+                               skb = netdev_alloc_skb_ip_align(dev,
+                                                               np->rx_buf_sz);
                                if (skb == NULL) {
                                        np->rx_ring[entry].fraginfo = 0;
                                        printk (KERN_INFO
                                        break;
                                }
                                np->rx_skbuff[entry] = skb;
-                               /* 16 byte align the IP header */
-                               skb_reserve (skb, 2);
                                np->rx_ring[entry].fraginfo =
                                    cpu_to_le64 (pci_map_single
                                         (np->pdev, skb->data, np->rx_buf_sz,
        /* Allocate the rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* Allocated fixed size of skbuff */
-               struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL) {
                        printk (KERN_ERR
                                dev->name);
                        break;
                }
-               skb_reserve (skb, 2);   /* 16 byte align the IP header. */
                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                    cpu_to_le64 ( pci_map_single (
                                                  PCI_DMA_FROMDEVICE);
                                skb_put (skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
-                       } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+                       } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
                                pci_dma_sync_single_for_cpu(np->pdev,
                                                            desc_to_dma(desc),
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
-                               /* 16 byte align the IP header */
-                               skb_reserve (skb, 2);
                                skb_copy_to_linear_data (skb,
                                                  np->rx_skbuff[entry]->data,
                                                  pkt_len);
                struct sk_buff *skb;
                /* Dropped packets don't need to re-allocate */
                if (np->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+                       skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                        if (skb == NULL) {
                                np->rx_ring[entry].fraginfo = 0;
                                printk (KERN_INFO
                                break;
                        }
                        np->rx_skbuff[entry] = skb;
-                       /* 16 byte align the IP header */
-                       skb_reserve (skb, 2);
                        np->rx_ring[entry].fraginfo =
                            cpu_to_le64 (pci_map_single
                                         (np->pdev, skb->data, np->rx_buf_sz,
 
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-       if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
+       if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
                return -ENOMEM;
 
-       /* Align, init, and map the RFD. */
-       skb_reserve(rx->skb, NET_IP_ALIGN);
+       /* Init and map the RFD. */
        skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
                RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
                 * of reassembly being done in the stack */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
-                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+                           netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
-                               skb_reserve(new_skb, NET_IP_ALIGN);
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = 256 -
-                            16 /*for skb_reserve */ -
-                            NET_IP_ALIGN;
+       unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
                        goto check_page;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
                                             "at %p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
-                       skb = netdev_alloc_skb(netdev, bufsz);
+                       skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        /* Failed allocation, critical failure */
                        if (!skb) {
                                dev_kfree_skb(oldskb);
                        /* Use new allocation */
                        dev_kfree_skb(oldskb);
                }
-               /* Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
 check_page:
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+       unsigned int bufsz = adapter->rx_buffer_len;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
                        goto map_skb;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
                                             "at %p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
-                       skb = netdev_alloc_skb(netdev, bufsz);
+                       skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        /* Failed allocation, critical failure */
                        if (!skb) {
                                dev_kfree_skb(oldskb);
                        /* Use new allocation */
                        dev_kfree_skb(oldskb);
                }
-               /* Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
 map_skb:
 
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+       unsigned int bufsz = adapter->rx_buffer_len;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
                        goto map_skb;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (!skb) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /*
-                * Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
 map_skb:
                buffer_info->dma = pci_map_single(pdev, skb->data,
                             cpu_to_le64(ps_page->dma);
                }
 
-               skb = netdev_alloc_skb(netdev,
-                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(netdev,
+                                               adapter->rx_ps_bsize0);
 
                if (!skb) {
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /*
-                * Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_ps_bsize0,
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = 256 -
-                            16 /* for skb_reserve */ -
-                            NET_IP_ALIGN;
+       unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
                        goto check_page;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /* Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
 check_page:
                /* allocate a new page if necessary */
                 */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
-                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+                           netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
-                               skb_reserve(new_skb, NET_IP_ALIGN);
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
 
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
-               struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                        }
                        break;
                }
-               skb_reserve(skb, NET_IP_ALIGN);
 
                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
 {
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
-                                 EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+                                 EHEA_RQ2_PKT_SIZE);
 }
 
 
 {
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
-                                 EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+                                 EHEA_MAX_PACKET_SIZE);
 }
 
 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
 
        dev_kfree_skb_any(buf->os_buf);
 }
 
-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-       unsigned int size)
-{
-       struct sk_buff *skb;
-
-       skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-       if (skb)
-               skb_reserve(skb, NET_IP_ALIGN);
-
-       return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
        struct enic *enic = vnic_dev_priv(rq->vdev);
        unsigned int os_buf_index = 0;
        dma_addr_t dma_addr;
 
-       skb = enic_rq_alloc_skb(netdev, len);
+       skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!skb)
                return -ENOMEM;
 
 
 
                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;
-                       struct sk_buff *skb = netdev_alloc_skb(dev, size);
+                       struct sk_buff *skb;
 
                        size -= 4; /* strip the CRC */
-                       skb_reserve(skb, 2); /* align TCP/IP header */
+                       skb = netdev_alloc_skb_ip_align(dev, size);
 
                        if (likely(skb)) {
                                void *src = phys_to_virt(bd.addr);
 
 /* A few values that may be tweaked. */
 /* Size of each temporary Rx buffer, calculated as:
  * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
- * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
- * 2 more because we use skb_reserve.
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
  */
-#define PKT_BUF_SZ             1538
+#define PKT_BUF_SZ             1536
 
 /* For now, this is going to be set to the maximum size of an ethernet
  * packet.  Eventually, we may want to make it a variable that is
        }
        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
                hmp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
 
-               skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
                        skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
         * card.  -KDU
         */
        hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
-               (((dev->mtu+26+7) & ~7) + 2 + 16));
+               (((dev->mtu+26+7) & ~7) + 16));
 
        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
 
                }
 
                if (!buffer_info->skb) {
-                       skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+                       skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
 
-                       /* Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
-
                        buffer_info->skb = skb;
                        buffer_info->dma = pci_map_single(pdev, skb->data,
                                                          bufsz,
 
                }
 
                if (!buffer_info->skb) {
-                       skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+                       skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
 
-                       /* Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
-
                        buffer_info->skb = skb;
                        buffer_info->dma = pci_map_single(pdev, skb->data,
                                                          bufsz,
 
 
        IPG_DEBUG_MSG("_get_rxbuff\n");
 
-       skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
        if (!skb) {
                sp->rx_buff[entry] = NULL;
                return -ENOMEM;
        }
 
-       /* Adjust the data start location within the buffer to
-        * align IP address field to a 16 byte boundary.
-        */
-       skb_reserve(skb, NET_IP_ALIGN);
-
        /* Associate the receive buffer with the IPG NIC. */
        skb->dev = dev;
 
 
                 * of reassembly being done in the stack */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
-                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+                           netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
-                               skb_reserve(new_skb, NET_IP_ALIGN);
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
                        goto map_skb;
                }
 
-               skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
-                                      + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /* Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
 map_skb:
 
 
                if (!bi->skb) {
                        struct sk_buff *skb;
-                       skb = netdev_alloc_skb(adapter->netdev,
-                                              (rx_ring->rx_buf_len +
-                                               NET_IP_ALIGN));
+                       skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                                                       rx_ring->rx_buf_len);
 
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
 
-                       /*
-                        * Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
-
                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
 
                if (unlikely(!netif_running(nds[desc->channel])))
                        goto err;
 
-               skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
+               skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
                if (likely(skb != NULL)) {
-                       skb_reserve(skb, 2);
                        skb_copy_to_linear_data(skb, buf, desc->pkt_length);
                        skb_put(skb, desc->pkt_length);
                        skb->protocol = eth_type_trans(skb, nds[desc->channel]);
 
                        dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 
                        /* Malloc up new buffer. */
-                       skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+                       skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 
                        if (!skb_new)
                                break;
                        if (devcs & ETH_RX_MP)
                                dev->stats.multicast++;
 
-                       /* 16 bit align */
-                       skb_reserve(skb_new, 2);
-
                        lp->rx_skb[lp->rx_next_done] = skb_new;
                }
 
 
 
        /* check the status */
        if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-               struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+               struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
                dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
                        __func__, len);
                        if (status & RXSR_MULTICAST)
                                netdev->stats.multicast++;
 
-                       /* Align socket buffer in 4-byte boundary for
-                                better performance. */
-                       skb_reserve(skb, 2);
                        data = (u32 *)skb_put(skb, len);
 
                        ks8842_select_bank(adapter, 17);
 
 
        for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
                dma_addr_t dma_addr;
-               struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+               struct sk_buff *skb;
 
+               skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
                if (skb == NULL)
                        return -1;
-               skb_reserve(skb, 2);
                dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                          PKT_BUF_SZ, DMA_FROM_DEVICE);
                rbd->v_next = rbd+1;
                                                 (dma_addr_t)SWAP32(rbd->b_data),
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* Get fresh skbuff to replace filled one. */
-                               newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+                               newskb = netdev_alloc_skb_ip_align(dev,
+                                                                  PKT_BUF_SZ);
                                if (newskb == NULL) {
                                        skb = NULL;     /* drop pkt */
                                        goto memory_squeeze;
                                }
-                               skb_reserve(newskb, 2);
 
                                /* Pass up the skb already on the Rx ring. */
                                skb_put(skb, pkt_len);
                                rbd->b_data = SWAP32(dma_addr);
                                DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                        } else
-                               skb = netdev_alloc_skb(dev, pkt_len + 2);
+                               skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
                        if (skb == NULL) {
                                /* XXX tulip.c can defer packets here!! */
                                        dma_sync_single_for_cpu(dev->dev.parent,
                                                                (dma_addr_t)SWAP32(rbd->b_data),
                                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
-                                       skb_reserve(skb, 2);
                                        memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
                                        dma_sync_single_for_device(dev->dev.parent,
                                                                   (dma_addr_t)SWAP32(rbd->b_data),
 
        if (pkt_size >= rx_copybreak)
                goto out;
 
-       skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
        if (!skb)
                goto out;
 
        pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
                                    PCI_DMA_FROMDEVICE);
-       skb_reserve(skb, NET_IP_ALIGN);
        skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
        *sk_buff = skb;
        done = true;
 
 
                rx_len -= rx_size_align + 4;
 
-               skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(dev, pkt_size);
                if (unlikely(!skb)) {
                        if (printk_ratelimit())
                                printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
                        goto next;
                }
 
-               skb_reserve(skb, NET_IP_ALIGN);
-
                if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
                        memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
                                rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
 
                                        }
                                        skb_reserve(newskb, 2);
                                } else {
-                                       skb = netdev_alloc_skb(dev, len + 2);
-                                       if (skb) {
-                                               skb_reserve(skb, 2);
+                                       skb = netdev_alloc_skb_ip_align(dev, len);
+                                       if (skb)
                                                skb_copy_to_linear_data(skb, rd->skb->data, len);
-                                       }
+
                                        newskb = rd->skb;
                                }
 memory_squeeze:
 
        if (pkt_size >= rx_copybreak)
                goto out;
 
-       skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+       skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
        if (!skb)
                goto out;
 
        pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
                                PCI_DMA_FROMDEVICE);
-       skb_reserve(skb, 2);
        skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
        *sk_buff = skb;
        done = true;
 
                goto error;
 
        if (len < RX_COPY_THRESHOLD) {
-               skb = netdev_alloc_skb(dev, len + 2);
+               skb = netdev_alloc_skb_ip_align(dev, len);
                if (!skb)
                        goto resubmit;
 
-               skb_reserve(skb, 2);
                pci_dma_sync_single_for_cpu(skge->hw->pdev,
                                            pci_unmap_addr(e, mapaddr),
                                            len, PCI_DMA_FROMDEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
                struct sk_buff *nskb;
-               nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+               nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
                if (!nskb)
                        goto resubmit;
 
-               skb_reserve(nskb, NET_IP_ALIGN);
                pci_unmap_single(skge->hw->pdev,
                                 pci_unmap_addr(e, mapaddr),
                                 pci_unmap_len(e, maplen),
 
 {
        struct sk_buff *skb;
 
-       skb = netdev_alloc_skb(sky2->netdev, length + 2);
+       skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
        if (likely(skb)) {
-               skb_reserve(skb, 2);
                pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
                                            length, PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(re->skb, skb->data, length);
 
                if (tmpCStat & TLAN_CSTAT_EOC)
                        eoc = 1;
 
-               new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+               new_skb = netdev_alloc_skb_ip_align(dev,
+                                                   TLAN_MAX_FRAME_SIZE + 5);
                if ( !new_skb )
                        goto drop_and_reuse;
 
                skb->protocol = eth_type_trans( skb, dev );
                netif_rx( skb );
 
-               skb_reserve( new_skb, NET_IP_ALIGN );
                head_list->buffer[0].address = pci_map_single(priv->pciDev,
                                                              new_skb->data,
                                                              TLAN_MAX_FRAME_SIZE,
                list->cStat = TLAN_CSTAT_READY;
                list->frameSize = TLAN_MAX_FRAME_SIZE;
                list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-               skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+               skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
                if ( !skb ) {
                        pr_err("TLAN: out of memory for received data.\n" );
                        break;
                }
 
-               skb_reserve( skb, NET_IP_ALIGN );
                list->buffer[0].address = pci_map_single(priv->pciDev,
                                                         skb->data,
                                                         TLAN_MAX_FRAME_SIZE,
 
                int rx = data->rxhead;
                struct sk_buff *skb;
 
-               data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
-                                                         TSI108_RXBUF_SIZE + 2);
+               skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+               data->rxskbs[rx] = skb;
                if (!skb)
                        break;
 
-               skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
-
                data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
                                                        TSI108_RX_SKB_SIZE,
                                                        DMA_FROM_DEVICE);
        for (i = 0; i < TSI108_RXRING_LEN; i++) {
                struct sk_buff *skb;
 
-               skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
                if (!skb) {
                        /* Bah.  No memory for now, but maybe we'll get
                         * some more later.
                }
 
                data->rxskbs[i] = skb;
-               /* Align the payload on a 4-byte boundary */
-               skb_reserve(skb, 2);
                data->rxskbs[i] = skb;
                data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
                data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
 
                                }
                        }
                } else {
-                       struct sk_buff *skb;
+                       struct sk_buff *skb = NULL;
                        /* Length should omit the CRC */
                        int pkt_len = data_size - 4;
 
                        /* Check if the packet is long enough to accept without
                           copying to a minimally-sized skbuff. */
-                       if (pkt_len < rx_copybreak &&
-                               (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-                               skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
+                       if (pkt_len < rx_copybreak)
+                               skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+                       if (skb) {
                                pci_dma_sync_single_for_cpu(rp->pdev,
                                                            rp->rx_skbuff_dma[entry],
                                                            rp->rx_buf_sz,
 
        if (pkt_size < rx_copybreak) {
                struct sk_buff *new_skb;
 
-               new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+               new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
                if (new_skb) {
                        new_skb->ip_summed = rx_skb[0]->ip_summed;
-                       skb_reserve(new_skb, 2);
                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
                        *rx_skb = new_skb;
                        ret = 0;
 
        do {
                struct skb_vnet_hdr *hdr;
 
-               skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }
 
-               skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, MAX_PACKET_LEN);
 
                hdr = skb_vnet_hdr(skb);
        do {
                skb_frag_t *f;
 
-               skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }
 
-               skb_reserve(skb, NET_IP_ALIGN);
-
                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
 
 
 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
-       struct net_device *dev = napi->dev;
        struct sk_buff *skb = napi->skb;
 
        if (!skb) {
-               skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
-               if (!skb)
-                       goto out;
-
-               skb_reserve(skb, NET_IP_ALIGN);
-
-               napi->skb = skb;
+               skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+               if (skb)
+                       napi->skb = skb;
        }
-
-out:
        return skb;
 }
 EXPORT_SYMBOL(napi_get_frags);
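
For reference, every hunk above replaces the open-coded pair "allocate length + NET_IP_ALIGN bytes, then skb_reserve(skb, NET_IP_ALIGN)" with a single helper call. A minimal sketch of what netdev_alloc_skb_ip_align() does (the in-tree definition lives in include/linux/skbuff.h and may differ in detail):

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
                                                        unsigned int length)
{
        /* Allocate room for the payload plus NET_IP_ALIGN bytes of headroom
         * (2 on most architectures, 0 where unaligned access is cheap). */
        struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

        /* Shift the data pointer so the IP header that follows the 14-byte
         * Ethernet header ends up 16-byte aligned. */
        if (NET_IP_ALIGN && skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

Callers must still check for a NULL return, exactly as the converted hunks do.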