/* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
-               struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);
 
-               skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
-                   dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
-                                  DMA_FROM_DEVICE) + 2;
+                   dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                                  rx_sync_size, DMA_FROM_DEVICE)
+                                  + NET_IP_ALIGN;
                dev->rx_skb[i] = skb;
        }
  skip:
        }
 }
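
For reference: netdev_alloc_skb_ip_align() already performs the headroom
reservation that the removed skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2)
did by hand, which is why the hard-coded "2" becomes NET_IP_ALIGN
throughout this patch. A minimal sketch of what the generic helper does,
based on the wrappers in include/linux/skbuff.h (sketch_rx_alloc is a
hypothetical name used only for illustration):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate len + NET_IP_ALIGN bytes, then reserve NET_IP_ALIGN of
 * headroom so the IP header lands 4-byte aligned behind the 14-byte
 * Ethernet header.
 */
static struct sk_buff *sketch_rx_alloc(struct net_device *ndev,
                                       unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);

        if (NET_IP_ALIGN && skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}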
 
-static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
-                                   gfp_t flags)
+static int
+__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
 {
-       struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;
 
        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;
 
-       skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
-           dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
-                          DMA_FROM_DEVICE) + 2;
+           dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                          dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
        wmb();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        return 0;
 }
 
+static int
+emac_alloc_rx_skb(struct emac_instance *dev, int slot)
+{
+       struct sk_buff *skb;
+
+       skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
+                                         GFP_KERNEL);
+
+       return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
+static int
+emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
+{
+       struct sk_buff *skb;
+
+       skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
+
+       return __emac_prepare_rx_skb(skb, dev, slot);
+}
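
The split between __emac_prepare_rx_skb() and the two wrappers lets each
caller pick the allocator that matches its context: emac_alloc_rx_skb()
may sleep (GFP_KERNEL) and is meant for ring setup in process context,
while emac_alloc_rx_skb_napi() draws from the per-NAPI allocator (which
uses GFP_ATOMIC internally) for the softirq poll loop. A hedged sketch of
the intended call sites (abbreviated, not the driver's exact code):

/* Process context, e.g. while bringing the interface up: may sleep. */
for (i = 0; i < NUM_RX_BUFF; ++i)
        if (emac_alloc_rx_skb(dev, i))          /* GFP_KERNEL */
                goto oom;

/* NAPI poll (softirq context): must not sleep. */
if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
        goto oom;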
+
 static void emac_print_link_status(struct emac_instance *dev)
 {
        if (netif_carrier_ok(dev->ndev))
 
        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
-               if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+               if (emac_alloc_rx_skb(dev, i)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
        DBG2(dev, "recycle %d %d" NL, slot, len);
 
        if (len)
-               dma_map_single(&dev->ofdev->dev, skb->data - 2,
-                              EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+               dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                              SKB_DATA_ALIGN(len + NET_IP_ALIGN),
+                              DMA_FROM_DEVICE);
 
        dev->rx_desc[slot].data_len = 0;
        wmb();
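
SKB_DATA_ALIGN() here stands in for the driver-private EMAC_DMA_ALIGN():
both round a length up to a cache-line multiple so the streaming mapping
covers whole cache lines, which matters on the non-cache-coherent
PowerPC 4xx parts this driver serves. A sketch of the arithmetic,
assuming the stock definition ALIGN(X, SMP_CACHE_BYTES) from
include/linux/skbuff.h and a 32-byte cache line:

/* Example: sync length for a 60-byte runt frame, NET_IP_ALIGN == 2. */
size_t sync_len = SKB_DATA_ALIGN(60 + NET_IP_ALIGN);    /* 62 -> 64 */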
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;
 
-               if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+               if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }
 
                if (len && len < EMAC_RX_COPY_THRESH) {
-                       struct sk_buff *copy_skb =
-                           alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+                       struct sk_buff *copy_skb;
+
+                       copy_skb = napi_alloc_skb(&dev->mal->napi, len);
                        if (unlikely(!copy_skb))
                                goto oom;
 
-                       skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
-                       memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
+                       memcpy(copy_skb->data - NET_IP_ALIGN,
+                              skb->data - NET_IP_ALIGN,
+                              len + NET_IP_ALIGN);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
-               } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+               } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
                        goto oom;
 
                skb_put(skb, len);
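
The branch above is the usual copy-break optimisation: frames shorter
than EMAC_RX_COPY_THRESH are copied into a small freshly allocated skb
and the original buffer is recycled back into the ring, so a full-size
RX buffer is never handed to the stack for a tiny packet. A generic
sketch of the pattern (copybreak_rx is a hypothetical helper, not part
of this driver):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Copy a small frame out of the RX ring buffer; on failure the caller
 * keeps the original (still mapped) ring_skb.
 */
static struct sk_buff *copybreak_rx(struct napi_struct *napi,
                                    struct sk_buff *ring_skb,
                                    unsigned int len)
{
        struct sk_buff *small = napi_alloc_skb(napi, len);

        if (!small)
                return NULL;

        /* Copy the NET_IP_ALIGN pad too, so alignment is preserved. */
        memcpy(small->data - NET_IP_ALIGN, ring_skb->data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);
        skb_put(small, len);
        return small;
}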
        sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
-                       if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+                       if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
                                DBG(dev, "rx OOM %d" NL, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);