Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
  - Removed the now-redundant skb->dev = dev assignments after netdev_alloc_skb
Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
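
The conversion pattern is sketched below on a hypothetical driver; rx_refill(),
struct foo_priv and rx_buf_sz are illustrative names, not taken from any file
touched in this patch. netdev_alloc_skb() takes the net_device and sets
skb->dev itself, so the explicit assignment after allocation can be dropped.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical private state, for illustration only. */
	struct foo_priv {
		struct sk_buff *rx_skb;
		unsigned int rx_buf_sz;
	};

	static int rx_refill(struct net_device *dev, struct foo_priv *priv)
	{
		struct sk_buff *skb;

		/* old: skb = dev_alloc_skb(priv->rx_buf_sz); ... skb->dev = dev; */
		skb = netdev_alloc_skb(dev, priv->rx_buf_sz);
		if (!skb)
			return -ENOMEM;	/* caller drops the packet or retries later */

		/* netdev_alloc_skb() already set skb->dev, no assignment needed */
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
		priv->rx_skb = skb;
		return 0;
	}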
        netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                   first_frag, last_frag, len);
 
-       skb = dev_alloc_skb(len + RX_OFFSET);
+       skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
        if (!skb) {
                bp->stats.rx_dropped++;
                for (frag = first_frag; ; frag = NEXT_RX(frag)) {
 
        }
 
        /* Malloc up new buffer. */
-       skb = dev_alloc_skb(length + 2);
+       skb = netdev_alloc_skb(dev, length + 2);
        if (skb == NULL) {
                if (net_debug)  /* I don't think we want to do this to a stressed system */
                        printk("%s: Memory squeeze, dropping packet.\n", dev->name);
        }
 
        /* Malloc up new buffer. */
-       skb = dev_alloc_skb(length + 2);
+       skb = netdev_alloc_skb(dev, length + 2);
        if (skb == NULL) {
 #if 0          /* Again, this seems a cruel thing to do */
                printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
 
                if (rstat0 & RSTAT0_CRCI)
                        length -= 4;
 
-               skb = dev_alloc_skb(length + 2);
+               skb = netdev_alloc_skb(dev, length + 2);
                if (likely(skb != NULL)) {
                        struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
                        skb_reserve(skb, 2);
 
 
                /* Move data from DM9000 */
                if (GoodPacket &&
-                   ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+                   ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
                        skb_reserve(skb, 2);
                        rdptr = (u8 *) skb_put(skb, RxLen - 4);
 
 
                                                dev->stats.rx_fifo_errors++;
                                } else {
                                        struct sk_buff *skb;
+                                       skb = netdev_alloc_skb(dev,
+                                                       pkt_len + 2);
 
-                                       if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                                       if (skb != NULL) {
                                                unsigned char *p;
                                                skb_reserve(skb, 2);    /* Align to 16 bytes */
                                                p = skb_put(skb, pkt_len);
 
                          rx_tail, status, len, copying_skb);
 
                buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
-               copy_skb = dev_alloc_skb (buflen);
+               copy_skb = netdev_alloc_skb(de->dev, buflen);
                if (unlikely(!copy_skb)) {
                        de->net_stats.rx_dropped++;
                        drop = 1;
        for (i = 0; i < DE_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
 
-               skb = dev_alloc_skb(de->rx_buf_sz);
+               skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
                if (!skb)
                        goto err_out;
 
-               skb->dev = de->dev;
-
                de->rx_skb[i].mapping = pci_map_single(de->pdev,
                        skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
                de->rx_skb[i].skb = skb;
 
     struct sk_buff *ret;
     u_long i=0, tmp;
 
-    p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+    p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
     if (!p) return NULL;
 
     tmp = virt_to_bus(p->data);
 #else
     if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
 
-    p = dev_alloc_skb(len + 2);
+    p = netdev_alloc_skb(dev, len + 2);
     if (!p) return NULL;
 
     skb_reserve(p, 2);                                /* Align */
 
                        struct sk_buff *skb;
                        dma_addr_t mapping;
 
-                       skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+                       skb = tp->rx_buffers[entry].skb =
+                               netdev_alloc_skb(dev, PKT_BUF_SZ);
                        if (skb == NULL)
                                break;
 
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;
 
-                       skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak &&
-                                   (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                                   (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                   tp->rx_buffers[entry].mapping,
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak &&
-                           (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                           (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
 
                dma_addr_t mapping;
 
                /* Note the receive buffer must be longword aligned.
-                  dev_alloc_skb() provides 16 byte alignment.  But do *not*
+                  netdev_alloc_skb() provides 16 byte alignment.  But do *not*
                   use skb_reserve() to align the IP header! */
-               struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+               struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                tp->rx_buffers[i].skb = skb;
                if (skb == NULL)
                        break;
                mapping = pci_map_single(tp->pdev, skb->data,
                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                tp->rx_buffers[i].mapping = mapping;
-               skb->dev = dev;                 /* Mark as being used by this device. */
                tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
                tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
        }
 
 
        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+               struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
-                           (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                           (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
                                                            np->rx_skbuff[entry]->len,
                struct sk_buff *skb;
                entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
-                       skb = dev_alloc_skb(np->rx_buf_sz);
+                       skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;                  /* Better luck next round. */
 
                        pkt_len = 1518;
                }
 
-               skb = dev_alloc_skb(pkt_len + 2);
+               skb = netdev_alloc_skb(dev, pkt_len + 2);
                if (skb == NULL) {
                        dev->stats.rx_dropped++;
                        goto out;
 
                return;
        }
 
-       skb = dev_alloc_skb(size+2);
+       skb = netdev_alloc_skb(dev, size + 2);
        if (skb == NULL) {
                printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
                return;
 
                printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
        }
        else { /* Good packet? */
-               skb = dev_alloc_skb(size+2);
+               skb = netdev_alloc_skb(dev, size + 2);
                if (skb == NULL) { /* Yeah, but no place to put it... */
                        printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
                        dev->stats.rx_dropped++;
 
 
        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
+               struct sk_buff *skb =
+                       netdev_alloc_skb(dev, np->rx_buf_sz + 2);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
-               skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
                        dma_map_single(&np->pci_dev->dev, skb->data,
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
-                           (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                           (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                dma_sync_single_for_cpu(&np->pci_dev->dev,
                                                le32_to_cpu(desc->frag[0].addr),
                struct sk_buff *skb;
                entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
-                       skb = dev_alloc_skb(np->rx_buf_sz + 2);
+                       skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;          /* Better luck next round. */
-                       skb->dev = dev;         /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
                                dma_map_single(&np->pci_dev->dev, skb->data,
 
                        printk(KERN_ERR "%s packet receive error %x\n",
                               __func__, cmd_word);
 
-               skb = dev_alloc_skb(pkt_len + 5);
+               skb = netdev_alloc_skb(dev, pkt_len + 5);
                if (skb != NULL) {
                        /* Align IP on 16 byte boundaries */
                        skb_reserve(skb, 2);
 
        while (np->really_rx_count != RX_RING_SIZE) {
                struct sk_buff *skb;
 
-               skb = dev_alloc_skb(np->rx_buf_sz);
+               skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                if (skb == NULL)
                        break;  /* Better luck next round. */
 
                while (np->lack_rxbuf->skbuff)
                        np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
 
-               skb->dev = dev; /* Mark as being used by this device. */
                np->lack_rxbuf->skbuff = skb;
                np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
        /* allocate skb for rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+               struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 
                if (skb == NULL) {
                        np->lack_rxbuf = &np->rx_ring[i];
 
                ++np->really_rx_count;
                np->rx_ring[i].skbuff = skb;
-               skb->dev = dev; /* Mark as being used by this device. */
                np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                np->rx_ring[i].status = RXOWN;
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
-                           (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                           (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            np->cur_rx->buffer,
 
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
-               skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+               skb = netdev_alloc_skb(dev, pkt_len - 4 + NET_IP_ALIGN);
 
                if (unlikely(!skb)) {
                        printk("%s: Memory squeeze, dropping packet.\n",
 
        bdp = fep->rx_bd_base;
        for (i = 0; i < RX_RING_SIZE; i++) {
-               skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+               skb = netdev_alloc_skb(dev, FEC_ENET_RX_FRSIZE);
                if (!skb) {
                        fec_enet_free_buffers(ndev);
                        return -ENOMEM;
 
        struct sk_buff *skb;
 
        while (!bcom_queue_full(rxtsk)) {
-               skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+               skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
                if (!skb)
                        return -EAGAIN;
 
 
                /* skbs are allocated on open, so now we allocate a new one,
                 * and remove the old (with the packet) */
-               skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+               skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
                if (!skb) {
                        /* Can't get a new one : reuse the same & drop pkt */
                        dev_notice(&dev->dev, "Low memory - dropped packet.\n");
 
 
                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
-                               skbn = dev_alloc_skb(pkt_len + 2);
+                               skbn = netdev_alloc_skb(dev, pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                        skbn = skbt;
                                }
                        } else {
-                               skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+                               skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
 
                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
-                               skbn = dev_alloc_skb(pkt_len + 2);
+                               skbn = netdev_alloc_skb(dev, pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                        skbn = skbt;
                                }
                        } else {
-                               skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+                               skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
-               skb = dev_alloc_skb(ENET_RX_FRSIZE);
+               skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
                if (skb == NULL) {
                        dev_warn(fep->dev,
                                 "Memory squeeze, unable to allocate skb\n");
        struct fs_enet_private *fep = netdev_priv(dev);
 
        /* Alloc new skb */
-       new_skb = dev_alloc_skb(skb->len + 4);
+       new_skb = netdev_alloc_skb(dev, skb->len + 4);
        if (!new_skb) {
                if (net_ratelimit()) {
                        dev_warn(fep->dev,
 
 
        skb = __skb_dequeue(&ugeth->rx_recycle);
        if (!skb)
-               skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
-                                   UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+               skb = netdev_alloc_skb(ugeth->ndev,
+                                     ugeth->ug_info->uf_info.max_rx_buf_length +
+                                     UCC_GETH_RX_DATA_BUF_ALIGNMENT);
        if (skb == NULL)
                return NULL;
 
                    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
                                              1)));
 
-       skb->dev = ugeth->ndev;
-
        out_be32(&((struct qe_bd __iomem *)bd)->buf,
                      dma_map_single(ugeth->dev,
                                     skb->data,
 
                                dev->stats.rx_errors++;
                                break;
                        }
-                       skb = dev_alloc_skb(pkt_len+3);
+                       skb = netdev_alloc_skb(dev, pkt_len + 3);
                        if (skb == NULL) {
                                printk("%s: Memory squeeze, dropping packet (len %d).\n",
                                           dev->name, pkt_len);