 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-               struct sk_buff *skb);
+                          struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
-               addr    += sizeof(struct txbd8) * tx_queue->tx_ring_size;
-               vaddr   += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+               addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+               vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }
 
        /* Start the rx descriptor ring where the tx ring leaves off */
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
-               addr    += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-               vaddr   += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+               addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+               vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }
 
        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
-                                 tx_queue->tx_ring_size, GFP_KERNEL);
+                                             tx_queue->tx_ring_size,
+                                             GFP_KERNEL);
                if (!tx_queue->tx_skbuff) {
                        netif_err(priv, ifup, ndev,
                                  "Could not allocate tx_skbuff\n");
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
-                                 rx_queue->rx_ring_size, GFP_KERNEL);
+                                             rx_queue->rx_ring_size,
+                                             GFP_KERNEL);
 
                if (!rx_queue->rx_skbuff) {
                        netif_err(priv, ifup, ndev,
        int i;
 
        baddr = &regs->tbase0;
-       for(i = 0; i < priv->num_tx_queues; i++) {
+       for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
-               baddr   += 2;
+               baddr += 2;
        }
 
        baddr = &regs->rbase0;
-       for(i = 0; i < priv->num_rx_queues; i++) {
+       for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
-               baddr   += 2;
+               baddr += 2;
        }
 }
 
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_packets += priv->rx_queue[i]->stats.rx_packets;
-               rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+               rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }
 
        dev->stats.rx_packets = rx_packets;
-       dev->stats.rx_bytes = rx_bytes;
+       dev->stats.rx_bytes   = rx_bytes;
        dev->stats.rx_dropped = rx_dropped;
 
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }
 
-       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_bytes   = tx_bytes;
        dev->stats.tx_packets = tx_packets;
 
        return &dev->stats;
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
        return gfar_is_vlan_on(priv) ||
-               (priv->ndev->features & NETIF_F_RXCSUM) ||
-               (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+              (priv->ndev->features & NETIF_F_RXCSUM) ||
+              (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 }
 
 static void free_tx_pointers(struct gfar_private *priv)
 }
 
 static int gfar_parse_group(struct device_node *np,
-               struct gfar_private *priv, const char *model)
+                           struct gfar_private *priv, const char *model)
 {
        u32 *queue_mask;
 
        priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
        priv->gfargrp[priv->num_grps].priv = priv;
        spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
-       if(priv->mode == MQ_MG_MODE) {
-               queue_mask = (u32 *)of_get_property(np,
-                                       "fsl,rx-bit-map", NULL);
-               priv->gfargrp[priv->num_grps].rx_bit_map =
-                       queue_mask ?  *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
-               queue_mask = (u32 *)of_get_property(np,
-                                       "fsl,tx-bit-map", NULL);
-               priv->gfargrp[priv->num_grps].tx_bit_map =
-                       queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+       if (priv->mode == MQ_MG_MODE) {
+               queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+                       *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+                       *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
        } else {
                priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
                priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
        } else {
                priv->mode = SQ_SG_MODE;
                err = gfar_parse_group(np, priv, model);
-               if(err)
+               if (err)
                        goto err_grp_init;
        }
 
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 
        mac_addr = of_get_mac_address(np);
+
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        if (model && !strcasecmp(model, "TSEC"))
-               priv->device_flags =
-                       FSL_GIANFAR_DEV_HAS_GIGABIT |
-                       FSL_GIANFAR_DEV_HAS_COALESCE |
-                       FSL_GIANFAR_DEV_HAS_RMON |
-                       FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+               priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+                                    FSL_GIANFAR_DEV_HAS_COALESCE |
+                                    FSL_GIANFAR_DEV_HAS_RMON |
+                                    FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
        if (model && !strcasecmp(model, "eTSEC"))
-               priv->device_flags =
-                       FSL_GIANFAR_DEV_HAS_GIGABIT |
-                       FSL_GIANFAR_DEV_HAS_COALESCE |
-                       FSL_GIANFAR_DEV_HAS_RMON |
-                       FSL_GIANFAR_DEV_HAS_MULTI_INTR |
-                       FSL_GIANFAR_DEV_HAS_PADDING |
-                       FSL_GIANFAR_DEV_HAS_CSUM |
-                       FSL_GIANFAR_DEV_HAS_VLAN |
-                       FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-                       FSL_GIANFAR_DEV_HAS_TIMER;
+               priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+                                    FSL_GIANFAR_DEV_HAS_COALESCE |
+                                    FSL_GIANFAR_DEV_HAS_RMON |
+                                    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+                                    FSL_GIANFAR_DEV_HAS_PADDING |
+                                    FSL_GIANFAR_DEV_HAS_CSUM |
+                                    FSL_GIANFAR_DEV_HAS_VLAN |
+                                    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+                                    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+                                    FSL_GIANFAR_DEV_HAS_TIMER;
 
        ctype = of_get_property(np, "phy-connection-type", NULL);
 
 }
 
 static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-                       struct ifreq *ifr, int cmd)
+                              struct ifreq *ifr, int cmd)
 {
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);
 {
        unsigned int new_bit_map = 0x0;
        int mask = 0x1 << (max_qs - 1), i;
+
        for (i = 0; i < max_qs; i++) {
                if (bit_map & mask)
                        new_bit_map = new_bit_map + (1 << i);
 
        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
-                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;
 
        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
-                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;
 
        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
-                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_A002;
 
        /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
-                       (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+           (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
                priv->errata |= GFAR_ERRATA_12;
 
        if (priv->errata)
 
        /* Register for napi ...We are registering NAPI for each grp */
        for (i = 0; i < priv->num_grps; i++)
-               netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+               netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+                              GFAR_DEV_WEIGHT);
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
-                       NETIF_F_RXCSUM;
+                                  NETIF_F_RXCSUM;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
-                       NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+                                NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                priv->padding = 0;
 
        if (dev->features & NETIF_F_IP_CSUM ||
-                       priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+           priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;
 
        /* Program the isrg regs only if number of grps > 1 */
         * basically reverses the queue numbers
         */
-       for (i = 0; i< priv->num_grps; i++) {
+       for (i = 0; i < priv->num_grps; i++) {
-               priv->gfargrp[i].tx_bit_map = reverse_bitmap(
-                               priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
-               priv->gfargrp[i].rx_bit_map = reverse_bitmap(
-                               priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+               priv->gfargrp[i].tx_bit_map =
+                       reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+               priv->gfargrp[i].rx_bit_map =
+                       reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
        }
 
        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         */
        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
                for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
-                               priv->num_rx_queues) {
+                                priv->num_rx_queues) {
                        priv->gfargrp[grp_idx].num_rx_queues++;
                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                }
                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
                for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
-                               priv->num_tx_queues) {
+                                priv->num_tx_queues) {
                        priv->gfargrp[grp_idx].num_tx_queues++;
                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        tstat = tstat | (TSTAT_CLEAR_THALT >> i);
        }
 
        device_init_wakeup(&dev->dev,
-               priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+                          priv->device_flags &
+                          FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
        /* fill out IRQ number and name fields */
        for (i = 0; i < priv->num_grps; i++) {
        for (i = 0; i < priv->num_rx_queues; i++)
                netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
                            i, priv->rx_queue[i]->rx_ring_size);
-       for(i = 0; i < priv->num_tx_queues; i++)
+       for (i = 0; i < priv->num_tx_queues; i++)
                netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
                            i, priv->tx_queue[i]->tx_ring_size);
 
        u32 tempval;
 
        int magic_packet = priv->wol_en &&
-               (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+                          (priv->device_flags &
+                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
        netif_device_detach(ndev);
 
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
-               (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+                          (priv->device_flags &
+                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
        if (!netif_running(ndev)) {
                netif_device_attach(ndev);
        }
 
        if (ecntrl & ECNTRL_REDUCED_MODE) {
-               if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+               if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
                        return PHY_INTERFACE_MODE_RMII;
-               else {
+               } else {
                        phy_interface_t interface = priv->interface;
 
        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
 
        phy_write(tbiphy, MII_ADVERTISE,
-                       ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
-                       ADVERTISE_1000XPSE_ASYM);
+                 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+                 ADVERTISE_1000XPSE_ASYM);
 
-       phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
-                       BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+       phy_write(tbiphy, MII_BMCR,
+                 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+                 BMCR_SPEED1000);
 }
 
 static void init_registers(struct net_device *dev)
        regs = priv->gfargrp[0].regs;
        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
-       if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
-           != (DMACTRL_GRS | DMACTRL_GTS)) {
+       if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+           (DMACTRL_GRS | DMACTRL_GTS)) {
                int ret;
 
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        } else {
                for (i = 0; i < priv->num_grps; i++)
                        free_irq(priv->gfargrp[i].interruptTransmit,
-                                       &priv->gfargrp[i]);
+                                &priv->gfargrp[i]);
        }
 
        free_skb_resources(priv);
                        continue;
 
                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
-                               txbdp->length, DMA_TO_DEVICE);
+                                txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
-                               j++) {
+                    j++) {
                        txbdp++;
                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
-                                       txbdp->length, DMA_TO_DEVICE);
+                                      txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                if (rx_queue->rx_skbuff[i]) {
                        dma_unmap_single(&priv->ofdev->dev,
-                                       rxbdp->bufPtr, priv->rx_buffer_size,
-                                       DMA_FROM_DEVICE);
+                                        rxbdp->bufPtr, priv->rx_buffer_size,
+                                        DMA_FROM_DEVICE);
                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
                        rx_queue->rx_skbuff[i] = NULL;
                }
        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                struct netdev_queue *txq;
+
                tx_queue = priv->tx_queue[i];
                txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
-               if(tx_queue->tx_skbuff)
+               if (tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
                netdev_tx_reset_queue(txq);
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if(rx_queue->rx_skbuff)
+               if (rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }
 
        dma_free_coherent(&priv->ofdev->dev,
-                       sizeof(struct txbd8) * priv->total_tx_ring_size +
-                       sizeof(struct rxbd8) * priv->total_rx_ring_size,
-                       priv->tx_queue[0]->tx_bd_base,
-                       priv->tx_queue[0]->tx_bd_dma_base);
+                         sizeof(struct txbd8) * priv->total_tx_ring_size +
+                         sizeof(struct rxbd8) * priv->total_rx_ring_size,
+                         priv->tx_queue[0]->tx_bd_base,
+                         priv->tx_queue[0]->tx_bd_dma_base);
        skb_queue_purge(&priv->rx_recycle);
 }
 
 }
 
 void gfar_configure_coalescing(struct gfar_private *priv,
-       unsigned long tx_mask, unsigned long rx_mask)
+                              unsigned long tx_mask, unsigned long rx_mask)
 {
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
         * multiple queues, there's only single reg to program
         */
        gfar_write(&regs->txic, 0);
-       if(likely(priv->tx_queue[0]->txcoalescing))
+       if (likely(priv->tx_queue[0]->txcoalescing))
                gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
        gfar_write(&regs->rxic, 0);
-       if(unlikely(priv->rx_queue[0]->rxcoalescing))
+       if (unlikely(priv->rx_queue[0]->rxcoalescing))
                gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 
        if (priv->mode == MQ_MG_MODE) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive
                 */
-               if ((err = request_irq(grp->interruptError, gfar_error, 0,
-                               grp->int_name_er,grp)) < 0) {
+               if ((err = request_irq(grp->interruptError, gfar_error,
+                                      0, grp->int_name_er, grp)) < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
                                  grp->interruptError);
 
                }
 
                if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
-                               0, grp->int_name_tx, grp)) < 0) {
+                                      0, grp->int_name_tx, grp)) < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
                                  grp->interruptTransmit);
                        goto tx_irq_fail;
                }
 
-               if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
-                               grp->int_name_rx, grp)) < 0) {
+               if ((err = request_irq(grp->interruptReceive, gfar_receive,
+                                      0, grp->int_name_rx, grp)) < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
                                  grp->interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
-               if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
-                               grp->int_name_tx, grp)) < 0) {
+               if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+                                      0, grp->int_name_tx, grp)) < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
                                  grp->interruptTransmit);
                        goto err_irq_fail;
 }
 
 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
-               int fcb_length)
+                                   int fcb_length)
 {
        u8 flags = 0;
 
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
-                              struct txbd8 *base, int ring_size)
+                                     struct txbd8 *base, int ring_size)
 {
        struct txbd8 *new_bd = bdp + stride;
 
 }
 
 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
-               int ring_size)
+                                     int ring_size)
 {
        return skip_txbd(bdp, 1, base, ring_size);
 }
         * before start of transmission.
         */
        if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
-                       skb->ip_summed == CHECKSUM_PARTIAL &&
-                       skb->len > 2500)) {
+                    skb->ip_summed == CHECKSUM_PARTIAL &&
+                    skb->len > 2500)) {
                int ret;
 
                ret = skb_checksum_help(skb);
 
        /* check if time stamp should be generated */
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-                       priv->hwts_tx_en)) {
+                    priv->hwts_tx_en)) {
                do_tstamp = 1;
                fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
        }
 
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-                       vlan_tx_tag_present(skb) ||
-                       unlikely(do_tstamp)) &&
-                       (skb_headroom(skb) < fcb_length)) {
+            vlan_tx_tag_present(skb) ||
+            unlikely(do_tstamp)) &&
+           (skb_headroom(skb) < fcb_length)) {
                struct sk_buff *skb_new;
 
                skb_new = skb_realloc_headroom(skb, fcb_length);
        /* Time stamp insertion requires one additional TxBD */
        if (unlikely(do_tstamp))
                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-                               tx_queue->tx_ring_size);
+                                                tx_queue->tx_ring_size);
 
        if (nr_frags == 0) {
                if (unlikely(do_tstamp))
                        txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
-                                       TXBD_INTERRUPT);
+                                                         TXBD_INTERRUPT);
                else
                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                        length = skb_shinfo(skb)->frags[i].size;
 
                        lstatus = txbdp->lstatus | length |
-                               BD_LFLAG(TXBD_READY);
+                                 BD_LFLAG(TXBD_READY);
 
                        /* Handle the last BD specially */
                        if (i == nr_frags - 1)
        if (CHECKSUM_PARTIAL == skb->ip_summed) {
                fcb = gfar_add_fcb(skb);
                /* as specified by errata */
-               if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
-                            && ((unsigned long)fcb % 0x20) > 0x18)) {
+               if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+                            ((unsigned long)fcb % 0x20) > 0x18)) {
                        __skb_pull(skb, GMAC_FCB_LEN);
                        skb_checksum_help(skb);
                } else {
        }
 
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
-                       skb_headlen(skb), DMA_TO_DEVICE);
+                                            skb_headlen(skb), DMA_TO_DEVICE);
 
        /* If time stamping is requested one additional TxBD must be set up. The
         * first TxBD points to the FCB and must have a data length of
        if (unlikely(do_tstamp)) {
                txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-                               (skb_headlen(skb) - fcb_length);
+                                        (skb_headlen(skb) - fcb_length);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
         * (wrapping if necessary)
         */
        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
-               TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+                             TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 
        frame_size += priv->padding;
 
-       tempsize =
-           (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-           INCREMENTAL_BUFFER_SIZE;
+       tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+                  INCREMENTAL_BUFFER_SIZE;
 
        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something
        tempval = gfar_read(&regs->maccfg2);
 
        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-                       gfar_has_errata(priv, GFAR_ERRATA_74))
+           gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 static void gfar_reset_task(struct work_struct *work)
 {
        struct gfar_private *priv = container_of(work, struct gfar_private,
-                       reset_task);
+                                                reset_task);
        struct net_device *dev = priv->ndev;
 
        if (dev->flags & IFF_UP) {
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, RXBUF_ALIGNMENT -
-               (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+                   (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
 }
 
 /* Interrupt Handler for Transmit complete */
 
                /* Only clean completed frames */
                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
-                               (lstatus & BD_LENGTH_MASK))
+                   (lstatus & BD_LENGTH_MASK))
                        break;
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        buflen = bdp->length;
 
                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
-                               buflen, DMA_TO_DEVICE);
+                                buflen, DMA_TO_DEVICE);
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        struct skb_shared_hwtstamps shhwtstamps;
-                       u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+                       u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
+
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
                bdp = next_txbd(bdp, base, tx_ring_size);
 
                for (i = 0; i < frags; i++) {
-                       dma_unmap_page(&priv->ofdev->dev,
-                                       bdp->bufPtr,
-                                       bdp->length,
-                                       DMA_TO_DEVICE);
+                       dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+                                      bdp->length, DMA_TO_DEVICE);
                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                        bdp = next_txbd(bdp, base, tx_ring_size);
                }
                 * we add this skb back into the pool, if it's the right size
                 */
                if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
-                               skb_recycle_check(skb, priv->rx_buffer_size +
-                                       RXBUF_ALIGNMENT)) {
+                   skb_recycle_check(skb, priv->rx_buffer_size +
+                                     RXBUF_ALIGNMENT)) {
                        gfar_align_skb(skb);
                        skb_queue_head(&priv->rx_recycle, skb);
                } else
                tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
                skb_dirtytx = (skb_dirtytx + 1) &
-                       TX_RING_MOD_MASK(tx_ring_size);
+                             TX_RING_MOD_MASK(tx_ring_size);
 
                howmany++;
                spin_lock_irqsave(&tx_queue->txlock, flags);
 }
 
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-               struct sk_buff *skb)
+                          struct sk_buff *skb)
 {
        struct net_device *dev = rx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
        if (priv->hwts_rx_en) {
                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
                u64 *ns = (u64 *) skb->data;
+
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ns_to_ktime(*ns);
        }
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;
+
                rmb();
 
                /* Add another skb for the future */
                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
-                               priv->rx_buffer_size, DMA_FROM_DEVICE);
+                                priv->rx_buffer_size, DMA_FROM_DEVICE);
 
                if (unlikely(!(bdp->status & RXBD_ERR) &&
-                               bdp->length > priv->rx_buffer_size))
+                            bdp->length > priv->rx_buffer_size))
                        bdp->status = RXBD_LARGE;
 
                /* We drop the frame if we failed to allocate a new buffer */
                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
-                                bdp->status & RXBD_ERR)) {
+                            bdp->status & RXBD_ERR)) {
                        count_errors(bdp->status, dev);
 
                        if (unlikely(!newskb))
                                rx_queue->stats.rx_bytes += pkt_len;
                                skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull,
-                                               &rx_queue->grp->napi);
+                                                  &rx_queue->grp->napi);
 
                        } else {
                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
                bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
                /* update to point at the next skb */
-               rx_queue->skb_currx =
-                   (rx_queue->skb_currx + 1) &
-                   RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+               rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+                                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
        }
 
        /* Update the current rxbd pointer to be the next one */
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-       struct gfar_priv_grp *gfargrp = container_of(napi,
-                       struct gfar_priv_grp, napi);
+       struct gfar_priv_grp *gfargrp =
+               container_of(napi, struct gfar_priv_grp, napi);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        gfar_write(&regs->ievent, IEVENT_RTX_MASK);
 
        while (num_queues && left_over_budget) {
-
-               budget_per_queue = left_over_budget/num_queues;
+               budget_per_queue = left_over_budget / num_queues;
                left_over_budget = 0;
 
                        tx_queue = priv->tx_queue[rx_queue->qindex];
 
                        tx_cleaned += gfar_clean_tx_ring(tx_queue);
-                       rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
-                                                       budget_per_queue);
+                       rx_cleaned_per_queue =
+                               gfar_clean_rx_ring(rx_queue, budget_per_queue);
                        rx_cleaned += rx_cleaned_per_queue;
-                       if(rx_cleaned_per_queue < budget_per_queue) {
+                       if (rx_cleaned_per_queue < budget_per_queue) {
                                left_over_budget = left_over_budget +
-                                       (budget_per_queue - rx_cleaned_per_queue);
+                                       (budget_per_queue -
+                                        rx_cleaned_per_queue);
                                set_bit(i, &serviced_queues);
                                num_queues--;
                        }
                /* If we are coalescing interrupts, update the timer
                 * Otherwise, clear it
                 */
-               gfar_configure_coalescing(priv,
-                               gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+               gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+                                         gfargrp->tx_bit_map);
        }
 
        return rx_cleaned;
                        disable_irq(priv->gfargrp[i].interruptReceive);
                        disable_irq(priv->gfargrp[i].interruptError);
                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
-                                               &priv->gfargrp[i]);
+                                      &priv->gfargrp[i]);
                        enable_irq(priv->gfargrp[i].interruptError);
                        enable_irq(priv->gfargrp[i].interruptReceive);
                        enable_irq(priv->gfargrp[i].interruptTransmit);
                for (i = 0; i < priv->num_grps; i++) {
                        disable_irq(priv->gfargrp[i].interruptTransmit);
                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
-                                               &priv->gfargrp[i]);
+                                      &priv->gfargrp[i]);
                        enable_irq(priv->gfargrp[i].interruptTransmit);
                }
        }
        int idx;
        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
-       for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+       for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
                gfar_set_mac_for_addr(dev, idx, zero_arr);
 }
 
 
        /* Hmm... */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
-               netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+               netdev_dbg(dev,
+                          "error interrupt (ievent=0x%08x imask=0x%08x)\n",
                           events, gfar_read(&regs->imask));
 
        /* Update the error counters */