int sgdma_initialize(struct altera_tse_private *priv)
 {
-       priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+       priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+                     SGDMA_CTRLREG_INTEN;
 
        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+                     SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;
 
+       priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);
 
                return -EINVAL;
        }
 
+       /* Initialize descriptor memory to all 0's, then sync it to the
+        * device so the DMA engine sees the zeroed descriptors
+        */
+       memset(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+       dma_sync_single_for_device(priv->device, priv->txdescphys,
+                                  priv->txdescmem, DMA_TO_DEVICE);
+
+       dma_sync_single_for_device(priv->device, priv->rxdescphys,
+                                  priv->rxdescmem, DMA_TO_DEVICE);
+
        return 0;
 }
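
Two of the initialization changes above cooperate with the fast path:
SGDMA_CTRLREG_INTEN is folded into the shadow control values so that
interrupts are enabled exactly once, and the cached descriptor length lets
later syncs cover a single descriptor instead of the whole descriptor
region. A minimal sketch of that sync pattern, assuming one descriptor is
examined at a time:

	/* Sketch only, not part of the patch: sync just the descriptor
	 * being reaped, using the length cached in sgdma_initialize().
	 */
	dma_sync_single_for_cpu(priv->device, priv->rxdescphys,
				priv->sgdmadesclen, DMA_FROM_DEVICE);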
 
        iowrite32(0, &prxsgdma->control);
 }
 
+/* For SGDMA, interrupts remain enabled once they are enabled during
+ * initialization, so the abstract enable and disable hooks below are
+ * intentionally empty.
+ */
+
 void sgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
 void sgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
-/* for SGDMA, RX interrupts remain enabled after enabling */
 void sgdma_disable_rxirq(struct altera_tse_private *priv)
 {
 }
 
-/* for SGDMA, TX interrupts remain enabled after enabling */
 void sgdma_disable_txirq(struct altera_tse_private *priv)
 {
 }
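
A hypothetical caller sketch (the function name is invented; the real call
sites are the interrupt and NAPI paths): the abstract layer can invoke the
hooks unconditionally, and with the SGDMA ops table installed the calls
resolve to the empty implementations above at no cost.

	/* Hypothetical illustration only */
	static void example_reenable_irqs(struct altera_tse_private *priv)
	{
		priv->dmaops->enable_rxirq(priv);	/* no-op for SGDMA */
		priv->dmaops->enable_txirq(priv);	/* no-op for SGDMA */
	}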
        return ready;
 }
 
-int sgdma_add_rx_desc(struct altera_tse_private *priv,
-                     struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+       sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+                      struct tse_buffer *rxbuffer)
 {
        queue_rx(priv, rxbuffer);
-       return sgdma_async_read(priv);
 }
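
The change above splits queueing a buffer (add_rx_desc) from kicking the
engine (start_rxdma), which the open path below uses to defer starting RX
DMA until the device is fully set up. A sketch of the assumed usage, with
rx_ring and rx_ring_size standing in for the driver's ring bookkeeping:

	/* Sketch under the assumptions noted above: queue every RX
	 * buffer first, then start RX DMA exactly once.
	 */
	int i;

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
	priv->dmaops->start_rxdma(priv);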
 
 /* status is returned on upper 16 bits,
        unsigned int pktstatus = 0;
        struct tse_buffer *rxbuffer = NULL;
 
-       dma_sync_single_for_cpu(priv->device,
-                               priv->rxdescphys,
-                               priv->rxdescmem,
-                               DMA_BIDIRECTIONAL);
+       u32 sts = ioread32(&csr->status);
 
        desc = &base[0];
-       if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
-           (desc->status & SGDMA_STATUS_EOP)) {
+       if (sts & SGDMA_STSREG_EOP) {
+               dma_sync_single_for_cpu(priv->device,
+                                       priv->rxdescphys,
+                                       priv->sgdmadesclen,
+                                       DMA_FROM_DEVICE);
+
                pktlength = desc->bytes_xferred;
                pktstatus = desc->status & 0x3f;
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
-               desc->status = 0;
+               if (rxstatus) {
+                       desc->status = 0;
 
-               rxbuffer = dequeue_rx(priv);
-               if (rxbuffer == NULL)
-                       netdev_err(priv->dev,
-                                  "sgdma rx and rx queue empty!\n");
+                       rxbuffer = dequeue_rx(priv);
+                       if (rxbuffer == NULL)
+                               netdev_info(priv->dev,
+                                           "sgdma rx done but rx queue empty!\n");
+
+                       /* Clear control */
+                       iowrite32(0, &csr->control);
+                       /* Clear status */
+                       iowrite32(0xf, &csr->status);
 
-               /* kick the rx sgdma after reaping this descriptor */
+                       /* kick the rx sgdma after reaping this descriptor */
+                       pktsrx = sgdma_async_read(priv);
+
+               } else {
+                       /* If the SGDMA indicated an end of packet on
+                        * receive, the rxstatus from the descriptor is
+                        * expected to be non-zero - either a valid packet
+                        * with a nonzero length, or an indicated error.
+                        * If not, all we can do is signal an error and
+                        * return no packet received. Most likely there is
+                        * a system design error, or an error in the
+                        * underlying kernel (a cache or cache-management
+                        * problem).
+                        */
+                       netdev_err(priv->dev,
+                                  "SGDMA RX Error Info: %x, %x, %x\n",
+                                  sts, desc->status, rxstatus);
+               }
+       } else if (sts == 0) {
                pktsrx = sgdma_async_read(priv);
        }
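
To make the packing described above concrete: the six status bits occupy
the upper half-word and the transferred byte count the lower half-word,
so a 64-byte frame whose descriptor status is 0x01 would be reported as
follows (values illustrative):

	u32 rxstatus = ((u32)(desc->status & 0x3f) << 16) |
		       (desc->bytes_xferred & 0xffff);
	/* status 0x01, length 64  ->  rxstatus == 0x00010040 */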
 
        struct sgdma_descrip *cdesc = &descbase[0];
        struct sgdma_descrip *ndesc = &descbase[1];
 
-       unsigned int sts = ioread32(&csr->status);
        struct tse_buffer *rxbuffer = NULL;
 
        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
-               if (rxbuffer == NULL)
+               if (rxbuffer == NULL) {
+                       netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
+               }
 
                sgdma_descrip(cdesc,            /* current descriptor */
                              ndesc,            /* next descriptor */
                              0,                /* read fixed: NA for rx dma */
                              0);               /* SOP: NA for rx DMA */
 
-               /* clear control and status */
-               iowrite32(0, &csr->control);
-
-               /* If status available, clear those bits */
-               if (sts & 0xf)
-                       iowrite32(0xf, &csr->status);
-
                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
-                                          priv->rxdescmem,
-                                          DMA_BIDIRECTIONAL);
+                                          priv->sgdmadesclen,
+                                          DMA_TO_DEVICE);
 
                iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                          &csr->next_descrip);
        iowrite32(0x1f, &csr->status);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
-                                  priv->txdescmem, DMA_TO_DEVICE);
+                                  priv->sgdmadesclen, DMA_TO_DEVICE);
 
        iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                  &csr->next_descrip);
 
                dev_kfree_skb_any(rxbuffer->skb);
                return -EINVAL;
        }
+       rxbuffer->dma_addr &= (dma_addr_t)~3;
        rxbuffer->len = len;
        return 0;
 }
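
The new masking of dma_addr pairs with the RX_SHIFT16 change in the MAC
setup below: with the buffer address rounded down to a 4-byte boundary,
the two bytes the MAC prepends push the 14-byte Ethernet header to end on
a 4-byte boundary, leaving the IP header naturally aligned. A worked
example under that assumption:

	/* Illustrative only */
	dma_addr_t base = rxbuffer->dma_addr & (dma_addr_t)~3;
	/* The MAC writes 2 pad bytes, then the frame: the Ethernet
	 * header occupies base + 2 through base + 15, so the IP header
	 * starts at base + 16, a 4-byte-aligned address.
	 */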
                priv->dev->stats.rx_bytes += pktlength;
 
                entry = next_entry;
+
+               tse_rx_refill(priv);
        }
 
-       tse_rx_refill(priv);
        return count;
 }
 
        struct altera_tse_private *priv;
        unsigned long int flags;
 
-
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
                return IRQ_NONE;
-       /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
-        * start address
-        */
-       tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+       /* Enable RX shift 16 for alignment of all received frames on 16-bit
+        * start address; TX shift 16 stays disabled
+        */
+       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
        tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
                                         ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
        cmd = ioread32(&mac->command_config);
-       cmd |= MAC_CMDCFG_PAD_EN;       /* Padding Removal on Receive */
+       cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
                                         * with CRC errors
        cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
        cmd &= ~MAC_CMDCFG_TX_ENA;
        cmd &= ~MAC_CMDCFG_RX_ENA;
+
+       /* Default speed and duplex setting, full/100 */
+       cmd &= ~MAC_CMDCFG_HD_ENA;
+       cmd &= ~MAC_CMDCFG_ETH_SPEED;
+       cmd &= ~MAC_CMDCFG_ENA_10;
+
        iowrite32(cmd, &mac->command_config);
 
        if (netif_msg_hw(priv))
 
        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-       /* Start MAC Rx/Tx */
-       spin_lock(&priv->mac_cfg_lock);
-       tse_set_mac(priv, true);
-       spin_unlock(&priv->mac_cfg_lock);
-
        if (priv->phydev)
                phy_start(priv->phydev);
 
        napi_enable(&priv->napi);
        netif_start_queue(dev);
 
+       priv->dmaops->start_rxdma(priv);
+
+       /* Start MAC Rx/Tx */
+       spin_lock(&priv->mac_cfg_lock);
+       tse_set_mac(priv, true);
+       spin_unlock(&priv->mac_cfg_lock);
+
        return 0;
 
 tx_request_irq_error:
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-
 static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
 {
        .get_rx_status = sgdma_rx_status,
        .init_dma = sgdma_initialize,
        .uninit_dma = sgdma_uninitialize,
+       .start_rxdma = sgdma_start_rxdma,
 };
 
 struct altera_dmaops altera_dtype_msgdma = {
        .get_rx_status = msgdma_rx_status,
        .init_dma = msgdma_initialize,
        .uninit_dma = msgdma_uninitialize,
+       .start_rxdma = msgdma_start_rxdma,
 };
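
Both ops tables gain the new hook. For reference, the assumed shape of the
affected members of struct altera_dmaops (declared in altera_tse.h; other
members omitted), matching the signatures of the functions wired up above:

	struct altera_dmaops {
		/* ... */
		void (*add_rx_desc)(struct altera_tse_private *priv,
				    struct tse_buffer *buffer);
		void (*start_rxdma)(struct altera_tse_private *priv);
		/* ... */
	};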
 
 static struct of_device_id altera_tse_ids[] = {