        struct ath_buf *bf;
        int error = 0;
 
-       do {
-               spin_lock_init(&sc->rx.rxflushlock);
-               sc->sc_flags &= ~SC_OP_RXFLUSH;
-               spin_lock_init(&sc->rx.rxbuflock);
-
-               sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-                                          min(sc->cachelsz,
-                                              (u16)64));
+       spin_lock_init(&sc->rx.rxflushlock);
+       sc->sc_flags &= ~SC_OP_RXFLUSH;
+       spin_lock_init(&sc->rx.rxbuflock);
 
-               DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-                       sc->cachelsz, sc->rx.bufsize);
+       sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+                                min(sc->cachelsz, (u16)64));
 
-               /* Initialize rx descriptors */
+       DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+               sc->cachelsz, sc->rx.bufsize);
 
-               error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-                                         "rx", nbufs, 1);
-               if (error != 0) {
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "failed to allocate rx descriptors: %d\n", error);
-                       break;
-               }
+       /* Initialize rx descriptors */
 
-               list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-                       skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
-                       if (skb == NULL) {
-                               error = -ENOMEM;
-                               break;
-                       }
+       error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+                                 "rx", nbufs, 1);
+       if (error != 0) {
+               DPRINTF(sc, ATH_DBG_FATAL,
+                       "failed to allocate rx descriptors: %d\n", error);
+               goto err;
+       }
 
-                       bf->bf_mpdu = skb;
-                       bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-                                                        sc->rx.bufsize,
-                                                        DMA_FROM_DEVICE);
-                       if (unlikely(dma_mapping_error(sc->dev,
-                                 bf->bf_buf_addr))) {
-                               dev_kfree_skb_any(skb);
-                               bf->bf_mpdu = NULL;
-                               DPRINTF(sc, ATH_DBG_FATAL,
-                                       "dma_mapping_error() on RX init\n");
-                               error = -ENOMEM;
-                               break;
-                       }
-                       bf->bf_dmacontext = bf->bf_buf_addr;
+       list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+               skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
+               if (skb == NULL) {
+                       error = -ENOMEM;
+                       goto err;
                }
-               sc->rx.rxlink = NULL;
 
-       } while (0);
+               bf->bf_mpdu = skb;
+               bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+                                                sc->rx.bufsize,
+                                                DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(sc->dev,
+                                              bf->bf_buf_addr))) {
+                       dev_kfree_skb_any(skb);
+                       bf->bf_mpdu = NULL;
+                       DPRINTF(sc, ATH_DBG_FATAL,
+                               "dma_mapping_error() on RX init\n");
+                       error = -ENOMEM;
+                       goto err;
+               }
+               bf->bf_dmacontext = bf->bf_buf_addr;
+       }
+       sc->rx.rxlink = NULL;
 
+err:
        if (error)
                ath_rx_cleanup(sc);
 
        return error;
 }
 
 void ath_rx_cleanup(struct ath_softc *sc)
 {
        struct sk_buff *skb;
        struct ath_buf *bf;
 
        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                skb = bf->bf_mpdu;
                if (skb) {
-                       dma_unmap_single(sc->dev,
-                                        bf->bf_buf_addr,
-                                        sc->rx.bufsize,
-                                        DMA_FROM_DEVICE);
+                       dma_unmap_single(sc->dev, bf->bf_buf_addr,
+                                        sc->rx.bufsize, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                }
        }
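
The hunks above drop the do { ... } while (0) wrapper that existed only so that break could act as an early exit, and jump to a single err: label instead. As a rough, generic sketch of that conversion (not driver code; ring_init(), ring_cleanup(), alloc_descs() and alloc_buffers() are made-up stand-ins for the ath_descdma_setup()/ath_rxbuf_alloc() steps):

#include <stdlib.h>
#include <string.h>

struct ring { void *descs; void *bufs; };

/* Hypothetical allocation steps; each returns 0 on success. */
static int alloc_descs(struct ring *r)   { r->descs = malloc(64);  return r->descs ? 0 : -1; }
static int alloc_buffers(struct ring *r) { r->bufs  = malloc(256); return r->bufs  ? 0 : -1; }

static void ring_cleanup(struct ring *r)
{
	free(r->bufs);   /* free(NULL) is a no-op, so partial init is fine */
	free(r->descs);
}

static int ring_init(struct ring *r)
{
	int error = 0;

	memset(r, 0, sizeof(*r));

	/* Previously: do { ... if (error) break; ... } while (0); */
	error = alloc_descs(r);
	if (error != 0)
		goto err;

	error = alloc_buffers(r);
	if (error != 0)
		goto err;

err:
	if (error)
		ring_cleanup(r);
	return error;
}

int main(void)
{
	struct ring r;

	return ring_init(&r);
}
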
 
 int ath_tx_init(struct ath_softc *sc, int nbufs)
 {
        int error = 0;
 
-       do {
-               spin_lock_init(&sc->tx.txbuflock);
+       spin_lock_init(&sc->tx.txbuflock);
 
-               error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
-                       "tx", nbufs, 1);
-               if (error != 0) {
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "Failed to allocate tx descriptors: %d\n",
-                               error);
-                       break;
-               }
-
-               error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
-                                         "beacon", ATH_BCBUF, 1);
-               if (error != 0) {
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "Failed to allocate beacon descriptors: %d\n",
-                               error);
-                       break;
-               }
+       error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
+                                 "tx", nbufs, 1);
+       if (error != 0) {
+               DPRINTF(sc, ATH_DBG_FATAL,
+                       "Failed to allocate tx descriptors: %d\n", error);
+               goto err;
+       }
 
-       } while (0);
+       error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
+                                 "beacon", ATH_BCBUF, 1);
+       if (error != 0) {
+               DPRINTF(sc, ATH_DBG_FATAL,
+                       "Failed to allocate beacon descriptors: %d\n", error);
+               goto err;
+       }
 
+err:
        if (error != 0)
                ath_tx_cleanup(sc);
 
        return error;
 }
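
Note that in both ath_rx_init() and ath_tx_init() the success path falls straight through into the err: label, so the label has to guard its cleanup call with the error check rather than assume it is only reached on failure. A minimal stand-alone illustration of that fall-through, with made-up init()/cleanup() helpers rather than driver functions:

#include <assert.h>

static int cleanup_calls;

static void cleanup(void)
{
	cleanup_calls++;
}

static int init(int fail)
{
	int error = 0;

	if (fail) {
		error = -1;
		goto err;
	}

	/* Success also reaches the label below; the check keeps it a no-op. */
err:
	if (error)
		cleanup();
	return error;
}

int main(void)
{
	assert(init(0) == 0 && cleanup_calls == 0);  /* success: no cleanup */
	assert(init(1) == -1 && cleanup_calls == 1); /* failure: cleanup runs once */
	return 0;
}
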
 
-int ath_tx_cleanup(struct ath_softc *sc)
+void ath_tx_cleanup(struct ath_softc *sc)
 {
        if (sc->beacon.bdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
 
        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
-
-       return 0;
 }
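
Since ath_tx_cleanup() can only ever succeed (it always returned a constant 0), the int return type merely invited callers to check a value that carried no information; the void signature makes that explicit. A small sketch of the idea, with hypothetical names (dev_tx_cleanup() and dev_detach() are illustrations, not driver functions):

#include <stdio.h>

struct dev { int txq_allocated; };

/* Teardown that cannot fail, so it returns nothing. */
static void dev_tx_cleanup(struct dev *d)
{
	if (d->txq_allocated) {
		/* release queues, descriptors, ... */
		d->txq_allocated = 0;
	}
}

static void dev_detach(struct dev *d)
{
	/*
	 * With an int return the caller would be tempted to handle a
	 * failure that cannot happen; with void the call site stays honest.
	 */
	dev_tx_cleanup(d);
}

int main(void)
{
	struct dev d = { .txq_allocated = 1 };

	dev_detach(&d);
	printf("txq_allocated = %d\n", d.txq_allocated);
	return 0;
}
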
 
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)