        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 
-       rt2x00queue_map_txskb(entry);
-
+       if (rt2x00queue_map_txskb(entry)) {
+               ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+               goto out;
+       }
        /*
         * Write the TX descriptor for the beacon.
         */
[...]
        /*
         * Dump beacon to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
        /*
         * Enable beaconing again.
         */
[...]
 
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 
-       rt2x00queue_map_txskb(entry);
+       if (rt2x00queue_map_txskb(entry)) {
+               ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+               goto out;
+       }
 
        /*
         * Write the TX descriptor for the beacon.
         */
[...]
        /*
         * Dump beacon to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
        /*
         * Enable beaconing again.
         */
[...]
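The two hunks above apply the same fix to both PCI beacon paths: beaconing is switched off, and if the beacon skb cannot be DMA-mapped the driver logs the failure and jumps past the descriptor write to the new out: label, so beaconing is re-enabled on the error path just as on success. As a minimal, self-contained illustration of that single-exit goto pattern, here is a plain C sketch; every helper in it is a stub invented for this example, not rt2x00 code:

#include <stdio.h>

/* Stand-ins for the driver's register writes and DMA mapping. */
static void set_beacon_gen(int on)
{
	printf("beacon generation %s\n", on ? "on" : "off");
}

static int map_beacon(void)
{
	return -1;	/* pretend the DMA mapping failed */
}

static void write_tx_descriptor(void)
{
	puts("descriptor written");
}

static void write_beacon(void)
{
	/* Disable beaconing while the beacon data is reloaded. */
	set_beacon_gen(0);

	if (map_beacon()) {
		fprintf(stderr, "Failed to map beacon, aborting\n");
		goto out;	/* skip the descriptor write, not the re-enable */
	}

	write_tx_descriptor();
out:
	/* Success and failure both end up here. */
	set_beacon_gen(1);
}

int main(void)
{
	write_beacon();
	return 0;
}

The header change below makes the new contract visible to every caller.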
 
 /**
  * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
  * @entry: Pointer to &struct queue_entry
+ *
+ * Returns -ENOMEM if the mapping fails, 0 otherwise.
  */
-void rt2x00queue_map_txskb(struct queue_entry *entry);
+int rt2x00queue_map_txskb(struct queue_entry *entry);
 
 /**
  * rt2x00queue_unmap_skb - Unmap a skb from DMA.
[...]

        skbdesc->entry = entry;
 
        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
-               skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
-                                                 skb->data,
-                                                 skb->len,
-                                                 DMA_FROM_DEVICE);
+               dma_addr_t skb_dma;
+
+               skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+                                        DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
+                       dev_kfree_skb_any(skb);
+                       return NULL;
+               }
+
+               skbdesc->skb_dma = skb_dma;
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }
 
        return skb;
 }
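The RX allocation path gets the same hardening. dma_map_single() can hand back an unusable handle, and the only valid way to detect that is dma_mapping_error(); on failure the freshly allocated skb is freed and NULL is returned, so callers see an ordinary allocation failure. A condensed sketch of that check, assuming only the generic kernel DMA API (dev, buf and len stand in for the driver's real state):

#include <linux/dma-mapping.h>

/*
 * Map one receive buffer and validate the handle before it is stored
 * anywhere. A failed mapping holds no resources, so there is nothing
 * to unmap on this error path.
 */
static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *handle)))
		return -ENOMEM;

	return 0;
}

rt2x00queue_map_txskb() below gets the same check, but reports the failure to its caller instead of freeing the skb itself.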
 
-void rt2x00queue_map_txskb(struct queue_entry *entry)
+int rt2x00queue_map_txskb(struct queue_entry *entry)
 {
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 
        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
+               return -ENOMEM;
+
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
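Note the ordering in rt2x00queue_map_txskb(): SKBDESC_DMA_MAPPED_TX is only set once dma_mapping_error() has passed. The unmap path keys off that flag, so setting it before the check would later feed dma_unmap_single() a bogus handle. A condensed sketch of the consuming side, modelled on the existing rt2x00queue_unmap_skb() (not a verbatim copy; rt2x00queue.c context and its includes assumed):

/* The MAPPED flag is the only record that a TX mapping exists. */
static void unmap_txskb_sketch(struct device *dev, struct queue_entry *entry)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

The hunk below updates the common TX data path call site so the failure propagates as -ENOMEM up the transmit path.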
 
        /*
         * Map the skb to DMA.
         */
-       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
-               rt2x00queue_map_txskb(entry);
+       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+           rt2x00queue_map_txskb(entry))
+               return -ENOMEM;
 
        return 0;
 }