cluster_start = curr = (gp->rx_new & ~(4 - 1));
        count = 0;
        kick = -1;
-       wmb();
+       dma_wmb();
        while (curr != limit) {
                curr = NEXT_RX(curr);
                if (++count == 4) {
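The barrier hoisted above this loop covers a whole batch: gem_post_rxds() hands descriptors back in clusters of four, so a single dma_wmb() is enough to order every buffer-pointer store issued earlier against all of the status-word stores that follow. A minimal sketch of that batching idea, with a hypothetical my_rxd layout standing in for the real gem descriptor:

/*
 * Illustrative sketch only, not sungem code: one barrier amortized
 * over a cluster of descriptors.  struct my_rxd and "fresh" are
 * hypothetical stand-ins.
 */
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

struct my_rxd {
        __le64 status_word;     /* ownership/status, polled by the chip */
        __le64 buffer;          /* DMA address of the receive buffer */
};

static void my_post_cluster(struct my_rxd *ring, int start, int count,
                            u64 fresh)
{
        int i;

        /* Every ring[i].buffer written earlier must be visible to the
         * device before any status word hands a descriptor back.
         */
        dma_wmb();
        for (i = 0; i < count; i++)
                ring[start + i].status_word = cpu_to_le64(fresh);
}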
                if (gem_intme(entry))
                        ctrl |= TXDCTRL_INTME;
                txd->buffer = cpu_to_le64(mapping);
-               wmb();
+               dma_wmb();
                txd->control_word = cpu_to_le64(ctrl);
                entry = NEXT_TX(entry);
        } else {
 
                        txd = &gp->init_block->txd[entry];
                        txd->buffer = cpu_to_le64(mapping);
-                       wmb();
+                       dma_wmb();
                        txd->control_word = cpu_to_le64(this_ctrl | len);
 
                        if (gem_intme(entry))
                }
                txd = &gp->init_block->txd[first_entry];
                txd->buffer = cpu_to_le64(first_mapping);
-               wmb();
+               dma_wmb();
                txd->control_word =
                        cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
        }
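The TX hunks above preserve two levels of ordering. Per descriptor, the buffer address must be visible before the control word, since the control word is what lets the chip start fetching. Per packet, every fragment descriptor is written first and the first descriptor, the one carrying TXDCTRL_SOF, is published last. A hedged sketch of that two-phase publish; my_txd, MY_SOF, MY_EOF and MY_TX_RING_SIZE are hypothetical, not the sungem definitions:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

#define MY_TX_RING_SIZE 128
#define MY_SOF          0x0000000080000000ULL
#define MY_EOF          0x0000000040000000ULL

struct my_txd {
        __le64 control_word;
        __le64 buffer;
};

static void my_tx_publish(struct my_txd *ring, int first, int nfrags,
                          const dma_addr_t *map, const u32 *len)
{
        int e = (first + 1) & (MY_TX_RING_SIZE - 1);
        int i;

        /* Fragments first: each is complete before its control word,
         * but none of them carries MY_SOF yet.
         */
        for (i = 1; i <= nfrags; i++, e = (e + 1) & (MY_TX_RING_SIZE - 1)) {
                u64 ctrl = len[i] | (i == nfrags ? MY_EOF : 0);

                ring[e].buffer = cpu_to_le64(map[i]);
                dma_wmb();
                ring[e].control_word = cpu_to_le64(ctrl);
        }
        /* Head last: the chip cannot start the packet until MY_SOF
         * appears, so it never chases a half-written chain.
         */
        ring[first].buffer = cpu_to_le64(map[0]);
        dma_wmb();
        ring[first].control_word = cpu_to_le64(MY_SOF | len[0]);
}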
                        gp->rx_skbs[i] = NULL;
                }
                rxd->status_word = 0;
-               wmb();
+               dma_wmb();
                rxd->buffer = 0;
        }
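Teardown runs the same ordering in reverse: the status word is neutralized first, and only after the barrier is the buffer pointer cleared, so the device can never observe a live status word pointing at a buffer that is about to be freed. As a short sketch, with the hypothetical my_rxd again:

#include <linux/types.h>
#include <asm/barrier.h>

struct my_rxd {
        __le64 status_word;
        __le64 buffer;
};

static void my_rxd_retire(struct my_rxd *rxd)
{
        rxd->status_word = 0;   /* take the descriptor away from the chip */
        dma_wmb();              /* ... before the address goes stale */
        rxd->buffer = 0;
}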
 
                                        RX_BUF_ALLOC_SIZE(gp),
                                        PCI_DMA_FROMDEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
-               wmb();
+               dma_wmb();
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
                skb_reserve(skb, RX_OFFSET);
        }
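Refill is the mirror image: map the new buffer, store the DMA address, barrier, then write RXDCTRL_FRESH so that ownership transfers last. A sketch using the era's pci_map_page() API; my_rxd, buf_len and "fresh" are stand-ins for the gem-specific pieces:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

struct my_rxd {
        __le64 status_word;
        __le64 buffer;
};

static int my_rx_refill(struct pci_dev *pdev, struct my_rxd *rxd,
                        struct sk_buff *skb, size_t buf_len, u64 fresh)
{
        dma_addr_t addr = pci_map_page(pdev, virt_to_page(skb->data),
                                       offset_in_page(skb->data),
                                       buf_len, PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(pdev, addr))
                return -ENOMEM;

        rxd->buffer = cpu_to_le64(addr);
        dma_wmb();      /* address visible before ownership flips */
        rxd->status_word = cpu_to_le64(fresh);
        return 0;
}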
                struct gem_txd *txd = &gb->txd[i];
 
                txd->control_word = 0;
-               wmb();
+               dma_wmb();
                txd->buffer = 0;
        }
        wmb();
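The wmb() left untouched here is deliberate: dma_wmb() only orders stores to coherent memory against each other, while this barrier must also order the descriptor stores against the MMIO write that later kicks the chip, which dma_wmb() does not promise to cover. Schematically, with a made-up KICK_REG and my_ring_reset():

#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define KICK_REG 0x0    /* hypothetical doorbell offset */

struct my_txd {
        __le64 control_word;
        __le64 buffer;
};

static void my_ring_reset(struct my_txd *ring, int n, void __iomem *regs)
{
        int i;

        for (i = 0; i < n; i++) {
                ring[i].control_word = 0;
                dma_wmb();              /* intra-descriptor ordering only */
                ring[i].buffer = 0;
        }
        wmb();                          /* descriptors vs. the MMIO below */
        writel(n, regs + KICK_REG);
}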
 
 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
        rxd->rx_addr = (__force hme32)addr;
-       wmb();
+       dma_wmb();
        rxd->rx_flags = (__force hme32)flags;
 }
 
 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
        txd->tx_addr = (__force hme32)addr;
-       wmb();
+       dma_wmb();
        txd->tx_flags = (__force hme32)flags;
 }
 
 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
        rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
+       dma_wmb();
        rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
        txd->tx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
+       dma_wmb();
        txd->tx_flags = (__force hme32)cpu_to_le32(flags);
 }
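All four helpers encode the same contract, independent of bus endianness: the address store may never become visible after the flags store that carries the ownership bit. The consumer side has the mirror-image obligation; a sketch of that read side, with MY_OWN and my_rxd as hypothetical stand-ins:

#include <linux/types.h>
#include <asm/barrier.h>

#define MY_OWN  0x80000000U

struct my_rxd {
        u32 flags;      /* MY_OWN in the top bit, written last by the CPU */
        u32 status;     /* length/checksum, written by the device */
};

static int my_rxd_poll(const struct my_rxd *rxd, u32 *status)
{
        if (rxd->flags & MY_OWN)
                return 0;       /* device still owns the descriptor */
        /* Pairs with the writer's dma_wmb(): the status load below
         * must not be satisfied before the flags check above.
         */
        dma_rmb();
        *status = rxd->status;
        return 1;
}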
 
        sbus_readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {   (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {   (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_read_desc32(__hp, __p)     ((__force u32)(hme32)*(__p))
        readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {   (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {   (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
+       dma_wmb(); \
        (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
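A note on the two variants: the SBUS macros store raw big-endian words while the PCI ones byte-swap, but the barrier sits in the same slot in both, because the ordering requirement is bus-independent. A compilable sketch of that shared shape, with a hypothetical wr32() hiding only the endianness difference:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

typedef u32 my_hme32;   /* stand-in for the driver's hme32 */

static inline my_hme32 wr32(u32 val, bool pci_le)
{
        return pci_le ? (__force my_hme32)cpu_to_le32(val) : (my_hme32)val;
}

struct my_hme_rxd {
        my_hme32 rx_flags;
        my_hme32 rx_addr;
};

static void my_write_rxd(struct my_hme_rxd *rxd, u32 flags, u32 addr,
                         bool pci_le)
{
        rxd->rx_addr = wr32(addr, pci_le);
        dma_wmb();      /* same placement for SBUS and PCI */
        rxd->rx_flags = wr32(flags, pci_le);
}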
 
        if (desc->hdr.state != VIO_DESC_READY)
                return 1;
 
-       rmb();
+       dma_rmb();
 
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
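This vio change is the read-side mirror of everything above: desc->hdr.state is the ownership flag, and dma_rmb() guarantees that once VIO_DESC_READY has been observed, no load of the descriptor body returns pre-READY data. Generic consumer sketch; my_vio_desc only approximates the real layout:

#include <linux/types.h>
#include <asm/barrier.h>

enum my_state { MY_DESC_FREE, MY_DESC_READY, MY_DESC_DONE };

struct my_vio_desc {
        u8  state;              /* written last by the producer */
        u32 size;               /* body: written before state */
        u64 cookie_addr;        /* body: written before state */
};

static int my_rx_one(const struct my_vio_desc *desc, u64 *addr, u32 *size)
{
        if (desc->state != MY_DESC_READY)
                return 1;       /* nothing to consume yet */
        /* Pairs with the producer's dma_wmb(): the body loads below
         * must not be reordered before the state check above.
         */
        dma_rmb();
        *addr = desc->cookie_addr;
        *size = desc->size;
        return 0;
}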
        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
-       wmb();
+       dma_wmb();
 
        d->hdr.state = VIO_DESC_READY;
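And the producer half, pairing with the consumer sketch above: the body is written first, the barrier publishes it, and only then does the state flip to READY. The comment rewrite in the next hunk just keeps the prose in step with the new primitive.

/* Producer-side sketch, reusing struct my_vio_desc from the consumer
 * sketch above.
 */
static void my_tx_one(struct my_vio_desc *desc, u64 addr, u32 size)
{
        desc->cookie_addr = addr;
        desc->size = size;
        /* The peer LDOM reads this memory, so a non-SMP barrier is
         * required: the body must be visible before READY even on a
         * uniprocessor kernel.
         */
        dma_wmb();
        desc->state = MY_DESC_READY;
}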
 
         * is marked READY, but start_cons was false.
         * If so, vnet_ack() should send out the missed "start" trigger.
         *
-        * Note that the wmb() above makes sure the cookies et al. are
+        * Note that the dma_wmb() above makes sure the cookies et al. are
         * not globally visible before the VIO_DESC_READY, and that the
         * stores are ordered correctly by the compiler. The consumer will
         * not proceed until the VIO_DESC_READY is visible assuring that