XFS_BUF_STALE(bp);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        XFS_BUF_CLR_BDSTRAT_FUNC(bp);
-       if (!(fl & XFS_B_ASYNC)) {
+       if (!(fl & XBF_ASYNC)) {
                /*
                 * Mark b_error and B_ERROR _both_.
                 * Lots of chunkcache code assumes that.
 
        ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
 
 
-#define XFS_B_ASYNC            XBF_ASYNC
-#define XFS_B_DELWRI           XBF_DELWRI
-#define XFS_B_READ             XBF_READ
-#define XFS_B_WRITE            XBF_WRITE
-#define XFS_B_STALE            XBF_STALE
-
-#define XFS_BUF_TRYLOCK                XBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK     XBF_TRYLOCK
-#define XFS_BUF_LOCK           XBF_LOCK
-#define XFS_BUF_MAPPED         XBF_MAPPED
-
-#define BUF_BUSY               XBF_DONT_BLOCK
-
 #define XFS_BUF_BFLAGS(bp)     ((bp)->b_flags)
 #define XFS_BUF_ZEROFLAGS(bp)  ((bp)->b_flags &= \
                ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
 
-#define XFS_BUF_STALE(bp)      ((bp)->b_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(bp)    ((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_STALE(bp)      ((bp)->b_flags |= XBF_STALE)
+#define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XBF_STALE)
+#define XFS_BUF_ISSTALE(bp)    ((bp)->b_flags & XBF_STALE)
 #define XFS_BUF_SUPER_STALE(bp)        do {                            \
                                        XFS_BUF_STALE(bp);      \
                                        xfs_buf_delwri_dequeue(bp);     \
                                        XFS_BUF_DONE(bp);       \
                                } while (0)
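
XFS_BUF_SUPER_STALE() is wrapped in do { ... } while (0) so its three statements expand to a single C statement at the call site. A minimal sketch of why that matters (the caller below is hypothetical, not from this patch):

/*
 * With the do/while(0) wrapper the macro plus its trailing ';'
 * is one statement, so this parses as intended.  With a bare
 * { ... } block instead, the ';' after the macro would end the
 * if, and the dangling else would fail to compile.
 */
if (corrupted)
	XFS_BUF_SUPER_STALE(bp);
else
	xfs_buf_relse(bp);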
 
-#define XFS_BUF_MANAGE         XBF_FS_MANAGED
 #define XFS_BUF_UNMANAGE(bp)   ((bp)->b_flags &= ~XBF_FS_MANAGED)
 
 #define XFS_BUF_DELAYWRITE(bp)         ((bp)->b_flags |= XBF_DELWRI)
 
 #define xfs_biomove(bp, off, len, data, rw) \
            xfs_buf_iomove((bp), (off), (len), (data), \
-               ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
+               ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
 
 #define xfs_biozero(bp, off, len) \
            xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
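
The removed XFS_B_*, XFS_BUF_*, and BUF_BUSY names were pure aliases for the XBF_* flags that xfs_buf.h already defines, so every call site above is a one-for-one rename. For reference, a hedged sketch of the mapping (bit positions are illustrative, not copied from the tree):

/*
 * Sketch of the surviving XBF_* flags and the legacy aliases
 * this patch removes.  Values are illustrative only.
 */
typedef enum {
	XBF_READ	= (1 << 0),	/* was XFS_B_READ */
	XBF_WRITE	= (1 << 1),	/* was XFS_B_WRITE */
	XBF_MAPPED	= (1 << 2),	/* was XFS_BUF_MAPPED */
	XBF_ASYNC	= (1 << 4),	/* was XFS_B_ASYNC */
	XBF_DELWRI	= (1 << 6),	/* was XFS_B_DELWRI */
	XBF_STALE	= (1 << 7),	/* was XFS_B_STALE */
	XBF_FS_MANAGED	= (1 << 8),	/* was XFS_BUF_MANAGE */
	XBF_LOCK	= (1 << 14),	/* was XFS_BUF_LOCK */
	XBF_TRYLOCK	= (1 << 15),	/* was XFS_BUF_TRYLOCK, XFS_INCORE_TRYLOCK */
	XBF_DONT_BLOCK	= (1 << 16),	/* was BUF_BUSY */
} xfs_buf_flags_t;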
 
                xfs_iflags_clear(ip, XFS_ITRUNCATED);
                ret = -filemap_fdatawrite(mapping);
        }
-       if (flags & XFS_B_ASYNC)
+       if (flags & XBF_ASYNC)
                return ret;
        ret2 = xfs_wait_on_pages(ip, first, last);
        if (!ret)
 
        }
 
        error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
-                               0 : XFS_B_ASYNC, FI_NONE);
+                               0 : XBF_ASYNC, FI_NONE);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
  out_wait:
        if (flags & SYNC_TRYLOCK) {
                ASSERT(!(flags & SYNC_WAIT));
 
-               bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
+               bp = xfs_getsb(mp, XBF_TRYLOCK);
                if (!bp)
                        goto out;
 
 
         * the flush lock when the I/O completes.
         */
        bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
-                   XFS_QI_DQCHUNKLEN(dqp->q_mount),
-                   XFS_INCORE_TRYLOCK);
+                   XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
        if (bp != NULL) {
                if (XFS_BUF_ISDELAYWRITE(bp)) {
                        int     error;
 
        }
        mp = dqp->q_mount;
        bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
-                   XFS_QI_DQCHUNKLEN(mp),
-                   XFS_INCORE_TRYLOCK);
+                   XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
        if (bp != NULL) {
                if (XFS_BUF_ISDELAYWRITE(bp)) {
                        dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
 
        ASSERT(agno != NULLAGNUMBER);
 
        error = xfs_read_agf(mp, tp, agno,
-                       (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0,
+                       (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
                        bpp);
        if (error)
                return error;
 
                        dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
                        blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
-                                            blkcnt,
-                                            XFS_BUF_LOCK | XBF_DONT_BLOCK,
+                                            blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
                                             &bp);
                        if (error)
                                return(error);
 
                        tmp = (valuelen < XFS_BUF_SIZE(bp))
                                ? valuelen : XFS_BUF_SIZE(bp);
-                       xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
+                       xfs_biomove(bp, 0, tmp, dst, XBF_READ);
                        xfs_buf_relse(bp);
                        dst += tmp;
                        valuelen -= tmp;
                blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
                bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
-                                XFS_BUF_LOCK | XBF_DONT_BLOCK);
+                                XBF_LOCK | XBF_DONT_BLOCK);
                ASSERT(bp);
                ASSERT(!XFS_BUF_GETERROR(bp));
 
                tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
                                                        XFS_BUF_SIZE(bp);
-               xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
+               xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
                if (tmp < XFS_BUF_SIZE(bp))
                        xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
                if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
                /*
                 * If the "remote" value is in the cache, remove it.
                 */
-               bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
-                               XFS_INCORE_TRYLOCK);
+               bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
                if (bp) {
                        XFS_BUF_STALE(bp);
                        XFS_BUF_UNDELAYWRITE(bp);
 
                                                map.br_blockcount);
                        bp = xfs_trans_get_buf(*trans,
                                        dp->i_mount->m_ddev_targp,
-                                       dblkno, dblkcnt, XFS_BUF_LOCK);
+                                       dblkno, dblkcnt, XBF_LOCK);
                        xfs_trans_binval(*trans, bp);
                        /*
                         * Roll to next transaction.
 
        xfs_daddr_t             d;
 
        /* need to sort out how callers deal with failures first */
-       ASSERT(!(flags & XFS_BUF_TRYLOCK));
+       ASSERT(!(flags & XBF_TRYLOCK));
 
        d = xfs_btree_ptr_to_daddr(cur, ptr);
        *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
        int                     error;
 
        /* need to sort out how callers deal with failures first */
-       ASSERT(!(flags & XFS_BUF_TRYLOCK));
+       ASSERT(!(flags & XBF_TRYLOCK));
 
        d = xfs_btree_ptr_to_daddr(cur, ptr);
        error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
 
                d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
                fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
                                         mp->m_bsize * blks_per_cluster,
-                                        XFS_BUF_LOCK);
+                                        XBF_LOCK);
                ASSERT(fbuf);
                ASSERT(!XFS_BUF_GETERROR(fbuf));
 
 
                                "an error %d on %s.  Returning error.",
                                error, mp->m_fsname);
                } else {
-                       ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+                       ASSERT(buf_flags & XBF_TRYLOCK);
                }
                return error;
        }
        if (error)
                return error;
 
-       error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+       error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
        if (error)
                return error;
 
                return error;
 
        if (!bp) {
-               ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+               ASSERT(buf_flags & XBF_TRYLOCK);
                ASSERT(tp == NULL);
                *bpp = NULL;
                return EAGAIN;
         * Get pointers to the on-disk inode and the buffer containing it.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
-                              XFS_BUF_LOCK, iget_flags);
+                              XBF_LOCK, iget_flags);
        if (error)
                return error;
        dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
                 * Here we put the head pointer into our next pointer,
                 * and then we fall through to point the head at us.
                 */
-               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
                if (error)
                        return error;
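
The comment above describes insertion at the head of the on-disk unlinked-inode list: the AGI bucket holds the head, and each inode's di_next_unlinked points to the next entry. A minimal in-memory sketch of that head insertion (struct and names are hypothetical stand-ins, not XFS types):

/*
 * Hypothetical analogue of the on-disk operation: insert inode
 * "ip" at the head of an AGI unlinked bucket.  "next_unlinked"
 * stands in for di_next_unlinked.
 */
struct ul_inode {
	unsigned int	agino;		/* AG-relative inode number */
	unsigned int	next_unlinked;	/* next inode in the bucket's list */
};

static void
ul_insert_head(unsigned int *bucket, struct ul_inode *ip)
{
	ip->next_unlinked = *bucket;	/* old head becomes our next... */
	*bucket = ip->agino;		/* ...and the head now points at us */
}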
 
                 * of dealing with the buffer when there is no need to
                 * change it.
                 */
-               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
                if (error) {
                        cmn_err(CE_WARN,
                                "xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
                 * Now last_ibp points to the buffer previous to us on
                 * the unlinked list.  Pull us from the list.
                 */
-               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+               error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
                if (error) {
                        cmn_err(CE_WARN,
                                "xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
 
                bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 
                                        mp->m_bsize * blks_per_cluster,
-                                       XFS_BUF_LOCK);
+                                       XBF_LOCK);
 
                pre_flushed = 0;
                lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
 
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-       error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+       error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
        if (error)
                return error;
 
         * Get the buffer containing the on-disk inode.
         */
        error = xfs_itobp(mp, NULL, ip, &dip, &bp,
-                               noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+                               noblock ? XBF_TRYLOCK : XBF_LOCK);
        if (error || !bp) {
                xfs_ifunlock(ip);
                return error;
 
 
        mp = ip->i_mount;
        bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
-                   iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+                   iip->ili_format.ilf_len, XBF_TRYLOCK);
 
        if (bp != NULL) {
                if (XFS_BUF_ISDELAYWRITE(bp)) {
 
        }
 
        mp = log->l_mp;
-       buf_flags = XFS_BUF_LOCK;
+       buf_flags = XBF_LOCK;
        if (!(flags & XFS_BLI_INODE_BUF))
-               buf_flags |= XFS_BUF_MAPPED;
+               buf_flags |= XBF_MAPPED;
 
        bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
        if (XFS_BUF_ISERROR(bp)) {
        }
 
        bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
-                         XFS_BUF_LOCK);
+                         XBF_LOCK);
        if (XFS_BUF_ISERROR(bp)) {
                xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
                                  bp, in_f->ilf_blkno);
        /*
         * Get the on disk inode to find the next inode in the bucket.
         */
-       error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
+       error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
        if (error)
                goto fail_iput;
 
 
         * access to the superblock.
         */
        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-       extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+       extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
 
        bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
                          extra_flags);
 
        ASSERT(mp->m_sb_bp != NULL);
        bp = mp->m_sb_bp;
-       if (flags & XFS_BUF_TRYLOCK) {
+       if (flags & XBF_TRYLOCK) {
                if (!XFS_BUF_CPSEMA(bp)) {
                        return NULL;
                }
 
        xfs_buf_log_item_t      *bip;
 
        if (flags == 0)
-               flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+               flags = XBF_LOCK | XBF_MAPPED;
 
        /*
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL)
-               return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+               return xfs_buf_get(target_dev, blkno, len,
+                                  flags | XBF_DONT_BLOCK);
 
        /*
         * If we find the buffer in the cache with this transaction
        }
 
        /*
-        * We always specify the BUF_BUSY flag within a transaction so
-        * that get_buf does not try to push out a delayed write buffer
+        * We always specify the XBF_DONT_BLOCK flag within a transaction
+        * so that get_buf does not try to push out a delayed write buffer
         * which might cause another transaction to take place (if the
         * buffer was delayed alloc).  Such recursive transactions can
         * easily deadlock with our current transaction as well as cause
         * us to run out of stack space.
         */
-       bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+       bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
        if (bp == NULL) {
                return NULL;
        }
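
The comment above states the rule this patch preserves while retiring the BUF_BUSY alias: buffer lookups made inside a transaction must never block to flush a delayed-write buffer, because that flush can start a nested transaction that deadlocks against the current one. A hedged sketch of the flag policy shared by xfs_trans_get_buf() and xfs_trans_read_buf() (the helper itself is hypothetical, not part of the patch):

/*
 * Hypothetical helper summarizing the policy: default to a
 * locked, mapped buffer, and always OR in XBF_DONT_BLOCK so the
 * lookup cannot recurse into delwri writeback.
 */
static inline xfs_buf_flags_t
xfs_trans_buf_flags(xfs_buf_flags_t flags)
{
	if (flags == 0)
		flags = XBF_LOCK | XBF_MAPPED;
	return flags | XBF_DONT_BLOCK;
}
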
        int                     error;
 
        if (flags == 0)
-               flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+               flags = XBF_LOCK | XBF_MAPPED;
 
        /*
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL) {
-               bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+               bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
                if (!bp)
-                       return (flags & XFS_BUF_TRYLOCK) ?
+                       return (flags & XBF_TRYLOCK) ?
                                        EAGAIN : XFS_ERROR(ENOMEM);
 
                if (XFS_BUF_GETERROR(bp) != 0) {
        }
 
        /*
-        * We always specify the BUF_BUSY flag within a transaction so
-        * that get_buf does not try to push out a delayed write buffer
+        * We always specify the XBF_DONT_BLOCK flag within a transaction
+        * so that get_buf does not try to push out a delayed write buffer
         * which might cause another transaction to take place (if the
         * buffer was delayed alloc).  Such recursive transactions can
         * easily deadlock with our current transaction as well as cause
         * us to run out of stack space.
         */
-       bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+       bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
        if (bp == NULL) {
                *bpp = NULL;
                return 0;
        if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
                cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
 #endif
-       ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
-                                               (XFS_B_STALE|XFS_B_DELWRI));
+       ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+                                    (XBF_STALE|XBF_DELWRI));
 
        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
        xfs_buf_relse(bp);
 
                    iattr->ia_size > ip->i_d.di_size) {
                        code = xfs_flush_pages(ip,
                                        ip->i_d.di_size, iattr->ia_size,
-                                       XFS_B_ASYNC, FI_NONE);
+                                       XBF_ASYNC, FI_NONE);
                }
 
                /* wait for all I/O to complete */
                 */
                truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
                if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-                       xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+                       xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
        }
 
        if (ip->i_d.di_nlink != 0) {