* but it is much faster.
         */
        xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
-                               (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
+                               (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
                                  NBWORD) * sizeof(int))), "xfs_buf_item");
        if (!xfs_buf_item_zone)
                goto out_destroy_trans_zone;
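
A quick worked example of the extra space reserved above, assuming the usual 64k XFS_MAX_BLOCKSIZE, 128-byte chunks, 32-bit map words and a 4-byte int (sizeof(xfs_buf_log_item_t) itself is build-dependent):

	(XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD * sizeof(int)
		= (65536 / 128) / 32 * 4
		= 64 bytes of dirty-bitmap space per zone object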
 
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
                xfs_qm_dqinit_core(curid, type, d);
        xfs_trans_dquot_buf(tp, bp,
-                           (type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
-                           ((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
-                            XFS_BLI_GDQUOT_BUF)));
+                           (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
+                           ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
+                            XFS_BLF_GDQUOT_BUF)));
        xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 }
 
 
        nbytes = last - first + 1;
        bfset(bip->bli_logged, first, nbytes);
        for (x = 0; x < nbytes; x++) {
-               chunk_num = byte >> XFS_BLI_SHIFT;
+               chunk_num = byte >> XFS_BLF_SHIFT;
                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
                bit_num = chunk_num & (NBWORD - 1);
                wordp = &(bip->bli_format.blf_data_map[word_num]);
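
A minimal standalone sketch of the chunk addressing used above, with the constants copied from the definitions this patch renames; the variable names mirror the kernel code, but the program itself is purely illustrative:

#include <stdio.h>

#define XFS_BLF_CHUNK		128
#define XFS_BLF_SHIFT		7
#define BIT_TO_WORD_SHIFT	5
#define NBWORD			(8 * sizeof(unsigned int))

int main(void)
{
	unsigned int byte = 1000;	/* arbitrary byte offset into the buffer */

	/* which 128-byte chunk the byte falls in */
	unsigned int chunk_num = byte >> XFS_BLF_SHIFT;		/* 7 */
	/* which word of blf_data_map[] holds that chunk's dirty bit */
	unsigned int word_num = chunk_num >> BIT_TO_WORD_SHIFT;	/* 0 */
	/* which bit within that word */
	unsigned int bit_num = chunk_num & (NBWORD - 1);	/* 7 */

	printf("byte %u -> chunk %u, word %u, bit %u\n",
	       byte, chunk_num, word_num, bit_num);
	return 0;
}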
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                return 1;
        }
 
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        nvecs++;
-               } else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) !=
-                          (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) +
-                           XFS_BLI_CHUNK)) {
+               } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
+                          (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
+                           XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        nvecs++;
                } else {
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                bip->bli_format.blf_size = nvecs;
                return;
        }
                 * keep counting and scanning.
                 */
                if (next_bit == -1) {
-                       buffer_offset = first_bit * XFS_BLI_CHUNK;
+                       buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-                       vecp->i_len = nbits * XFS_BLI_CHUNK;
+                       vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
                        nvecs++;
                        break;
                } else if (next_bit != last_bit + 1) {
-                       buffer_offset = first_bit * XFS_BLI_CHUNK;
+                       buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-                       vecp->i_len = nbits * XFS_BLI_CHUNK;
+                       vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
                        nvecs++;
                        vecp++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
-               } else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) !=
-                          (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) +
-                           XFS_BLI_CHUNK)) {
-                       buffer_offset = first_bit * XFS_BLI_CHUNK;
+               } else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
+                          (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
+                           XFS_BLF_CHUNK)) {
+                       buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-                       vecp->i_len = nbits * XFS_BLI_CHUNK;
+                       vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
 /* You would think we need to bump the nvecs here too, but we do not
  * this number is used by recovery, and it gets confused by the boundary
                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
                ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                ASSERT(XFS_BUF_ISSTALE(bp));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                trace_xfs_buf_item_unpin_stale(bip);
 
                /*
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
        }
 
        /*
-        * chunks is the number of XFS_BLI_CHUNK size pieces
+        * chunks is the number of XFS_BLF_CHUNK size pieces
         * the buffer can be divided into. Make sure not to
         * truncate any pieces.  map_size is the size of the
         * bitmap needed to describe the chunks of the buffer.
         */
-       chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT);
+       chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
        map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
 
        bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
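
A worked example of the sizing above, assuming a 4096-byte buffer and 32-bit map words:

	chunks   = (4096 + 127) >> XFS_BLF_SHIFT      = 32
	map_size = (32 + NBWORD) >> BIT_TO_WORD_SHIFT = (32 + 32) >> 5 = 2

One dirty bit per 128-byte chunk; the "+ NBWORD" term rounds the bitmap up to whole words and may over-allocate by up to one word.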
        /*
         * Convert byte offsets to bit numbers.
         */
-       first_bit = first >> XFS_BLI_SHIFT;
-       last_bit = last >> XFS_BLI_SHIFT;
+       first_bit = first >> XFS_BLF_SHIFT;
+       last_bit = last >> XFS_BLF_SHIFT;
 
        /*
         * Calculate the total number of bits to be set.
 
  * This flag indicates that the buffer contains on disk inodes
  * and requires special recovery handling.
  */
-#define        XFS_BLI_INODE_BUF       0x1
+#define        XFS_BLF_INODE_BUF       0x1
 /*
  * This flag indicates that the buffer should not be replayed
  * during recovery because its blocks are being freed.
  */
-#define        XFS_BLI_CANCEL          0x2
+#define        XFS_BLF_CANCEL          0x2
 /*
  * This flag indicates that the buffer contains on disk
  * user or group dquots and may require special recovery handling.
  */
-#define        XFS_BLI_UDQUOT_BUF      0x4
-#define XFS_BLI_PDQUOT_BUF     0x8
-#define        XFS_BLI_GDQUOT_BUF      0x10
+#define        XFS_BLF_UDQUOT_BUF      0x4
+#define XFS_BLF_PDQUOT_BUF     0x8
+#define        XFS_BLF_GDQUOT_BUF      0x10
 
-#define        XFS_BLI_CHUNK           128
-#define        XFS_BLI_SHIFT           7
+#define        XFS_BLF_CHUNK           128
+#define        XFS_BLF_SHIFT           7
 #define        BIT_TO_WORD_SHIFT       5
 #define        NBWORD                  (NBBY * sizeof(unsigned int))
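
A minimal sketch of how these flag bits are typically consumed; the enum and function names are illustrative only (the real dispatch appears in the recovery hunks below):

#include <stdio.h>

#define XFS_BLF_INODE_BUF	0x1
#define XFS_BLF_CANCEL		0x2
#define XFS_BLF_UDQUOT_BUF	0x4
#define XFS_BLF_PDQUOT_BUF	0x8
#define XFS_BLF_GDQUOT_BUF	0x10

/* illustrative classification of a logged buffer from its blf_flags */
enum blf_class { BLF_REGULAR, BLF_INODE, BLF_DQUOT, BLF_CANCELLED };

static enum blf_class classify_blf(unsigned short flags)
{
	if (flags & XFS_BLF_CANCEL)
		return BLF_CANCELLED;	/* do not replay: the blocks were freed */
	if (flags & XFS_BLF_INODE_BUF)
		return BLF_INODE;	/* inode buffer: special recovery handling */
	if (flags & (XFS_BLF_UDQUOT_BUF | XFS_BLF_PDQUOT_BUF |
		     XFS_BLF_GDQUOT_BUF))
		return BLF_DQUOT;	/* dquot buffer: special recovery handling */
	return BLF_REGULAR;
}

int main(void)
{
	printf("%d\n", classify_blf(XFS_BLF_UDQUOT_BUF));	/* prints 2 (BLF_DQUOT) */
	return 0;
}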
 
 
 
                switch (ITEM_TYPE(item)) {
                case XFS_LI_BUF:
-                       if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
+                       if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
                                trace_xfs_log_recover_item_reorder_head(log,
                                                        trans, item, pass);
                                list_move(&item->ri_list, &trans->r_itemq);
        /*
         * If this isn't a cancel buffer item, then just return.
         */
-       if (!(flags & XFS_BLI_CANCEL)) {
+       if (!(flags & XFS_BLF_CANCEL)) {
                trace_xfs_log_recover_buf_not_cancel(log, buf_f);
                return;
        }
  * Check to see whether the buffer being recovered has a corresponding
  * entry in the buffer cancel record table.  If it does then return 1
  * so that it will be cancelled, otherwise return 0.  If the buffer is
- * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
+ * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
  * the refcount on the entry in the table and remove it from the table
  * if this is the last reference.
  *
                 * There is nothing in the table built in pass one,
                 * so this buffer must not be cancelled.
                 */
-               ASSERT(!(flags & XFS_BLI_CANCEL));
+               ASSERT(!(flags & XFS_BLF_CANCEL));
                return 0;
        }
 
                 * There is no corresponding entry in the table built
                 * in pass one, so this buffer has not been cancelled.
                 */
-               ASSERT(!(flags & XFS_BLI_CANCEL));
+               ASSERT(!(flags & XFS_BLF_CANCEL));
                return 0;
        }
 
                         * one in the table and remove it if this is the
                         * last reference.
                         */
-                       if (flags & XFS_BLI_CANCEL) {
+                       if (flags & XFS_BLF_CANCEL) {
                                bcp->bc_refcount--;
                                if (bcp->bc_refcount == 0) {
                                        if (prevp == NULL) {
         * We didn't find a corresponding entry in the table, so
         * return 0 so that the buffer is NOT cancelled.
         */
-       ASSERT(!(flags & XFS_BLI_CANCEL));
+       ASSERT(!(flags & XFS_BLF_CANCEL));
        return 0;
 }
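
The cancel-table protocol described in the comment above reduces to the following pattern. This is a simplified sketch with hypothetical names and a plain singly linked bucket; the real table is the per-log hash of buffer cancel records built during recovery pass 1:

#include <stdio.h>
#include <stdlib.h>

#define XFS_BLF_CANCEL	0x2

/* simplified stand-in for one hash bucket of buffer cancel records */
struct buf_cancel {
	unsigned long long	bc_blkno;	/* starting disk block */
	unsigned int		bc_len;		/* length in blocks */
	int			bc_refcount;
	struct buf_cancel	*bc_next;
};

/*
 * Return 1 if (blkno, len) has a cancel record, 0 otherwise.  When the
 * caller is itself a cancel item (XFS_BLF_CANCEL set), drop one reference
 * and unlink/free the record on the last reference.
 */
static int check_cancelled(struct buf_cancel **bucket,
			   unsigned long long blkno, unsigned int len,
			   unsigned short flags)
{
	struct buf_cancel *bcp, *prev = NULL;

	for (bcp = *bucket; bcp != NULL; prev = bcp, bcp = bcp->bc_next) {
		if (bcp->bc_blkno != blkno || bcp->bc_len != len)
			continue;
		if ((flags & XFS_BLF_CANCEL) && --bcp->bc_refcount == 0) {
			if (prev)
				prev->bc_next = bcp->bc_next;
			else
				*bucket = bcp->bc_next;
			free(bcp);
		}
		return 1;		/* cancelled: do not replay */
	}
	return 0;			/* no entry: replay normally */
}

int main(void)
{
	struct buf_cancel *bucket = calloc(1, sizeof(*bucket));

	bucket->bc_blkno = 1024;
	bucket->bc_len = 8;
	bucket->bc_refcount = 1;

	printf("%d\n", check_cancelled(&bucket, 1024, 8, 0));			/* 1 */
	printf("%d\n", check_cancelled(&bucket, 1024, 8, XFS_BLF_CANCEL));	/* 1 */
	printf("%d\n", check_cancelled(&bucket, 1024, 8, 0));			/* 0 */
	return 0;
}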
 
                        nbits = xfs_contig_bits(data_map, map_size,
                                                         bit);
                        ASSERT(nbits > 0);
-                       reg_buf_offset = bit << XFS_BLI_SHIFT;
-                       reg_buf_bytes = nbits << XFS_BLI_SHIFT;
+                       reg_buf_offset = bit << XFS_BLF_SHIFT;
+                       reg_buf_bytes = nbits << XFS_BLF_SHIFT;
                        item_index++;
                }
 
                }
 
                ASSERT(item->ri_buf[item_index].i_addr != NULL);
-               ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
+               ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
                ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
 
                /*
                nbits = xfs_contig_bits(data_map, map_size, bit);
                ASSERT(nbits > 0);
                ASSERT(item->ri_buf[i].i_addr != NULL);
-               ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
+               ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
                ASSERT(XFS_BUF_COUNT(bp) >=
-                      ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
+                      ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
 
                /*
                 * Do a sanity check if this is a dquot buffer. Just checking
                 */
                error = 0;
                if (buf_f->blf_flags &
-                  (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
+                  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
                        if (item->ri_buf[i].i_addr == NULL) {
                                cmn_err(CE_ALERT,
                                        "XFS: NULL dquot in %s.", __func__);
                }
 
                memcpy(xfs_buf_offset(bp,
-                       (uint)bit << XFS_BLI_SHIFT),    /* dest */
+                       (uint)bit << XFS_BLF_SHIFT),    /* dest */
                        item->ri_buf[i].i_addr,         /* source */
-                       nbits<<XFS_BLI_SHIFT);          /* length */
+                       nbits<<XFS_BLF_SHIFT);          /* length */
  next:
                i++;
                bit += nbits;
        }
 
        type = 0;
-       if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
+       if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
                type |= XFS_DQ_USER;
-       if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
+       if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
                type |= XFS_DQ_PROJ;
-       if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
+       if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
                type |= XFS_DQ_GROUP;
        /*
         * This type of quotas was turned off, so ignore this buffer
  * here which overlaps that may be stale.
  *
  * When meta-data buffers are freed at run time we log a buffer item
- * with the XFS_BLI_CANCEL bit set to indicate that previous copies
+ * with the XFS_BLF_CANCEL bit set to indicate that previous copies
  * of the buffer in the log should not be replayed at recovery time.
  * This is so that if the blocks covered by the buffer are reused for
  * file data before we crash we don't end up replaying old, freed
        if (pass == XLOG_RECOVER_PASS1) {
                /*
                 * In this pass we're only looking for buf items
-                * with the XFS_BLI_CANCEL bit set.
+                * with the XFS_BLF_CANCEL bit set.
                 */
                xlog_recover_do_buffer_pass1(log, buf_f);
                return 0;
 
        mp = log->l_mp;
        buf_flags = XBF_LOCK;
-       if (!(flags & XFS_BLI_INODE_BUF))
+       if (!(flags & XFS_BLF_INODE_BUF))
                buf_flags |= XBF_MAPPED;
 
        bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
        }
 
        error = 0;
-       if (flags & XFS_BLI_INODE_BUF) {
+       if (flags & XFS_BLF_INODE_BUF) {
                error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
        } else if (flags &
-                 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
+                 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
                xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
        } else {
                xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
 
 #define XLOG_RHASH(tid)        \
        ((((__uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1))
 
-#define XLOG_MAX_REGIONS_IN_ITEM   (XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK / 2 + 1)
+#define XLOG_MAX_REGIONS_IN_ITEM   (XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK / 2 + 1)
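
Assuming the usual 64k XFS_MAX_BLOCKSIZE, this bound works out to 65536 / 128 / 2 + 1 = 257 regions: in the worst case every other 128-byte chunk of a maximally sized buffer starts its own log region, plus one region for the buf log format header itself.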
 
 
 /*
 
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        /*
 
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
 
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);
        bip->bli_flags &= ~XFS_BLI_HOLD;
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
-               bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
+               bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
 
        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
                ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
-               ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+               ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
+               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
         * in the buf log item.  The STALE flag will be used in
         * xfs_buf_item_unpin() to determine if it should clean up
         * when the last reference to the buf item is given up.
-        * We set the XFS_BLI_CANCEL flag in the buf log format structure
+        * We set the XFS_BLF_CANCEL flag in the buf log format structure
         * and log the buf item.  This will be used at recovery time
         * to determine that copies of the buffer in the log before
         * this should not be replayed.
        XFS_BUF_STALE(bp);
        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
-       bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
-       bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
+       bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
+       bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
        memset((char *)(bip->bli_format.blf_data_map), 0,
              (bip->bli_format.blf_map_size * sizeof(uint)));
        lidp->lid_flags |= XFS_LID_DIRTY;
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-       bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
+       bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
 }
 
 /*
        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-       ASSERT(type == XFS_BLI_UDQUOT_BUF ||
-              type == XFS_BLI_PDQUOT_BUF ||
-              type == XFS_BLI_GDQUOT_BUF);
+       ASSERT(type == XFS_BLF_UDQUOT_BUF ||
+              type == XFS_BLF_PDQUOT_BUF ||
+              type == XFS_BLF_GDQUOT_BUF);
 
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);