ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+       cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_ag_maxlevels);
        cur->bc_ag.abt.active = false;
 
        if (btnum == XFS_BTNUM_CNT) {
 
        struct xfs_btree_cur    *cur;
        ASSERT(whichfork != XFS_COW_FORK);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
+                       mp->m_bm_maxlevels[whichfork]);
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
 
 
 
 #define        XFS_BTREE_MAXLEVELS     9       /* max of all btrees */
 
+/*
+ * The btree cursor zone hands out cursors that can handle up to this many
+ * levels.  This is the known maximum for all btree types and must be kept
+ * in sync if any btree type ever grows deeper than this.
+ */
+#define XFS_BTREE_CUR_CACHE_MAXLEVELS  (9)
+
 struct xfs_btree_ops {
        /* size of the key and record structures */
        size_t  key_len;
 xfs_btree_alloc_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
-       xfs_btnum_t             btnum)
+       xfs_btnum_t             btnum,
+       uint8_t                 maxlevels)
 {
        struct xfs_btree_cur    *cur;
 
+       ASSERT(maxlevels <= XFS_BTREE_CUR_CACHE_MAXLEVELS);
+
        cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = btnum;
-       cur->bc_maxlevels = XFS_BTREE_MAXLEVELS;
+       cur->bc_maxlevels = maxlevels;
 
        return cur;
 }
 
 {
        struct xfs_btree_cur    *cur;
 
-       cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+       cur = xfs_btree_alloc_cursor(mp, tp, btnum,
+                       M_IGEO(mp)->inobt_maxlevels);
        if (btnum == XFS_BTNUM_INO) {
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
                cur->bc_ops = &xfs_inobt_ops;
 
 
        ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
+                       mp->m_refc_maxlevels);
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
 
        struct xfs_btree_cur    *cur;
 
        /* Overlapping btree; 2 keys per pointer. */
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+                       mp->m_rmap_maxlevels);
        cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
        cur->bc_ops = &xfs_rmapbt_ops;
 
                goto out_destroy_log_ticket_zone;
 
        xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
-                               xfs_btree_cur_sizeof(XFS_BTREE_MAXLEVELS),
-                                              0, 0, NULL);
+                       xfs_btree_cur_sizeof(XFS_BTREE_CUR_CACHE_MAXLEVELS),
+                       0, 0, NULL);
        if (!xfs_btree_cur_zone)
                goto out_destroy_bmap_free_item_zone;