*/
 struct xfs_writepage_ctx {
        struct xfs_bmbt_irec    imap;
-       unsigned int            io_type;
+       int                     fork;
        unsigned int            data_seq;
        unsigned int            cow_seq;
        struct xfs_ioend        *ioend;
         */
        error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
-               switch (ioend->io_type) {
-               case XFS_IO_COW:
+               if (ioend->io_fork == XFS_COW_FORK)
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
-                       break;
-               }
-
                goto done;
        }
 
        /*
-        * Success:  commit the COW or unwritten blocks if needed.
+        * Success: commit the COW or unwritten blocks if needed.
         */
-       switch (ioend->io_type) {
-       case XFS_IO_COW:
+       if (ioend->io_fork == XFS_COW_FORK)
                error = xfs_reflink_end_cow(ip, offset, size);
-               break;
-       case XFS_IO_UNWRITTEN:
-               /* writeback should never update isize */
+       else if (ioend->io_state == XFS_EXT_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size, false);
-               break;
-       default:
+       else
                ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
-               break;
-       }
 
 done:
        if (ioend->io_append_trans)
        struct xfs_ioend        *ioend = bio->bi_private;
        struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
 
-       if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
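+       /*
+        * COW remapping and unwritten extent conversion both run
+        * transactions and may block, so defer those completions to a
+        * workqueue rather than doing them from bio completion context.
+        */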
+       if (ioend->io_fork == XFS_COW_FORK ||
+           ioend->io_state == XFS_EXT_UNWRITTEN)
                queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
        else if (ioend->io_append_trans)
                queue_work(mp->m_data_workqueue, &ioend->io_work);
         * covers the offset. Be careful to check this first because the caller
         * can revalidate a COW mapping without updating the data seqno.
         */
-       if (wpc->io_type == XFS_IO_COW)
+       if (wpc->fork == XFS_COW_FORK)
                return true;
 
        /*
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
        xfs_fileoff_t           cow_fsb = NULLFILEOFF;
        struct xfs_bmbt_irec    imap;
-       int                     whichfork = XFS_DATA_FORK;
        struct xfs_iext_cursor  icur;
        int                     error = 0;
 
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+               wpc->fork = XFS_COW_FORK;
+
                /*
                 * Truncate can race with writeback since writeback doesn't
                 * take the iolock and truncate decreases the file size before
                 * will kill the contents anyway.
                 */
                if (offset > i_size_read(inode)) {
-                       wpc->io_type = XFS_IO_HOLE;
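+                       /*
+                        * Fake up a hole mapping covering the range so the
+                        * writeback loop skips these blocks.
+                        */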
+                       wpc->imap.br_blockcount = end_fsb - offset_fsb;
+                       wpc->imap.br_startoff = offset_fsb;
+                       wpc->imap.br_startblock = HOLESTARTBLOCK;
+                       wpc->imap.br_state = XFS_EXT_NORM;
                        return 0;
                }
-               whichfork = XFS_COW_FORK;
-               wpc->io_type = XFS_IO_COW;
+
                goto allocate_blocks;
        }
 
        wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
+       wpc->fork = XFS_DATA_FORK;
+
        if (imap.br_startoff > offset_fsb) {
                /* landed in a hole or beyond EOF */
                imap.br_blockcount = imap.br_startoff - offset_fsb;
                imap.br_startoff = offset_fsb;
                imap.br_startblock = HOLESTARTBLOCK;
-               wpc->io_type = XFS_IO_HOLE;
+               imap.br_state = XFS_EXT_NORM;
        } else {
                /*
                 * Truncate to the next COW extent if there is one.  This is the
                    cow_fsb < imap.br_startoff + imap.br_blockcount)
                        imap.br_blockcount = cow_fsb - imap.br_startoff;
 
-               if (isnullstartblock(imap.br_startblock)) {
-                       /* got a delalloc extent */
-                       wpc->io_type = XFS_IO_DELALLOC;
+               /* got a delalloc extent? */
+               if (isnullstartblock(imap.br_startblock))
                        goto allocate_blocks;
-               }
-
-               if (imap.br_state == XFS_EXT_UNWRITTEN)
-                       wpc->io_type = XFS_IO_UNWRITTEN;
-               else
-                       wpc->io_type = XFS_IO_OVERWRITE;
        }
 
        wpc->imap = imap;
-       trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+       trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
        return 0;
 allocate_blocks:
-       error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
-                       whichfork == XFS_COW_FORK ?
+       error = xfs_iomap_write_allocate(ip, wpc->fork, offset, &imap,
+                       wpc->fork == XFS_COW_FORK ?
                                         &wpc->cow_seq : &wpc->data_seq);
        if (error)
                return error;
-       ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
+       ASSERT(wpc->fork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
               imap.br_startoff + imap.br_blockcount <= cow_fsb);
        wpc->imap = imap;
-       trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
+       trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
        return 0;
 }
 
        int                     status)
 {
        /* Convert CoW extents to regular */
-       if (!status && ioend->io_type == XFS_IO_COW) {
+       if (!status && ioend->io_fork == XFS_COW_FORK) {
                /*
                 * Yuk. This can do memory allocation, but is not a
                 * transactional operation so everything is done in GFP_KERNEL
 
        /* Reserve log space if we might write beyond the on-disk inode size. */
        if (!status &&
-           ioend->io_type != XFS_IO_UNWRITTEN &&
+           (ioend->io_fork == XFS_COW_FORK ||
+            ioend->io_state != XFS_EXT_UNWRITTEN) &&
            xfs_ioend_is_append(ioend) &&
            !ioend->io_append_trans)
                status = xfs_setfilesize_trans_alloc(ioend);
 static struct xfs_ioend *
 xfs_alloc_ioend(
        struct inode            *inode,
-       unsigned int            type,
+       int                     fork,
+       xfs_exntst_t            state,
        xfs_off_t               offset,
        struct block_device     *bdev,
        sector_t                sector)
 
        ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
        INIT_LIST_HEAD(&ioend->io_list);
-       ioend->io_type = type;
+       ioend->io_fork = fork;
+       ioend->io_state = state;
        ioend->io_inode = inode;
        ioend->io_size = 0;
        ioend->io_offset = offset;
        sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
                ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
 
-       if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
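+       /*
+        * Start a new ioend if the fork or extent state changed, or if this
+        * block is not contiguous with the ioend we are building.
+        */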
+       if (!wpc->ioend ||
+           wpc->fork != wpc->ioend->io_fork ||
+           wpc->imap.br_state != wpc->ioend->io_state ||
            sector != bio_end_sector(wpc->ioend->io_bio) ||
            offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
                if (wpc->ioend)
                        list_add(&wpc->ioend->io_list, iolist);
-               wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
-                               bdev, sector);
+               wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
+                               wpc->imap.br_state, offset, bdev, sector);
        }
 
        if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
                error = xfs_map_blocks(wpc, inode, file_offset);
                if (error)
                        break;
-               if (wpc->io_type == XFS_IO_HOLE)
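+               /* Holes are never added to an ioend; skip this block. */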
+               if (wpc->imap.br_startblock == HOLESTARTBLOCK)
                        continue;
                xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
                                 &submit_list);
        struct page             *page,
        struct writeback_control *wbc)
 {
-       struct xfs_writepage_ctx wpc = {
-               .io_type = XFS_IO_HOLE,
-       };
+       struct xfs_writepage_ctx wpc = { };
        int                     ret;
 
        ret = xfs_do_writepage(page, wbc, &wpc);
        struct address_space    *mapping,
        struct writeback_control *wbc)
 {
-       struct xfs_writepage_ctx wpc = {
-               .io_type = XFS_IO_HOLE,
-       };
+       struct xfs_writepage_ctx wpc = { };
        int                     ret;
 
        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
 
 DEFINE_READPAGE_EVENT(xfs_vm_readpage);
 DEFINE_READPAGE_EVENT(xfs_vm_readpages);
 
-TRACE_DEFINE_ENUM(XFS_IO_HOLE);
-TRACE_DEFINE_ENUM(XFS_IO_DELALLOC);
-TRACE_DEFINE_ENUM(XFS_IO_UNWRITTEN);
-TRACE_DEFINE_ENUM(XFS_IO_OVERWRITE);
-TRACE_DEFINE_ENUM(XFS_IO_COW);
-
 DECLARE_EVENT_CLASS(xfs_imap_class,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
-                int type, struct xfs_bmbt_irec *irec),
-       TP_ARGS(ip, offset, count, type, irec),
+                int whichfork, struct xfs_bmbt_irec *irec),
+       TP_ARGS(ip, offset, count, whichfork, irec),
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(loff_t, size)
                __field(loff_t, offset)
                __field(size_t, count)
-               __field(int, type)
+               __field(int, whichfork)
                __field(xfs_fileoff_t, startoff)
                __field(xfs_fsblock_t, startblock)
                __field(xfs_filblks_t, blockcount)
                __entry->size = ip->i_d.di_size;
                __entry->offset = offset;
                __entry->count = count;
-               __entry->type = type;
+               __entry->whichfork = whichfork;
                __entry->startoff = irec ? irec->br_startoff : 0;
                __entry->startblock = irec ? irec->br_startblock : 0;
                __entry->blockcount = irec ? irec->br_blockcount : 0;
        ),
        TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
-                 "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
+                 "fork %s startoff 0x%llx startblock %lld blockcount 0x%llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
                  __entry->offset,
                  __entry->count,
-                 __print_symbolic(__entry->type, XFS_IO_TYPES),
+                 __entry->whichfork == XFS_COW_FORK ? "cow" : "data",
                  __entry->startoff,
                  (int64_t)__entry->startblock,
                  __entry->blockcount)
 )
 
-#define DEFINE_IOMAP_EVENT(name)       \
+#define DEFINE_IMAP_EVENT(name)        \
 DEFINE_EVENT(xfs_imap_class, name,     \
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
-                int type, struct xfs_bmbt_irec *irec),         \
-       TP_ARGS(ip, offset, count, type, irec))
-DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
-DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
+                int whichfork, struct xfs_bmbt_irec *irec),            \
+       TP_ARGS(ip, offset, count, whichfork, irec))
+DEFINE_IMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IMAP_EVENT(xfs_iomap_alloc);
+DEFINE_IMAP_EVENT(xfs_iomap_found);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
 DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
 DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
 DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
-DEFINE_IOMAP_EVENT(xfs_reflink_remap_imap);
+DEFINE_IMAP_EVENT(xfs_reflink_remap_imap);
 TRACE_EVENT(xfs_reflink_remap_blocks_loop,
        TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
                 xfs_filblks_t len, struct xfs_inode *dest,