* Since we don't pass back blockdev info, we can't return bmap
         * information for rt files either.
         */
-       if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
+       if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
        return iomap_bmap(mapping, block, &xfs_iomap_ops);
 }
 
         * by virtue of the hole punch.
         */
        error = xfs_free_file_space(ip, offset, len);
-       if (error)
-               goto out;
+       if (error || xfs_is_always_cow_inode(ip))
+               return error;
 
-       error = xfs_alloc_file_space(ip, round_down(offset, blksize),
+       return xfs_alloc_file_space(ip, round_down(offset, blksize),
                                     round_up(offset + len, blksize) -
                                     round_down(offset, blksize),
                                     XFS_BMAPI_PREALLOC);
-out:
-       return error;
-
 }
 
 static int
 
                 * We can't properly handle unaligned direct I/O to reflink
                 * files yet, as we can't unshare a partial block.
                 */
-               if (xfs_is_reflink_inode(ip)) {
+               if (xfs_is_cow_inode(ip)) {
                        trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
                        return -EREMCHG;
                }
                                goto out_unlock;
                }
 
-               if (mode & FALLOC_FL_ZERO_RANGE)
+               if (mode & FALLOC_FL_ZERO_RANGE) {
                        error = xfs_zero_file_space(ip, offset, len);
-               else {
-                       if (mode & FALLOC_FL_UNSHARE_RANGE) {
-                               error = xfs_reflink_unshare(ip, offset, len);
-                               if (error)
-                                       goto out_unlock;
+               } else if (mode & FALLOC_FL_UNSHARE_RANGE) {
+                       error = xfs_reflink_unshare(ip, offset, len);
+                       if (error)
+                               goto out_unlock;
+
+                       if (!xfs_is_always_cow_inode(ip)) {
+                               error = xfs_alloc_file_space(ip, offset, len,
+                                               XFS_BMAPI_PREALLOC);
                        }
+               } else {
+                       /*
+                        * In always_cow mode we can't use preallocations and
+                        * thus should not create them.
+                        */
+                       if (xfs_is_always_cow_inode(ip)) {
+                               error = -EOPNOTSUPP;
+                               goto out_unlock;
+                       }
+
                        error = xfs_alloc_file_space(ip, offset, len,
                                                     XFS_BMAPI_PREALLOC);
                }
 
 STATIC xfs_fsblock_t
 xfs_iomap_prealloc_size(
        struct xfs_inode        *ip,
+       int                     whichfork,
        loff_t                  offset,
        loff_t                  count,
        struct xfs_iext_cursor  *icur)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        struct xfs_bmbt_irec    prev;
        int                     shift = 0;
         * themselves.  Second the lookup in the extent list is generally faster
         * than going out to the shared extent tree.
         */
-       if (xfs_is_reflink_inode(ip)) {
+       if (xfs_is_cow_inode(ip)) {
+               if (!ip->i_cowfp) {
+                       ASSERT(!xfs_is_reflink_inode(ip));
+                       xfs_ifork_init_cow(ip);
+               }
                cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
                                &ccur, &cmap);
                if (!cow_eof && cmap.br_startoff <= offset_fsb) {
                 * overwriting shared extents.   This includes zeroing of
                 * existing extents that contain data.
                 */
-               if (!xfs_is_reflink_inode(ip) ||
+               if (!xfs_is_cow_inode(ip) ||
                    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
                        trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
                                        &imap);
                xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
 
                /* Trim the mapping to the nearest shared extent boundary. */
-               error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
+               error = xfs_inode_need_cow(ip, &imap, &shared);
                if (error)
                        goto out_unlock;
 
                 */
                count = min_t(loff_t, count, 1024 * PAGE_SIZE);
                end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+
+               if (xfs_is_always_cow_inode(ip))
+                       whichfork = XFS_COW_FORK;
        }
 
        error = xfs_qm_dqattach_locked(ip, false);
        if (error)
                goto out_unlock;
 
-       if (eof && whichfork == XFS_DATA_FORK) {
-               prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
-                               &icur);
+       if (eof) {
+               prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
+                               count, &icur);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
-       if (xfs_is_reflink_inode(ip) && is_write) {
+       if (xfs_is_cow_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
         * check, so if we got ILOCK_SHARED for a write and but we're now a
         * reflink inode we have to switch to ILOCK_EXCL and relock.
         */
-       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
                xfs_iunlock(ip, mode);
                mode = XFS_ILOCK_EXCL;
                goto relock;
         * Break shared extents if necessary. Checks for non-blocking IO have
         * been done up front, so we don't need to do them here.
         */
-       if (xfs_is_reflink_inode(ip)) {
+       if (xfs_is_cow_inode(ip)) {
                struct xfs_bmbt_irec    orig = imap;
 
                /* if zeroing doesn't need COW allocation, then we are done. */
 
         */
        uint32_t                m_generation;
 
+       bool                    m_always_cow;
        bool                    m_fail_unmount;
 #ifdef DEBUG
        /*
 
        int                     error = 0;
 
        /* Holes, unwritten, and delalloc extents cannot be shared */
-       if (!xfs_is_reflink_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
+       if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
                *shared = false;
                return 0;
        }
        }
 }
 
+/*
+ * Decide if this extent needs to be copied on write, trimming the mapping
+ * to the nearest shared extent boundary.  Sets *shared when a COW is
+ * required.  Returns 0 on success or a negative errno -- callers assign
+ * the result to an int error variable, so the return type must be int,
+ * not bool (a negative errno truncated to bool would read as 1).
+ */
+int
+xfs_inode_need_cow(
+       struct xfs_inode        *ip,
+       struct xfs_bmbt_irec    *imap,
+       bool                    *shared)
+{
+       /* We can't update any real extents in always COW mode. */
+       if (xfs_is_always_cow_inode(ip) &&
+           !isnullstartblock(imap->br_startblock)) {
+               *shared = true;
+               return 0;
+       }
+
+       /* Trim the mapping to the nearest shared extent boundary. */
+       return xfs_reflink_trim_around_shared(ip, imap, shared);
+}
+
 static int
 xfs_reflink_convert_cow_locked(
        struct xfs_inode        *ip,
        if (got.br_startoff > offset_fsb) {
                xfs_trim_extent(imap, imap->br_startoff,
                                got.br_startoff - imap->br_startoff);
-               return xfs_reflink_trim_around_shared(ip, imap, shared);
+               return xfs_inode_need_cow(ip, imap, shared);
        }
 
        *shared = true;
        xfs_extlen_t            resblks = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-       ASSERT(xfs_is_reflink_inode(ip));
+       if (!ip->i_cowfp) {
+               ASSERT(!xfs_is_reflink_inode(ip));
+               xfs_ifork_init_cow(ip);
+       }
 
        error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
        if (error || !*shared)
        int                     error;
 
        trace_xfs_reflink_cancel_cow_range(ip, offset, count);
-       ASSERT(xfs_is_reflink_inode(ip));
+       ASSERT(ip->i_cowfp);
 
        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        if (count == NULLFILEOFF)
 
 #ifndef __XFS_REFLINK_H
 #define __XFS_REFLINK_H 1
 
+static inline bool xfs_is_always_cow_inode(struct xfs_inode *ip)
+{
+       return ip->i_mount->m_always_cow &&
+               xfs_sb_version_hasreflink(&ip->i_mount->m_sb);
+}
+
+static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
+{
+       return xfs_is_reflink_inode(ip) || xfs_is_always_cow_inode(ip);
+}
+
 extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
                xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
                struct xfs_bmbt_irec *irec, bool *shared);
+int xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+               bool *shared);
 
 extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
                struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode,
 
                }
        }
 
-       if (xfs_sb_version_hasreflink(&mp->m_sb) && mp->m_sb.sb_rblocks) {
-               xfs_alert(mp,
+       if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+               if (mp->m_sb.sb_rblocks) {
+                       xfs_alert(mp,
        "reflink not compatible with realtime device!");
-               error = -EINVAL;
-               goto out_filestream_unmount;
+                       error = -EINVAL;
+                       goto out_filestream_unmount;
+               }
+
+               if (xfs_globals.always_cow) {
+                       xfs_info(mp, "using DEBUG-only always_cow mode.");
+                       mp->m_always_cow = true;
+               }
        }
 
        if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
 
        int     log_recovery_delay;     /* log recovery delay (secs) */
        int     mount_delay;            /* mount setup delay (secs) */
        bool    bug_on_assert;          /* BUG() the kernel on assert failure */
+       bool    always_cow;             /* use COW fork for all overwrites */
 };
 extern struct xfs_globals      xfs_globals;
 
 
 }
 XFS_SYSFS_ATTR_RW(mount_delay);
 
+static ssize_t
+always_cow_store(
+       struct kobject  *kobject,
+       const char      *buf,
+       size_t          count)
+{
+       ssize_t         ret;
+
+       ret = kstrtobool(buf, &xfs_globals.always_cow);
+       if (ret < 0)
+               return ret;
+       return count;
+}
+
+static ssize_t
+always_cow_show(
+       struct kobject  *kobject,
+       char            *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
+}
+XFS_SYSFS_ATTR_RW(always_cow);
+
 static struct attribute *xfs_dbg_attrs[] = {
        ATTR_LIST(bug_on_assert),
        ATTR_LIST(log_recovery_delay),
        ATTR_LIST(mount_delay),
+       ATTR_LIST(always_cow),
        NULL,
 };