 #include <linux/mman.h>
 #include <linux/sched/mm.h>
 #include <linux/crc32.h>
+#include <linux/task_io_accounting_ops.h>
 
 #include "zonefs.h"
 
        .end_io                 = zonefs_file_write_dio_end_io,
 };
 
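+/*
+ * Write data to a sequential zone file using a single zone append BIO that is
+ * issued and waited on synchronously, bypassing the iomap direct I/O path.
+ */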
+static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct block_device *bdev = inode->i_sb->s_bdev;
+       unsigned int max;
+       struct bio *bio;
+       ssize_t size;
+       int nr_pages;
+       ssize_t ret;
+
+       nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+       if (!nr_pages)
+               return 0;
+
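+       /*
+        * Limit the write to the maximum zone append size supported by the
+        * device, aligned down to the file system block size.
+        */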
+       max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
+       max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+       iov_iter_truncate(from, max);
+
+       bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
+       if (!bio)
+               return -ENOMEM;
+
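+       /*
+        * Build a zone append BIO targeting the start sector of the file
+        * zone. The device decides the actual write location within the zone.
+        */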
+       bio_set_dev(bio, bdev);
+       bio->bi_iter.bi_sector = zi->i_zsector;
+       bio->bi_write_hint = iocb->ki_hint;
+       bio->bi_ioprio = iocb->ki_ioprio;
+       bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
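+       /* Make O_DSYNC writes durable on completion using FUA */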
+       if (iocb->ki_flags & IOCB_DSYNC)
+               bio->bi_opf |= REQ_FUA;
+
+       ret = bio_iov_iter_get_pages(bio, from);
+       if (unlikely(ret)) {
+               bio_release_pages(bio, false);
+               bio_put(bio);
+               return ret;
+       }
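+       /* Charge the bytes queued in the BIO to the task's write accounting */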
+       size = bio->bi_iter.bi_size;
+       task_io_account_write(size);
+
+       if (iocb->ki_flags & IOCB_HIPRI)
+               bio_set_polled(bio, iocb);
+
+       ret = submit_bio_wait(bio);
+
+       bio_put(bio);
+
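+       /*
+        * Let the direct I/O end_io handler update the zone write pointer and
+        * inode size, then advance the file position on success.
+        */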
+       zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+       if (ret >= 0) {
+               iocb->ki_pos += size;
+               return size;
+       }
+
+       return ret;
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
        struct inode *inode = file_inode(iocb->ki_filp);
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
        struct super_block *sb = inode->i_sb;
+       bool sync = is_sync_kiocb(iocb);
+       bool append = false;
        size_t count;
        ssize_t ret;
 
         * as this can cause write reordering (e.g. the first aio gets EAGAIN
         * on the inode lock but the second goes through but is now unaligned).
         */
-       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
            (iocb->ki_flags & IOCB_NOWAIT))
                return -EOPNOTSUPP;
 
        }
 
        /* Enforce sequential writes (append only) in sequential zones */
-       mutex_lock(&zi->i_truncate_mutex);
-       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+               mutex_lock(&zi->i_truncate_mutex);
+               if (iocb->ki_pos != zi->i_wpoffset) {
+                       mutex_unlock(&zi->i_truncate_mutex);
+                       ret = -EINVAL;
+                       goto inode_unlock;
+               }
                mutex_unlock(&zi->i_truncate_mutex);
-               ret = -EINVAL;
-               goto inode_unlock;
+               append = sync;
        }
-       mutex_unlock(&zi->i_truncate_mutex);
 
-       ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
-                          &zonefs_write_dio_ops, is_sync_kiocb(iocb));
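+       /*
+        * Synchronous writes to sequential zone files are issued as zone
+        * append BIOs; all other writes go through the iomap direct I/O path.
+        */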
+       if (append)
+               ret = zonefs_file_dio_append(iocb, from);
+       else
+               ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+                                  &zonefs_write_dio_ops, sync);
        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
            (ret > 0 || ret == -EIOCBQUEUED)) {
                if (ret > 0)