                generic_fill_statx_atomic_writes(stat,
                        queue_atomic_write_unit_min_bytes(bd_queue),
-                       queue_atomic_write_unit_max_bytes(bd_queue));
+                       queue_atomic_write_unit_max_bytes(bd_queue),
+                       0);
        }
 
        stat->blksize = bdev_io_min(bdev);
 
                        awu_max = sbi->s_awu_max;
                }
 
-               generic_fill_statx_atomic_writes(stat, awu_min, awu_max);
+               generic_fill_statx_atomic_writes(stat, awu_min, awu_max, 0);
        }
 
        flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
 
  * @stat:      Where to fill in the attribute flags
  * @unit_min:  Minimum supported atomic write length in bytes
  * @unit_max:  Maximum supported atomic write length in bytes
+ * @unit_max_opt: Optimised maximum supported atomic write length in bytes
  *
- * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
- * atomic write unit_min and unit_max values.
+ * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
+ * atomic write unit_min, unit_max and unit_max_opt values.
  */
 void generic_fill_statx_atomic_writes(struct kstat *stat,
                                      unsigned int unit_min,
-                                     unsigned int unit_max)
+                                     unsigned int unit_max,
+                                     unsigned int unit_max_opt)
 {
        /* Confirm that the request type is known */
        stat->result_mask |= STATX_WRITE_ATOMIC;
        if (unit_min) {
                stat->atomic_write_unit_min = unit_min;
                stat->atomic_write_unit_max = unit_max;
+               stat->atomic_write_unit_max_opt = unit_max_opt;
                /* Initially only allow 1x segment */
                stat->atomic_write_segments_max = 1;
 
        tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
        tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
        tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;
+       tmp.stx_atomic_write_unit_max_opt = stat->atomic_write_unit_max_opt;
 
        return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
        if (xfs_inode_can_atomicwrite(ip))
                unit_min = unit_max = ip->i_mount->m_sb.sb_blocksize;
-       generic_fill_statx_atomic_writes(stat, unit_min, unit_max);
+       generic_fill_statx_atomic_writes(stat, unit_min, unit_max, 0);
 }
 
 STATIC int
 
 void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
 void generic_fill_statx_atomic_writes(struct kstat *stat,
                                      unsigned int unit_min,
-                                     unsigned int unit_max);
+                                     unsigned int unit_max,
+                                     unsigned int unit_max_opt);
 extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
 extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 
        u32             dio_read_offset_align;
        u32             atomic_write_unit_min;
        u32             atomic_write_unit_max;
+       u32             atomic_write_unit_max_opt;
        u32             atomic_write_segments_max;
 };
 
        /* File offset alignment for direct I/O reads */
        __u32   stx_dio_read_offset_align;
 
-       /* 0xb8 */
-       __u64   __spare3[9];    /* Spare space for future expansion */
+       /* Optimised max atomic write unit in bytes */
+       __u32   stx_atomic_write_unit_max_opt;
+       __u32   __spare2[1];
+
+       /* 0xc0 */
+       __u64   __spare3[8];    /* Spare space for future expansion */
 
        /* 0x100 */
 };
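
From userspace, the new field is returned alongside the existing atomic
write fields whenever STATX_WRITE_ATOMIC is requested; no new mask bit is
added by this patch. A minimal sketch, assuming uapi headers new enough
to carry stx_atomic_write_unit_max_opt:

/* Query a file's atomic write limits, including the optimised max. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>      /* AT_FDCWD */
#include <sys/stat.h>   /* statx(), struct statx, STATX_WRITE_ATOMIC */

int main(int argc, char **argv)
{
        struct statx stx;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <path>\n", argv[0]);
                return 1;
        }
        if (statx(AT_FDCWD, argv[1], 0, STATX_WRITE_ATOMIC, &stx) != 0) {
                perror("statx");
                return 1;
        }
        if (stx.stx_mask & STATX_WRITE_ATOMIC)
                printf("atomic: min=%u max=%u max_opt=%u segments=%u\n",
                       stx.stx_atomic_write_unit_min,
                       stx.stx_atomic_write_unit_max,
                       stx.stx_atomic_write_unit_max_opt,
                       stx.stx_atomic_write_segments_max);
        return 0;
}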