int err = 0;
        int metadata = !extent_op->is_data;
 
-       if (trans->aborted)
+       if (TRANS_ABORTED(trans))
                return 0;
 
        if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 {
        int ret = 0;
 
-       if (trans->aborted) {
+       if (TRANS_ABORTED(trans)) {
                if (insert_reserved)
                        btrfs_pin_extent(trans->fs_info, node->bytenr,
                                         node->num_bytes, 1);
        int run_all = count == (unsigned long)-1;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
-       if (trans->aborted)
+       if (TRANS_ABORTED(trans))
                return 0;
 
        if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
        else
                unpin = &fs_info->freed_extents[0];
 
-       while (!trans->aborted) {
+       while (!TRANS_ABORTED(trans)) {
                struct extent_state *cached_state = NULL;
 
                mutex_lock(&fs_info->unused_bg_unpin_mutex);
                u64 trimmed = 0;
 
                ret = -EROFS;
-               if (!trans->aborted)
+               if (!TRANS_ABORTED(trans))
                        ret = btrfs_discard_extent(fs_info,
                                                   block_group->start,
                                                   block_group->length,
 
 
        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
-               if (cur_trans->aborted) {
+               if (TRANS_ABORTED(cur_trans)) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
 {
        return (trans->state >= TRANS_STATE_COMMIT_START &&
                trans->state < TRANS_STATE_UNBLOCKED &&
-               !trans->aborted);
+               !TRANS_ABORTED(trans));
 }
 
 /* wait for commit against the current transaction to become unblocked
 
                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
-                          cur_trans->aborted);
+                          TRANS_ABORTED(cur_trans));
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        if (throttle)
                btrfs_run_delayed_iputs(info);
 
-       if (trans->aborted ||
+       if (TRANS_ABORTED(trans) ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
                                            struct btrfs_transaction *trans)
 {
        wait_event(fs_info->transaction_blocked_wait,
-                  trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
+                  trans->state >= TRANS_STATE_COMMIT_START ||
+                  TRANS_ABORTED(trans));
 }
 
 /*
                                        struct btrfs_transaction *trans)
 {
        wait_event(fs_info->transaction_wait,
-                  trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
+                  trans->state >= TRANS_STATE_UNBLOCKED ||
+                  TRANS_ABORTED(trans));
 }
 
 /*
        trans->dirty = true;
 
        /* Stop the commit early if ->aborted is set */
-       if (unlikely(READ_ONCE(cur_trans->aborted))) {
+       if (TRANS_ABORTED(cur_trans)) {
                ret = cur_trans->aborted;
                btrfs_end_transaction(trans);
                return ret;
 
                wait_for_commit(cur_trans);
 
-               if (unlikely(cur_trans->aborted))
+               if (TRANS_ABORTED(cur_trans))
                        ret = cur_trans->aborted;
 
                btrfs_put_transaction(cur_trans);
                        spin_unlock(&fs_info->trans_lock);
 
                        wait_for_commit(prev_trans);
-                       ret = prev_trans->aborted;
+                       ret = READ_ONCE(prev_trans->aborted);
 
                        btrfs_put_transaction(prev_trans);
                        if (ret)
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);
 
-       /* ->aborted might be set after the previous check, so check it */
-       if (unlikely(READ_ONCE(cur_trans->aborted))) {
+       if (TRANS_ABORTED(cur_trans)) {
                ret = cur_trans->aborted;
                goto scrub_continue;
        }
         * The tasks which save the space cache and inode cache may also
         * update ->aborted, check it.
         */
-       if (unlikely(READ_ONCE(cur_trans->aborted))) {
+       if (TRANS_ABORTED(cur_trans)) {
                ret = cur_trans->aborted;
                mutex_unlock(&fs_info->tree_log_mutex);
                mutex_unlock(&fs_info->reloc_mutex);
 
        struct btrfs_block_rsv *orig_rsv;
        refcount_t use_count;
        unsigned int type;
+       /*
+        * Error code of the transaction abort. It is set outside of locks,
+        * so all accesses must use READ_ONCE/WRITE_ONCE.
+        */
        short aborted;
        bool adding_csums;
        bool allocating_chunk;
        struct list_head new_bgs;
 };
 
+/*
+ * The abort status can be changed between calls and is not protected by locks.
+ * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
+ * set to a non-zero value it does not change, so the macro should be used in
+ * checks but is not necessary for subsequent reads of the value.
+ */
+#define TRANS_ABORTED(trans)           (unlikely(READ_ONCE((trans)->aborted)))
+
 struct btrfs_pending_snapshot {
        struct dentry *dentry;
        struct inode *dir;