qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = generic_ref->ref_root;
                }
-               qrecord->bytenr = generic_ref->bytenr;
                qrecord->num_bytes = generic_ref->num_bytes;
                qrecord->old_roots = NULL;
        }
        if (qrecord) {
                int ret;
 
-               ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord);
+               ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
+                                                      head_ref->bytenr);
                if (ret) {
                        /* Clean up if insertion fails or item exists. */
                        xa_release(&delayed_refs->dirty_extents,
-                                  qrecord->bytenr >> fs_info->sectorsize_bits);
+                                  head_ref->bytenr >> fs_info->sectorsize_bits);
                        /* Caller responsible for freeing qrecord on error. */
                        if (ret < 0)
                                return ERR_PTR(ret);
                kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
        if (qrecord_inserted)
-               return btrfs_qgroup_trace_extent_post(trans, record);
+               return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
        return 0;
 
 free_record:
 
  * Return <0 for insertion failure, caller can free @record safely.
  */
 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
-                               struct btrfs_delayed_ref_root *delayed_refs,
-                               struct btrfs_qgroup_extent_record *record)
+                                    struct btrfs_delayed_ref_root *delayed_refs,
+                                    struct btrfs_qgroup_extent_record *record,
+                                    u64 bytenr)
 {
        struct btrfs_qgroup_extent_record *existing, *ret;
-       const unsigned long index = (record->bytenr >> fs_info->sectorsize_bits);
+       const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
 
        if (!btrfs_qgroup_full_accounting(fs_info))
                return 1;
 
 #if BITS_PER_LONG == 32
-       if (record->bytenr >= MAX_LFS_FILESIZE) {
+       if (bytenr >= MAX_LFS_FILESIZE) {
                btrfs_err_rl(fs_info,
 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
-                            record->bytenr);
+                            bytenr);
                btrfs_err_32bit_limit(fs_info);
                return -EOVERFLOW;
        }
 #endif
 
        lockdep_assert_held(&delayed_refs->lock);
-       trace_btrfs_qgroup_trace_extent(fs_info, record);
+       trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
 
        xa_lock(&delayed_refs->dirty_extents);
        existing = xa_load(&delayed_refs->dirty_extents, index);
  * transaction committing, but not now as qgroup accounting will be wrong again.
  */
 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
-                                  struct btrfs_qgroup_extent_record *qrecord)
+                                  struct btrfs_qgroup_extent_record *qrecord,
+                                  u64 bytenr)
 {
        struct btrfs_backref_walk_ctx ctx = { 0 };
        int ret;
        if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
                return 0;
 
-       ctx.bytenr = qrecord->bytenr;
+       ctx.bytenr = bytenr;
        ctx.fs_info = trans->fs_info;
 
        ret = btrfs_find_all_roots(&ctx, true);
        }
 
        delayed_refs = &trans->transaction->delayed_refs;
-       record->bytenr = bytenr;
        record->num_bytes = num_bytes;
        record->old_roots = NULL;
 
        spin_lock(&delayed_refs->lock);
-       ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
+       ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
        spin_unlock(&delayed_refs->lock);
        if (ret) {
                /* Clean up if insertion fails or item exists. */
                kfree(record);
                return 0;
        }
-       return btrfs_qgroup_trace_extent_post(trans, record);
+       return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
 }
 
 /*
        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;
        xa_for_each(&delayed_refs->dirty_extents, index, record) {
+               const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
+
                num_dirty_extents++;
-               trace_btrfs_qgroup_account_extents(fs_info, record);
+               trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
 
                if (!ret && !(fs_info->qgroup_flags &
                              BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
                        struct btrfs_backref_walk_ctx ctx = { 0 };
 
-                       ctx.bytenr = record->bytenr;
+                       ctx.bytenr = bytenr;
                        ctx.fs_info = fs_info;
 
                        /*
                                ulist_del(record->old_roots, qgroup_to_skip,
                                          0);
                        }
-                       ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+                       ret = btrfs_qgroup_account_extent(trans, bytenr,
                                                          record->num_bytes,
                                                          record->old_roots,
                                                          new_roots);
 
  * Record a dirty extent, and info qgroup to update quota on it
  */
 struct btrfs_qgroup_extent_record {
-       u64 bytenr;
+       /*
+        * The bytenr of the extent is no longer stored here: it is given by
+        * the record's index in the dirty_extents xarray of struct
+        * btrfs_delayed_ref_root, left shifted by fs_info->sectorsize_bits.
+        */
+
        u64 num_bytes;
 
        /*
 int btrfs_qgroup_trace_extent_nolock(
                struct btrfs_fs_info *fs_info,
                struct btrfs_delayed_ref_root *delayed_refs,
-               struct btrfs_qgroup_extent_record *record);
+               struct btrfs_qgroup_extent_record *record,
+               u64 bytenr);
 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
-                                  struct btrfs_qgroup_extent_record *qrecord);
+                                  struct btrfs_qgroup_extent_record *qrecord,
+                                  u64 bytenr);
 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
                              u64 num_bytes);
 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
 
 
 DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
        TP_PROTO(const struct btrfs_fs_info *fs_info,
-                const struct btrfs_qgroup_extent_record *rec),
+                const struct btrfs_qgroup_extent_record *rec,
+                u64 bytenr),
 
-       TP_ARGS(fs_info, rec),
+       TP_ARGS(fs_info, rec, bytenr),
 
        TP_STRUCT__entry_btrfs(
                __field(        u64,  bytenr            )
        ),
 
        TP_fast_assign_btrfs(fs_info,
-               __entry->bytenr         = rec->bytenr;
+               __entry->bytenr         = bytenr;
                __entry->num_bytes      = rec->num_bytes;
        ),
 
 DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
 
        TP_PROTO(const struct btrfs_fs_info *fs_info,
-                const struct btrfs_qgroup_extent_record *rec),
+                const struct btrfs_qgroup_extent_record *rec,
+                u64 bytenr),
 
-       TP_ARGS(fs_info, rec)
+       TP_ARGS(fs_info, rec, bytenr)
 );
 
 DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
 
        TP_PROTO(const struct btrfs_fs_info *fs_info,
-                const struct btrfs_qgroup_extent_record *rec),
+                const struct btrfs_qgroup_extent_record *rec,
+                u64 bytenr),
 
-       TP_ARGS(fs_info, rec)
+       TP_ARGS(fs_info, rec, bytenr)
 );
 
 TRACE_EVENT(qgroup_num_dirty_extents,