i == READ ? "read" : "write",
a.v->io_time[i], LRU_TIME_MAX);
+ unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(a.v) * sizeof(u64) >
+ offsetof(struct bch_alloc_v4, stripe_sectors)
+ ? a.v->stripe_sectors
+ : 0;
+
switch (a.v->data_type) {
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
case BCH_DATA_need_discard:
- bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
+ bkey_fsck_err_on(stripe_sectors ||
+ a.v->dirty_sectors ||
+ a.v->cached_sectors ||
+ a.v->stripe,
c, err, alloc_key_empty_but_have_data,
- "empty data type free but have data");
+ "empty data type free but have data %u.%u.%u %u",
+ stripe_sectors,
+ a.v->dirty_sectors,
+ a.v->cached_sectors,
+ a.v->stripe);
break;
case BCH_DATA_sb:
case BCH_DATA_journal:
case BCH_DATA_btree:
case BCH_DATA_user:
case BCH_DATA_parity:
- bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+ bkey_fsck_err_on(!a.v->dirty_sectors &&
+ !stripe_sectors,
c, err, alloc_key_dirty_sectors_0,
"data_type %s but dirty_sectors==0",
bch2_data_type_str(a.v->data_type));
break;
case BCH_DATA_cached:
bkey_fsck_err_on(!a.v->cached_sectors ||
- bch2_bucket_sectors_dirty(*a.v) ||
+ a.v->dirty_sectors ||
+ stripe_sectors ||
a.v->stripe,
c, err, alloc_key_cached_inconsistency,
"data type inconsistency");
a->stripe = swab32(a->stripe);
a->nr_external_backpointers = swab32(a->nr_external_backpointers);
a->fragmentation_lru = swab64(a->fragmentation_lru);
+ a->stripe_sectors = swab32(a->stripe_sectors);
bps = alloc_v4_backpointers(a);
for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
+ prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
prt_printf(out, "stripe %u\n", a->stripe);
prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
a->v.gen++;
a->v.data_type = 0;
a->v.dirty_sectors = 0;
+ a->v.stripe_sectors = 0;
a->v.cached_sectors = 0;
a->v.io_time[READ] = bch2_current_io_time(c, READ);
a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
{
dst->gen = src.gen;
dst->data_type = src.data_type;
+ dst->stripe_sectors = src.stripe_sectors;
dst->dirty_sectors = src.dirty_sectors;
dst->cached_sectors = src.cached_sectors;
dst->stripe = src.stripe;
{
dst->gen = src.gen;
dst->data_type = src.data_type;
+ dst->stripe_sectors = src.stripe_sectors;
dst->dirty_sectors = src.dirty_sectors;
dst->cached_sectors = src.cached_sectors;
dst->stripe = src.stripe;
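Both conversion helpers between the key value (struct bch_alloc_v4) and the in-memory bucket (struct bucket) copy the new field; missing either side would silently drop stripe accounting on one path. A hypothetical round-trip check, not in the patch, written against bucket_m_to_alloc() and alloc_to_bucket() as they appear in the trigger hunk further down:

	struct bucket b = { .dirty_sectors = 8, .stripe_sectors = 16 }, b2 = {};
	struct bch_alloc_v4 a = bucket_m_to_alloc(b);	/* in-memory -> key value */

	alloc_to_bucket(&b2, a);			/* key value -> in-memory */
	BUG_ON(b2.stripe_sectors != b.stripe_sectors);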
static inline unsigned bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
- return a.dirty_sectors + a.cached_sectors;
+ return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
}
static inline unsigned bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
{
- return a.dirty_sectors;
+ return a.stripe_sectors + a.dirty_sectors;
}
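After this change "dirty" means "pins the bucket" rather than literally a.dirty_sectors: sectors owned by an erasure-coded stripe count as dirty for allocation purposes while still being tracked separately. Note that bch2_bucket_sectors_fragmented(), which is built on bch2_bucket_sectors_dirty(), picks up the new meaning without a diff of its own. A worked example with hypothetical values:

	struct bch_alloc_v4 a = {
		.stripe_sectors	= 64,	/* owned by an erasure-coded stripe */
		.dirty_sectors	= 32,	/* ordinary dirty data */
		.cached_sectors	= 16,	/* evictable cached data */
	};

	/* bch2_bucket_sectors_dirty(a) == 64 + 32      ==  96 */
	/* bch2_bucket_sectors_total(a) == 64 + 32 + 16 == 112 */

The next hunk applies the same definition inside alloc_data_type():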
static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
if (a.stripe)
return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
- if (a.dirty_sectors)
+ if (bch2_bucket_sectors_dirty(a))
return data_type;
if (a.cached_sectors)
return BCH_DATA_cached;
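The effect: a bucket whose only contents are stripe-owned sectors no longer falls through to BCH_DATA_cached or the empty case. Illustratively, assuming a bucket where a.stripe is not set but stripe_sectors is:

	struct bch_alloc_v4 a = { .stripe_sectors = 64 };

	/* before: a.dirty_sectors == 0, so the check fell through    */
	/* after:  bch2_bucket_sectors_dirty(a) == 64, so             */
	/*         alloc_data_type(a, BCH_DATA_user) == BCH_DATA_user */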
__u32 stripe;
__u32 nr_external_backpointers;
__u64 fragmentation_lru;
+ __u32 stripe_sectors;
+ __u32 pad;
} __packed __aligned(8);
#define BCH_ALLOC_V4_U64s_V0 6
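The explicit pad keeps sizeof(struct bch_alloc_v4) a multiple of sizeof(__u64): BCH_ALLOC_V4_BACKPOINTERS_START counts in u64 units, so the fixed fields must end on a u64 boundary or the size-gating check above stops working. A hypothetical compile-time assertion of the invariant, not in the patch:

	static_assert(sizeof(struct bch_alloc_v4) % sizeof(__u64) == 0,
		      "bch_alloc_v4 fixed fields must end on a u64 boundary");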
l.oldest_gen != r.oldest_gen ||
l.data_type != r.data_type ||
l.dirty_sectors != r.dirty_sectors ||
+ l.stripe_sectors != r.stripe_sectors ||
l.cached_sectors != r.cached_sectors ||
l.stripe_redundancy != r.stripe_redundancy ||
l.stripe != r.stripe;
copy_bucket_field(alloc_key_gen_wrong, gen);
copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
+ copy_bucket_field(alloc_key_stripe_sectors_wrong, stripe_sectors);
copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
copy_bucket_field(alloc_key_stripe_wrong, stripe);
copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
g->gen_valid = true;
g->gen = p.ptr.gen;
g->data_type = 0;
+ g->stripe_sectors = 0;
g->dirty_sectors = 0;
g->cached_sectors = 0;
} else {
g->gen_valid = true;
g->gen = p.ptr.gen;
g->data_type = data_type;
+ g->stripe_sectors = 0;
g->dirty_sectors = 0;
g->cached_sectors = 0;
} else {
static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
struct bkey_s_c k,
- const struct bch_extent_ptr *ptr,
+ const struct extent_ptr_decoded *p,
s64 sectors, enum bch_data_type ptr_data_type,
struct bch_alloc_v4 *a)
{
- u32 *dst_sectors = !ptr->cached
- ? &a->dirty_sectors
- : &a->cached_sectors;
- int ret = bch2_bucket_ref_update(trans, ca, k, ptr, sectors, ptr_data_type,
+ u32 *dst_sectors = p->has_ec ? &a->stripe_sectors :
+ !p->ptr.cached ? &a->dirty_sectors :
+ &a->cached_sectors;
+ int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
a->gen, a->data_type, dst_sectors);
if (ret)
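__mark_pointer now takes the decoded pointer because only struct extent_ptr_decoded carries has_ec, the flag saying this pointer is backed by an erasure-coded stripe; the raw bch_extent_ptr cannot distinguish the cases. The three-way counter selection, restated as a plain sketch:

	/* same routing as the ternary chain above */
	static u32 *sector_counter(struct bch_alloc_v4 *a,
				   const struct extent_ptr_decoded *p)
	{
		if (p->has_ec)		/* part of an erasure-coded stripe */
			return &a->stripe_sectors;
		if (!p->ptr.cached)	/* ordinary dirty pointer */
			return &a->dirty_sectors;
		return &a->cached_sectors;	/* evictable cached pointer */
	}

The two call sites below change in step, passing &p instead of &p.ptr.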
if (flags & BTREE_TRIGGER_transactional) {
struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket);
ret = PTR_ERR_OR_ZERO(a) ?:
- __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &a->v);
+ __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v);
if (ret)
goto err;
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
- ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
+ ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new);
if (!ret) {
alloc_to_bucket(g, new);
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
u32 stripe;
u32 dirty_sectors;
u32 cached_sectors;
-};
+ u32 stripe_sectors;
+} __aligned(sizeof(long));
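The in-memory struct bucket grows the same counter and becomes explicitly __aligned(sizeof(long)). The likely motivation, an assumption rather than something the patch states: bucket_lock(), seen in the trigger hunk above, is a bit-spinlock that treats part of the bucket as a machine word, so the struct needs word alignment independent of how the new field changes its size. Hypothetical compile-time checks under that assumption:

	static_assert(__alignof__(struct bucket) >= __alignof__(unsigned long),
		      "bit-spinlock needs a word-aligned bucket");
	static_assert(sizeof(struct bucket) >= sizeof(unsigned long),
		      "bit-spinlock reads a full word");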
struct bucket_array {
struct rcu_head rcu;