return d ? max(0, ca->mi.bucket_size - d) : 0;
}
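+
+/* dirty sectors in a bucket that is a member of an erasure coded stripe: */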
+static inline unsigned bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a)
+{
+ return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0;
+}
+
static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
enum bch_data_type data_type)
{
u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);
- u->d[BCH_DATA_cached].sectors += new->cached_sectors;
- u->d[BCH_DATA_cached].sectors -= old->cached_sectors;
-
u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);
+ u->d[BCH_DATA_cached].sectors -= old->cached_sectors;
+ u->d[BCH_DATA_cached].sectors += new->cached_sectors;
+
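+ /* count buckets holding any unstriped sectors, and the sectors themselves: */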
+ unsigned old_unstriped = bch2_bucket_sectors_unstriped(*old);
+ u->d[BCH_DATA_unstriped].buckets -= old_unstriped != 0;
+ u->d[BCH_DATA_unstriped].sectors -= old_unstriped;
+
+ unsigned new_unstriped = bch2_bucket_sectors_unstriped(*new);
+ u->d[BCH_DATA_unstriped].buckets += new_unstriped != 0;
+ u->d[BCH_DATA_unstriped].sectors += new_unstriped;
+
preempt_enable();
}
arg.bucket_size = ca->mi.bucket_size;
arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
- for (i = 0; i < BCH_DATA_NR; i++) {
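+ /* arg.d[] is part of the existing ioctl ABI and may be smaller than BCH_DATA_NR: */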
+ for (i = 0; i < ARRAY_SIZE(arg.d); i++) {
arg.d[i].buckets = src.d[i].buckets;
arg.d[i].sectors = src.d[i].sectors;
arg.d[i].fragmented = src.d[i].fragmented;
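
For reference, below is a small standalone sketch (not bcachefs code; the struct and
function names are invented for illustration) of the delta-style accounting the
bch2_dev_usage_update() hunk above performs: each transition of a bucket's alloc state
adds the new unstriped contribution and subtracts the old one, so a bucket is counted
under BCH_DATA_unstriped only while it has unstriped sectors.

#include <stdio.h>

struct alloc_state {
	int		stripe_member;	/* stands in for data_type == BCH_DATA_stripe */
	unsigned	dirty_sectors;
};

struct unstriped_usage {
	long long	buckets;
	long long	sectors;
};

/* mirrors bch2_bucket_sectors_unstriped(): dirty sectors of stripe member buckets */
static unsigned sectors_unstriped(struct alloc_state a)
{
	return a.stripe_member ? a.dirty_sectors : 0;
}

/* apply the old -> new delta, as the dev usage update does per data type */
static void usage_update(struct unstriped_usage *u,
			 struct alloc_state old, struct alloc_state new)
{
	unsigned old_unstriped = sectors_unstriped(old);
	unsigned new_unstriped = sectors_unstriped(new);

	u->buckets += (new_unstriped != 0) - (old_unstriped != 0);
	u->sectors += (long long) new_unstriped - (long long) old_unstriped;
}

int main(void)
{
	struct unstriped_usage u = {0};

	/* a stripe member bucket gains 64 dirty (unstriped) sectors */
	usage_update(&u, (struct alloc_state) { 1, 0 },
			 (struct alloc_state) { 1, 64 });
	printf("after write:   buckets %lld sectors %lld\n", u.buckets, u.sectors);

	/* the bucket's dirty sectors go away again */
	usage_update(&u, (struct alloc_state) { 1, 64 },
			 (struct alloc_state) { 1, 0 });
	printf("after cleanup: buckets %lld sectors %lld\n", u.buckets, u.sectors);
	return 0;
}

Because only old/new deltas are applied, opposite transitions cancel exactly, which is
why the kernel code only needs the old and new alloc keys rather than a full rescan.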