www.infradead.org Git - users/dwmw2/linux.git/commitdiff
bcachefs: Use try_cmpxchg() family of functions instead of cmpxchg()
author: Uros Bizjak <ubizjak@gmail.com>
Thu, 23 May 2024 09:19:26 +0000 (11:19 +0200)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 14 Jul 2024 23:00:12 +0000 (19:00 -0400)
Use try_cmpxchg() family of functions instead of
cmpxchg (*ptr, old, new) == old. x86 CMPXCHG instruction returns
success in ZF flag, so this change saves a compare after cmpxchg
(and related move instruction in front of cmpxchg).

Also, try_cmpxchg() implicitly assigns old *ptr value to "old" when
cmpxchg fails. There is no need to re-read the value in the loop.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Brian Foster <bfoster@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/io_write.c
fs/bcachefs/journal.c
fs/bcachefs/journal.h
fs/bcachefs/journal_io.c
fs/bcachefs/two_state_shared_lock.h

index 4f5e411771ba1f10df0666fb23fc0c8dcf9ef4ef..6a9b248217a08d00a567ca0f6e268577317ca0d2 100644 (file)
@@ -602,8 +602,8 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
        struct btree_cache *bc = &c->btree_cache;
        struct task_struct *old;
 
-       old = cmpxchg(&bc->alloc_lock, NULL, current);
-       if (old == NULL || old == current)
+       old = NULL;
+       if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current)
                goto success;
 
        if (!cl) {
@@ -614,8 +614,8 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
        closure_wait(&bc->alloc_wait, cl);
 
        /* Try again, after adding ourselves to waitlist */
-       old = cmpxchg(&bc->alloc_lock, NULL, current);
-       if (old == NULL || old == current) {
+       old = NULL;
+       if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) {
                /* We raced */
                closure_wake_up(&bc->alloc_wait);
                goto success;
index 7bca15c604f539ffd15962c1073c749bfd49cbba..0f3d01225878c746e758c844a64f4463274001bd 100644 (file)
@@ -1796,15 +1796,16 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
                                      struct btree_write *w)
 {
-       unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
+       unsigned long old, new;
 
+       old = READ_ONCE(b->will_make_reachable);
        do {
-               old = new = v;
+               new = old;
                if (!(old & 1))
                        break;
 
                new &= ~1UL;
-       } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
+       } while (!try_cmpxchg(&b->will_make_reachable, &old, new));
 
        if (old & 1)
                closure_put(&((struct btree_update *) new)->cl);
@@ -1815,14 +1816,14 @@ static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
        struct btree_write *w = btree_prev_write(b);
-       unsigned long old, new, v;
+       unsigned long old, new;
        unsigned type = 0;
 
        bch2_btree_complete_write(c, b, w);
 
-       v = READ_ONCE(b->flags);
+       old = READ_ONCE(b->flags);
        do {
-               old = new = v;
+               new = old;
 
                if ((old & (1U << BTREE_NODE_dirty)) &&
                    (old & (1U << BTREE_NODE_need_write)) &&
@@ -1842,7 +1843,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
                        new &= ~(1U << BTREE_NODE_write_in_flight);
                        new &= ~(1U << BTREE_NODE_write_in_flight_inner);
                }
-       } while ((v = cmpxchg(&b->flags, old, new)) != old);
+       } while (!try_cmpxchg(&b->flags, &old, new));
 
        if (new & (1U << BTREE_NODE_write_in_flight))
                __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
@@ -2014,8 +2015,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
         * dirty bit requires a write lock, we can't race with other threads
         * redirtying it:
         */
+       old = READ_ONCE(b->flags);
        do {
-               old = new = READ_ONCE(b->flags);
+               new = old;
 
                if (!(old & (1 << BTREE_NODE_dirty)))
                        return;
@@ -2046,7 +2048,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
                new |=  (1 << BTREE_NODE_write_in_flight_inner);
                new |=  (1 << BTREE_NODE_just_written);
                new ^=  (1 << BTREE_NODE_write_idx);
-       } while (cmpxchg_acquire(&b->flags, old, new) != old);
+       } while (!try_cmpxchg_acquire(&b->flags, &old, new));
 
        if (new & (1U << BTREE_NODE_need_write))
                return;
index 74e1ff225674c671fa10be121763c80e6190d9bb..5e67dcb30f331e6eabc2736c0ce708640ec6a473 100644 (file)
@@ -228,14 +228,14 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
        struct btree_write *w = container_of(pin, struct btree_write, journal);
        struct btree *b = container_of(w, struct btree, writes[i]);
        struct btree_trans *trans = bch2_trans_get(c);
-       unsigned long old, new, v;
+       unsigned long old, new;
        unsigned idx = w - b->writes;
 
        btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-       v = READ_ONCE(b->flags);
 
+       old = READ_ONCE(b->flags);
        do {
-               old = new = v;
+               new = old;
 
                if (!(old & (1 << BTREE_NODE_dirty)) ||
                    !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
@@ -245,7 +245,7 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
                new &= ~BTREE_WRITE_TYPE_MASK;
                new |= BTREE_WRITE_journal_reclaim;
                new |= 1 << BTREE_NODE_need_write;
-       } while ((v = cmpxchg(&b->flags, old, new)) != old);
+       } while (!try_cmpxchg(&b->flags, &old, new));
 
        btree_node_write_if_need(c, b, SIX_LOCK_read);
        six_unlock_read(&b->c.lock);
index d5f7992969d1badde051bdda2e5297b8b2a4935e..c58ceb328645ad3277aaab70629fa13b3226576a 100644 (file)
@@ -1356,7 +1356,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
        struct bch_fs *c = as->c;
        struct bkey_packed *k;
        struct printbuf buf = PRINTBUF;
-       unsigned long old, new, v;
+       unsigned long old, new;
 
        BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
               !btree_ptr_sectors_written(insert));
@@ -1395,14 +1395,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
        bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
        set_btree_node_dirty_acct(c, b);
 
-       v = READ_ONCE(b->flags);
+       old = READ_ONCE(b->flags);
        do {
-               old = new = v;
+               new = old;
 
                new &= ~BTREE_WRITE_TYPE_MASK;
                new |= BTREE_WRITE_interior;
                new |= 1 << BTREE_NODE_need_write;
-       } while ((v = cmpxchg(&b->flags, old, new)) != old);
+       } while (!try_cmpxchg(&b->flags, &old, new));
 
        printbuf_exit(&buf);
 }
index 314ee3e0187f48a224b94a38f9ca101a97f7d59e..5145066330ed1f40784416eb75676b771aa3656c 100644 (file)
@@ -916,13 +916,13 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
         */
        s64 should_not_have_added = added - (s64) disk_res_sectors;
        if (unlikely(should_not_have_added > 0)) {
-               u64 old, new, v = atomic64_read(&c->sectors_available);
+               u64 old, new;
 
+               old = atomic64_read(&c->sectors_available);
                do {
-                       old = v;
                        new = max_t(s64, 0, old - should_not_have_added);
-               } while ((v = atomic64_cmpxchg(&c->sectors_available,
-                                              old, new)) != old);
+               } while (!atomic64_try_cmpxchg(&c->sectors_available,
+                                              &old, new));
 
                added -= should_not_have_added;
                warn = true;
@@ -1523,7 +1523,7 @@ int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                              u64 sectors, int flags)
 {
        struct bch_fs_pcpu *pcpu;
-       u64 old, v, get;
+       u64 old, get;
        s64 sectors_available;
        int ret;
 
@@ -1534,17 +1534,16 @@ int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
        if (sectors <= pcpu->sectors_available)
                goto out;
 
-       v = atomic64_read(&c->sectors_available);
+       old = atomic64_read(&c->sectors_available);
        do {
-               old = v;
                get = min((u64) sectors + SECTORS_CACHE, old);
 
                if (get < sectors) {
                        preempt_enable();
                        goto recalculate;
                }
-       } while ((v = atomic64_cmpxchg(&c->sectors_available,
-                                      old, old - get)) != old);
+       } while (!atomic64_try_cmpxchg(&c->sectors_available,
+                                      &old, old - get));
 
        pcpu->sectors_available         += get;
 
index 8ad4be73860cc049e729bbaccf5cc1da0730e106..05a1c98754f266e7adf392a7bc4204250a76bfae 100644 (file)
@@ -432,13 +432,13 @@ static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reserv
 #ifdef __KERNEL__
        u64 old, new;
 
+       old = this_cpu_read(c->pcpu->sectors_available);
        do {
-               old = this_cpu_read(c->pcpu->sectors_available);
                if (sectors > old)
                        return __bch2_disk_reservation_add(c, res, sectors, flags);
 
                new = old - sectors;
-       } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+       } while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));
 
        this_cpu_add(*c->online_reserved, sectors);
        res->sectors                    += sectors;
index 05e0cbef420bc790a5694cc7c85aba3724e580ed..c6197e6aa0b8c0424e1db1a587f916985b085254 100644 (file)
@@ -69,11 +69,10 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
        u64 io_latency = time_after64(now, submit_time)
                ? now - submit_time
                : 0;
-       u64 old, new, v = atomic64_read(latency);
+       u64 old, new;
 
+       old = atomic64_read(latency);
        do {
-               old = v;
-
                /*
                 * If the io latency was reasonably close to the current
                 * latency, skip doing the update and atomic operation - most of
@@ -84,7 +83,7 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
                        break;
 
                new = ewma_add(old, io_latency, 5);
-       } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
+       } while (!atomic64_try_cmpxchg(latency, &old, new));
 
        bch2_congested_acct(ca, io_latency, now, rw);
 
index 10b19791ec98e482c4a498d34b493bcfb76edd10..649e3a01608af408d70f6b2f498f01255589f70c 100644 (file)
@@ -230,7 +230,6 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
-       u64 v = atomic64_read(&j->reservations.counter);
        unsigned sectors;
 
        BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
@@ -238,15 +237,16 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
 
        lockdep_assert_held(&j->lock);
 
+       old.v = atomic64_read(&j->reservations.counter);
        do {
-               old.v = new.v = v;
+               new.v = old.v;
                new.cur_entry_offset = closed_val;
 
                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return;
-       } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                      old.v, new.v)) != old.v);
+       } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                      &old.v, new.v));
 
        if (!__journal_entry_is_open(old))
                return;
@@ -353,7 +353,6 @@ static int journal_entry_open(struct journal *j)
                ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
        union journal_res_state old, new;
        int u64s;
-       u64 v;
 
        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));
@@ -432,9 +431,9 @@ static int journal_entry_open(struct journal *j)
         */
        j->cur_entry_u64s = u64s;
 
-       v = atomic64_read(&j->reservations.counter);
+       old.v = atomic64_read(&j->reservations.counter);
        do {
-               old.v = new.v = v;
+               new.v = old.v;
 
                BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
 
@@ -446,8 +445,8 @@ static int journal_entry_open(struct journal *j)
 
                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
-       } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                      old.v, new.v)) != old.v);
+       } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                      &old.v, new.v));
 
        if (nr_unwritten_journal_entries(j) == 1)
                mod_delayed_work(j->wq,
index bc6b9c39dcb4c6a079debe6f602cce0586aa85bd..377a3750406e4c86d6da57716d9ef1d81f607dbf 100644 (file)
@@ -327,10 +327,10 @@ static inline int journal_res_get_fast(struct journal *j,
                                       unsigned flags)
 {
        union journal_res_state old, new;
-       u64 v = atomic64_read(&j->reservations.counter);
 
+       old.v = atomic64_read(&j->reservations.counter);
        do {
-               old.v = new.v = v;
+               new.v = old.v;
 
                /*
                 * Check if there is still room in the current journal
@@ -356,8 +356,8 @@ static inline int journal_res_get_fast(struct journal *j,
 
                if (flags & JOURNAL_RES_GET_CHECK)
                        return 1;
-       } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                      old.v, new.v)) != old.v);
+       } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                      &old.v, new.v));
 
        res->ref        = true;
        res->idx        = old.idx;
index 63608ee44f08090447281c7448caa8611a6164ca..3fa63cacfd949e63a565c0b08064cfb031e05f20 100644 (file)
@@ -1588,7 +1588,7 @@ static CLOSURE_CALLBACK(journal_write_done)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_replicas_padded replicas;
        union journal_res_state old, new;
-       u64 v, seq = le64_to_cpu(w->data->seq);
+       u64 seq = le64_to_cpu(w->data->seq);
        int err = 0;
 
        bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
@@ -1647,14 +1647,15 @@ static CLOSURE_CALLBACK(journal_write_done)
                if (j->watermark != BCH_WATERMARK_stripe)
                        journal_reclaim_kick(&c->journal);
 
-               v = atomic64_read(&j->reservations.counter);
+               old.v = atomic64_read(&j->reservations.counter);
                do {
-                       old.v = new.v = v;
+                       new.v = old.v;
                        BUG_ON(journal_state_count(new, new.unwritten_idx));
                        BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
 
                        new.unwritten_idx++;
-               } while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v);
+               } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                              &old.v, new.v));
 
                closure_wake_up(&w->wait);
                completed = true;
index 9058017720025a8f1e0f0d21b8965c70080b4fe1..7f647846b511fb33f532d8f5d0c6b29461ee9891 100644 (file)
@@ -36,15 +36,14 @@ static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
 static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
 {
        long i = s ? 1 : -1;
-       long v = atomic_long_read(&lock->v), old;
+       long old;
 
+       old = atomic_long_read(&lock->v);
        do {
-               old = v;
-
-               if (i > 0 ? v < 0 : v > 0)
+               if (i > 0 ? old < 0 : old > 0)
                        return false;
-       } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
-                                       old, old + i)) != old);
+       } while (!atomic_long_try_cmpxchg_acquire(&lock->v, &old, old + i));
+
        return true;
 }