        start_lock = jiffies;
        lock_buffer(bh);
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
 
        /* If it takes too long to lock the buffer, trace it */
        time_lock = jbd2_time_diff(start_lock, jiffies);
 
        error = -EROFS;
        if (is_handle_aborted(handle)) {
-               jbd_unlock_bh_state(bh);
+               spin_unlock(&jh->b_state_lock);
                goto out;
        }
        error = 0;
         */
        if (buffer_shadow(bh)) {
                JBUFFER_TRACE(jh, "on shadow: sleep");
-               jbd_unlock_bh_state(bh);
+               spin_unlock(&jh->b_state_lock);
                wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
                goto repeat;
        }
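
Both the shadow-buffer wait above and the frozen-buffer allocation below follow
the same idiom: a spinlock holder must not sleep, so jh->b_state_lock is dropped
before the blocking step, and since the journaling state may change while the
lock is not held, control jumps back to "repeat" to re-take the lock and
re-validate. A schematic sketch, with hypothetical predicate and blocking-step
names used purely for illustration:

repeat:
        spin_lock(&jh->b_state_lock);
        if (must_wait(jh)) {                            /* hypothetical check */
                spin_unlock(&jh->b_state_lock);         /* cannot sleep under a spinlock */
                blocking_step();                        /* hypothetical: wait or allocate */
                goto repeat;                            /* state may be stale: revalidate */
        }
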
                JBUFFER_TRACE(jh, "generate frozen data");
                if (!frozen_buffer) {
                        JBUFFER_TRACE(jh, "allocate memory for buffer");
-                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&jh->b_state_lock);
                        frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS | __GFP_NOFAIL);
                        goto repeat;
        jh->b_next_transaction = transaction;
 
 done:
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
 
        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                 jh->b_next_transaction == NULL)));
                jh->b_next_transaction = transaction;
                spin_unlock(&journal->j_list_lock);
        }
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
 
        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
                committed_data = jbd2_alloc(jh2bh(jh)->b_size,
                                            GFP_NOFS|__GFP_NOFAIL);
 
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
-                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&jh->b_state_lock);
                        goto repeat;
                }
 
                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
 out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
         */
        if (jh->b_transaction != transaction &&
            jh->b_next_transaction != transaction) {
-               jbd_lock_bh_state(bh);
+               spin_lock(&jh->b_state_lock);
                J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_next_transaction == transaction);
-               jbd_unlock_bh_state(bh);
+               spin_unlock(&jh->b_state_lock);
        }
        if (jh->b_modified == 1) {
                /* If it's in our transaction it must be in BJ_Metadata list. */
                if (jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata) {
-                       jbd_lock_bh_state(bh);
+                       spin_lock(&jh->b_state_lock);
                        if (jh->b_transaction == transaction &&
                            jh->b_jlist != BJ_Metadata)
                                pr_err("JBD2: assertion failure: h_type=%u "
                                       jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
-                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&jh->b_state_lock);
                }
                goto out;
        }
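
The b_modified path above is an instance of the lockless fast path used
throughout this function: read the state without jh->b_state_lock first, and
take the lock only to re-verify before acting, because the unlocked read may
observe a transient value. A minimal sketch of the idiom (the error message is
illustrative, not the one printed above):

        if (jh->b_jlist != BJ_Metadata) {               /* unlocked, may be stale */
                spin_lock(&jh->b_state_lock);
                if (jh->b_jlist != BJ_Metadata)         /* reliable under the lock */
                        pr_err("jbd2: buffer not on BJ_Metadata list\n");
                spin_unlock(&jh->b_state_lock);
        }
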
 
        journal = transaction->t_journal;
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
 
        if (jh->b_modified == 0) {
                /*
        __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
 out:
        JBUFFER_TRACE(jh, "exit");
        return ret;
 
        BUFFER_TRACE(bh, "entry");
 
-       jbd_lock_bh_state(bh);
+       jh = jbd2_journal_grab_journal_head(bh);
+       if (!jh) {
+               __bforget(bh);
+               return 0;
+       }
 
-       if (!buffer_jbd(bh))
-               goto not_jbd;
-       jh = bh2jh(bh);
+       spin_lock(&jh->b_state_lock);
 
        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
                spin_unlock(&journal->j_list_lock);
        }
 drop:
-       jbd_unlock_bh_state(bh);
        __brelse(bh);
+       spin_unlock(&jh->b_state_lock);
+       jbd2_journal_put_journal_head(jh);
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
-
-not_jbd:
-       jbd_unlock_bh_state(bh);
-       __bforget(bh);
-       goto drop;
 }
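
The reworked entry and exit of jbd2_journal_forget() above show the reference
discipline the new lock imposes: the journal head is pinned with
jbd2_journal_grab_journal_head() instead of being detected via buffer_jbd(),
and the final jbd2_journal_put_journal_head() happens only after
spin_unlock(&jh->b_state_lock), since dropping what may be the last reference
can free the structure the lock is embedded in. Condensed to a skeleton (a
sketch, not the full function):

        struct journal_head *jh;

        jh = jbd2_journal_grab_journal_head(bh);        /* pins jh, or NULL */
        if (!jh) {
                __bforget(bh);                          /* buffer was never journaled */
                return 0;
        }
        spin_lock(&jh->b_state_lock);
        /* ... inspect and update journaling state ... */
        spin_unlock(&jh->b_state_lock);                 /* unlock first ... */
        jbd2_journal_put_journal_head(jh);              /* ... then drop the pin */
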
 
 /**
  *
  * j_list_lock is held.
  *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
  */
 
 static inline void
  *
  * Called with j_list_lock held, and the journal may not be locked.
  *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
  */
 
 static inline void
        transaction_t *transaction;
        struct buffer_head *bh = jh2bh(jh);
 
-       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       lockdep_assert_held(&jh->b_state_lock);
        transaction = jh->b_transaction;
        if (transaction)
                assert_spin_locked(&transaction->t_journal->j_list_lock);
 
        /* Get reference so that buffer cannot be freed before we unlock it */
        get_bh(bh);
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
        jbd2_journal_put_journal_head(jh);
        __brelse(bh);
 }
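
jbd2_journal_unfile_buffer() above also shows the lock nesting this patch
preserves from the bit-lock scheme: where both locks are needed,
jh->b_state_lock is taken outside journal->j_list_lock, and
journal->j_state_lock, where involved, is taken outside both (see the
truncation path below). Schematically:

        spin_lock(&jh->b_state_lock);                   /* outer: per-buffer state */
        spin_lock(&journal->j_list_lock);               /* inner: transaction lists */
        /* ... move jh between transaction lists ... */
        spin_unlock(&journal->j_list_lock);
        spin_unlock(&jh->b_state_lock);
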
 /*
  * Called from jbd2_journal_try_to_free_buffers().
  *
- * Called under jbd_lock_bh_state(bh)
+ * Called under jh->b_state_lock
  */
 static void
 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
                if (!jh)
                        continue;
 
-               jbd_lock_bh_state(bh);
+               spin_lock(&jh->b_state_lock);
                __journal_try_to_free_buffer(journal, bh);
+               spin_unlock(&jh->b_state_lock);
                jbd2_journal_put_journal_head(jh);
-               jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        } while ((bh = bh->b_this_page) != head);
  *
  * Called under j_list_lock.
  *
- * Called under jbd_lock_bh_state(bh).
+ * Called under jh->b_state_lock.
  */
 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 {
 
        /* OK, we have data buffer in journaled mode */
        write_lock(&journal->j_state_lock);
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
        spin_lock(&journal->j_list_lock);
 
        /*
                 * for commit and try again.
                 */
                if (partial_page) {
-                       jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
-                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&jh->b_state_lock);
                        write_unlock(&journal->j_state_lock);
+                       jbd2_journal_put_journal_head(jh);
                        return -EBUSY;
                }
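
Note the reordering in this hunk and the two that follow it:
jbd2_journal_put_journal_head() moves from before the unlocks to after them.
With the lock now embedded in struct journal_head this is required for
correctness, because dropping a possibly final reference while still holding
the lock could free the memory the lock lives in:

        /* Unsafe with the embedded lock: the put may free jh,
         * so the unlock would touch freed memory. */
        jbd2_journal_put_journal_head(jh);
        spin_unlock(&jh->b_state_lock);

        /* Safe ordering used by this patch: */
        spin_unlock(&jh->b_state_lock);
        write_unlock(&journal->j_state_lock);
        jbd2_journal_put_journal_head(jh);
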
                /*
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
                        jh->b_next_transaction = journal->j_running_transaction;
-               jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
-               jbd_unlock_bh_state(bh);
+               spin_unlock(&jh->b_state_lock);
                write_unlock(&journal->j_state_lock);
+               jbd2_journal_put_journal_head(jh);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
         * here.
         */
        jh->b_modified = 0;
-       jbd2_journal_put_journal_head(jh);
        spin_unlock(&journal->j_list_lock);
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
        write_unlock(&journal->j_state_lock);
+       jbd2_journal_put_journal_head(jh);
 zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);
 
-       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       lockdep_assert_held(&jh->b_state_lock);
        assert_spin_locked(&transaction->t_journal->j_list_lock);
 
        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
 void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
 {
-       jbd_lock_bh_state(jh2bh(jh));
+       spin_lock(&jh->b_state_lock);
        spin_lock(&transaction->t_journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
-       jbd_unlock_bh_state(jh2bh(jh));
+       spin_unlock(&jh->b_state_lock);
 }
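
A side effect of replacing the bit spinlock in bh->b_state with a real
spinlock_t is debug coverage: bit spinlocks are invisible to lockdep, whereas
the new lock participates in lock-ordering validation and supports the
lockdep_assert_held() calls that replace the old jbd_is_locked_bh_state()
assertions in this patch. For instance (the helper name is made up for
illustration):

        static void assert_state_locked(struct journal_head *jh)
        {
                /* Replaces J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)) and,
                 * unlike the bit lock, is actually checked by lockdep. */
                lockdep_assert_held(&jh->b_state_lock);
        }
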
 
 /*
  * buffer on that transaction's metadata list.
  *
  * Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
+ * Called under jh->b_state_lock
  *
  * When this function returns true, there's no next transaction to refile to
  * and the caller has to drop jh reference through
  * jbd2_journal_put_journal_head().
  */
        int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);
 
-       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       lockdep_assert_held(&jh->b_state_lock);
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
 
  */
 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
 {
-       struct buffer_head *bh = jh2bh(jh);
        bool drop;
 
-       /* Get reference so that buffer cannot be freed before we unlock it */
-       get_bh(bh);
-       jbd_lock_bh_state(bh);
+       spin_lock(&jh->b_state_lock);
        spin_lock(&journal->j_list_lock);
        drop = __jbd2_journal_refile_buffer(jh);
-       jbd_unlock_bh_state(bh);
+       spin_unlock(&jh->b_state_lock);
        spin_unlock(&journal->j_list_lock);
-       __brelse(bh);
        if (drop)
                jbd2_journal_put_journal_head(jh);
 }
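
The get_bh()/__brelse() pair deleted from jbd2_journal_refile_buffer() above
was only needed because the old bit lock lived in bh->b_state, so the
buffer_head had to be pinned until the unlock. The new lock lives in the
journal_head, which the caller's jh reference already keeps alive, roughly:

        /* Old scheme: pin bh so the bit lock in bh->b_state stays valid */
        get_bh(bh);
        jbd_lock_bh_state(bh);
        /* ... */
        jbd_unlock_bh_state(bh);
        __brelse(bh);

        /* New scheme: the caller's jh reference pins jh->b_state_lock */
        spin_lock(&jh->b_state_lock);
        /* ... */
        spin_unlock(&jh->b_state_lock);
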
 
 #ifndef JOURNAL_HEAD_H_INCLUDED
 #define JOURNAL_HEAD_H_INCLUDED
 
+#include <linux/spinlock.h>
+
 typedef unsigned int           tid_t;          /* Unique transaction ID */
 typedef struct transaction_s   transaction_t;  /* Compound transaction type */
 
         */
        struct buffer_head *b_bh;
 
+       /*
+        * Protect the buffer head state
+        */
+       spinlock_t b_state_lock;
+
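
The new lock needs a one-time spin_lock_init() when the journal_head is
created; that hunk presumably sits in the journal-head allocation path in
fs/jbd2/journal.c (not shown in this excerpt), and amounts to:

        spin_lock_init(&jh->b_state_lock);              /* once, at jh allocation */
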
        /*
         * Reference count - see description in journal.c
         * [jbd_lock_bh_journal_head()]
        int b_jcount;
 
        /*
-        * Journalling list for this buffer [jbd_lock_bh_state()]
+        * Journalling list for this buffer [b_state_lock]
         * NOTE: We *cannot* combine this with b_modified into a bitfield
         * as gcc would then (which the C standard allows but which is
         * very unuseful) make 64-bit accesses to the bitfield and clobber
         * b_jcount if its update races with bitfield modification.
         */
        unsigned b_jlist;

        /*
         * This flag signals the buffer has been modified by
         * the currently running transaction
-        * [jbd_lock_bh_state()]
+        * [b_state_lock]
         */
        unsigned b_modified;
 
        /*
         * Copy of the buffer data frozen for writing to the log.
-        * [jbd_lock_bh_state()]
+        * [b_state_lock]
         */
        char *b_frozen_data;
 
        /*
         * Pointer to a saved copy of the buffer containing no uncommitted
         * deallocation references, so that allocations can avoid overwriting
-        * uncommitted deletes. [jbd_lock_bh_state()]
+        * uncommitted deletes. [b_state_lock]
         */
        char *b_committed_data;
 
         * metadata: either the running transaction or the committing
         * transaction (if there is one).  Only applies to buffers on a
         * transaction's data or metadata journaling list.
-        * [j_list_lock] [jbd_lock_bh_state()]
+        * [j_list_lock] [b_state_lock]
         * Either of these locks is enough for reading, both are needed for
         * changes.
         */
         * Pointer to the running compound transaction which is currently
         * modifying the buffer's metadata, if there was already a transaction
         * committing it when the new transaction touched it.
-        * [t_list_lock] [jbd_lock_bh_state()]
+        * [j_list_lock] [b_state_lock]
         */
        transaction_t *b_next_transaction;
 
        /*
         * Doubly-linked list of buffers on a transaction's data, metadata or
-        * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+        * forget queue. [j_list_lock] [b_state_lock]
         */
        struct journal_head *b_tnext, *b_tprev;