        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
        struct jbd2_buffer_trigger_type *triggers;
+       journal_t *journal = transaction->t_journal;
 
        /*
         * The buffer really shouldn't be locked: only the current committing
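The journal variable introduced above is what lets the final hunk below take journal->j_list_lock directly.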
@@ ... @@
        J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
        new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+       /* keep subsequent assertions sane */
+       new_bh->b_state = 0;
+       init_buffer(new_bh, NULL, NULL);
+       atomic_set(&new_bh->b_count, 1);
+       new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
 
        /*
         * If a new transaction has already done a buffer copy-out, then
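This hunk moves the initialization of new_bh and the jbd2_journal_add_journal_head() call (the removal further down is the other half of the move) up to just after allocation, presumably because jbd2_journal_add_journal_head() sleeps and so can no longer run in the window where the patch now keeps bh_in's state lock held.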
@@ ... @@
                kunmap_atomic(mapped_data, KM_USER0);
        }
 
-       /* keep subsequent assertions sane */
-       new_bh->b_state = 0;
-       init_buffer(new_bh, NULL, NULL);
-       atomic_set(&new_bh->b_count, 1);
-       jbd_unlock_bh_state(bh_in);
-
-       new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
-
        set_bh_page(new_bh, new_page, new_offset);
        new_jh->b_transaction = NULL;
        new_bh->b_size = jh2bh(jh_in)->b_size;
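Besides completing the move above, the removed block shows the heart of the fix: the old code dropped bh_in's state lock here, before the buffer was filed as BJ_Shadow, so a racing thread taking the state lock in that window would not yet see the buffer on the shadow list. The final hunk closes exactly that window.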
@@ ... @@
         * copying is moved to the transaction's shadow queue.
         */
        JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-       jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_lock(&journal->j_list_lock);
+       __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh_in);
+
        JBUFFER_TRACE(new_jh, "file as BJ_IO");
        jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
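
The net effect is a publish-before-unlock discipline: the buffer is filed on the shadow list under journal->j_list_lock while bh_in's state lock is still held, and only then is the state lock dropped, so anyone who takes the state lock afterwards already sees the BJ_Shadow filing. A minimal user-space sketch of the same pattern, assuming pthreads; struct buf, state_lock, list_lock and on_shadow_list are invented stand-ins, not jbd2 structures:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins, not jbd2 types: state_lock plays the role of the
 * bh_state lock, list_lock that of journal->j_list_lock. */
struct buf {
        pthread_mutex_t state_lock;
        int on_shadow_list;     /* nonzero while filed on the shadow list */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * New ordering, as in the patch: file the buffer under list_lock
 * while the state lock is still held, then release the state lock.
 * The old code released state_lock before filing, leaving a window
 * where another thread could lock the buffer and see it unfiled.
 */
static void write_metadata(struct buf *b)
{
        pthread_mutex_lock(&b->state_lock);
        /* ... copy-out decisions and I/O buffer setup happen here ... */
        pthread_mutex_lock(&list_lock);
        b->on_shadow_list = 1;  /* __jbd2_journal_file_buffer() analogue */
        pthread_mutex_unlock(&list_lock);
        pthread_mutex_unlock(&b->state_lock);   /* jbd_unlock_bh_state() analogue */
}

int main(void)
{
        struct buf b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        write_metadata(&b);
        printf("on shadow list: %d\n", b.on_shadow_list);
        return 0;
}

The only point of the sketch is the unlock order in write_metadata(): the list update happens strictly inside the state-lock critical section, which is why the patch switches to the lock-free __jbd2_journal_file_buffer() variant and takes j_list_lock explicitly.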