 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                             int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
 
        wake_up(&sctx->list_wait);
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 {
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
 }
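
The bare helper expects scrub_lock to be held on entry and returns with it held; it drops the lock only while sleeping, so that the pausing side, which also takes scrub_lock, can make progress.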
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+       atomic_inc(&fs_info->scrubs_paused);
+       wake_up(&fs_info->scrub_pause_wait);
+
+       mutex_lock(&fs_info->scrub_lock);
+       __scrub_blocked_if_needed(fs_info);
+       atomic_dec(&fs_info->scrubs_paused);
+       mutex_unlock(&fs_info->scrub_lock);
+
+       wake_up(&fs_info->scrub_pause_wait);
+}
+
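
For context, the new wrapper is one half of a handshake with the pause side: btrfs_scrub_pause(), invoked around transaction commit, raises scrub_pause_req and then waits until every running scrub has checked in as paused. A minimal sketch of that counterpart, assuming the scrub.c of this same era (the exact body may differ slightly):

void btrfs_scrub_pause(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrub_pause_req);
        /* wait until every running scrub has parked itself as paused */
        while (atomic_read(&fs_info->scrubs_paused) !=
               atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_paused) ==
                           atomic_read(&fs_info->scrubs_running));
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);
}

This is why the wrapper increments scrubs_paused and wakes scrub_pause_wait before taking scrub_lock: a committer spinning in the loop above can only proceed once scrubs_paused has caught up with scrubs_running.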
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
 
        wait_event(sctx->list_wait,
                   atomic_read(&sctx->bios_in_flight) == 0);
-       atomic_inc(&fs_info->scrubs_paused);
-       wake_up(&fs_info->scrub_pause_wait);
+       scrub_blocked_if_needed(fs_info);
 
        /* FIXME it might be better to start readahead at commit root */
        key_start.objectid = logical;
        if (!IS_ERR(reada2))
                btrfs_reada_wait(reada2);
 
-       mutex_lock(&fs_info->scrub_lock);
-       scrub_blocked_if_needed(fs_info);
-       atomic_dec(&fs_info->scrubs_paused);
-       mutex_unlock(&fs_info->scrub_lock);
-
-       wake_up(&fs_info->scrub_pause_wait);
-
        /*
         * collect all data csums for the stripe to avoid seeking during
                        wait_event(sctx->list_wait,
                                   atomic_read(&sctx->bios_in_flight) == 0);
                        atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-                       atomic_inc(&fs_info->scrubs_paused);
-                       wake_up(&fs_info->scrub_pause_wait);
-
-                       mutex_lock(&fs_info->scrub_lock);
                        scrub_blocked_if_needed(fs_info);
-                       atomic_dec(&fs_info->scrubs_paused);
-                       mutex_unlock(&fs_info->scrub_lock);
-
-                       wake_up(&fs_info->scrub_pause_wait);
                }
 
                key.objectid = logical;
                wait_event(sctx->list_wait,
                           atomic_read(&sctx->bios_in_flight) == 0);
                atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-               atomic_inc(&fs_info->scrubs_paused);
-               wake_up(&fs_info->scrub_pause_wait);
                wait_event(sctx->list_wait,
                           atomic_read(&sctx->workers_pending) == 0);
-
-               mutex_lock(&fs_info->scrub_lock);
                scrub_blocked_if_needed(fs_info);
-               atomic_dec(&fs_info->scrubs_paused);
-               mutex_unlock(&fs_info->scrub_lock);
-
-               wake_up(&fs_info->scrub_pause_wait);
 
                btrfs_put_block_group(cache);
                if (ret)
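
Note the ordering change in the hunk above: the wait for workers_pending to drain now happens before scrubs_paused is bumped inside scrub_blocked_if_needed(), i.e. before a blocked committer can be woken. This is safe because all outstanding bios were already waited out (bios_in_flight == 0), so no remaining scrub worker can join a transaction and wait on the very commit that is waiting for us; done carelessly, this reordering would deadlock.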
         * checking @scrub_pause_req here, we can avoid
         * race between committing transaction and scrubbing.
         */
-       scrub_blocked_if_needed(fs_info);
+       __scrub_blocked_if_needed(fs_info);
        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);
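
The last hunk calls the bare __scrub_blocked_if_needed() rather than the wrapper: btrfs_scrub_dev() already holds scrub_lock here and has not yet incremented scrubs_running, so it must not count itself in scrubs_paused; it only needs to wait out any pending pause request before declaring the scrub running. For completeness, the unblock side that ends a pause is trivial; a sketch, again assuming the code of this era:

void btrfs_scrub_continue(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        atomic_dec(&fs_info->scrub_pause_req);
        wake_up(&fs_info->scrub_pause_wait);
}

A transaction commit brackets its critical section with btrfs_scrub_pause()/btrfs_scrub_continue(), which is the race the comment above is guarding against.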