unsigned prev_free_sectors;
 
        /* the following test is not needed, but it tests the replay code */
-       if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+       if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
                return;
 
        spin_lock_irq(&ic->endio_wait.lock);
 
 next_chunk:
 
-       if (unlikely(dm_suspended(ic->ti)))
+       if (unlikely(dm_post_suspending(ic->ti)))
                goto unlock_ret;
 
        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
 
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_DEFERRED_REMOVE 6
 #define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
 
 #define DM_NUMA_NODE NUMA_NO_NODE
 static int dm_numa_node = DM_NUMA_NODE;
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                set_bit(DMF_SUSPENDED, &md->flags);
+               set_bit(DMF_POST_SUSPENDING, &md->flags);
                dm_table_postsuspend_targets(map);
        }
        /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
        if (r)
                goto out_unlock;
 
+       set_bit(DMF_POST_SUSPENDING, &md->flags);
        dm_table_postsuspend_targets(map);
+       clear_bit(DMF_POST_SUSPENDING, &md->flags);
 
 out_unlock:
        mutex_unlock(&md->suspend_lock);
        (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
                            DMF_SUSPENDED_INTERNALLY);
 
+       set_bit(DMF_POST_SUSPENDING, &md->flags);
        dm_table_postsuspend_targets(map);
+       clear_bit(DMF_POST_SUSPENDING, &md->flags);
 }
 
 static void __dm_internal_resume(struct mapped_device *md)
        return test_bit(DMF_SUSPENDED, &md->flags);
 }
 
+/*
+ * Test whether @md is in its post-suspend window, i.e. DMF_POST_SUSPENDING
+ * is set.  The flag is raised just before dm_table_postsuspend_targets()
+ * runs (and cleared again afterwards on the resume-capable paths), so this
+ * tells targets that ->postsuspend processing is in progress.
+ */
+static int dm_post_suspending_md(struct mapped_device *md)
+{
+	return test_bit(DMF_POST_SUSPENDING, &md->flags);
+}
+
 int dm_suspended_internally_md(struct mapped_device *md)
 {
        return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
+/*
+ * Exported target-side wrapper: returns nonzero while the mapped device
+ * that owns @ti is executing its post-suspend hooks (DMF_POST_SUSPENDING
+ * set).  Mirrors dm_suspended()/dm_noflush_suspending() by resolving the
+ * mapped_device through the target's table.
+ */
+int dm_post_suspending(struct dm_target *ti)
+{
+	return dm_post_suspending_md(dm_table_get_md(ti->table));
+}
+EXPORT_SYMBOL_GPL(dm_post_suspending);
+
 int dm_noflush_suspending(struct dm_target *ti)
 {
        return __noflush_suspending(dm_table_get_md(ti->table));
 
 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
 union map_info *dm_get_rq_mapinfo(struct request *rq);