c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
- if (promote)
- dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
- else
- dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
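+ /*
+  * Ask kcopyd to invoke copy_complete() from the write completion
+  * path instead of deferring it to the kcopyd worker thread.
+  */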
+ if (promote) {
+ dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region,
+ BIT(DM_KCOPYD_EARLY_CALLBACK), copy_complete, &mg->k);
+ } else {
+ dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region,
+ BIT(DM_KCOPYD_EARLY_CALLBACK), copy_complete, &mg->k);
+ }
}
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
mutex_destroy(&job->lock);
mempool_free(job, &kc->job_pool);
}
- fn(read_err, write_err, context);
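+ /* fn is NULL if the callback already ran early from I/O completion. */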
+ if (fn)
+ fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
wake_up(&kc->destroyq);
}
}
- if (op_is_write(job->rw))
+ if (op_is_write(job->rw)) {
+ if (job->flags & BIT(DM_KCOPYD_EARLY_CALLBACK)) {
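+ /*
+  * The write has finished: invoke the caller's callback now
+  * rather than waiting for the kcopyd worker, then clear ->fn
+  * so it is not called a second time at job completion.
+  */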
+ job->fn(job->read_err, job->write_err, job->context);
+ job->fn = NULL;
+ }
push(&kc->complete_jobs, job);
-
- else {
+ } else {
job->rw = WRITE;
push(&kc->io_jobs, job);
}
sub_job->dests[i].count = count;
}
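+ /*
+  * Sub-jobs complete via segment_complete(); keep them on the
+  * normal kcopyd completion path rather than firing their
+  * callback from I/O completion.
+  */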
+ sub_job->flags &= ~BIT(DM_KCOPYD_EARLY_CALLBACK);
sub_job->fn = segment_complete;
sub_job->context = sub_job;
dispatch_job(sub_job);
to.count = len;
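+ /*
+  * Have kcopyd call copy_complete() directly from write completion
+  * rather than deferring it to the kcopyd worker thread.
+  */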
dm_kcopyd_copy(pool->copier, &from, 1, &to,
- 0, copy_complete, m);
+ BIT(DM_KCOPYD_EARLY_CALLBACK), copy_complete, m);
/*
* Do we need to zero a tail region?
/* FIXME: make this configurable */
#define DM_KCOPYD_MAX_REGIONS 8
#define DM_KCOPYD_IGNORE_ERROR 1
#define DM_KCOPYD_WRITE_SEQ 2
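+/* Call the notify fn from write completion, ahead of worker-thread cleanup. */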
+#define DM_KCOPYD_EARLY_CALLBACK 3
struct dm_kcopyd_throttle {
unsigned throttle;