www.infradead.org Git - users/willy/pagecache.git/commitdiff
dm-crypt: track tag_offset in convert_context
authorHou Tao <houtao1@huawei.com>
Mon, 20 Jan 2025 08:29:51 +0000 (16:29 +0800)
committerMikulas Patocka <mpatocka@redhat.com>
Tue, 21 Jan 2025 21:02:12 +0000 (22:02 +0100)
dm-crypt uses tag_offset to index the integrity metadata for each crypt
sector. When the initial crypt_convert() returns BLK_STS_DEV_RESOURCE,
dm-crypt will try to continue the crypt/decrypt procedure in a kworker.
However, it resets tag_offset to zero instead of using the tag_offset
associated with the current sector. It may return unexpected data when
using a random IV, or return unexpected integrity-related errors.

Fix the problem by tracking tag_offset in per-IO convert_context.
Therefore, when the crypt/decrypt procedure continues in a kworker, it
could use the next tag_offset saved in convert_context.

Fixes: 8abec36d1274 ("dm crypt: do not wait for backlogged crypto request completion in softirq")
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
drivers/md/dm-crypt.c

index 09f5c1a04faef8c8b39d1e96b2a26f1791aa2f76..02a2919f4e5aad9f5e9abdb7210657e43068ffba 100644 (file)
@@ -59,6 +59,7 @@ struct convert_context {
        struct bio *bio_out;
        struct bvec_iter iter_out;
        atomic_t cc_pending;
+       unsigned int tag_offset;
        u64 cc_sector;
        union {
                struct skcipher_request *req;
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
        if (bio_out)
                ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
+       ctx->tag_offset = 0;
        init_completion(&ctx->restart);
 }
 
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 static blk_status_t crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx, bool atomic, bool reset_pending)
 {
-       unsigned int tag_offset = 0;
        unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
        int r;
 
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                atomic_inc(&ctx->cc_pending);
 
                if (crypt_integrity_aead(cc))
-                       r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+                       r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
                else
-                       r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+                       r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
 
                switch (r) {
                /*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                                         * exit and continue processing in a workqueue
                                         */
                                        ctx->r.req = NULL;
+                                       ctx->tag_offset++;
                                        ctx->cc_sector += sector_step;
-                                       tag_offset++;
                                        return BLK_STS_DEV_RESOURCE;
                                }
                        } else {
@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                 */
                case -EINPROGRESS:
                        ctx->r.req = NULL;
+                       ctx->tag_offset++;
                        ctx->cc_sector += sector_step;
-                       tag_offset++;
                        continue;
                /*
                 * The request was already processed (synchronously).
@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                case 0:
                        atomic_dec(&ctx->cc_pending);
                        ctx->cc_sector += sector_step;
-                       tag_offset++;
+                       ctx->tag_offset++;
                        if (!atomic)
                                cond_resched();
                        continue;