struct ablk_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
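+       /* Last ciphertext block of req->src, saved before a decrypt so
+        * it can be returned as the output IV; encrypt records the
+        * request direction for the completion callback. */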
+       u8 iv[MAX_IVLEN];
+       bool encrypt;
 };
 
 struct aead_ctx {
[...]

        case CTL_FLAG_PERFORM_ABLK: {
                struct skcipher_request *req = crypt->data.ablk_req;
                struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+               struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+               unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+               unsigned int offset;
+
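+               /* Hand back the output IV: after an encrypt it is the last
+                * ciphertext block just written to req->dst; after a decrypt
+                * it was captured from req->src when the request was queued
+                * and is wiped once copied out. */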
+               if (ivsize > 0) {
+                       offset = req->cryptlen - ivsize;
+                       if (req_ctx->encrypt) {
+                               scatterwalk_map_and_copy(req->iv, req->dst,
+                                                        offset, ivsize, 0);
+                       } else {
+                               memcpy(req->iv, req_ctx->iv, ivsize);
+                               memzero_explicit(req_ctx->iv, ivsize);
+                       }
+               }
 
                if (req_ctx->dst) {
                        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
[...]

        struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
        struct buffer_desc src_hook;
        struct device *dev = &pdev->dev;
+       unsigned int offset;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
 
[...]
                return -EAGAIN;
 
        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
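+       /* Remember the direction for the completion callback's IV copy-out. */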
+       req_ctx->encrypt = encrypt;
 
        crypt = get_crypt_desc();
        if (!crypt)
[...]

        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
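+       /* For a decrypt, save the last ciphertext block now: the request may
+        * run in place, in which case this block is overwritten by plaintext
+        * before the completion callback needs it as the output IV. */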
+       if (ivsize > 0 && !encrypt) {
+               offset = req->cryptlen - ivsize;
+               scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+       }
        if (req->src != req->dst) {
                struct buffer_desc dst_hook;
                crypt->mode |= NPE_OP_NOT_IN_PLACE;