gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       int src_len, dst_len = 0;
        struct aead_edesc *edesc;
        int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
        unsigned int authsize = ctx->authsize;
 
        if (unlikely(req->dst != req->src)) {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen);
+               src_len = req->assoclen + req->cryptlen;
+               dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen);
+                               src_len);
                        return ERR_PTR(src_nents);
                }
 
-               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-                                            req->cryptlen +
-                                               (encrypt ? authsize :
-                                                          (-authsize)));
+               dst_nents = sg_nents_for_len(req->dst, dst_len);
                if (unlikely(dst_nents < 0)) {
                        dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : (-authsize)));
+                               dst_len);
                        return ERR_PTR(dst_nents);
                }
        } else {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen +
-                                            (encrypt ? authsize : 0));
+               src_len = req->assoclen + req->cryptlen +
+                         (encrypt ? authsize : 0);
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : 0));
+                               src_len);
                        return ERR_PTR(src_nents);
                }
        }
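
A note on the arithmetic: for AEAD, encryption writes the ICV (authentication tag) after the ciphertext, while on decryption req->cryptlen already covers the tag, so the destination ends up authsize bytes shorter than the source. A standalone sketch with made-up numbers, not driver code:

#include <stdio.h>

int main(void)
{
	int assoclen = 16, authsize = 16;
	int cryptlen_enc = 64;       /* plaintext length on encrypt */
	int cryptlen_dec = 64 + 16;  /* ciphertext plus tag on decrypt */

	/* encrypt: the tag is appended, so dst grows by authsize */
	int src_len = assoclen + cryptlen_enc;          /* 80 */
	int dst_len = src_len + authsize;               /* 96 */
	printf("encrypt: src=%d dst=%d\n", src_len, dst_len);

	/* decrypt: cryptlen includes the tag, so dst shrinks by authsize */
	src_len = assoclen + cryptlen_dec;              /* 96 */
	dst_len = src_len - authsize;                   /* 80 */
	printf("decrypt: src=%d dst=%d\n", src_len, dst_len);
	return 0;
}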
 
        sec4_sg_index = 0;
        if (mapped_src_nents > 1) {
-               sg_to_sec4_sg_last(req->src, mapped_src_nents,
+               sg_to_sec4_sg_last(req->src, src_len,
                                   edesc->sec4_sg + sec4_sg_index, 0);
                sec4_sg_index += mapped_src_nents;
        }
        if (mapped_dst_nents > 1) {
-               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+               sg_to_sec4_sg_last(req->dst, dst_len,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
 
                dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
        }
        if (dst_sg_idx)
-               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+               sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
                                   !!ivsize, 0);
 
        if (mapped_dst_nents > 1) {
-               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+               sg_to_sec4_sg_last(req->dst, req->cryptlen,
                                   edesc->sec4_sg + dst_sg_idx, 0);
        }
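
The skcipher table layout explains why req->cryptlen is the right length here: when the algorithm has an IV, entry 0 of the table holds the IV itself (via dma_to_sec4_sg_one()), and the payload entries start at index !!ivsize. A simplified sketch of that index arithmetic with illustrative values; the driver's actual dst_sg_idx computation is elided from this excerpt:

#include <stdio.h>

int main(void)
{
	int ivsize = 16;             /* 0 for IV-less modes */
	int mapped_src_nents = 3;    /* pretend mapping result */

	/* payload entries begin right after the optional IV entry */
	int src_idx = !!ivsize;                       /* 1 */
	/* a plausible spot for dst entries when dst != src */
	int dst_sg_idx = src_idx + mapped_src_nents;  /* 4 */

	printf("IV at 0, src entries at %d.., dst entries at %d..\n",
	       src_idx, dst_sg_idx);
	return 0;
}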
 
 
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       int src_len, dst_len = 0;
        struct aead_edesc *edesc;
        dma_addr_t qm_sg_dma, iv_dma = 0;
        int ivsize = 0;
        }
 
        if (likely(req->src == req->dst)) {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen +
-                                               (encrypt ? authsize : 0));
+               src_len = req->assoclen + req->cryptlen +
+                         (encrypt ? authsize : 0);
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : 0));
+                               src_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
                        return ERR_PTR(-ENOMEM);
                }
        } else {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen);
+               src_len = req->assoclen + req->cryptlen;
+               dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen);
+                               src_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
 
-               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-                                            req->cryptlen +
-                                            (encrypt ? authsize :
-                                                       (-authsize)));
+               dst_nents = sg_nents_for_len(req->dst, dst_len);
                if (unlikely(dst_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : (-authsize)));
+                               dst_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(dst_nents);
                }
                dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
                qm_sg_index++;
        }
-       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+       sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
        qm_sg_index += mapped_src_nents;
 
        if (mapped_dst_nents > 1)
-               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-                                qm_sg_index, 0);
+               sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
        qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(qidev, qm_sg_dma)) {
        edesc->drv_req.drv_ctx = drv_ctx;
 
        dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+       sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
        if (mapped_dst_nents > 1)
-               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+               sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
                                 dst_sg_idx, 0);
 
        edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
 
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       int src_len, dst_len = 0;
        struct aead_edesc *edesc;
        dma_addr_t qm_sg_dma, iv_dma = 0;
        int ivsize = 0;
        }
 
        if (unlikely(req->dst != req->src)) {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen);
+               src_len = req->assoclen + req->cryptlen;
+               dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen);
+                               src_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
 
-               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-                                            req->cryptlen +
-                                            (encrypt ? authsize :
-                                                       (-authsize)));
+               dst_nents = sg_nents_for_len(req->dst, dst_len);
                if (unlikely(dst_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : (-authsize)));
+                               dst_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(dst_nents);
                }
                        mapped_dst_nents = 0;
                }
        } else {
-               src_nents = sg_nents_for_len(req->src, req->assoclen +
-                                            req->cryptlen +
-                                               (encrypt ? authsize : 0));
+               src_len = req->assoclen + req->cryptlen +
+                         (encrypt ? authsize : 0);
+
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (unlikely(src_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-                               req->assoclen + req->cryptlen +
-                               (encrypt ? authsize : 0));
+                               src_len);
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
                dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
                qm_sg_index++;
        }
-       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+       sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
        qm_sg_index += mapped_src_nents;
 
        if (mapped_dst_nents > 1)
-               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-                                qm_sg_index, 0);
+               sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
        qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, qm_sg_dma)) {
        edesc->qm_sg_bytes = qm_sg_bytes;
 
        dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+       sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
        if (mapped_dst_nents > 1)
-               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+               sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
                                 dst_sg_idx, 0);
 
        edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
 
        if (to_hash) {
                struct dpaa2_sg_entry *sg_table;
+               int src_len = req->nbytes - *next_buflen;
 
-               src_nents = sg_nents_for_len(req->src,
-                                            req->nbytes - (*next_buflen));
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(ctx->dev, "Invalid number of src SG.\n");
                        return src_nents;
                        goto unmap_ctx;
 
                if (mapped_nents) {
-                       sg_to_qm_sg_last(req->src, mapped_nents,
+                       sg_to_qm_sg_last(req->src, src_len,
                                         sg_table + qm_sg_src_index, 0);
                        if (*next_buflen)
                                scatterwalk_map_and_copy(next_buf, req->src,
        if (ret)
                goto unmap_ctx;
 
-       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+       sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
                                          DMA_TO_DEVICE);
                struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
                qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
-               sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+               sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
                                                  qm_sg_bytes, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
 
        if (to_hash) {
                struct dpaa2_sg_entry *sg_table;
+               int src_len = req->nbytes - *next_buflen;
 
-               src_nents = sg_nents_for_len(req->src,
-                                            req->nbytes - *next_buflen);
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(ctx->dev, "Invalid number of src SG.\n");
                        return src_nents;
                if (ret)
                        goto unmap_ctx;
 
-               sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+               sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
                if (*next_buflen)
                        scatterwalk_map_and_copy(next_buf, req->src,
        if (ret)
                goto unmap;
 
-       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+       sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
                                          DMA_TO_DEVICE);
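
The ahash update paths above compute src_len = req->nbytes - *next_buflen because only whole blocks are hashed per job; the remainder stays buffered for the next update call. A standalone sketch of the split, assuming a power-of-two block size (which is what lets the driver use a mask for the modulo):

#include <stdio.h>

int main(void)
{
	int blocksize = 64;          /* e.g. SHA-256 block size */
	int buflen = 20;             /* bytes left over from last update */
	int nbytes = 100;            /* bytes in this request */

	/* remainder that stays buffered until more data arrives */
	int next_buflen = (buflen + nbytes) % blocksize;  /* 120 % 64 = 56 */
	/* bytes taken from req->src for this job */
	int src_len = nbytes - next_buflen;               /* 44 */

	printf("hash now: %d buffered + %d from src, keep %d\n",
	       buflen, src_len, next_buflen);
	return 0;
}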
 
        if (to_hash) {
                struct dpaa2_sg_entry *sg_table;
+               int src_len = req->nbytes - *next_buflen;
 
-               src_nents = sg_nents_for_len(req->src,
-                                            req->nbytes - (*next_buflen));
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(ctx->dev, "Invalid number of src SG.\n");
                        return src_nents;
                if (mapped_nents > 1) {
                        int qm_sg_bytes;
 
-                       sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+                       sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
                        qm_sg_bytes = pad_sg_nents(mapped_nents) *
                                      sizeof(*sg_table);
                        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 
                unsigned int sgsize = sizeof(*sg) *
                                      pad_sg_nents(first_sg + nents);
 
-               sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+               sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
                src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->jrdev, src_dma)) {
 
        if (to_hash) {
                int pad_nents;
+               int src_len = req->nbytes - *next_buflen;
 
-               src_nents = sg_nents_for_len(req->src,
-                                            req->nbytes - (*next_buflen));
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                        goto unmap_ctx;
 
                if (mapped_nents)
-                       sg_to_sec4_sg_last(req->src, mapped_nents,
+                       sg_to_sec4_sg_last(req->src, src_len,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
                else
 
        if (to_hash) {
                int pad_nents;
+               int src_len = req->nbytes - *next_buflen;
 
-               src_nents = sg_nents_for_len(req->src,
-                                            req->nbytes - *next_buflen);
+               src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                if (ret)
                        goto unmap_ctx;
 
-               sg_to_sec4_sg_last(req->src, mapped_nents,
-                                  edesc->sec4_sg + 1, 0);
+               sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
 
                if (*next_buflen) {
                        scatterwalk_map_and_copy(next_buf, req->src,
 
                                   0);
 
        if (sec4_sg_index)
-               sg_to_sec4_sg_last(req_ctx->fixup_src, src_nents,
+               sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
                                   edesc->sec4_sg + !!diff_size, 0);
 
        if (dst_nents > 1)
-               sg_to_sec4_sg_last(req->dst, dst_nents,
+               sg_to_sec4_sg_last(req->dst, req->dst_len,
                                   edesc->sec4_sg + sec4_sg_index, 0);
 
        /* Save nents for later use in Job Descriptor */
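
For context on the caampkc (RSA) hunk: fixup_src and fixup_src_len are, to my understanding of the driver, the request source fast-forwarded past leading zero bytes of the big-number input, and req->dst_len comes straight from the akcipher request; both are byte counts, so they slot directly into the length-based helper. An illustrative flat-memory sketch of the leading-zero fixup (the driver does the equivalent on the S/G list with scatterwalk_ffwd()):

#include <stdio.h>

int main(void)
{
	unsigned char in[] = { 0x00, 0x00, 0x1f, 0x8a, 0x44 };
	unsigned int len = sizeof(in), lzeros = 0;

	while (lzeros < len && in[lzeros] == 0x00)
		lzeros++;

	unsigned char *fixup_src = in + lzeros;      /* skips the zeros */
	unsigned int fixup_src_len = len - lzeros;   /* 3 */

	printf("skipped %u zeros, %u bytes remain (first=0x%02x)\n",
	       lzeros, fixup_src_len, fixup_src[0]);
	return 0;
}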
 
  * but does not have final bit; instead, returns last entry
  */
 static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
            struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-       while (sg_count && sg) {
-               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-                                sg_dma_len(sg), offset);
+       int ent_len;
+
+       while (len) {
+               ent_len = min_t(int, sg_dma_len(sg), len);
+
+               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+                                offset);
                qm_sg_ptr++;
                sg = sg_next(sg);
-               sg_count--;
+               len -= ent_len;
        }
        return qm_sg_ptr - 1;
 }
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
                                    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+       qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
        qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
 }
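
The behavioral point of the helper rewrite is easiest to see side by side: the old loop emitted every mapped segment at its full sg_dma_len(), so the table could cover more bytes than the job should touch; the new loop stops at exactly len and clips the final entry. A userspace simulation over a made-up mapping (fake names, plainly not kernel code):

#include <stdio.h>

int main(void)
{
	int dma_len[] = { 4096, 4096 };  /* two mapped segments */
	int nents = 2, len = 6144;       /* job only covers 6144 bytes */

	int old_total = 0;
	for (int i = 0; i < nents; i++)
		old_total += dma_len[i];          /* 8192: overshoots len */

	int new_total = 0, rem = len;
	for (int i = 0; rem; i++) {
		int ent_len = dma_len[i] < rem ? dma_len[i] : rem;

		new_total += ent_len;             /* 4096, then 2048 */
		rem -= ent_len;
	}
	printf("old walk: %d bytes, new walk: %d bytes (len=%d)\n",
	       old_total, new_total, len);
	return 0;
}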
 
 
  * but does not have final bit; instead, returns last entry
  */
 static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
            struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
 {
-       while (sg_count && sg) {
-               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-                                sg_dma_len(sg), offset);
+       int ent_len;
+
+       while (len) {
+               ent_len = min_t(int, sg_dma_len(sg), len);
+
+               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+                                offset);
                qm_sg_ptr++;
                sg = sg_next(sg);
-               sg_count--;
+               len -= ent_len;
        }
        return qm_sg_ptr - 1;
 }
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
                                    struct dpaa2_sg_entry *qm_sg_ptr,
                                    u16 offset)
 {
-       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+       qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
        dpaa2_sg_set_final(qm_sg_ptr, true);
 }
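
Both the QMan and DPAA2 wrappers lean on the same convention: sg_to_qm_sg() returns a pointer to the last entry written (qm_sg_ptr - 1), which is the one that gets the final bit via qm_sg_entry_set_f() or dpaa2_sg_set_final(). Note this presumes callers pass a non-zero length, since with len == 0 the "- 1" would point before the table. A minimal pointer sketch of the convention:

#include <stdio.h>

int main(void)
{
	int table[4] = { 0 };
	int *cursor = table;

	for (int i = 0; i < 3; i++)
		*cursor++ = 100 + i;   /* emit three entries */

	int *last = cursor - 1;        /* what the helper returns */
	printf("final bit would go on index %d (value %d)\n",
	       (int)(last - table), *last);
	return 0;
}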
 
 
  * but does not have final bit; instead, returns last entry
  */
 static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
              struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
 {
-       while (sg_count) {
-               dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
-                                  sg_dma_len(sg), offset);
+       int ent_len;
+
+       while (len) {
+               ent_len = min_t(int, sg_dma_len(sg), len);
+
+               dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+                                  offset);
                sec4_sg_ptr++;
                sg = sg_next(sg);
-               sg_count--;
+               len -= ent_len;
        }
        return sec4_sg_ptr - 1;
 }
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
                                      struct sec4_sg_entry *sec4_sg_ptr,
                                      u16 offset)
 {
-       sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+       sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
        sg_to_sec4_set_last(sec4_sg_ptr);
 }
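
All three variants now share the same invariant: the emitted entry lengths sum to exactly len, no matter how dma_map_sg() chopped or merged the buffer. A quick self-checking sketch of that invariant, with arbitrary segment sizes:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int dma_len[] = { 1500, 4096, 4096, 4096 };
	int len = 6000, total = 0, nents_used = 0;

	for (int rem = len; rem; nents_used++) {
		int ent_len = dma_len[nents_used] < rem ?
			      dma_len[nents_used] : rem;

		total += ent_len;      /* 1500 + 4096 + 404 */
		rem -= ent_len;
	}
	assert(total == len);
	printf("%d entries, %d bytes total\n", nents_used, total);
	return 0;
}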