do {
                unsigned int n;
-               const u8 *p;
 
-               p = scatterwalk_next(&walk, len, &n);
-               gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
-               scatterwalk_done_src(&walk, p, n);
+               n = scatterwalk_next(&walk, len);
+               gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
+               scatterwalk_done_src(&walk, n);
 
                if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) {
                        kernel_neon_end();
 
 
        do {
                unsigned int n;
-               const u8 *p;
 
-               p = scatterwalk_next(&walk, len, &n);
-               macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
-                                           num_rounds(ctx));
-               scatterwalk_done_src(&walk, p, n);
+               n = scatterwalk_next(&walk, len);
+               macp = ce_aes_ccm_auth_data(mac, walk.addr, n, macp,
+                                           ctx->key_enc, num_rounds(ctx));
+               scatterwalk_done_src(&walk, n);
                len -= n;
        } while (len);
 }
 
 
        do {
                unsigned int n;
-               const u8 *p;
 
-               p = scatterwalk_next(&walk, len, &n);
-               gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
-               scatterwalk_done_src(&walk, p, n);
+               n = scatterwalk_next(&walk, len);
+               gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
+               scatterwalk_done_src(&walk, n);
                len -= n;
        } while (len);
 
 
 
        do {
                unsigned int n, orig_n;
-               const u8 *p, *orig_p;
+               const u8 *p;
 
-               orig_p = scatterwalk_next(&walk, assoclen, &orig_n);
-               p = orig_p;
+               orig_n = scatterwalk_next(&walk, assoclen);
+               p = walk.addr;
                n = orig_n;
 
                while (n > 0) {
                        }
                }
 
-               scatterwalk_done_src(&walk, orig_p, orig_n);
+               scatterwalk_done_src(&walk, orig_n);
                assoclen -= orig_n;
        } while (assoclen);
 }
 
 
        do {
                unsigned int n, orig_n;
-               const u8 *p, *orig_p;
+               const u8 *p;
 
-               orig_p = scatterwalk_next(&walk, assoclen, &orig_n);
-               p = orig_p;
+               orig_n = scatterwalk_next(&walk, assoclen);
+               p = walk.addr;
                n = orig_n;
 
                if (n + buflen < GHASH_BLOCK_SIZE) {
                                memcpy(&buffer[0], p, buflen);
                }
 
-               scatterwalk_done_src(&walk, orig_p, orig_n);
+               scatterwalk_done_src(&walk, orig_n);
                assoclen -= orig_n;
        } while (assoclen);
 
 
 struct gcm_sg_walk {
        struct scatter_walk walk;
        unsigned int walk_bytes;
-       u8 *walk_ptr;
        unsigned int walk_bytes_remain;
        u8 buf[AES_BLOCK_SIZE];
        unsigned int buf_bytes;
 {
        if (gw->walk_bytes_remain == 0)
                return 0;
-       gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain,
-                                       &gw->walk_bytes);
+       gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain);
        return gw->walk_bytes;
 }
 
 {
        gw->walk_bytes_remain -= nbytes;
        if (out)
-               scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes);
+               scatterwalk_done_dst(&gw->walk, nbytes);
        else
-               scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes);
-       gw->walk_ptr = NULL;
+               scatterwalk_done_src(&gw->walk, nbytes);
 }
 
 static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
        }
 
        if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
-               gw->ptr = gw->walk_ptr;
+               gw->ptr = gw->walk.addr;
                gw->nbytes = gw->walk_bytes;
                goto out;
        }
 
        while (1) {
                n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
-               memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+               memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n);
                gw->buf_bytes += n;
                _gcm_sg_unmap_and_advance(gw, n, false);
                if (gw->buf_bytes >= minbytesneeded) {
        }
 
        if (gw->walk_bytes >= minbytesneeded) {
-               gw->ptr = gw->walk_ptr;
+               gw->ptr = gw->walk.addr;
                gw->nbytes = gw->walk_bytes;
                goto out;
        }
 
-       scatterwalk_unmap(gw->walk_ptr);
-       gw->walk_ptr = NULL;
+       /* Mapped segment is too small; hand out gw->buf instead. */
+       scatterwalk_unmap(gw->walk.addr);
 
        gw->ptr = gw->buf;
        gw->nbytes = sizeof(gw->buf);
                        if (!_gcm_sg_clamp_and_map(gw))
                                return i;
                        n = min(gw->walk_bytes, bytesdone - i);
-                       memcpy(gw->walk_ptr, gw->buf + i, n);
+                       memcpy(gw->walk.addr, gw->buf + i, n);
                        _gcm_sg_unmap_and_advance(gw, n, true);
                }
        } else
 
 
        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
-               unsigned int size;
-               const u8 *mapped = scatterwalk_next(&walk, assoclen, &size);
+               unsigned int size = scatterwalk_next(&walk, assoclen);
+               const u8 *src = walk.addr;
                unsigned int left = size;
-               const u8 *src = mapped;
 
                if (pos + size >= AEGIS128_BLOCK_SIZE) {
                        if (pos > 0) {
                pos += left;
                assoclen -= size;
 
-               scatterwalk_done_src(&walk, mapped, size);
+               scatterwalk_done_src(&walk, size);
        }
 
        if (pos > 0) {
 
        scatterwalk_start(&walk, sg_src);
 
        while (assoclen) {
-               unsigned int orig_len_this_step;
-               const u8 *orig_src = scatterwalk_next(&walk, assoclen,
-                                                     &orig_len_this_step);
+               unsigned int orig_len_this_step = scatterwalk_next(
+                       &walk, assoclen);
                unsigned int len_this_step = orig_len_this_step;
                unsigned int len;
-               const u8 *src = orig_src;
+               const u8 *src = walk.addr;
 
                if (unlikely(pos)) {
                        len = min(len_this_step, 16 - pos);
                        pos = len_this_step;
                }
 next:
-               scatterwalk_done_src(&walk, orig_src, orig_len_this_step);
+               scatterwalk_done_src(&walk, orig_len_this_step);
                if (need_resched()) {
                        kernel_fpu_end();
                        kernel_fpu_begin();
 
 
        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
-               unsigned int size;
-               const u8 *mapped = scatterwalk_next(&walk, assoclen, &size);
+               unsigned int size = scatterwalk_next(&walk, assoclen);
+               const u8 *src = walk.addr;
                unsigned int left = size;
-               const u8 *src = mapped;
 
                if (pos + size >= AEGIS_BLOCK_SIZE) {
                        if (pos > 0) {
 
                pos += left;
                assoclen -= size;
-               scatterwalk_done_src(&walk, mapped, size);
+               scatterwalk_done_src(&walk, size);
        }
 
        if (pos > 0) {
 
                                    unsigned int nbytes)
 {
        do {
-               const void *src_addr;
                unsigned int to_copy;
 
-               src_addr = scatterwalk_next(walk, nbytes, &to_copy);
-               memcpy(buf, src_addr, to_copy);
-               scatterwalk_done_src(walk, src_addr, to_copy);
+               to_copy = scatterwalk_next(walk, nbytes);
+               memcpy(buf, walk->addr, to_copy);
+               scatterwalk_done_src(walk, to_copy);
                buf += to_copy;
                nbytes -= to_copy;
        } while (nbytes);
                                  unsigned int nbytes)
 {
        do {
-               void *dst_addr;
                unsigned int to_copy;
 
-               dst_addr = scatterwalk_next(walk, nbytes, &to_copy);
-               memcpy(dst_addr, buf, to_copy);
-               scatterwalk_done_dst(walk, dst_addr, to_copy);
+               to_copy = scatterwalk_next(walk, nbytes);
+               memcpy(walk->addr, buf, to_copy);
+               scatterwalk_done_dst(walk, to_copy);
                buf += to_copy;
                nbytes -= to_copy;
        } while (nbytes);
 
 
 static inline void skcipher_map_src(struct skcipher_walk *walk)
 {
-       walk->src.virt.addr = scatterwalk_map(&walk->in);
+       /* Map the source and mirror the address in src.virt.addr. */
+       walk->in.__addr = scatterwalk_map(&walk->in);
+       walk->src.virt.addr = walk->in.addr;
 }
 
 static inline void skcipher_map_dst(struct skcipher_walk *walk)
 {
-       walk->dst.virt.addr = scatterwalk_map(&walk->out);
+       /* Map the destination and mirror the address in dst.virt.addr. */
+       walk->out.__addr = scatterwalk_map(&walk->out);
+       walk->dst.virt.addr = walk->out.addr;
 }
 
 static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
                goto dst_done;
        }
 
-       scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n);
+       scatterwalk_done_dst(&walk->out, n);
 dst_done:
 
        if (res > 0)
 
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_dst;
        unsigned int n, len = *src_len;
-       char *dst;
 
        /* we need to fast forward through @start bytes first */
        scatterwalk_start_at_pos(&walk, sg_src, start);
 
        while (len && (nx_sg - nx_dst) < sglen) {
-               dst = scatterwalk_next(&walk, len, &n);
+               n = scatterwalk_next(&walk, len);
 
-               nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
+               nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
 
-               scatterwalk_done_src(&walk, dst, n);
+               scatterwalk_done_src(&walk, n);
                len -= n;
        }
        /* update to_process */
 
 struct scatterlist;
 struct seq_file;
 struct sk_buff;
+union crypto_no_such_thing;
 
 struct crypto_instance {
        struct crypto_alg alg;
 struct scatter_walk {
        struct scatterlist *sg;
        unsigned int offset;
+       union {
+               void *const addr;
+
+               /* Private API field, do not touch. */
+               union crypto_no_such_thing *__addr;
+       };
 };
 
 struct crypto_attr_alg {
 
  * scatterwalk_next() - Get the next data buffer in a scatterlist walk
  * @walk: the scatter_walk
  * @total: the total number of bytes remaining, > 0
- * @nbytes_ret: (out) the next number of bytes available, <= @total
  *
- * Return: A virtual address for the next segment of data from the scatterlist.
- *        The caller must call scatterwalk_done_src() or scatterwalk_done_dst()
- *        when it is done using this virtual address.
+ * A virtual address for the next segment of data from the scatterlist will
+ * be placed into @walk->addr.  The caller must call scatterwalk_done_src()
+ * or scatterwalk_done_dst() when it is done using this virtual address.
+ *
+ * Returns: the next number of bytes available, <= @total
  */
-static inline void *scatterwalk_next(struct scatter_walk *walk,
-                                    unsigned int total,
-                                    unsigned int *nbytes_ret)
+static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
+                                           unsigned int total)
 {
-       *nbytes_ret = scatterwalk_clamp(walk, total);
-       return scatterwalk_map(walk);
+       unsigned int nbytes = scatterwalk_clamp(walk, total);
+
+       walk->__addr = scatterwalk_map(walk);
+       return nbytes;
 }
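
For reference, a minimal sketch of the source-side calling pattern the conversions above follow, assuming a scatterlist sg, a nonzero byte count len, and an illustrative process() helper (none of these names come from the patch):

        struct scatter_walk walk;

        scatterwalk_start(&walk, sg);
        do {
                /* Map the next contiguous piece; its address lands in walk.addr. */
                unsigned int n = scatterwalk_next(&walk, len);

                process(walk.addr, n);          /* data is only read here */
                scatterwalk_done_src(&walk, n); /* unmap and advance the walk */
                len -= n;
        } while (len);
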
 
 static inline void scatterwalk_unmap(const void *vaddr)
 /**
  * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
  * @walk: the scatter_walk
- * @vaddr: the address returned by scatterwalk_next()
  * @nbytes: the number of bytes processed this step, less than or equal to the
  *         number of bytes that scatterwalk_next() returned.
  *
- * Use this if the @vaddr was not written to, i.e. it is source data.
+ * Use this if the mapped address was not written to, i.e. it is source data.
  */
 static inline void scatterwalk_done_src(struct scatter_walk *walk,
-                                       const void *vaddr, unsigned int nbytes)
+                                       unsigned int nbytes)
 {
-       scatterwalk_unmap(vaddr);
+       scatterwalk_unmap(walk->addr);
        scatterwalk_advance(walk, nbytes);
 }
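
The destination-side pattern is symmetric; a sketch using scatterwalk_done_dst() (documented below), again with an illustrative fill_output() helper:

        do {
                unsigned int n = scatterwalk_next(&walk, len);

                fill_output(walk.addr, n);      /* data is written, not just read */
                scatterwalk_done_dst(&walk, n); /* unmap, flush if needed, advance */
                len -= n;
        } while (len);
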
 
 /**
  * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
  * @walk: the scatter_walk
- * @vaddr: the address returned by scatterwalk_next()
  * @nbytes: the number of bytes processed this step, less than or equal to the
  *         number of bytes that scatterwalk_next() returned.
  *
- * Use this if the @vaddr may have been written to, i.e. it is destination data.
+ * Use this if the mapped address may have been written to, i.e. it is
+ * destination data.
  */
 static inline void scatterwalk_done_dst(struct scatter_walk *walk,
-                                       void *vaddr, unsigned int nbytes)
+                                       unsigned int nbytes)
 {
-       scatterwalk_unmap(vaddr);
+       scatterwalk_unmap(walk->addr);
        /*
         * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
         * relying on flush_dcache_page() being a no-op when not implemented,