};
 
 struct ahash_save_req_state {
-       struct list_head head;
        struct ahash_request *req0;
-       struct ahash_request *cur;
-       int (*op)(struct ahash_request *req);
        crypto_completion_t compl;
        void *data;
        struct scatterlist sg;
        const u8 *src;
        u8 *page;
        unsigned int offset;
        unsigned int nbytes;
+       bool update;
 };
 
-static void ahash_reqchain_done(void *data, int err);
 static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
 static void ahash_restore_req(struct ahash_request *req);
 static void ahash_def_finup_done1(void *data, int err);
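
With request chaining gone, ahash_save_req_state only has to describe a single in-flight request: req0, the caller's saved completion (compl/data), and the bounce page (src, page, offset, nbytes) used when a caller hands the API a kernel virtual address but the driver only accepts scatterlists, plus the new update flag recording whether the operation driving the state is an ->update(). As rough orientation, here is an illustrative sketch (not taken from the patch) of the copy loop those fields support:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: feed a linear source buffer to a backend one
 * bounce-buffer copy at a time.  'offset' and 'nbytes' mirror the fields
 * of ahash_save_req_state above.
 */
static int bounce_walk_sketch(const u8 *src, unsigned int nbytes,
			      u8 *bounce, unsigned int bounce_len,
			      int (*update)(const u8 *buf, unsigned int len))
{
	unsigned int offset = 0;
	int err = 0;

	while (offset < nbytes) {
		unsigned int len = min(bounce_len, nbytes - offset);

		memcpy(bounce, src + offset, len);
		err = update(bounce, len);
		if (err)
			break;
		offset += len;
	}

	return err;
}
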
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static bool ahash_request_hasvirt(struct ahash_request *req)
-{
-       return ahash_request_isvirt(req);
-}
-
 static int ahash_reqchain_virt(struct ahash_save_req_state *state,
                               int err, u32 mask)
 {
-       struct ahash_request *req = state->cur;
+       struct ahash_request *req = state->req0;
+       struct crypto_ahash *tfm;
+
+       tfm = crypto_ahash_reqtfm(req);
 
        for (;;) {
                unsigned len = state->nbytes;
 
-               req->base.err = err;
-
                if (!state->offset)
                        break;
 
                state->offset += len;
                req->nbytes = len;
 
-               err = state->op(req);
+               err = crypto_ahash_alg(tfm)->update(req);
                if (err == -EINPROGRESS) {
-                       if (!list_empty(&state->head) ||
-                           state->offset < state->nbytes)
+                       if (state->offset < state->nbytes)
                                err = -EBUSY;
                        break;
                }
                                 struct ahash_save_req_state *state,
                                 int err, u32 mask)
 {
-       struct ahash_request *req = state->cur;
-       struct crypto_ahash *tfm;
-       struct ahash_request *n;
-       bool update;
        u8 *page;
 
        err = ahash_reqchain_virt(state, err, mask);
        if (err == -EINPROGRESS || err == -EBUSY)
                goto out;
 
-       if (req != req0)
-               list_add_tail(&req->base.list, &req0->base.list);
-
-       tfm = crypto_ahash_reqtfm(req);
-       update = state->op == crypto_ahash_alg(tfm)->update;
-
-       list_for_each_entry_safe(req, n, &state->head, base.list) {
-               list_del_init(&req->base.list);
-
-               req->base.flags &= mask;
-               req->base.complete = ahash_reqchain_done;
-               req->base.data = state;
-               state->cur = req;
-
-               if (update && ahash_request_isvirt(req) && req->nbytes) {
-                       unsigned len = req->nbytes;
-                       u8 *result = req->result;
-
-                       state->src = req->svirt;
-                       state->nbytes = len;
-
-                       len = min(PAGE_SIZE, len);
-
-                       memcpy(state->page, req->svirt, len);
-                       state->offset = len;
-
-                       ahash_request_set_crypt(req, &state->sg, result, len);
-               }
-
-               err = state->op(req);
-
-               if (err == -EINPROGRESS) {
-                       if (!list_empty(&state->head) ||
-                           state->offset < state->nbytes)
-                               err = -EBUSY;
-                       goto out;
-               }
-
-               if (err == -EBUSY)
-                       goto out;
-
-               err = ahash_reqchain_virt(state, err, mask);
-               if (err == -EINPROGRESS || err == -EBUSY)
-                       goto out;
-
-               list_add_tail(&req->base.list, &req0->base.list);
-       }
-
        page = state->page;
        if (page) {
                memset(page, 0, PAGE_SIZE);
        data = state->data;
 
        if (err == -EINPROGRESS) {
-               if (!list_empty(&state->head) || state->offset < state->nbytes)
+               if (state->offset < state->nbytes)
                        return;
                goto notify;
        }
        int err;
 
        if (crypto_ahash_req_chain(tfm) ||
-           (!ahash_request_chained(req) &&
-            (!update || !ahash_request_isvirt(req))))
+           !update || !ahash_request_isvirt(req))
                return op(req);
 
-       if (update && ahash_request_hasvirt(req)) {
-               gfp_t gfp;
-               u32 flags;
-
-               flags = ahash_request_flags(req);
-               gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                     GFP_KERNEL : GFP_ATOMIC;
-               page = (void *)__get_free_page(gfp);
+       if (update && ahash_request_isvirt(req)) {
+               page = (void *)__get_free_page(GFP_ATOMIC);
                err = -ENOMEM;
                if (!page)
-                       goto out_set_chain;
+                       goto out;
        }
 
        state = &state0;
                state = req->base.data;
        }
 
-       state->op = op;
-       state->cur = req;
+       state->update = update;
        state->page = page;
        state->offset = 0;
        state->nbytes = 0;
-       INIT_LIST_HEAD(&state->head);
 
        if (page)
                sg_init_one(&state->sg, page, PAGE_SIZE);
        }
 
        err = op(req);
-       if (err == -EBUSY || err == -EINPROGRESS)
-               return -EBUSY;
+       if (err == -EINPROGRESS || err == -EBUSY) {
+               if (state->offset < state->nbytes)
+                       err = -EBUSY;
+               return err;
+       }
 
        return ahash_reqchain_finish(req, state, err, ~0);
 
 out_free_page:
        free_page((unsigned long)page);
 
-out_set_chain:
-       req->base.err = err;
+out:
        return err;
 }
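
The -EINPROGRESS/-EBUSY handling above only affects the asynchronous completion contract of ahash_do_req_chain(); callers normally fold both cases into a simple wait using the standard crypto_wait_req() helper. An illustrative sketch (not part of the patch):

#include <crypto/hash.h>

/* Illustrative caller: crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY
 * and returns the final status reported through the request callback. */
static int example_wait_update(struct ahash_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	return crypto_wait_req(crypto_ahash_update(req), &wait);
}
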
 
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-       if (likely(tfm->using_shash)) {
-               int err;
-
-               err = crypto_shash_init(prepare_shash_desc(req, tfm));
-               req->base.err = err;
-               return err;
-       }
-
+       if (likely(tfm->using_shash))
+               return crypto_shash_init(prepare_shash_desc(req, tfm));
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
-
        return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_init);
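
For reference, an illustrative (not from the patch) caller-side sequence around crypto_ahash_init() and the update/final entry points that follow below, driven synchronously through crypto_wait_req():

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: 'digest' must be at least crypto_ahash_digestsize()
 * bytes for the chosen algorithm. */
static int example_hash_sg(struct scatterlist *sg, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, digest, len);

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_final(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
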
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ahash_save_req_state *state;
-       gfp_t gfp;
-       u32 flags;
 
        if (!ahash_is_async(tfm))
                return 0;
 
-       flags = ahash_request_flags(req);
-       gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?  GFP_KERNEL : GFP_ATOMIC;
-       state = kmalloc(sizeof(*state), gfp);
+       state = kmalloc(sizeof(*state), GFP_ATOMIC);
        if (!state)
                return -ENOMEM;
 
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-       if (likely(tfm->using_shash)) {
-               int err;
-
-               err = shash_ahash_update(req, ahash_request_ctx(req));
-               req->base.err = err;
-               return err;
-       }
-
+       if (likely(tfm->using_shash))
+               return shash_ahash_update(req, ahash_request_ctx(req));
        return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-       if (likely(tfm->using_shash)) {
-               int err;
-
-               err = crypto_shash_final(ahash_request_ctx(req), req->result);
-               req->base.err = err;
-               return err;
-       }
-
+       if (likely(tfm->using_shash))
+               return crypto_shash_final(ahash_request_ctx(req), req->result);
        return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-       if (likely(tfm->using_shash)) {
-               int err;
-
-               err = shash_ahash_finup(req, ahash_request_ctx(req));
-               req->base.err = err;
-               return err;
-       }
-
+       if (likely(tfm->using_shash))
+               return shash_ahash_finup(req, ahash_request_ctx(req));
        if (!crypto_ahash_alg(tfm)->finup ||
-           (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
+           (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req)))
                return ahash_def_finup(req);
-
        return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
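
crypto_ahash_finup() collapses the last update and the final into a single call. Relative to the init/update/final sketch above, only the tail changes (illustrative):

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!err)
		/* Hash the data described by the request and produce the
		 * digest in one call instead of update() + final(). */
		err = crypto_wait_req(crypto_ahash_finup(req), &wait);
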
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-       if (likely(tfm->using_shash)) {
-               int err;
-
-               err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
-               req->base.err = err;
-               return err;
-       }
-
-       if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
+       if (likely(tfm->using_shash))
+               return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+       if (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req))
                return ahash_def_digest(req);
-
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
-
        return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
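
crypto_ahash_digest() performs the whole init+finup sequence in one call; the ahash_def_digest() path above is the fallback taken when the request carries a virtual address and the driver does not handle such requests itself. As an illustration, reusing the setup from the earlier sketch and assuming the ahash_request_set_virt() helper introduced by the same virtual-address series:

	/* Hand the API a linear kernel buffer instead of a scatterlist;
	 * the core bounces it through a page for scatterlist-only drivers. */
	ahash_request_set_virt(req, buf, digest, len);
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
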