struct safexcel_request *request;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
+       /* If a request wasn't properly dequeued because of a lack of resources,
+        * process it first.
+        */
+       req = priv->ring[ring].req;
+       backlog = priv->ring[ring].backlog;
+       if (req)
+               goto handle_req;
+
        while (true) {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-               if (!req)
+               if (!req) {
+                       priv->ring[ring].req = NULL;
+                       priv->ring[ring].backlog = NULL;
                        goto finalize;
+               }
 
+handle_req:
                request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-               if (!request) {
-                       spin_lock_bh(&priv->ring[ring].queue_lock);
-                       crypto_enqueue_request(&priv->ring[ring].queue, req);
-                       spin_unlock_bh(&priv->ring[ring].queue_lock);
-                       goto finalize;
-               }
+               if (!request)
+                       goto request_failed;
 
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
-                       req->complete(req, ret);
-                       goto finalize;
+                       goto request_failed;
                }
 
                if (backlog)
                nreq++;
        }
 
+request_failed:
+       /* Not enough resources to handle all the requests. Bail out and save
+        * the request and the backlog for the next dequeue call (per-ring).
+        */
+       priv->ring[ring].req = req;
+       priv->ring[ring].backlog = backlog;
+
 finalize:
        if (!nreq)
                return;
 
 
                /* The ring is currently handling at least one request */
                bool busy;
+
+       /* Stores the current request and backlog when bailing out of the
+        * dequeueing function because not enough resources are available.
+        */
+               struct crypto_async_request *req;
+               struct crypto_async_request *backlog;
        } ring[EIP197_MAX_RINGS];
 };