        spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
 
        list_for_each_entry_safe(mr, tmp, &all_list, list) {
-               if (mr->state == SMBDIRECT_MR_INVALIDATED)
-                       ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
-                               mr->sgt.nents, mr->dir);
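+               /* only unmap if the sg table was actually DMA-mapped */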
+               if (mr->sgt.nents)
+                       ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
                ib_dereg_mr(mr->mr);
                kfree(mr->sgt.sgl);
                list_del(&mr->list);
                kfree(mr);
        }
@@ ... @@
 map_mr_error:
        ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
 
 dma_map_error:
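+       /* note that the sgl is no longer DMA-mapped */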
+       mr->sgt.nents = 0;
        mr->state = SMBDIRECT_MR_ERROR;
        if (atomic_dec_and_test(&sc->mr_io.used.count))
                wake_up(&sc->mr_io.cleanup.wait_queue);
@@ ... @@
                /*
                 * For remote invalidation, just set it to
                 * SMBDIRECT_MR_INVALIDATED and defer to mr_recovery_work
                 * to recover the MR for next use.
                 */
                mr->state = SMBDIRECT_MR_INVALIDATED;
 
-       if (mr->state == SMBDIRECT_MR_INVALIDATED) {
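+       /* unmap whenever the sgl is mapped, independent of the MR state */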
+       if (mr->sgt.nents) {
                ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
+               mr->sgt.nents = 0;
+       }
+
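+       /* a successfully invalidated MR is ready for the next use */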
+       if (mr->state == SMBDIRECT_MR_INVALIDATED) {
                mr->state = SMBDIRECT_MR_READY;
                if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
                        wake_up(&sc->mr_io.ready.wait_queue);