* @malloc:    allocate mem from a pool.
  * @free:      free mem from a pool.
  * @shrink:    shrink the pool.
+ * @sleep_mapped: whether zpool driver can sleep during map.
  * @map:       map a handle.
  * @unmap:     unmap a handle.
  * @total_size:        get total size of a pool.
        int (*shrink)(void *pool, unsigned int pages,
                                unsigned int *reclaimed);
 
+       bool sleep_mapped;
        void *(*map)(void *pool, unsigned long handle,
                                enum zpool_mapmode mm);
        void (*unmap)(void *pool, unsigned long handle);
 int zpool_unregister_driver(struct zpool_driver *driver);
 
 bool zpool_evictable(struct zpool *pool);
+bool zpool_can_sleep_mapped(struct zpool *pool);
 
 #endif
 
        void *pool;
        const struct zpool_ops *ops;
        bool evictable;
+       bool can_sleep_mapped;
 
        struct list_head list;
 };
        zpool->pool = driver->create(name, gfp, ops, zpool);
        zpool->ops = ops;
        zpool->evictable = driver->shrink && ops && ops->evict;
+       zpool->can_sleep_mapped = driver->sleep_mapped;
 
        if (!zpool->pool) {
                pr_err("couldn't create %s pool\n", type);
        return zpool->evictable;
 }
 
+/**
+ * zpool_can_sleep_mapped - Test whether the zpool can sleep while a handle
+ * is mapped.
+ * @zpool:	The zpool to test
+ *
+ * Some zpool backends enter an atomic context when mapping a handle, so a
+ * caller must not sleep between map and unmap (it must copy the data out
+ * first) unless this returns true.  The value is fixed at pool creation
+ * from the driver's sleep_mapped flag.
+ *
+ * Returns: true if zpool can sleep; false otherwise.
+ */
+bool zpool_can_sleep_mapped(struct zpool *zpool)
+{
+	return zpool->can_sleep_mapped;
+}
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
 MODULE_DESCRIPTION("Common API for compressed memory storage");
 
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
 
-       u8 *src;
+       u8 *src, *tmp = NULL;
        unsigned int dlen;
        int ret;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };
 
+       if (!zpool_can_sleep_mapped(pool)) {
+               tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+               if (!tmp)
+                       return -ENOMEM;
+       }
+
        /* extract swpentry from data */
        zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        swpentry = zhdr->swpentry; /* here */
                /* entry was invalidated */
                spin_unlock(&tree->lock);
                zpool_unmap_handle(pool, handle);
+               kfree(tmp);
                return 0;
        }
        spin_unlock(&tree->lock);
                dlen = PAGE_SIZE;
                src = (u8 *)zhdr + sizeof(struct zswap_header);
 
+               if (!zpool_can_sleep_mapped(pool)) {
+
+                       memcpy(tmp, src, entry->length);
+                       src = tmp;
+
+                       zpool_unmap_handle(pool, handle);
+               }
+
                mutex_lock(acomp_ctx->mutex);
                sg_init_one(&input, src, entry->length);
                sg_init_table(&output, 1);
        spin_unlock(&tree->lock);
 
 end:
-       zpool_unmap_handle(pool, handle);
+       if (zpool_can_sleep_mapped(pool))
+               zpool_unmap_handle(pool, handle);
+       else
+               kfree(tmp);
+
        return ret;
 }
 
        struct zswap_entry *entry;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
-       u8 *src, *dst;
+       u8 *src, *dst, *tmp;
        unsigned int dlen;
        int ret;
 
                dst = kmap_atomic(page);
                zswap_fill_page(dst, entry->value);
                kunmap_atomic(dst);
+               ret = 0;
                goto freeentry;
        }
 
+       if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
+
+               tmp = kmalloc(entry->length, GFP_ATOMIC);
+               if (!tmp) {
+                       ret = -ENOMEM;
+                       goto freeentry;
+               }
+       }
+
        /* decompress */
        dlen = PAGE_SIZE;
        src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
        if (zpool_evictable(entry->pool->zpool))
                src += sizeof(struct zswap_header);
 
+       if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
+
+               memcpy(tmp, src, entry->length);
+               src = tmp;
+
+               zpool_unmap_handle(entry->pool->zpool, entry->handle);
+       }
+
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
        mutex_lock(acomp_ctx->mutex);
        sg_init_one(&input, src, entry->length);
        ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
        mutex_unlock(acomp_ctx->mutex);
 
-       zpool_unmap_handle(entry->pool->zpool, entry->handle);
+       if (zpool_can_sleep_mapped(entry->pool->zpool))
+               zpool_unmap_handle(entry->pool->zpool, entry->handle);
+       else
+               kfree(tmp);
+
        BUG_ON(ret);
 
 freeentry:
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);
 
-       return 0;
+       return ret;
 }
 
 /* frees an entry in zswap */