struct nvme_dsm_range *range;
        struct bio *bio;
 
-       range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
-       if (!range)
-               return BLK_STS_RESOURCE;
+       range = kmalloc_array(segments, sizeof(*range),
+                               GFP_ATOMIC | __GFP_NOWARN);
+       if (!range) {
+               /*
+                * If we fail to allocate our range, fall back to the
+                * controller discard page. If that's also busy, it's safe to
+                * return BLK_STS_RESOURCE, as we know we can make progress
+                * once the page is freed.
+                */
+               if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
+                       return BLK_STS_RESOURCE;
+
+               range = page_address(ns->ctrl->discard_page);
+       }
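The fallback leans on the kernel's bit-lock API: test_and_set_bit_lock()
atomically sets bit 0 of discard_page_busy with acquire ordering and returns
its previous value, so at most one request can own the preallocated page at a
time, and clear_bit_unlock() releases it with release ordering. A minimal
sketch of the claim/release pattern (helper names are illustrative, not part
of the patch):

static void *nvme_claim_discard_page(struct nvme_ctrl *ctrl)
{
        /* a non-zero old bit value means another request owns the page */
        if (test_and_set_bit_lock(0, &ctrl->discard_page_busy))
                return NULL;
        return page_address(ctrl->discard_page);
}

static void nvme_release_discard_page(struct nvme_ctrl *ctrl)
{
        /* pairs with the acquire in nvme_claim_discard_page() */
        clear_bit_unlock(0, &ctrl->discard_page_busy);
}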
 
        __rq_for_each_bio(bio, req) {
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
        }
 
        if (WARN_ON_ONCE(n != segments)) {
-               kfree(range);
+               if (virt_to_page(range) == ns->ctrl->discard_page)
+                       clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+               else
+                       kfree(range);
                return BLK_STS_IOERR;
        }
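This error path and nvme_cleanup_cmd() below apply the same release rule: a
buffer living in the shared fallback page is returned by dropping the busy
bit, while anything else came from kmalloc_array() and must be kfree'd.
virt_to_page() maps the buffer's kernel virtual address back to its struct
page so the two cases can be told apart. A hypothetical helper (not part of
the patch) that both sites could share:

static void nvme_free_dsm_buffer(struct nvme_ctrl *ctrl, void *range)
{
        /* the fallback page is released by clearing its busy bit ... */
        if (virt_to_page(range) == ctrl->discard_page)
                clear_bit_unlock(0, &ctrl->discard_page_busy);
        else
                kfree(range);   /* ... everything else is heap memory */
}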
 
                                blk_rq_bytes(req) >> ns->lba_shift);
        }
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-               kfree(page_address(req->special_vec.bv_page) +
-                     req->special_vec.bv_offset);
+               struct nvme_ns *ns = req->rq_disk->private_data;
+               struct page *page = req->special_vec.bv_page;
+
+               if (page == ns->ctrl->discard_page)
+                       clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+               else
+                       kfree(page_address(page) + req->special_vec.bv_offset);
        }
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
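nvme_cleanup_cmd() runs at request completion, where only the request is at
hand, so the namespace is recovered via req->rq_disk->private_data and the
payload page is compared against the controller's fallback page directly.
For context, the setup side (elided from this excerpt) publishes the range
buffer through the request's special payload, roughly:

        /* sketch of how nvme_setup_discard() attaches the range buffer */
        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

which is why bv_page alone is enough to decide at completion time whether the
buffer was the fallback page or a heap allocation.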
        ida_simple_remove(&nvme_instance_ida, ctrl->instance);
        kfree(ctrl->effects);
        nvme_mpath_uninit(ctrl);
+       __free_page(ctrl->discard_page);
 
        if (subsys) {
                mutex_lock(&subsys->lock);
        memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
        ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
 
+       BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
+                       PAGE_SIZE);
+       ctrl->discard_page = alloc_page(GFP_KERNEL);
+       if (!ctrl->discard_page) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
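The BUILD_BUG_ON guarantees at compile time that the preallocated page always
covers a worst-case discard payload, so the fallback in nvme_setup_discard()
can never overrun it. A worked check, assuming the usual driver constants:

        /*
         * sizeof(struct nvme_dsm_range) = 4 + 4 + 8 = 16 bytes
         * NVME_DSM_MAX_RANGES           = 256
         * 256 * 16 = 4096 <= PAGE_SIZE (4K or larger)
         */

A configuration where the product exceeded PAGE_SIZE would fail to build
rather than corrupt memory at runtime.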
        ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out;
 out_release_instance:
        ida_simple_remove(&nvme_instance_ida, ctrl->instance);
 out:
+       if (ctrl->discard_page)
+               __free_page(ctrl->discard_page);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_ctrl);