CONFIGFS_ATTR(nvmet_ns_, enable);
 
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+       return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct nvmet_ns *ns = to_nvmet_ns(item);
+       bool val;
+
+       if (strtobool(page, &val))
+               return -EINVAL;
+
+       mutex_lock(&ns->subsys->lock);
+       if (ns->enabled) {
+               pr_err("disable ns before setting buffered_io value.\n");
+               mutex_unlock(&ns->subsys->lock);
+               return -EINVAL;
+       }
+
+       ns->buffered_io = val;
+       mutex_unlock(&ns->subsys->lock);
+       return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
        &nvmet_ns_attr_device_path,
        &nvmet_ns_attr_device_nguid,
        &nvmet_ns_attr_device_uuid,
        &nvmet_ns_attr_enable,
+       &nvmet_ns_attr_buffered_io,
        NULL,
 };
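For reference, the new attribute is driven like the other per-namespace configfs attributes, and the store handler above rejects writes while the namespace is enabled. A minimal userspace sketch of the toggle sequence; the configfs mount point, the subsystem NQN "testnqn", the nsid "1", and the write_attr() helper are placeholders for illustration, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* hypothetical namespace directory under the standard nvmet configfs layout */
#define NS_DIR "/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1"

static int write_attr(const char *attr, const char *val)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), NS_DIR "/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* buffered_io can only be changed while the namespace is disabled */
	if (write_attr("enable", "0") ||
	    write_attr("buffered_io", "1") ||
	    write_attr("enable", "1")) {
		perror("buffered_io toggle");
		return 1;
	}
	return 0;
}

From a shell the same sequence is just echo 0 > enable; echo 1 > buffered_io; echo 1 > enable inside the namespace directory.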
 
 
 
 #include "nvmet.h"
 
+struct workqueue_struct *buffered_io_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
        ns->nsid = nsid;
        ns->subsys = subsys;
        uuid_gen(&ns->uuid);
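+       /* namespaces default to direct (non-buffered) file I/O */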
+       ns->buffered_io = false;
 
        return ns;
 }
 static int __init nvmet_init(void)
 {
        int error;
 
+       buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+                       WQ_MEM_RECLAIM, 0);
+       if (!buffered_io_wq) {
+               error = -ENOMEM;
+               goto out;
+       }
        error = nvmet_init_discovery();
        if (error)
                goto out;
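One nit on the hunk above: if nvmet_init_discovery() or any later init step fails, the existing goto out path returns without destroying the freshly allocated workqueue, so the error path leaks it. A minimal sketch of the corrected unwind, assuming the remainder of nvmet_init() only sets up discovery and configfs as in current mainline; the label names are illustrative, not from the patch:

/* illustrative error-path unwind; label names are not from the patch */
static int __init nvmet_init(void)
{
	int error;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		return -ENOMEM;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
	return error;
}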
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
+       destroy_workqueue(buffered_io_wq);
 
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
 
 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
        if (ns->file) {
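+               /*
+                * Let any queued buffered-io work drain before tearing
+                * down the bvec mempool it may still be using.
+                */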
+               if (ns->buffered_io)
+                       flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                kmem_cache_destroy(ns->bvec_cache);
 
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
 {
-       int ret;
+       int flags = O_RDWR | O_LARGEFILE;
        struct kstat stat;
+       int ret;
+
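+       /* default (!buffered_io): bypass the page cache via O_DIRECT */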
+       if (!ns->buffered_io)
+               flags |= O_DIRECT;
 
-       ns->file = filp_open(ns->device_path,
-                       O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+       ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                pr_err("failed to open file %s: (%ld)\n",
                                ns->device_path, PTR_ERR(ns->file));
 
        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
-       iocb->ki_flags = IOCB_DIRECT | ki_flags;
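+       /* pick up IOCB_DIRECT (or not) from the file's open flags */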
+       iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
 
        ret = call_iter(iocb, &iter);
 
        nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
 }
 
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+       struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+       nvmet_file_execute_rw(req);
+}
+
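+/*
+ * Buffered I/O may block on the page cache, so execute it from process
+ * context on the dedicated workqueue rather than inline in the
+ * transport completion path.
+ */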
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+       INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+       queue_work(buffered_io_wq, &req->f.work);
+}
+
 static void nvmet_file_flush_work(struct work_struct *w)
 {
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
-               req->execute = nvmet_file_execute_rw;
+               if (req->ns->buffered_io)
+                       req->execute = nvmet_file_execute_rw_buffered_io;
+               else
+                       req->execute = nvmet_file_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush: