dev->discard_alignment      = le32_to_cpu(rsp->discard_alignment);
        dev->secure_discard         = le16_to_cpu(rsp->secure_discard);
        dev->rotational             = rsp->rotational;
+       dev->wc                     = !!(rsp->cache_policy & RNBD_WRITEBACK);
+       dev->fua                    = !!(rsp->cache_policy & RNBD_FUA);
 
        dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
        dev->max_segments = BMAX_SEGMENTS;
        blk_queue_max_segments(dev->queue, dev->max_segments);
        blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
        blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
-       blk_queue_write_cache(dev->queue, true, true);
+       blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
        dev->queue->queuedata = dev;
 }
 
        }
 
        rnbd_clt_info(dev,
-                      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+                      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
                       dev->gd->disk_name, dev->nsectors,
                       dev->logical_block_size, dev->physical_block_size,
                       dev->max_write_same_sectors, dev->max_discard_sectors,
                       dev->discard_granularity, dev->discard_alignment,
                       dev->secure_discard, dev->max_segments,
-                      dev->max_hw_sectors, dev->rotational);
+                      dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
 
        mutex_unlock(&dev->lock);
 
 
        __le32          device_id;
 };
 
+/* Bits carried in rnbd_msg_open_rsp.cache_policy (server -> client). */
+enum rnbd_cache_policy {
+       RNBD_FUA = 1 << 0,              /* server-side queue honors REQ_FUA (blk_queue_fua) */
+       RNBD_WRITEBACK = 1 << 1,        /* server-side queue has a write-back cache (QUEUE_FLAG_WC) */
+};
+
 /**
  * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
  * @hdr:               message header
  * @max_segments:      max segments hardware support in one transfer
  * @secure_discard:    supports secure discard
- * @rotation:          is a rotational disc?
+ * @rotational:        is a rotational disc?
+ * @cache_policy:      does the device support write-back caching and/or FUA?
  */
 struct rnbd_msg_open_rsp {
        struct rnbd_msg_hdr     hdr;
        __le16                  max_segments;
        __le16                  secure_discard;
        u8                      rotational;
-       u8                      reserved[11];
+       u8                      cache_policy;   /* bitmask of enum rnbd_cache_policy */
+       u8                      reserved[10];   /* shrunk by one byte so the wire struct size is unchanged */
 };
 
 /**
 
                                        struct rnbd_srv_sess_dev *sess_dev)
 {
        struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+       struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
 
        rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
        rsp->device_id =
                cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
        rsp->secure_discard =
                cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
-       rsp->rotational =
-               !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+       rsp->rotational = !blk_queue_nonrot(q);
+       rsp->cache_policy = 0;
+       if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+               rsp->cache_policy |= RNBD_WRITEBACK;
+       if (blk_queue_fua(q))
+               rsp->cache_policy |= RNBD_FUA;
 }
 
 static struct rnbd_srv_sess_dev *