sector_t *highs;
        struct dm_target *targets;
 
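+       /* Cleared if any target in the table does not support discards. */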
+       unsigned discards_supported:1;
+
        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
 
        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 0);
+       t->discards_supported = 1;
 
        if (!num_targets)
                num_targets = KEYS_PER_NODE;
 
        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 
+       if (!tgt->num_discard_requests)
+               t->discards_supported = 0;
+
        return 0;
 
  bad:
        else
                queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
 
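+       /*
+        * Only advertise discard support when every target in the table
+        * allows it and at least one underlying device supports it.
+        */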
+       if (!dm_table_supports_discards(t))
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+       else
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
        dm_table_set_integrity(t);
 
        /*
        return t->md;
 }
 
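+/*
+ * iterate_devices callout used by dm_table_supports_discards: report
+ * whether the underlying device's queue advertises discard support.
+ */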
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+                                 sector_t start, sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i = 0;
+
+       if (!t->discards_supported)
+               return 0;
+
+       /*
+        * Ensure that at least one underlying device supports discards.
+        * t->devices includes internal dm devices such as mirror logs,
+        * so we need to use iterate_devices here, which any target that
+        * supports discards must provide.
+        */
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+
+               if (ti->type->iterate_devices &&
+                   ti->type->iterate_devices(ti, device_discard_capable, NULL))
+                       return 1;
+       }
+
+       return 0;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
 
        return 0;
 }
 
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+       struct bio *clone, *bio = ci->bio;
+       struct dm_target_io *tio;
+
+       tio = alloc_tio(ci, ti);
+       clone = clone_bio(bio, ci->sector, ci->idx,
+                         bio->bi_vcnt - ci->idx, ci->sector_count,
+                         ci->md->bs);
+       __map_bio(ti, clone, tio);
+       ci->sector_count = 0;
+}
+
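+/*
+ * Map a discard to a single target.  Discards that span more than one
+ * target are not yet handled (see the FIXME below).
+ */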
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+       struct dm_target *ti;
+       sector_t max;
+
+       ti = dm_table_find_target(ci->map, ci->sector);
+       if (!dm_target_is_valid(ti))
+               return -EIO;
+
+       /*
+        * Even though the device advertised discard support,
+        * reconfiguration might have changed that since the
+        * check was performed.
+        */
+       if (!ti->num_discard_requests)
+               return -EOPNOTSUPP;
+
+       max = max_io_len(ci->md, ci->sector, ti);
+
+       if (ci->sector_count > max)
+               /*
+                * FIXME: Handle a discard that spans two or more targets.
+                */
+               return -EOPNOTSUPP;
+
+       __clone_and_map_simple(ci, ti);
+
+       return 0;
+}
+
 static int __clone_and_map(struct clone_info *ci)
 {
        struct bio *clone, *bio = ci->bio;
        if (unlikely(bio_empty_barrier(bio)))
                return __clone_and_map_empty_barrier(ci);
 
+       if (unlikely(bio->bi_rw & REQ_DISCARD))
+               return __clone_and_map_discard(ci);
+
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;
 
        max = max_io_len(ci->md, ci->sector, ti);
 
-       /*
-        * Allocate a target io object.
-        */
-       tio = alloc_tio(ci, ti);
-
        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
-               clone = clone_bio(bio, ci->sector, ci->idx,
-                                 bio->bi_vcnt - ci->idx, ci->sector_count,
-                                 ci->md->bs);
-               __map_bio(ti, clone, tio);
-               ci->sector_count = 0;
+               __clone_and_map_simple(ci, ti);
 
        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                        len += bv_len;
                }
 
+               tio = alloc_tio(ci, ti);
                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
                                        return -EIO;
 
                                max = max_io_len(ci->md, ci->sector, ti);
-
-                               tio = alloc_tio(ci, ti);
                        }
 
                        len = min(remaining, max);
 
+                       tio = alloc_tio(ci, ti);
                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);