if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached) {
-                               bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
+                               bch2_extent_ptr_set_cached(c, &m->op.opts,
+                                                          bkey_i_to_s(insert), ptr);
                                rewrites_found |= 1U << i;
                        }
                        i++;
                            durability - ptr_durability >= m->op.opts.data_replicas) {
                                durability -= ptr_durability;
 
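+                               /*
+                                * We still have at least data_replicas worth of
+                                * durability without this pointer, so demote it
+                                * to a cached copy; bch2_extent_ptr_set_cached()
+                                * may now drop it entirely if it isn't wanted as
+                                * cache:
+                                */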
-                               bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
+                               bch2_extent_ptr_set_cached(c, &m->op.opts,
+                                                          bkey_i_to_s(insert), &entry->ptr);
                                goto restart_drop_extra_replicas;
                        }
                }
                        bch2_extent_ptr_decoded_append(insert, &p);
 
                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
-               bch2_extent_normalize(c, bkey_i_to_s(insert));
+               bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert));
 
                ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
 int bch2_extent_drop_ptrs(struct btree_trans *trans,
                          struct btree_iter *iter,
                          struct bkey_s_c k,
-                         struct data_update_opts data_opts)
+                         struct bch_io_opts *io_opts,
+                         struct data_update_opts *data_opts)
 {
        struct bch_fs *c = trans->c;
        struct bkey_i *n;
        if (ret)
                return ret;
 
-       while (data_opts.kill_ptrs) {
-               unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
+       while (data_opts->kill_ptrs) {
+               unsigned i = 0, drop = __fls(data_opts->kill_ptrs);
 
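+               /*
+                * __fls() picks the highest set bit: dropping the
+                * highest-numbered pointer first keeps the lower indices still
+                * set in kill_ptrs valid after the drop.
+                */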
                bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
-               data_opts.kill_ptrs ^= 1U << drop;
+               data_opts->kill_ptrs ^= 1U << drop;
        }
 
        /*
         * If the new extent no longer has any pointers, bch2_extent_normalize_by_opts()
         * will do the appropriate thing with it (turning it into a
         * KEY_TYPE_error key, or just a discard if it was a cached extent)
         */
-       bch2_extent_normalize(c, bkey_i_to_s(n));
+       bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n));
 
        /*
         * Since we're not inserting through an extent iterator
                m->data_opts.rewrite_ptrs = 0;
                /* if iter == NULL, it's just a promote */
                if (iter)
-                       ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
+                       ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
                goto out;
        }
 
 
 int bch2_extent_drop_ptrs(struct btree_trans *,
                          struct btree_iter *,
                          struct bkey_s_c,
-                         struct data_update_opts);
+                         struct bch_io_opts *,
+                         struct data_update_opts *);
 
 void bch2_data_update_exit(struct data_update *);
 int bch2_data_update_init(struct btree_trans *, struct btree_iter *,
 
        return NULL;
 }
 
-void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
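+/*
+ * Should we keep (or create) a cached copy on this device?  Only if a promote
+ * target is configured, the device is a member of it, the device is currently
+ * readable, and the pointer isn't stale.  Callers hold rcu_read_lock().
+ */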
+static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts,
+                           struct bch_extent_ptr *ptr)
+{
+       if (!opts->promote_target ||
+           !bch2_dev_in_target(c, ptr->dev, opts->promote_target))
+               return false;
+
+       struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
+
+       return ca && bch2_dev_is_readable(ca) && !dev_ptr_stale_rcu(ca, ptr);
+}
+
+void bch2_extent_ptr_set_cached(struct bch_fs *c,
+                               struct bch_io_opts *opts,
+                               struct bkey_s k,
+                               struct bch_extent_ptr *ptr)
 {
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
        union bch_extent_entry *entry;
-       union bch_extent_entry *ec = NULL;
+       struct extent_ptr_decoded p;
 
-       bkey_extent_entry_for_each(ptrs, entry) {
+       rcu_read_lock();
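+       /* If it's not worth keeping as a cached copy, just drop it: */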
+       if (!want_cached_ptr(c, opts, ptr)) {
+               bch2_bkey_drop_ptr_noerror(k, ptr);
+               goto out;
+       }
+
+       /*
+        * Stripes can't contain cached data, so if this pointer is part of a
+        * stripe we drop it rather than marking it cached.
+        *
+        * Possibly something we can fix in the future?
+        */
+       bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (&entry->ptr == ptr) {
-                       ptr->cached = true;
-                       if (ec)
-                               extent_entry_drop(k, ec);
-                       return;
+                       if (p.has_ec)
+                               bch2_bkey_drop_ptr_noerror(k, ptr);
+                       else
+                               ptr->cached = true;
+                       goto out;
                }
 
-               if (extent_entry_is_stripe_ptr(entry))
-                       ec = entry;
-               else if (extent_entry_is_ptr(entry))
-                       ec = NULL;
-       }
-
        BUG();
+out:
+       rcu_read_unlock();
 }
 
 /*
- * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
+ * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
  *
  * Returns true if @k should be dropped entirely
  *
        rcu_read_lock();
        bch2_bkey_drop_ptrs(k, ptr,
                ptr->cached &&
-               (ca = bch2_dev_rcu(c, ptr->dev)) &&
-               dev_ptr_stale_rcu(ca, ptr) > 0);
+               (!(ca = bch2_dev_rcu(c, ptr->dev)) ||
+                dev_ptr_stale_rcu(ca, ptr) > 0));
+       rcu_read_unlock();
+
+       return bkey_deleted(k.k);
+}
+
+/*
+ * bch2_extent_normalize_by_opts - clean up an extent, dropping stale pointers etc.
+ *
+ * Like bch2_extent_normalize(), but also only keeps a single cached pointer on
+ * the promote target.
+ */
+bool bch2_extent_normalize_by_opts(struct bch_fs *c,
+                                  struct bch_io_opts *opts,
+                                  struct bkey_s k)
+{
+       struct bkey_ptrs ptrs;
+       bool have_cached_ptr;
+
+       rcu_read_lock();
+restart_drop_ptrs:
+       ptrs = bch2_bkey_ptrs(k);
+       have_cached_ptr = false;
+
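+       /*
+        * Keep at most one cached pointer, and only if want_cached_ptr()
+        * approves of it; dropping a pointer invalidates the iteration, so
+        * rescan the key from the start after each drop.
+        */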
+       bkey_for_each_ptr(ptrs, ptr)
+               if (ptr->cached) {
+                       if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) {
+                               bch2_bkey_drop_ptr(k, ptr);
+                               goto restart_drop_ptrs;
+                       }
+                       have_cached_ptr = true;
+               }
        rcu_read_unlock();
 
        return bkey_deleted(k.k);
 
 struct bch_extent_ptr *
 bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
 
-void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
+void bch2_extent_ptr_set_cached(struct bch_fs *, struct bch_io_opts *,
+                               struct bkey_s, struct bch_extent_ptr *);
 
+bool bch2_extent_normalize_by_opts(struct bch_fs *, struct bch_io_opts *, struct bkey_s);
 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+
 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
 
        if (!data_opts.rewrite_ptrs &&
            !data_opts.extra_replicas) {
                if (data_opts.kill_ptrs)
-                       return bch2_extent_drop_ptrs(trans, iter, k, data_opts);
+                       return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts);
                return 0;
        }