 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
+
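+/*
+ * CPU hotplug 'dead' callback: when a CPU goes offline, free the IOVAs
+ * cached on that CPU back to the domain so they are not stranded in a
+ * per-CPU rcache that nobody will drain again.
+ */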
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct iova_domain *iovad;
+
+       iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+       free_cpu_cached_iovas(cpu, iovad);
+       return 0;
+}
+
 static void free_global_cached_iovas(struct iova_domain *iovad);
 
 static struct iova *to_iova(struct rb_node *node)
        iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
        rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
        rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
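+       /* Register this domain so its rcaches are flushed when a CPU dies */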
+       cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
        init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 {
        mutex_lock(&iova_cache_mutex);
        if (!iova_cache_users) {
+               int ret;
+
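+               /*
+                * Set up the multi-instance hotplug state once, for the first
+                * cache user; each domain attaches in init_iova_domain().
+                */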
+               ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
+                                       iova_cpuhp_dead);
+               if (ret) {
+                       mutex_unlock(&iova_cache_mutex);
+                       pr_err("Couldn't register cpuhp handler\n");
+                       return ret;
+               }
+
                iova_cache = kmem_cache_create(
                        "iommu_iova", sizeof(struct iova), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!iova_cache) {
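+                       /* Unwind the cpuhp registration on failure */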
+                       cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
                        mutex_unlock(&iova_cache_mutex);
                        pr_err("Couldn't create iova cache\n");
                        return -ENOMEM;
                return;
        }
        iova_cache_users--;
-       if (!iova_cache_users)
+       if (!iova_cache_users) {
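+               /* Last user gone: tear down the hotplug state with the cache */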
+               cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
                kmem_cache_destroy(iova_cache);
+       }
        mutex_unlock(&iova_cache_mutex);
 }
 EXPORT_SYMBOL_GPL(iova_cache_put);
 {
        struct iova *iova, *tmp;
 
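+       /* Detach from the hotplug notifier before tearing down the rcaches */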
+       cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+                                           &iovad->cpuhp_dead);
+
        free_iova_flush_queue(iovad);
        free_iova_rcaches(iovad);
        rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)