#endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-       unsigned long pa;
-
-       /*
-        * If we're dom0, then linear map the ISA machine addresses into
-        * the kernel's address space.
-        */
-       if (!xen_initial_domain())
-               return;
-
-       xen_raw_printk("Xen: setup ISA identity maps\n");
-
-       for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-               pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-               if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-                       BUG();
-       }
-
-       xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
        pv_mmu_ops.set_pte = xen_set_pte;
 
 
        return len;
 }
+
+static unsigned long __init xen_release_chunk(unsigned long start,
+                                             unsigned long end)
+{
+       return xen_do_chunk(start, end, true);  /* final arg selects the release path of xen_do_chunk */
+}
+
 static unsigned long __init xen_populate_chunk(
        const struct e820entry *list, size_t map_size,
        unsigned long max_pfn, unsigned long *last_pfn,
        }
        return done;
 }
+
+static void __init xen_set_identity_and_release_chunk(
+       unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+       unsigned long *released, unsigned long *identity)
+{
+       unsigned long pfn;
+
+       /*
+        * If the PFNs are currently mapped, the VA mapping also needs
+        * to be updated to be 1:1.
+        */
+       for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+               (void)HYPERVISOR_update_va_mapping( /* best-effort: result deliberately ignored */
+                       (unsigned long)__va(pfn << PAGE_SHIFT),
+                       mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+       if (start_pfn < nr_pages) /* release only the part of the chunk below nr_pages */
+               *released += xen_release_chunk(
+                       start_pfn, min(end_pfn, nr_pages));
+
+       *identity += set_phys_range_identity(start_pfn, end_pfn); /* account identity-mapped pfns */
+}
+
 static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);
 
-                       if (start_pfn < end_pfn) {
-                               if (start_pfn < nr_pages)
-                                       released += xen_do_chunk(
-                                               start_pfn, min(end_pfn, nr_pages), true);
+                       if (start_pfn < end_pfn)
+                               xen_set_identity_and_release_chunk(
+                                       start_pfn, end_pfn, nr_pages,
+                                       &released, &identity);
 
-                               identity += set_phys_range_identity(
-                                       start_pfn, end_pfn);
-                       }
                        start = end;
                }
        }