comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
        depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
 
-config K8_NUMA
+config AMD_NUMA
        def_bool y
        prompt "Old style AMD Opteron NUMA detection"
        depends on X86_64 && NUMA && PCI
        ---help---
-         Enable K8 NUMA node topology detection.  You should say Y here if
-         you have a multi processor AMD K8 system. This uses an old
-         method to read the NUMA configuration directly from the builtin
-         Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-         instead, which also takes priority if both are compiled in.
+         Enable AMD NUMA node topology detection.  You should say Y here if
+         you have a multi-processor AMD system. This uses an old method to
+         read the NUMA configuration directly from the builtin Northbridge
+         of Opteron. It is recommended to use X86_64_ACPI_NUMA instead,
+         which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
        def_bool y
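
For reference, a minimal sketch of the old-style detection the AMD_NUMA help text describes: reading the node count straight from the builtin northbridge. This assumes the K8 NodeId register layout (device 18h, function 0, config offset 0x60, node count in bits 6:4 per the BKDG); the function name and field decoding are illustrative, not lifted from amdtopology_64.c.

#include <asm/pci-direct.h>

/* Hedged sketch: old-style node-count probe via the builtin
 * northbridge. Assumes device 18h function 0, NodeId register at
 * config offset 0x60, node count in bits 6:4 (illustrative). */
static int __init sketch_amd_node_count(void)
{
	u32 reg = read_pci_config(0, 0x18, 0, 0x60);

	return ((reg >> 4) & 7) + 1;	/* NodeCnt field is count - 1 */
}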
 
 
 #include <linux/pci.h>
 
-extern struct pci_device_id k8_nb_ids[];
+extern struct pci_device_id amd_nb_ids[];
 struct bootnode;
 
-extern int early_is_k8_nb(u32 value);
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-extern int k8_get_nodes(struct bootnode *nodes);
-extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int k8_scan_nodes(void);
+extern int early_is_amd_nb(u32 value);
+extern int cache_amd_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_get_nodes(struct bootnode *nodes);
+extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
+extern int amd_scan_nodes(void);
 
-struct k8_northbridge_info {
+struct amd_northbridge_info {
        u16 num;
        u8 gart_supported;
        struct pci_dev **nb_misc;
 };
-extern struct k8_northbridge_info k8_northbridges;
+extern struct amd_northbridge_info amd_northbridges;
 
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
-       return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
+       return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
 }
 
 #else
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
        return NULL;
 }
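
A hedged usage sketch of the renamed helper: callers (the L3 cache code further down is one) look up a node's misc PCI function and must tolerate a NULL return, which covers both the stub above and an out-of-range node. The function name and the reg_offset parameter are placeholders, not a specific hardware register.

/* Hedged sketch: per-node northbridge config read via the renamed
 * helper. reg_offset is a placeholder, not a real register. */
static u32 sketch_read_nb_reg(int node, int reg_offset)
{
	struct pci_dev *misc = node_to_amd_nb_misc(node);
	u32 val = 0;

	if (misc)	/* NULL if CONFIG_AMD_NB is off or node is out of range */
		pci_read_config_dword(misc, reg_offset, &val);
	return val;
}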
 
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
        {}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
 {
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
-       } while (!pci_match_id(&k8_nb_ids[0], dev));
+       } while (!pci_match_id(&amd_nb_ids[0], dev));
        return dev;
 }
 
-int cache_k8_northbridges(void)
+int cache_amd_northbridges(void)
 {
        int i;
        struct pci_dev *dev;
 
-       if (k8_northbridges.num)
+       if (amd_northbridges.num)
                return 0;
 
        dev = NULL;
-       while ((dev = next_k8_northbridge(dev)) != NULL)
-               k8_northbridges.num++;
+       while ((dev = next_amd_northbridge(dev)) != NULL)
+               amd_northbridges.num++;
 
        /* some CPU families (e.g. family 0x11) do not support GART */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            boot_cpu_data.x86 == 0x15)
-               k8_northbridges.gart_supported = 1;
+               amd_northbridges.gart_supported = 1;
 
-       k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+       amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
                                          sizeof(void *), GFP_KERNEL);
-       if (!k8_northbridges.nb_misc)
+       if (!amd_northbridges.nb_misc)
                return -ENOMEM;
 
-       if (!k8_northbridges.num) {
-               k8_northbridges.nb_misc[0] = NULL;
+       if (!amd_northbridges.num) {
+               amd_northbridges.nb_misc[0] = NULL;
                return 0;
        }
 
-       if (k8_northbridges.gart_supported) {
-               flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+       if (amd_northbridges.gart_supported) {
+               flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
                                      GFP_KERNEL);
                if (!flush_words) {
-                       kfree(k8_northbridges.nb_misc);
+                       kfree(amd_northbridges.nb_misc);
                        return -ENOMEM;
                }
        }
 
        dev = NULL;
        i = 0;
-       while ((dev = next_k8_northbridge(dev)) != NULL) {
-               k8_northbridges.nb_misc[i] = dev;
-               if (k8_northbridges.gart_supported)
+       while ((dev = next_amd_northbridge(dev)) != NULL) {
+               amd_northbridges.nb_misc[i] = dev;
+               if (amd_northbridges.gart_supported)
                        pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
        }
-       k8_northbridges.nb_misc[i] = NULL;
+       amd_northbridges.nb_misc[i] = NULL;
        return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(cache_amd_northbridges);
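
The typical caller pattern under the new names, mirroring the EDAC and AGP users later in this patch: populate the cache once (the function is idempotent, returning early when amd_northbridges.num is already set), then index nb_misc. A hedged sketch; the function name is hypothetical.

/* Hedged sketch: canonical caller of the renamed enumeration API. */
static int sketch_walk_northbridges(void)
{
	int i;

	if (cache_amd_northbridges() < 0)
		return -ENODEV;

	for (i = 0; i < amd_northbridges.num; i++) {
		struct pci_dev *misc = amd_northbridges.nb_misc[i];

		dev_info(&misc->dev, "AMD northbridge %d\n", i);
	}
	return 0;
}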
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
        struct pci_device_id *id;
        u32 vendor = device & 0xffff;
        device >>= 16;
-       for (id = k8_nb_ids; id->vendor; id++)
+       for (id = amd_nb_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return 1;
        return 0;
 }
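
A worked example of the packed argument: the dword at PCI config offset 0x00 carries the vendor ID in bits 15:0 and the device ID in bits 31:16, which is exactly how early_is_amd_nb() unpacks it. With PCI_VENDOR_ID_AMD being 0x1022 and the K8 misc function's device ID 0x1103, a raw value of 0x11031022 matches the first table entry. Hedged sketch (function name and bus/slot values are arbitrary):

/* Hedged sketch: feeding early_is_amd_nb() the raw dword from
 * config offset 0x00. 0x11031022 = device 0x1103 (K8 NB misc,
 * bits 31:16) | vendor 0x1022 (AMD, bits 15:0). */
static void __init sketch_early_check(int bus, int slot)
{
	u32 raw = read_pci_config(bus, slot, 3, 0x00);

	if (early_is_amd_nb(raw))
		printk(KERN_DEBUG "AMD NB at %02x:%02x\n", bus, slot);
}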
 
-void k8_flush_garts(void)
+void amd_flush_garts(void)
 {
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return;
 
        /* Avoid races between AGP and IOMMU. In theory it's not needed
           but I'm not sure if the hardware won't lose flush requests
           when another is pending. This whole thing is so expensive
           anyways that it doesn't matter to serialize more. -AK */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
-       for (i = 0; i < k8_northbridges.num; i++) {
-               pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
+       for (i = 0; i < amd_northbridges.num; i++) {
+               pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
                                       flush_words[i]|1);
                flushed++;
        }
-       for (i = 0; i < k8_northbridges.num; i++) {
+       for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
-                       pci_read_config_dword(k8_northbridges.nb_misc[i],
+                       pci_read_config_dword(amd_northbridges.nb_misc[i],
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);

        if (!flushed)
                printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
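
The flush handshake isolated for a single device, as a hedged sketch: software sets bit 0 of the GART cache control word at offset 0x9c (restoring the value cached in flush_words), and the hardware clears the bit once the flush has executed, which is what the polling loop above waits for. The function name is hypothetical.

/* Hedged sketch: single-device version of the handshake in
 * amd_flush_garts(). Bit 0 of the word at 0x9c requests a flush
 * and is cleared by hardware on completion. */
static void sketch_flush_one_gart(struct pci_dev *misc, u32 flush_word)
{
	u32 w;

	pci_write_config_dword(misc, 0x9c, flush_word | 1);
	do {
		cpu_relax();
		pci_read_config_dword(misc, 0x9c, &w);
	} while (w & 1);
}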
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
        int err = 0;
 
-       err = cache_k8_northbridges();
+       err = cache_amd_northbridges();
 
        if (err < 0)
-               printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+               printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
        return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);
 
 * Do a PCI bus scan by hand because we're running before the PCI
  * subsystem.
  *
- * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+ * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
  * generically. It's probably overkill to always scan all slots because
 * the AGP bridges should always be their own bus on the HT hierarchy,
  * but do it here for future safety.
                dev_limit = bus_dev_ranges[i].dev_limit;
 
                for (slot = dev_base; slot < dev_limit; slot++) {
-                       if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+                       if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
                                continue;
 
                        ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
                dev_limit = bus_dev_ranges[i].dev_limit;
 
                for (slot = dev_base; slot < dev_limit; slot++) {
-                       if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+                       if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
                                continue;
 
                        ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
                dev_limit = bus_dev_ranges[i].dev_limit;
 
                for (slot = dev_base; slot < dev_limit; slot++) {
-                       if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+                       if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
                                continue;
 
                        iommu_detected = 1;
                dev_base = bus_dev_ranges[i].dev_base;
                dev_limit = bus_dev_ranges[i].dev_limit;
                for (slot = dev_base; slot < dev_limit; slot++) {
-                       if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+                       if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
                                continue;
 
                        write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
 
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
        struct amd_l3_cache *l3;
-       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       struct pci_dev *dev = node_to_amd_nb_misc(node);
 
        l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
        if (!l3) {
                        return;
 
        /* not in virtualized environments */
-       if (k8_northbridges.num == 0)
+       if (amd_northbridges.num == 0)
                return;
 
        /*
         * never freed but this is done only on shutdown so it doesn't matter.
         */
        if (!l3_caches) {
-               int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+               int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
 
                l3_caches = kzalloc(size, GFP_ATOMIC);
                if (!l3_caches)
 
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
-               k8_flush_garts();
+               amd_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 {
        int i;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return;
 
-       for (i = 0; i < k8_northbridges.num; i++) {
-               struct pci_dev *dev = k8_northbridges.nb_misc[i];
+       for (i = 0; i < amd_northbridges.num; i++) {
+               struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
 
        /* Flush the GART-TLB to remove stale entries */
-       k8_flush_garts();
+       amd_flush_garts();
 }
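
For context, enable_gart_translation() is a helper from asm/gart.h that this patch leaves untouched: it programs the GATT base and turns on translation. A hedged paraphrase of what it does; the exact field encodings follow my reading of the helper and may differ in detail, and the function name below is hypothetical.

/* Hedged paraphrase of enable_gart_translation(): write the 4K-aligned
 * GATT physical address into AMD64_GARTTABLEBASE, then set GARTEN in
 * the aperture control register. */
static void sketch_enable_gart(struct pci_dev *dev, u64 gatt_base)
{
	u32 ctl, tmp;

	tmp = ((u32)(gatt_base >> 12) << 4) & ~0xfU;
	pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);

	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
	ctl |= GARTEN;
	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}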
 
 /*
        if (!fix_up_north_bridges)
                return;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return;
 
        pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-       for (i = 0; i < k8_northbridges.num; i++) {
-               struct pci_dev *dev = k8_northbridges.nb_misc[i];
+       for (i = 0; i < amd_northbridges.num; i++) {
+               struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
                /*
                 * Don't enable translations just yet.  That is the next
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
  */
-static __init int init_k8_gatt(struct agp_kern_info *info)
+static __init int init_amd_gatt(struct agp_kern_info *info)
 {
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
 
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
-       for (i = 0; i < k8_northbridges.num; i++) {
-               dev = k8_northbridges.nb_misc[i];
+       for (i = 0; i < amd_northbridges.num; i++) {
+               dev = amd_northbridges.nb_misc[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;
        if (!no_agp)
                return;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return;
 
-       for (i = 0; i < k8_northbridges.num; i++) {
+       for (i = 0; i < amd_northbridges.num; i++) {
                u32 ctl;
 
-               dev = k8_northbridges.nb_misc[i];
+               dev = amd_northbridges.nb_misc[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
                ctl &= ~GARTEN;
        unsigned long scratch;
        long i;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return 0;
 
 #ifndef CONFIG_AGP_AMD64
        no_agp = 1;
 #else
        /* Makefile puts PCI initialization via subsys_initcall first. */
-       /* Add other K8 AGP bridge drivers here */
+       /* Add other AMD AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
-           (no_agp && init_k8_gatt(&info) < 0)) {
+           (no_agp && init_amd_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
                        pr_warning("falling back to iommu=soft.\n");
 
 void __init setup_arch(char **cmdline_p)
 {
        int acpi = 0;
-       int k8 = 0;
+       int amd = 0;
        unsigned long flags;
 
 #ifdef CONFIG_X86_32
        acpi = acpi_numa_init();
 #endif
 
-#ifdef CONFIG_K8_NUMA
+#ifdef CONFIG_AMD_NUMA
        if (!acpi)
-               k8 = !k8_numa_init(0, max_pfn);
+               amd = !amd_numa_init(0, max_pfn);
 #endif
 
-       initmem_init(0, max_pfn, acpi, k8);
+       initmem_init(0, max_pfn, acpi, amd);
        memblock_find_dma_reserve();
        dma32_reserve_bootmem();
 
 
 obj-$(CONFIG_MMIOTRACE_TEST)   += testmmiotrace.o
 
 obj-$(CONFIG_NUMA)             += numa.o numa_$(BITS).o
-obj-$(CONFIG_K8_NUMA)          += k8topology_64.o
+obj-$(CONFIG_AMD_NUMA)         += amdtopology_64.o
 obj-$(CONFIG_ACPI_NUMA)                += srat_$(BITS).o
 
 obj-$(CONFIG_HAVE_MEMBLOCK)            += memblock.o
 
 /*
- * AMD K8 NUMA support.
+ * AMD NUMA support.
  * Discover the memory map and associated nodes.
  *
- * This version reads it directly from the K8 northbridge.
+ * This version reads it directly from the AMD northbridge.
  *
  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
  */
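
The discovery this file performs walks the northbridge's DRAM base/limit register pairs in PCI function 1. A hedged sketch of that walk, assuming the K8 address-map layout (base registers at 0x40 + i*8 with read/write enable in bits 1:0, limit registers at 0x44 + i*8 with the destination node in bits 2:0); offsets and fields per my reading of the BKDG, and the function name is illustrative only.

/* Hedged sketch: dump the DRAM address map that amd_numa_init()
 * parses. Assumes K8 layout: function 1, base regs at 0x40 + i*8
 * (bits 1:0 = read/write enable), limit regs at 0x44 + i*8
 * (bits 2:0 = destination node). */
static void __init sketch_dump_dram_map(int nb)
{
	int i;

	for (i = 0; i < 8; i++) {
		u32 base  = read_pci_config(0, nb, 1, 0x40 + i * 8);
		u32 limit = read_pci_config(0, nb, 1, 0x44 + i * 8);

		if (!(base & 3))	/* neither read nor write enabled */
			continue;
		printk(KERN_DEBUG "node %u: base %08x limit %08x\n",
		       limit & 7, base, limit);
	}
}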
 {
        /*
         * need to get the APIC ID of the BSP so can use that to
-        * create apicid_to_node in k8_scan_nodes()
+        * create apicid_to_node in amd_scan_nodes()
         */
 #ifdef CONFIG_X86_MPPARSE
        /*
        early_init_lapic_mapping();
 }
 
-int __init k8_get_nodes(struct bootnode *physnodes)
+int __init amd_get_nodes(struct bootnode *physnodes)
 {
        int i;
        int ret = 0;
        return ret;
 }
 
-int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long start = PFN_PHYS(start_pfn);
        unsigned long end = PFN_PHYS(end_pfn);
        return 0;
 }
 
-int __init k8_scan_nodes(void)
+int __init amd_scan_nodes(void)
 {
        unsigned int bits;
        unsigned int cores;
 
 static char *cmdline __initdata;
 
 static int __init setup_physnodes(unsigned long start, unsigned long end,
-                                       int acpi, int k8)
+                                       int acpi, int amd)
 {
        int nr_nodes = 0;
        int ret = 0;
        if (acpi)
                nr_nodes = acpi_get_nodes(physnodes);
 #endif
-#ifdef CONFIG_K8_NUMA
-       if (k8)
-               nr_nodes = k8_get_nodes(physnodes);
+#ifdef CONFIG_AMD_NUMA
+       if (amd)
+               nr_nodes = amd_get_nodes(physnodes);
 #endif
        /*
         * Basic sanity checking on the physical node map: there may be errors
-        * if the SRAT or K8 incorrectly reported the topology or the mem=
+        * if the SRAT or AMD code incorrectly reported the topology or the mem=
         * kernel parameter is used.
         */
        for (i = 0; i < nr_nodes; i++) {
  * numa=fake command-line option.
  */
 static int __init numa_emulation(unsigned long start_pfn,
-                       unsigned long last_pfn, int acpi, int k8)
+                       unsigned long last_pfn, int acpi, int amd)
 {
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes;
        int i;
 
-       num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8);
+       num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
        /*
        * If the numa=fake command-line contains an 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
 #endif /* CONFIG_NUMA_EMU */
 
 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
-                               int acpi, int k8)
+                               int acpi, int amd)
 {
        int i;
 
        nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
-       if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8))
+       if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        nodes_clear(node_online_map);
 #endif
 
-#ifdef CONFIG_K8_NUMA
-       if (!numa_off && k8 && !k8_scan_nodes())
+#ifdef CONFIG_AMD_NUMA
+       if (!numa_off && amd && !amd_scan_nodes())
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
 
 
 static void amd64_tlbflush(struct agp_memory *temp)
 {
-       k8_flush_garts();
+       amd_flush_garts();
 }
 
 static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        u32 temp;
        struct aper_size_info_32 *values;
 
-       dev = k8_northbridges.nb_misc[0];
+       dev = amd_northbridges.nb_misc[0];
        if (dev == NULL)
                return 0;
 
        unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
        int i;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return 0;
 
        /* Configure AGP regs in each x86-64 host bridge. */
-       for (i = 0; i < k8_northbridges.num; i++) {
+       for (i = 0; i < amd_northbridges.num; i++) {
                agp_bridge->gart_bus_addr =
-                               amd64_configure(k8_northbridges.nb_misc[i],
+                               amd64_configure(amd_northbridges.nb_misc[i],
                                                gatt_bus);
        }
-       k8_flush_garts();
+       amd_flush_garts();
        return 0;
 }
 
        u32 tmp;
        int i;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return;
 
-       for (i = 0; i < k8_northbridges.num; i++) {
-               struct pci_dev *dev = k8_northbridges.nb_misc[i];
+       for (i = 0; i < amd_northbridges.num; i++) {
+               struct pci_dev *dev = amd_northbridges.nb_misc[i];
                /* disable gart translation */
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
                tmp &= ~GARTEN;
 {
        int i;
 
-       if (cache_k8_northbridges() < 0)
+       if (cache_amd_northbridges() < 0)
                return -ENODEV;
 
-       if (!k8_northbridges.gart_supported)
+       if (!amd_northbridges.gart_supported)
                return -ENODEV;
 
        i = 0;
-       for (i = 0; i < k8_northbridges.num; i++) {
-               struct pci_dev *dev = k8_northbridges.nb_misc[i];
+       for (i = 0; i < amd_northbridges.num; i++) {
+               struct pci_dev *dev = amd_northbridges.nb_misc[i];
                if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
                        dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
        }
 
        /* shadow x86-64 registers into ULi registers */
-       pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+       pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
                               &httfea);
 
        /* if x86-64 aperture base is beyond 4G, exit here */
        pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
        /* shadow x86-64 registers into NVIDIA registers */
-       pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+       pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
                               &apbase);
 
        /* if x86-64 aperture base is beyond 4G, exit here */
                }
 
                /* First check that we have at least one AMD64 NB */
-               if (!pci_dev_present(k8_nb_ids))
+               if (!pci_dev_present(amd_nb_ids))
                        return -ENODEV;
 
                /* Look for any AGP bridge */
 
 
        opstate_init();
 
-       if (cache_k8_northbridges() < 0)
+       if (cache_amd_northbridges() < 0)
                goto err_ret;
 
        msrs = msrs_alloc();
         * to finish initialization of the MC instances.
         */
        err = -ENODEV;
-       for (nb = 0; nb < k8_northbridges.num; nb++) {
+       for (nb = 0; nb < amd_northbridges.num; nb++) {
                if (!pvt_lookup[nb])
                        continue;