{ 0x00, 0UL},        /* scrubbing off */
 };
 
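+/*
+ * PCI config space accessors which warn on failure and name the calling
+ * function through @func.
+ */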
+int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+                              u32 *val, const char *func)
+{
+       int err = 0;
+
+       err = pci_read_config_dword(pdev, offset, val);
+       if (err)
+               amd64_warn("%s: error reading F%dx%03x.\n",
+                          func, PCI_FUNC(pdev->devfn), offset);
+
+       return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+                               u32 val, const char *func)
+{
+       int err = 0;
+
+       err = pci_write_config_dword(pdev, offset, val);
+       if (err)
+               amd64_warn("%s: error writing to F%dx%03x.\n",
+                          func, PCI_FUNC(pdev->devfn), offset);
+
+       return err;
+}
+
+/*
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ *     DCT0 -> F2x040..
+ *     DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+                              const char *func)
+{
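+       /* K8 has a single DCT only; the F2x1xx DCT1 regs do not exist. */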
+       if (addr >= 0x100)
+               return -EINVAL;
+
+       return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+                                const char *func)
+{
+       return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+                                const char *func)
+{
+       u32 reg = 0;
+       u8 dct  = 0;
+
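+       /* F2x140..F2x1a0 are DCT1 regs; convert to the DCT0 offset. */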
+       if (addr >= 0x140 && addr <= 0x1a0) {
+               dct   = 1;
+               addr -= 0x100;
+       }
+
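+       /* Select the DCT we want through F1x10C[DctCfgSel]. */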
+       amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+       reg &= 0xfffffffe;
+       reg |= dct;
+       amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+       return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
 /*
  * Memory scrubber control interface. For K8, memory scrubbing is handled by
  * hardware and can involve L2 cache, dcache as well as the main memory. With
 }
 
 /* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
 {
        debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
                amd64_dump_dramcfg_low(pvt->dclr1, 1);
 }
 
-/* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-       amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
-       if (boot_cpu_data.x86 >= 0x10)
-               amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
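+       /*
+        * On K8, ->read_dct_pci_cfg() rejects the DBAM1 offset and dbam1 is
+        * left untouched.
+        */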
+       amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
+       amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
 }
 
 /*
 /*
  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
  */
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
        int cs, reg;
 
 
        for (cs = 0; cs < pvt->cs_count; cs++) {
                reg = K8_DCSB0 + (cs * 4);
-               if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+
+               if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
                        debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
                                cs, pvt->dcsb0[cs], reg);
 
-               /* If DCT are NOT ganged, then read in DCT1's base */
-               if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
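+               /* On K8, ->read_dct_pci_cfg() rejects DCT1 offsets. */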
+               if (!dct_ganging_enabled(pvt)) {
                        reg = F10_DCSB1 + (cs * 4);
-                       if (!amd64_read_pci_cfg(pvt->F2, reg,
-                                               &pvt->dcsb1[cs]))
+
+                       if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
                                debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
                                        cs, pvt->dcsb1[cs], reg);
-               } else {
-                       pvt->dcsb1[cs] = 0;
                }
        }
 
        for (cs = 0; cs < pvt->num_dcsm; cs++) {
                reg = K8_DCSM0 + (cs * 4);
-               if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+
+               if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
                        debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
                                cs, pvt->dcsm0[cs], reg);
 
-               /* If DCT are NOT ganged, then read in DCT1's mask */
-               if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+               if (!dct_ganging_enabled(pvt)) {
                        reg = F10_DCSM1 + (cs * 4);
-                       if (!amd64_read_pci_cfg(pvt->F2, reg,
-                                               &pvt->dcsm1[cs]))
+
+                       if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
                                debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
                                        cs, pvt->dcsm1[cs], reg);
-               } else {
-                       pvt->dcsm1[cs] = 0;
                }
        }
 }
 {
        int flag, err = 0;
 
-       err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
+       err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
        if (err)
                return err;
 
         * both controllers since DIMMs can be placed in either one.
         */
        for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-               if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
+               if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
                        goto err_reg;
 
                for (j = 0; j < 4; j++) {
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-       if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
-                               &pvt->dram_ctl_select_low)) {
-               debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
-                       "High range addresses at: 0x%x\n",
-                       pvt->dram_ctl_select_low,
-                       dct_sel_baseaddr(pvt));
+       if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
+               debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
+                       pvt->dct_sel_low, dct_sel_baseaddr(pvt));
 
                debugf0("  DCT mode: %s, All DCTs on: %s\n",
                        (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
                        dct_sel_interleave_addr(pvt));
        }
 
-       amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
-                          &pvt->dram_ctl_select_high);
+       amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
 }
 
 /*
 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
                                int hi_range_sel, u32 intlv_en)
 {
-       u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+       u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
 
        if (dct_ganging_enabled(pvt))
                cs = 0;
         */
        hole_off = (pvt->dhar & 0x0000FF80);
        hole_valid = (pvt->dhar & 0x1);
-       dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+       dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
        debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
                        hole_off, hole_valid, intlv_sel);
                        .read_dram_base_limit   = k8_read_dram_base_limit,
                        .map_sysaddr_to_csrow   = k8_map_sysaddr_to_csrow,
                        .dbam_to_cs             = k8_dbam_to_chip_select,
+                       .read_dct_pci_cfg       = k8_read_dct_pci_cfg,
                }
        },
        [F10_CPUS] = {
                        .read_dram_ctl_register = f10_read_dram_ctl_register,
                        .map_sysaddr_to_csrow   = f10_map_sysaddr_to_csrow,
                        .dbam_to_cs             = f10_dbam_to_chip_select,
+                       .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
+               }
+       },
+       [F15_CPUS] = {
+               .ctl_name = "F15h",
+               .ops = {
+                       .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                }
        },
 };
                }
        }
 
-       amd64_read_dct_base_mask(pvt);
+       read_dct_base_mask(pvt);
 
        amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
        amd64_read_dbam_reg(pvt);
 
        amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-       amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
-       amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+       amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
+       amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);
 
-       if (boot_cpu_data.x86 >= 0x10) {
-               if (!dct_ganging_enabled(pvt)) {
-                       amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
-                       amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
-               }
-               amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+       if (!dct_ganging_enabled(pvt)) {
+               amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
+               amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
        }
 
+       if (boot_cpu_data.x86 >= 0x10)
+               amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model > 7 &&
            /* F3x180[EccSymbolSize]=1 => x8 symbols */
        else
                pvt->syn_type = 4;
 
-       amd64_dump_misc_regs(pvt);
+       dump_misc_regs(pvt);
 }
 
 /*
        s->nbctl_valid = true;
 
        value |= mask;
-       pci_write_config_dword(F3, K8_NBCTL, value);
+       amd64_write_pci_cfg(F3, K8_NBCTL, value);
 
        amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
 
                /* Attempt to turn on DRAM ECC Enable */
                value |= K8_NBCFG_ECC_ENABLE;
-               pci_write_config_dword(F3, K8_NBCFG, value);
+               amd64_write_pci_cfg(F3, K8_NBCFG, value);
 
                amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
        value &= ~mask;
        value |= s->old_nbctl;
 
-       pci_write_config_dword(F3, K8_NBCTL, value);
+       amd64_write_pci_cfg(F3, K8_NBCTL, value);
 
        /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
        if (!s->flags.nb_ecc_prev) {
                amd64_read_pci_cfg(F3, K8_NBCFG, &value);
                value &= ~K8_NBCFG_ECC_ENABLE;
-               pci_write_config_dword(F3, K8_NBCFG, value);
+               amd64_write_pci_cfg(F3, K8_NBCFG, value);
        }
 
        /* restore the NB Enable MCGCTL bit */
 
                                        /* NOTE: Extra mask bit vs K8 */
 #define f10_dhar_offset(dhar)          ((dhar & F10_DHAR_OFFSET_MASK) << 16)
 
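+/* F15h: select the DCT to access through F1x10C[DctCfgSel] */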
+#define DCT_CFG_SEL                    0x10C
 
 /* F10 High BASE/LIMIT registers */
 #define F10_DRAM_BASE_HIGH             0x140
 
 
 #define F10_DCTL_SEL_LOW               0x110
-#define dct_sel_baseaddr(pvt)          ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt)   (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt)    (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt)    (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt)       (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt)    (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt)          (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt)                (pvt->dram_ctl_select_low & BIT(10))
+#define dct_sel_baseaddr(pvt)          ((pvt->dct_sel_low) & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt)   (((pvt->dct_sel_low) >> 6) & 0x3)
+#define dct_high_range_enabled(pvt)    (pvt->dct_sel_low & BIT(0))
+#define dct_interleave_enabled(pvt)    (pvt->dct_sel_low & BIT(2))
+#define dct_ganging_enabled(pvt)       (pvt->dct_sel_low & BIT(4))
+#define dct_data_intlv_enabled(pvt)    (pvt->dct_sel_low & BIT(5))
+#define dct_dram_enabled(pvt)          (pvt->dct_sel_low & BIT(8))
+#define dct_memory_cleared(pvt)                (pvt->dct_sel_low & BIT(10))
 
 #define F10_DCTL_SEL_HIGH              0x114
 
        return PCI_SLOT(pdev->devfn) - 0x18;
 }
 
-enum amd64_chipset_families {
+enum amd_families {
        K8_CPUS = 0,
        F10_CPUS,
+       F15_CPUS,
+       NUM_FAMILIES,
 };
 
 /* Error injection control structure */
        u64 top_mem;            /* top of memory below 4GB */
        u64 top_mem2;           /* top of memory above 4GB */
 
-       u32 dram_ctl_select_low;        /* DRAM Controller Select Low Reg */
-       u32 dram_ctl_select_high;       /* DRAM Controller Select High Reg */
-       u32 online_spare;               /* On-Line spare Reg */
+       u32 dct_sel_low;        /* DRAM Controller Select Low Reg */
+       u32 dct_sel_hi;         /* DRAM Controller Select High Reg */
+       u32 online_spare;       /* On-Line spare Reg */
 
        /* x4 or x8 syndromes in use */
        u8 syn_type;
        void (*map_sysaddr_to_csrow)    (struct mem_ctl_info *mci,
                                         struct err_regs *info, u64 SystemAddr);
        int (*dbam_to_cs)               (struct amd64_pvt *pvt, int cs_mode);
+       int (*read_dct_pci_cfg)         (struct amd64_pvt *pvt, int offset,
+                                        u32 *val, const char *func);
 };
 
 struct amd64_family_type {
        struct low_ops ops;
 };
 
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
-                                          u32 *val, const char *func)
-{
-       int err = 0;
+int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+                              u32 *val, const char *func);
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+                               u32 val, const char *func);
 
-       err = pci_read_config_dword(pdev, offset, val);
-       if (err)
-               amd64_warn("%s: error reading F%dx%x.\n",
-                          func, PCI_FUNC(pdev->devfn), offset);
+#define amd64_read_pci_cfg(pdev, offset, val)  \
+       __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
 
-       return err;
-}
+#define amd64_write_pci_cfg(pdev, offset, val) \
+       __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
 
-#define amd64_read_pci_cfg(pdev, offset, val)  \
-       amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
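+/* Route DCT reads through the per-family ->read_dct_pci_cfg() method. */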
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+       pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
 
 /*
  * For future CPU versions, verify the following as new 'slow' rates appear and