static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
 static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
 
+/*
+ * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
+ * for DDR2 DRAM mapping.
+ */
+u32 revf_quad_ddr2_shift[] = {
+       0,      /* 0000b NULL DIMM (128mb) */
+       28,     /* 0001b 256mb */
+       29,     /* 0010b 512mb */
+       29,     /* 0011b 512mb */
+       29,     /* 0100b 512mb */
+       30,     /* 0101b 1gb */
+       30,     /* 0110b 1gb */
+       31,     /* 0111b 2gb */
+       31,     /* 1000b 2gb */
+       32,     /* 1001b 4gb */
+       32,     /* 1010b 4gb */
+       33,     /* 1011b 8gb */
+       0,      /* 1100b future */
+       0,      /* 1101b future */
+       0,      /* 1110b future */
+       0       /* 1111b future */
+};
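Each table entry is a power-of-two shift, so turning the F2x80 chip-select mode nibble into a size is just a shift of 1. A minimal sketch of that, using a hypothetical helper name (the driver itself does this as part of its chip-select base/size handling):

	/* Hypothetical helper: turn an F2x80 CS mode nibble into a size in bytes. */
	static inline u64 ddr2_cs_size_bytes(unsigned int cs_mode)
	{
		u32 shift = revf_quad_ddr2_shift[cs_mode & 0xF];

		/* shift == 0 means a NULL DIMM or a not-yet-defined encoding */
		return shift ? (1ULL << shift) : 0;
	}

	/* e.g. cs_mode 0101b -> 1ULL << 30 == 1 GB */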
+
+/*
+ * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
+ * bandwidth to a valid bit pattern. The 'set' operation finds the
+ * 'matching or higher' value.
+ *
+ * FIXME: Produce a better mapping/linearisation.
+ */
+
+struct scrubrate scrubrates[] = {
+       { 0x01, 1600000000UL},
+       { 0x02, 800000000UL},
+       { 0x03, 400000000UL},
+       { 0x04, 200000000UL},
+       { 0x05, 100000000UL},
+       { 0x06, 50000000UL},
+       { 0x07, 25000000UL},
+       { 0x08, 12284069UL},
+       { 0x09, 6274509UL},
+       { 0x0A, 3121951UL},
+       { 0x0B, 1560975UL},
+       { 0x0C, 781440UL},
+       { 0x0D, 390720UL},
+       { 0x0E, 195300UL},
+       { 0x0F, 97650UL},
+       { 0x10, 48854UL},
+       { 0x11, 24427UL},
+       { 0x12, 12213UL},
+       { 0x13, 6101UL},
+       { 0x14, 3051UL},
+       { 0x15, 1523UL},
+       { 0x16, 761UL},
+       { 0x00, 0UL},        /* scrubbing off */
+};
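The table is ordered from the highest bandwidth down, which is what keeps the 'matching or higher' search described above cheap. A minimal sketch of that rule, assuming the two struct scrubrate fields are the register value and the bandwidth (named scrubval/bandwidth here purely for illustration):

	/*
	 * Illustration only: pick the first entry whose bandwidth does not
	 * exceed the requested rate; field names are assumed.
	 */
	static u32 pick_scrubval(u32 requested_bw)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
			if (scrubrates[i].bandwidth <= requested_bw)
				break;

		return scrubrates[i].scrubval;	/* last entry (0x00) turns scrubbing off */
	}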
+
 /*
  * Memory scrubber control interface. For K8, memory scrubbing is handled by
  * hardware and can involve L2 cache, dcache as well as the main memory. With
        u32 page, offset;
 
        /* Extract the syndrome parts and form a 16-bit syndrome */
-       syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
-       syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+       syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+       syndrome |= LOW_SYNDROME(info->nbsh);
 
        /* CHIPKILL enabled */
        if (info->nbcfg & K8_NBCFG_CHIPKILL) {
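The HIGH_SYNDROME()/LOW_SYNDROME() helpers that replace the EXTRACT_* macros are expected to live in the newly included edac_mce_amd.h. Judging by the EXTRACT_HIGH_SYNDROME/EXTRACT_LOW_SYNDROME definitions removed from amd64_edac.h further down, they are presumably straight renames:

	/* Presumed definitions, same bit fields as the removed EXTRACT_* macros: */
	#define HIGH_SYNDROME(x)	(((x) >> 24) & 0xff)	/* NBSL[31:24] */
	#define LOW_SYNDROME(x)		(((x) >> 15) & 0xff)	/* NBSH[22:15] */

	/* 16-bit syndrome = HIGH_SYNDROME(nbsl) << 8 | LOW_SYNDROME(nbsh) */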
        if (csrow >= 0) {
                error_address_to_page_and_offset(sys_addr, &page, &offset);
 
-               syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
-               syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+               syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+               syndrome |= LOW_SYNDROME(info->nbsh);
 
                /*
                 * Is CHIPKILL on? If so, then we can attempt to use the
 static inline void amd64_decode_gart_tlb_error(struct mem_ctl_info *mci,
                                         struct amd64_error_info_regs *info)
 {
-       u32 err_code;
-       u32 ec_tt;              /* error code transaction type (2b) */
-       u32 ec_ll;              /* error code cache level (2b) */
-
-       err_code = EXTRACT_ERROR_CODE(info->nbsl);
-       ec_ll = EXTRACT_LL_CODE(err_code);
-       ec_tt = EXTRACT_TT_CODE(err_code);
+       u32 ec = ERROR_CODE(info->nbsl);
 
        amd64_mc_printk(mci, KERN_ERR,
                     "GART TLB event: transaction type(%s), "
-                    "cache level(%s)\n", tt_msgs[ec_tt], ll_msgs[ec_ll]);
+                    "cache level(%s)\n", TT_MSG(ec), LL_MSG(ec));
 }
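TT_MSG()/LL_MSG() here, and the RRRR/II/TO/PP variants used in the decoders below, collapse the old two-step "extract the field, then index the message table" pattern into a single lookup. Given the EXTRACT_* shifts removed from amd64_edac.h and the message tables exported at the end of this patch, they presumably take a shape like this sketch:

	/* Presumed shape of the combined macros in edac_mce_amd.h: */
	#define TT(x)		(((x) >> 2) & 0x3)
	#define TT_MSG(x)	tt_msgs[TT(x)]
	#define LL(x)		((x) & 0x3)
	#define LL_MSG(x)	ll_msgs[LL(x)]
	/* RRRR_MSG/II_MSG/TO_MSG/PP_MSG follow the same pattern, using the
	 * shifts from the removed EXTRACT_RRRR/II/TO/PP_CODE macros. */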
 
 static inline void amd64_decode_mem_cache_error(struct mem_ctl_info *mci,
                                      struct amd64_error_info_regs *info)
 {
-       u32 err_code;
-       u32 ec_rrrr;            /* error code memory transaction (4b) */
-       u32 ec_tt;              /* error code transaction type (2b) */
-       u32 ec_ll;              /* error code cache level (2b) */
-
-       err_code = EXTRACT_ERROR_CODE(info->nbsl);
-       ec_ll = EXTRACT_LL_CODE(err_code);
-       ec_tt = EXTRACT_TT_CODE(err_code);
-       ec_rrrr = EXTRACT_RRRR_CODE(err_code);
+       u32 ec = ERROR_CODE(info->nbsl);
 
        amd64_mc_printk(mci, KERN_ERR,
                     "cache hierarchy error: memory transaction type(%s), "
                     "transaction type(%s), cache level(%s)\n",
-                    rrrr_msgs[ec_rrrr], tt_msgs[ec_tt], ll_msgs[ec_ll]);
+                    RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
 }
 
 
 static void amd64_decode_bus_error(struct mem_ctl_info *mci,
                                   struct amd64_error_info_regs *info)
 {
-       u32 err_code, ext_ec;
-       u32 ec_pp;              /* error code participating processor (2p) */
-       u32 ec_to;              /* error code timed out (1b) */
-       u32 ec_rrrr;            /* error code memory transaction (4b) */
-       u32 ec_ii;              /* error code memory or I/O (2b) */
-       u32 ec_ll;              /* error code cache level (2b) */
-
-       ext_ec = EXTRACT_EXT_ERROR_CODE(info->nbsl);
-       err_code = EXTRACT_ERROR_CODE(info->nbsl);
-
-       ec_ll = EXTRACT_LL_CODE(err_code);
-       ec_ii = EXTRACT_II_CODE(err_code);
-       ec_rrrr = EXTRACT_RRRR_CODE(err_code);
-       ec_to = EXTRACT_TO_CODE(err_code);
-       ec_pp = EXTRACT_PP_CODE(err_code);
+       u32 ec  = ERROR_CODE(info->nbsl);
+       u32 xec = EXT_ERROR_CODE(info->nbsl);
 
        amd64_mc_printk(mci, KERN_ERR,
                "BUS ERROR:\n"
                "  participating processor(%s)\n"
                "  memory transaction type(%s)\n"
                "  cache level(%s) Error Found by: %s\n",
-               to_msgs[ec_to],
-               ii_msgs[ec_ii],
-               pp_msgs[ec_pp],
-               rrrr_msgs[ec_rrrr],
-               ll_msgs[ec_ll],
+               TO_MSG(ec), II_MSG(ec), PP_MSG(ec), RRRR_MSG(ec), LL_MSG(ec),
                (info->nbsh & K8_NBSH_ERR_SCRUBER) ?
                        "Scrubber" : "Normal Operation");
 
-       /* If this was an 'observed' error, early out */
-       if (ec_pp == K8_NBSL_PP_OBS)
-               return;         /* We aren't the node involved */
+       /* Bail out early if this was an 'observed' error */
+       if (PP(ec) == K8_NBSL_PP_OBS)
+               return;
 
        /* Parse out the extended error code for ECC events */
-       switch (ext_ec) {
+       switch (xec) {
        /* F10 changed to one Extended ECC error code */
        case F10_NBSL_EXT_ERR_RES:              /* Reserved field */
        case F10_NBSL_EXT_ERR_ECC:              /* F10 ECC ext err code */
                (regs->nbsh & K8_NBSH_CORE3) ? "True" : "False");
 
 
-       err_code = EXTRACT_ERROR_CODE(regs->nbsl);
+       err_code = ERROR_CODE(regs->nbsl);
 
        /* Determine which error type:
         *      1) GART errors - non-fatal, developmental events
         *      2) MEMORY errors - non-fatal, fatal events
         *      3) BUS errors
         *      4) Unknown error
         */
-       if (TEST_TLB_ERROR(err_code)) {
+       if (TLB_ERROR(err_code)) {
                /*
                 * GART errors are intended to help graphics driver developers
                 * to detect bad GART PTEs. It is recommended by AMD to disable
 
                debugf1("GART TLB error\n");
                amd64_decode_gart_tlb_error(mci, info);
-       } else if (TEST_MEM_ERROR(err_code)) {
+       } else if (MEM_ERROR(err_code)) {
                debugf1("Memory/Cache error\n");
                amd64_decode_mem_cache_error(mci, info);
-       } else if (TEST_BUS_ERROR(err_code)) {
+       } else if (BUS_ERROR(err_code)) {
                debugf1("Bus (Link/DRAM) error\n");
                amd64_decode_bus_error(mci, info);
        } else {
                             err_code);
        }
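TLB_ERROR()/MEM_ERROR()/BUS_ERROR() replace the TEST_* macros removed from amd64_edac.h below; presumably they keep the same MCA error-code bit patterns:

	/* Presumed classification predicates (same masks as the removed TEST_* macros): */
	#define TLB_ERROR(x)	(((x) & 0xFFF0) == 0x0010)	/* 0000 0000 0001 TTLL */
	#define MEM_ERROR(x)	(((x) & 0xFF00) == 0x0100)	/* 0000 0001 RRRR TTLL */
	#define BUS_ERROR(x)	(((x) & 0xF800) == 0x0800)	/* 0000 1PPT RRRR IILL */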
 
-       ext_ec = EXTRACT_EXT_ERROR_CODE(regs->nbsl);
+       ext_ec = EXT_ERROR_CODE(regs->nbsl);
        amd64_mc_printk(mci, KERN_ERR,
                "ExtErr=(0x%x) %s\n", ext_ec, ext_msgs[ext_ec]);
 
-       if (((ext_ec >= F10_NBSL_EXT_ERR_CRC &&
-                       ext_ec <= F10_NBSL_EXT_ERR_TGT) ||
-                       (ext_ec == F10_NBSL_EXT_ERR_RMW)) &&
-                       EXTRACT_LDT_LINK(info->nbsh)) {
-
-               amd64_mc_printk(mci, KERN_ERR,
-                       "Error on hypertransport link: %s\n",
-                       htlink_msgs[
-                       EXTRACT_LDT_LINK(info->nbsh)]);
-       }
-
        /*
         * Check the UE bit of the NB status high register; if set, generate some
         * logs. If NOT a GART error, then process the event as a NO-INFO event.
 
 #include <linux/edac.h>
 #include <asm/msr.h>
 #include "edac_core.h"
+#include "edac_mce_amd.h"
 
 #define amd64_printk(level, fmt, arg...) \
        edac_printk(level, "amd64", fmt, ##arg)
 #define K8_NBSL                                0x48
 
 
-#define EXTRACT_HIGH_SYNDROME(x)       (((x) >> 24) & 0xff)
-#define EXTRACT_EXT_ERROR_CODE(x)      (((x) >> 16) & 0x1f)
-
 /* Family F10h: Normalized Extended Error Codes */
 #define F10_NBSL_EXT_ERR_RES           0x0
 #define F10_NBSL_EXT_ERR_CRC           0x1
 #define K8_NBSL_EXT_ERR_CHIPKILL_ECC   0x8
 #define K8_NBSL_EXT_ERR_DRAM_PARITY    0xD
 
-#define EXTRACT_ERROR_CODE(x)          ((x) & 0xffff)
-#define        TEST_TLB_ERROR(x)               (((x) & 0xFFF0) == 0x0010)
-#define        TEST_MEM_ERROR(x)               (((x) & 0xFF00) == 0x0100)
-#define        TEST_BUS_ERROR(x)               (((x) & 0xF800) == 0x0800)
-#define        EXTRACT_TT_CODE(x)              (((x) >> 2) & 0x3)
-#define        EXTRACT_II_CODE(x)              (((x) >> 2) & 0x3)
-#define        EXTRACT_LL_CODE(x)              (((x) >> 0) & 0x3)
-#define        EXTRACT_RRRR_CODE(x)            (((x) >> 4) & 0xf)
-#define        EXTRACT_TO_CODE(x)              (((x) >> 8) & 0x1)
-#define        EXTRACT_PP_CODE(x)              (((x) >> 9) & 0x3)
-
 /*
  * The following are for BUS type errors AFTER values have been normalized by
  * shifting right
 #define K8_NBSH_CORE1                  BIT(1)
 #define K8_NBSH_CORE0                  BIT(0)
 
-#define EXTRACT_LDT_LINK(x)            (((x) >> 4) & 0x7)
 #define EXTRACT_ERR_CPU_MAP(x)         ((x) & 0xF)
-#define EXTRACT_LOW_SYNDROME(x)                (((x) >> 15) & 0xff)
 
 
 #define K8_NBEAL                       0x50
 
-#include "amd64_edac.h"
-
-/*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
- */
-u32 revf_quad_ddr2_shift[] = {
-       0,      /* 0000b NULL DIMM (128mb) */
-       28,     /* 0001b 256mb */
-       29,     /* 0010b 512mb */
-       29,     /* 0011b 512mb */
-       29,     /* 0100b 512mb */
-       30,     /* 0101b 1gb */
-       30,     /* 0110b 1gb */
-       31,     /* 0111b 2gb */
-       31,     /* 1000b 2gb */
-       32,     /* 1001b 4gb */
-       32,     /* 1010b 4gb */
-       33,     /* 1011b 8gb */
-       0,      /* 1100b future */
-       0,      /* 1101b future */
-       0,      /* 1110b future */
-       0       /* 1111b future */
-};
-
-/*
- * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
- * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
- * or higher value'.
- *
- *FIXME: Produce a better mapping/linearisation.
- */
-
-struct scrubrate scrubrates[] = {
-       { 0x01, 1600000000UL},
-       { 0x02, 800000000UL},
-       { 0x03, 400000000UL},
-       { 0x04, 200000000UL},
-       { 0x05, 100000000UL},
-       { 0x06, 50000000UL},
-       { 0x07, 25000000UL},
-       { 0x08, 12284069UL},
-       { 0x09, 6274509UL},
-       { 0x0A, 3121951UL},
-       { 0x0B, 1560975UL},
-       { 0x0C, 781440UL},
-       { 0x0D, 390720UL},
-       { 0x0E, 195300UL},
-       { 0x0F, 97650UL},
-       { 0x10, 48854UL},
-       { 0x11, 24427UL},
-       { 0x12, 12213UL},
-       { 0x13, 6101UL},
-       { 0x14, 3051UL},
-       { 0x15, 1523UL},
-       { 0x16, 761UL},
-       { 0x00, 0UL},        /* scrubbing off */
-};
+#include <linux/module.h>
+#include "edac_mce_amd.h"
 
 /*
  * string representation for the different MCA reported error types, see F3x48
        "generic",
        "reserved"
 };
+EXPORT_SYMBOL_GPL(tt_msgs);
 
 const char *ll_msgs[] = {      /* cache level */
        "L0",
        "L2",
        "L3/generic"
 };
+EXPORT_SYMBOL_GPL(ll_msgs);
 
 const char *rrrr_msgs[] = {
        "generic",
        "reserved RRRR= 14",
        "reserved RRRR= 15"
 };
+EXPORT_SYMBOL_GPL(rrrr_msgs);
 
 const char *pp_msgs[] = {      /* participating processor */
        "local node originated (SRC)",
        "local node observed as 3rd party (OBS)",
        "generic"
 };
+EXPORT_SYMBOL_GPL(pp_msgs);
 
 const char *to_msgs[] = {
        "no timeout",
        "timed out"
 };
+EXPORT_SYMBOL_GPL(to_msgs);
 
 const char *ii_msgs[] = {      /* memory or i/o */
        "mem access",
        "i/o access",
        "generic"
 };
+EXPORT_SYMBOL_GPL(ii_msgs);
 
 /* Map the 5 bits of Extended Error code to the string table. */
 const char *ext_msgs[] = {     /* extended error */
        "L3 Cache LRU error",           /* 1_1110b */
        "Res 0x1FF error"               /* 1_1111b */
 };
-
-const char *htlink_msgs[] = {
-       "none",
-       "1",
-       "2",
-       "1 2",
-       "3",
-       "1 3",
-       "2 3",
-       "1 2 3"
-};
+EXPORT_SYMBOL_GPL(ext_msgs);
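
With the message tables now exported via EXPORT_SYMBOL_GPL, other EDAC modules such as amd64_edac can reference them directly; the new edac_mce_amd.h header is presumably what carries the matching declarations, along these lines:

	/* Presumed declarations in edac_mce_amd.h: */
	extern const char *tt_msgs[];
	extern const char *ll_msgs[];
	extern const char *rrrr_msgs[];
	extern const char *pp_msgs[];
	extern const char *to_msgs[];
	extern const char *ii_msgs[];
	extern const char *ext_msgs[];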