www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
EDAC/amd64: Check return value of amd_smn_read()
author: Yazen Ghannam <yazen.ghannam@amd.com>
Thu, 6 Jun 2024 16:12:55 +0000 (11:12 -0500)
committer: Borislav Petkov (AMD) <bp@alien8.de>
Wed, 12 Jun 2024 09:33:45 +0000 (11:33 +0200)
Check the return value of amd_smn_read() before saving a value. This
ensures invalid values aren't saved. The struct umc instance is
initialized to 0 during memory allocation. Therefore, a bad read will
keep the value as 0 providing the expected Read-as-Zero behavior.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
Link: https://lore.kernel.org/r/20240606-fix-smn-bad-read-v4-2-ffde21931c3f@amd.com
drivers/edac/amd64_edac.c

index dfc4fb8f7a7a4c13b35abfd04b679257255ad84c..ddfbdb66b794d78d6bbff978bfa520a7b55c542f 100644 (file)
@@ -1438,6 +1438,7 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
        u32 *base, *base_sec;
        u32 *mask, *mask_sec;
        int cs, umc;
+       u32 tmp;
 
        for_each_umc(umc) {
                umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
@@ -1450,13 +1451,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
                        base_reg = umc_base_reg + (cs * 4);
                        base_reg_sec = umc_base_reg_sec + (cs * 4);
 
-                       if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+                       if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) {
+                               *base = tmp;
                                edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
                                         umc, cs, *base, base_reg);
+                       }
 
-                       if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+                       if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) {
+                               *base_sec = tmp;
                                edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
                                         umc, cs, *base_sec, base_reg_sec);
+                       }
                }
 
                umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
@@ -1469,13 +1474,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
                        mask_reg = umc_mask_reg + (cs * 4);
                        mask_reg_sec = umc_mask_reg_sec + (cs * 4);
 
-                       if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+                       if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) {
+                               *mask = tmp;
                                edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
                                         umc, cs, *mask, mask_reg);
+                       }
 
-                       if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+                       if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) {
+                               *mask_sec = tmp;
                                edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
                                         umc, cs, *mask_sec, mask_reg_sec);
+                       }
                }
        }
 }
@@ -2894,7 +2903,7 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
 {
        u8 nid = pvt->mc_node_id;
        struct amd64_umc *umc;
-       u32 i, umc_base;
+       u32 i, tmp, umc_base;
 
        /* Read registers from each UMC */
        for_each_umc(i) {
@@ -2902,11 +2911,20 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
                umc_base = get_umc_base(i);
                umc = &pvt->umc[i];
 
-               amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
-               amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
-               amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
-               amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
-               amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+               if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp))
+                       umc->dimm_cfg = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+                       umc->umc_cfg = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+                       umc->sdp_ctrl = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+                       umc->ecc_ctrl = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp))
+                       umc->umc_cap_hi = tmp;
        }
 }
 
@@ -3635,16 +3653,21 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt)
 {
        u8 nid = pvt->mc_node_id;
        struct amd64_umc *umc;
-       u32 i, umc_base;
+       u32 i, tmp, umc_base;
 
        /* Read registers from each UMC */
        for_each_umc(i) {
                umc_base = gpu_get_umc_base(pvt, i, 0);
                umc = &pvt->umc[i];
 
-               amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
-               amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
-               amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+               if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+                       umc->umc_cfg = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+                       umc->sdp_ctrl = tmp;
+
+               if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+                       umc->ecc_ctrl = tmp;
        }
 }