From: Mikhail Kshevetskiy
Date: Wed, 17 Sep 2025 21:54:01 +0000 (+0300)
Subject: mtd: spinand: repeat reading in regular mode if continuous reading fails
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=010dc7f2dd6a0078ade3f88f627ed5fbf45ceb94;p=users%2Fhch%2Fmisc.git

mtd: spinand: repeat reading in regular mode if continuous reading fails

Continuous reading may read multiple flash pages in a single operation.
Unfortunately, not all SPI controllers support transfers that large: they
read less data than requested, and the interrupted operation cannot be
resumed. In this case:
  * disable continuous reading on this (not capable enough) SPI controller,
  * repeat the read in regular mode.

Signed-off-by: Mikhail Kshevetskiy
Signed-off-by: Miquel Raynal
---

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 58ec85c98ab8..f92133b8e1a6 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -430,8 +430,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 		 * Dirmap accesses are allowed to toggle the CS.
 		 * Toggling the CS during a continuous read is forbidden.
 		 */
-		if (nbytes && req->continuous)
-			return -EIO;
+		if (nbytes && req->continuous) {
+			/*
+			 * The SPI controller has broken support for
+			 * continuous reading. Disable continuous reading
+			 * from now on and return -EAGAIN to retry the
+			 * read in regular mode.
+			 */
+			spinand->cont_read_possible = false;
+			return -EAGAIN;
+		}
 	}
 
 	if (req->datalen)
@@ -899,10 +907,19 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 
 	old_stats = mtd->ecc_stats;
 
-	if (spinand_use_cont_read(mtd, from, ops))
+	if (spinand_use_cont_read(mtd, from, ops)) {
 		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
-	else
+		if (ret == -EAGAIN && !spinand->cont_read_possible) {
+			/*
+			 * The SPI controller has broken support for continuous
+			 * reading (see spinand_read_from_cache_op()), so
+			 * repeat the read in regular mode.
+			 */
+			ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+		}
+	} else {
 		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+	}
 
 	if (ops->stats) {
 		ops->stats->uncorrectable_errors +=
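
For illustration only (not part of the patch), below is a minimal, self-contained
sketch of the fallback protocol the change implements: the core attempts a
continuous read first and, when the read path detects that the controller cannot
complete the multi-page transfer, it clears cont_read_possible and returns
-EAGAIN so the read is retried page by page. The spinand_mtd_*_read() names and
the cont_read_possible field mirror drivers/mtd/nand/spi/core.c, but everything
around them is hypothetical userspace stubbing.

/* Simplified userspace model of the retry-in-regular-mode fallback. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct spinand_device {
	bool cont_read_possible;	/* cleared once a short dirmap read is seen */
};

/* Stub: pretend the SPI controller cannot transfer a whole multi-page burst. */
static int spinand_mtd_continuous_page_read(struct spinand_device *spinand)
{
	/* Mirrors spinand_read_from_cache_op(): leftover bytes -> give up. */
	spinand->cont_read_possible = false;
	return -EAGAIN;
}

/* Stub: page-by-page reading always works. */
static int spinand_mtd_regular_page_read(struct spinand_device *spinand)
{
	return 0;
}

static int spinand_mtd_read(struct spinand_device *spinand, bool use_cont_read)
{
	int ret;

	if (use_cont_read) {
		ret = spinand_mtd_continuous_page_read(spinand);
		/* Broken continuous read support: retry in regular mode. */
		if (ret == -EAGAIN && !spinand->cont_read_possible)
			ret = spinand_mtd_regular_page_read(spinand);
	} else {
		ret = spinand_mtd_regular_page_read(spinand);
	}

	return ret;
}

int main(void)
{
	struct spinand_device dev = { .cont_read_possible = true };

	printf("read returned %d, cont_read_possible=%d\n",
	       spinand_mtd_read(&dev, true), dev.cont_read_possible);
	return 0;
}

Since cont_read_possible stays false afterwards, later reads on that controller
should never select continuous mode again, so the -EAGAIN round trip is paid at
most once.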