#define CDNS_XSPI_DLL_RST_N BIT(24)
#define CDNS_XSPI_DLL_LOCK BIT(0)
+/* Marvell overlay registers - clock */
+#define MRVL_XSPI_CLK_CTRL_AUX_REG 0x2020
+#define MRVL_XSPI_CLK_ENABLE BIT(0)
+#define MRVL_XSPI_CLK_DIV GENMASK(4, 1)
+#define MRVL_XSPI_IRQ_ENABLE BIT(6)
+#define MRVL_XSPI_CLOCK_IO_HZ 800000000
+#define MRVL_XSPI_CLOCK_DIVIDED(div) ((MRVL_XSPI_CLOCK_IO_HZ) / (div))
+#define MRVL_DEFAULT_CLK 25000000
+
enum cdns_xspi_stig_instr_type {
CDNS_XSPI_STIG_INSTR_TYPE_0,
CDNS_XSPI_STIG_INSTR_TYPE_1,
.mrvl_hw_overlay = false,
};
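+
+/*
+ * SPI clock dividers for the 800 MHz I/O clock. The index of the selected
+ * divider is the value programmed into the MRVL_XSPI_CLK_DIV field.
+ */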
+static const int cdns_mrvl_xspi_clk_div_list[] = {
+ 4, //0x0 = Divide by 4. SPI clock is 200 MHz.
+ 6, //0x1 = Divide by 6. SPI clock is 133.33 MHz.
+ 8, //0x2 = Divide by 8. SPI clock is 100 MHz.
+ 10, //0x3 = Divide by 10. SPI clock is 80 MHz.
+ 12, //0x4 = Divide by 12. SPI clock is 66.666 MHz.
+ 16, //0x5 = Divide by 16. SPI clock is 50 MHz.
+ 18, //0x6 = Divide by 18. SPI clock is 44.44 MHz.
+ 20, //0x7 = Divide by 20. SPI clock is 40 MHz.
+ 24, //0x8 = Divide by 24. SPI clock is 33.33 MHz.
+ 32, //0x9 = Divide by 32. SPI clock is 25 MHz.
+ 40, //0xA = Divide by 40. SPI clock is 20 MHz.
+ 50, //0xB = Divide by 50. SPI clock is 16 MHz.
+ 64, //0xC = Divide by 64. SPI clock is 12.5 MHz.
+ 128 //0xD = Divide by 128. SPI clock is 6.25 MHz.
+};
+
struct cdns_xspi_dev {
struct platform_device *pdev;
struct device *dev;
return cdns_xspi_is_dll_locked(cdns_xspi);
}
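+
+/*
+ * Pick the smallest divider that brings the SPI clock at or below the
+ * requested rate, then gate the clock, program the new divider and
+ * re-enable it via the auxiliary clock control register. Returns true
+ * if the register was updated.
+ */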
+static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi,
+ int requested_clk)
+{
+ int i = 0;
+ int clk_val;
+ u32 clk_reg;
+ bool update_clk = false;
+
+	/* Fall back to the last (slowest) divider if none is slow enough */
+	while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1) {
+		clk_val = MRVL_XSPI_CLOCK_DIVIDED(
+				cdns_mrvl_xspi_clk_div_list[i]);
+		if (clk_val <= requested_clk)
+			break;
+		i++;
+	}
+
+ dev_dbg(cdns_xspi->dev, "Found clk div: %d, clk val: %d\n",
+ cdns_mrvl_xspi_clk_div_list[i],
+ MRVL_XSPI_CLOCK_DIVIDED(
+ cdns_mrvl_xspi_clk_div_list[i]));
+
+ clk_reg = readl(cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+
+	if (FIELD_GET(MRVL_XSPI_CLK_DIV, clk_reg) != i) {
+		/* Gate the clock before changing the divider */
+		clk_reg &= ~MRVL_XSPI_CLK_ENABLE;
+		writel(clk_reg,
+		       cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+		clk_reg &= ~MRVL_XSPI_CLK_DIV;
+		clk_reg |= FIELD_PREP(MRVL_XSPI_CLK_DIV, i);
+		clk_reg |= MRVL_XSPI_CLK_ENABLE;
+		clk_reg |= MRVL_XSPI_IRQ_ENABLE;
+		update_clk = true;
+	}
+
+ if (update_clk)
+ writel(clk_reg,
+ cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+
+ return update_clk;
+}
+
static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
{
u32 ctrl_stat;
return ret;
}
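+
+/*
+ * Marvell exec_op handler: reprogram the SPI clock for the target device
+ * before running the common Cadence memory operation.
+ */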
+static int marvell_xspi_mem_op_execute(struct spi_mem *mem,
+				       const struct spi_mem_op *op)
+{
+	struct cdns_xspi_dev *cdns_xspi =
+		spi_controller_get_devdata(mem->spi->controller);
+
+	cdns_mrvl_xspi_setup_clock(cdns_xspi, mem->spi->max_speed_hz);
+
+	return cdns_xspi_mem_op(cdns_xspi, mem, op);
+}
+
static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
+
+static const struct spi_controller_mem_ops marvell_xspi_mem_ops = {
+ .exec_op = marvell_xspi_mem_op_execute,
+ .adjust_op_size = cdns_xspi_adjust_mem_op_size,
+};
+
static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
{
struct cdns_xspi_dev *cdns_xspi = dev;
if (!cdns_xspi->driver_data)
return -ENODEV;
- host->mem_ops = &cadence_xspi_mem_ops;
+ if (cdns_xspi->driver_data->mrvl_hw_overlay)
+ host->mem_ops = &marvell_xspi_mem_ops;
+ else
+ host->mem_ops = &cadence_xspi_mem_ops;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = -1;
return ret;
}
- if (cdns_xspi->driver_data->mrvl_hw_overlay)
+ if (cdns_xspi->driver_data->mrvl_hw_overlay) {
+ cdns_mrvl_xspi_setup_clock(cdns_xspi, MRVL_DEFAULT_CLK);
cdns_xspi_configure_phy(cdns_xspi);
+ }
cdns_xspi_print_phy_config(cdns_xspi);