// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2016
 *
 * Michael Kurz, <michi.kurz@gmail.com>
 *
 * STM32 QSPI driver
 */

#define LOG_CATEGORY UCLASS_SPI

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <log.h>
#include <reset.h>
#include <spi.h>
#include <spi-mem.h>
#include <watchdog.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/printk.h>
#include <linux/sizes.h>

struct stm32_qspi_regs {
	u32 cr;		/* 0x00 */
	u32 dcr;	/* 0x04 */
	u32 sr;		/* 0x08 */
	u32 fcr;	/* 0x0C */
	u32 dlr;	/* 0x10 */
	u32 ccr;	/* 0x14 */
	u32 ar;		/* 0x18 */
	u32 abr;	/* 0x1C */
	u32 dr;		/* 0x20 */
	u32 psmkr;	/* 0x24 */
	u32 psmar;	/* 0x28 */
	u32 pir;	/* 0x2C */
	u32 lptr;	/* 0x30 */
};

/*
 * QUADSPI control register
 */
#define STM32_QSPI_CR_EN		BIT(0)
#define STM32_QSPI_CR_ABORT		BIT(1)
#define STM32_QSPI_CR_DMAEN		BIT(2)
#define STM32_QSPI_CR_TCEN		BIT(3)
#define STM32_QSPI_CR_SSHIFT		BIT(4)
#define STM32_QSPI_CR_DFM		BIT(6)
#define STM32_QSPI_CR_FSEL		BIT(7)
#define STM32_QSPI_CR_FTHRES_SHIFT	8
#define STM32_QSPI_CR_TEIE		BIT(16)
#define STM32_QSPI_CR_TCIE		BIT(17)
#define STM32_QSPI_CR_FTIE		BIT(18)
#define STM32_QSPI_CR_SMIE		BIT(19)
#define STM32_QSPI_CR_TOIE		BIT(20)
#define STM32_QSPI_CR_APMS		BIT(22)
#define STM32_QSPI_CR_PMM		BIT(23)
#define STM32_QSPI_CR_PRESCALER_MASK	GENMASK(7, 0)
#define STM32_QSPI_CR_PRESCALER_SHIFT	24

/*
 * QUADSPI device configuration register
 */
#define STM32_QSPI_DCR_CKMODE		BIT(0)
#define STM32_QSPI_DCR_CSHT_MASK	GENMASK(2, 0)
#define STM32_QSPI_DCR_CSHT_SHIFT	8
#define STM32_QSPI_DCR_FSIZE_MASK	GENMASK(4, 0)
#define STM32_QSPI_DCR_FSIZE_SHIFT	16

/*
 * QUADSPI status register
 */
#define STM32_QSPI_SR_TEF		BIT(0)
#define STM32_QSPI_SR_TCF		BIT(1)
#define STM32_QSPI_SR_FTF		BIT(2)
#define STM32_QSPI_SR_SMF		BIT(3)
#define STM32_QSPI_SR_TOF		BIT(4)
#define STM32_QSPI_SR_BUSY		BIT(5)

/*
 * QUADSPI flag clear register
 */
#define STM32_QSPI_FCR_CTEF		BIT(0)
#define STM32_QSPI_FCR_CTCF		BIT(1)
#define STM32_QSPI_FCR_CSMF		BIT(3)
#define STM32_QSPI_FCR_CTOF		BIT(4)

/*
 * QUADSPI communication configuration register
 */
#define STM32_QSPI_CCR_DDRM		BIT(31)
#define STM32_QSPI_CCR_DHHC		BIT(30)
#define STM32_QSPI_CCR_SIOO		BIT(28)
#define STM32_QSPI_CCR_FMODE_SHIFT	26
#define STM32_QSPI_CCR_DMODE_SHIFT	24
#define STM32_QSPI_CCR_DCYC_SHIFT	18
#define STM32_QSPI_CCR_ABSIZE_SHIFT	16
#define STM32_QSPI_CCR_ABMODE_SHIFT	14
#define STM32_QSPI_CCR_ADSIZE_SHIFT	12
#define STM32_QSPI_CCR_ADMODE_SHIFT	10
#define STM32_QSPI_CCR_IMODE_SHIFT	8

#define STM32_QSPI_CCR_IND_WRITE	0
#define STM32_QSPI_CCR_IND_READ		1
#define STM32_QSPI_CCR_MEM_MAP		3

#define STM32_QSPI_MAX_MMAP_SZ		SZ_256M
#define STM32_QSPI_MAX_CHIP		2

#define STM32_QSPI_FIFO_TIMEOUT_US	30000
#define STM32_QSPI_CMD_TIMEOUT_US	1000000
#define STM32_BUSY_TIMEOUT_US		100000
#define STM32_ABT_TIMEOUT_US		100000

struct stm32_qspi_priv {
	struct stm32_qspi_regs *regs;
	void __iomem *mm_base;
	resource_size_t mm_size;
	ulong clock_rate;
	int cs_used;
};

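/* Poll the status register until the BUSY flag is cleared by hardware */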
static int _stm32_qspi_wait_for_not_busy(struct stm32_qspi_priv *priv)
{
	u32 sr;
	int ret;

	ret = readl_poll_timeout(&priv->regs->sr, sr,
				 !(sr & STM32_QSPI_SR_BUSY),
				 STM32_BUSY_TIMEOUT_US);
	if (ret)
		log_err("busy timeout (stat:%#x)\n", sr);

	return ret;
}

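/*
 * Wait for the end of a command in indirect mode: poll for the transfer
 * complete flag (TCF), report a transfer error (TEF) if one was raised,
 * clear both flags, then wait for the controller to leave the busy state.
 */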
static int _stm32_qspi_wait_cmd(struct stm32_qspi_priv *priv,
				const struct spi_mem_op *op)
{
	u32 sr;
	int ret = 0;

	ret = readl_poll_timeout(&priv->regs->sr, sr,
				 sr & STM32_QSPI_SR_TCF,
				 STM32_QSPI_CMD_TIMEOUT_US);
	if (ret) {
		log_err("cmd timeout (stat:%#x)\n", sr);
	} else if (readl(&priv->regs->sr) & STM32_QSPI_SR_TEF) {
		log_err("transfer error (stat:%#x)\n", sr);
		ret = -EIO;
	}

	/* clear flags */
	writel(STM32_QSPI_FCR_CTCF | STM32_QSPI_FCR_CTEF, &priv->regs->fcr);

	if (!ret)
		ret = _stm32_qspi_wait_for_not_busy(priv);

	return ret;
}

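/*
 * Single-byte FIFO accessors used by the indirect-mode polling loop; the
 * read path also calls schedule() so the watchdog keeps being serviced
 * during long transfers.
 */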
static void _stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb(addr);
	schedule();
}

static void _stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb(*val, addr);
}

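/*
 * Indirect-mode data phase: wait for the FIFO threshold flag (FTF), then
 * move the data one byte at a time between the caller's buffer and the
 * data register, in the direction requested by the spi-mem operation.
 */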
static int _stm32_qspi_poll(struct stm32_qspi_priv *priv,
			    const struct spi_mem_op *op)
{
	void (*fifo)(u8 *val, void __iomem *addr);
	u32 len = op->data.nbytes, sr;
	u8 *buf;
	int ret;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		fifo = _stm32_qspi_read_fifo;
		buf = op->data.buf.in;
	} else {
		fifo = _stm32_qspi_write_fifo;
		buf = (u8 *)op->data.buf.out;
	}

	while (len--) {
		ret = readl_poll_timeout(&priv->regs->sr, sr,
					 sr & STM32_QSPI_SR_FTF,
					 STM32_QSPI_FIFO_TIMEOUT_US);
		if (ret) {
			log_err("fifo timeout (len:%d stat:%#x)\n", len, sr);
			return ret;
		}

		fifo(buf++, &priv->regs->dr);
	}

	return 0;
}

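/*
 * Memory-mapped read: the requested range is simply copied out of the
 * memory-mapped flash window; the controller generates the SPI accesses.
 */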
static int stm32_qspi_mm(struct stm32_qspi_priv *priv,
			 const struct spi_mem_op *op)
{
	memcpy_fromio(op->data.buf.in, priv->mm_base + op->addr.val,
		      op->data.nbytes);

	return 0;
}

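/*
 * Data-phase dispatcher: do nothing when the operation carries no data,
 * use the memory-mapped window for reads in memory-mapped mode, and fall
 * back to polled FIFO transfers for indirect reads and writes.
 */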
static int _stm32_qspi_tx(struct stm32_qspi_priv *priv,
			  const struct spi_mem_op *op,
			  u8 mode)
{
	if (!op->data.nbytes)
		return 0;

	if (mode == STM32_QSPI_CCR_MEM_MAP)
		return stm32_qspi_mm(priv, op);

	return _stm32_qspi_poll(priv, op);
}

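/*
 * Translate a spi-mem bus width (1, 2 or 4 lines) into the 2-bit xMODE
 * encoding used in the CCR register: 1 -> single line, 2 -> dual lines,
 * 3 -> quad lines.
 */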
static int _stm32_qspi_get_mode(u8 buswidth)
{
	if (buswidth == 4)
		return 3;

	return buswidth;
}

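/*
 * spi-mem exec_op hook: program DLR/CCR/AR for the requested operation.
 * Reads that fit inside the memory-mapped window (and use an address
 * phase) are served in memory-mapped mode; everything else runs in
 * indirect mode.
 */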
static int stm32_qspi_exec_op(struct spi_slave *slave,
			      const struct spi_mem_op *op)
{
	struct stm32_qspi_priv *priv = dev_get_priv(slave->dev->parent);
	u32 cr, ccr, addr_max;
	u8 mode = STM32_QSPI_CCR_IND_WRITE;
	int timeout, ret;

	dev_dbg(slave->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	addr_max = op->addr.val + op->data.nbytes + 1;

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) {
		if (addr_max < priv->mm_size && op->addr.buswidth)
			mode = STM32_QSPI_CCR_MEM_MAP;
		else
			mode = STM32_QSPI_CCR_IND_READ;
	}

	if (op->data.nbytes)
		writel(op->data.nbytes - 1, &priv->regs->dlr);

	ccr = (mode << STM32_QSPI_CCR_FMODE_SHIFT);
	ccr |= op->cmd.opcode;
	ccr |= (_stm32_qspi_get_mode(op->cmd.buswidth)
		<< STM32_QSPI_CCR_IMODE_SHIFT);

	if (op->addr.nbytes) {
		ccr |= ((op->addr.nbytes - 1) << STM32_QSPI_CCR_ADSIZE_SHIFT);
		ccr |= (_stm32_qspi_get_mode(op->addr.buswidth)
			<< STM32_QSPI_CCR_ADMODE_SHIFT);
	}

	if (op->dummy.buswidth && op->dummy.nbytes)
		ccr |= (op->dummy.nbytes * 8 / op->dummy.buswidth
			<< STM32_QSPI_CCR_DCYC_SHIFT);

	if (op->data.nbytes)
		ccr |= (_stm32_qspi_get_mode(op->data.buswidth)
			<< STM32_QSPI_CCR_DMODE_SHIFT);

	writel(ccr, &priv->regs->ccr);

	if (op->addr.nbytes && mode != STM32_QSPI_CCR_MEM_MAP)
		writel(op->addr.val, &priv->regs->ar);

	ret = _stm32_qspi_tx(priv, op, mode);
	/*
	 * Abort the transfer:
	 * - on error
	 * - after a memory-mapped read: prefetching must be stopped once the
	 *   last bytes of the device have been read (device size - FIFO
	 *   size); since the device size is not known here, prefetching is
	 *   always stopped.
	 */
	if (ret || mode == STM32_QSPI_CCR_MEM_MAP)
		goto abort;

	/* Wait for the end of the transfer in indirect mode */
	ret = _stm32_qspi_wait_cmd(priv, op);
	if (ret)
		goto abort;

	return 0;

abort:
	setbits_le32(&priv->regs->cr, STM32_QSPI_CR_ABORT);

	/* Wait for the abort bit to be cleared by hardware */
	timeout = readl_poll_timeout(&priv->regs->cr, cr,
				     !(cr & STM32_QSPI_CR_ABORT),
				     STM32_ABT_TIMEOUT_US);

	writel(STM32_QSPI_FCR_CTCF, &priv->regs->fcr);

	if (ret || timeout)
		dev_err(slave->dev, "ret:%d abort timeout:%d\n", ret, timeout);

	return ret;
}

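/*
 * The probe code below expects the device tree node to provide two named
 * register regions ("qspi" for the control registers and "qspi_mm" for the
 * memory-mapped flash window), a clock and an optional reset.
 *
 * Illustrative fragment only (addresses and clock/reset specifiers are
 * hypothetical, not taken from a real board):
 *
 *	qspi: spi@58003000 {
 *		compatible = "st,stm32f469-qspi";
 *		reg = <0x58003000 0x1000>, <0x90000000 0x10000000>;
 *		reg-names = "qspi", "qspi_mm";
 *		clocks = <&rcc QSPI_K>;
 *		resets = <&rcc QSPI_R>;
 *	};
 */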
static int stm32_qspi_probe(struct udevice *bus)
{
	struct stm32_qspi_priv *priv = dev_get_priv(bus);
	struct resource res;
	struct clk clk;
	struct reset_ctl reset_ctl;
	int ret;

	ret = dev_read_resource_byname(bus, "qspi", &res);
	if (ret) {
		dev_err(bus, "can't get regs base address (ret = %d)!\n", ret);
		return ret;
	}

	priv->regs = (struct stm32_qspi_regs *)res.start;

	ret = dev_read_resource_byname(bus, "qspi_mm", &res);
	if (ret) {
		dev_err(bus, "can't get mmap base address (ret = %d)!\n", ret);
		return ret;
	}

	priv->mm_base = (void __iomem *)res.start;

	priv->mm_size = resource_size(&res);
	if (priv->mm_size > STM32_QSPI_MAX_MMAP_SZ)
		return -EINVAL;

	dev_dbg(bus, "regs=<0x%p> mapped=<0x%p> mapped_size=<0x%lx>\n",
		priv->regs, priv->mm_base, priv->mm_size);

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0)
		return ret;

	ret = clk_enable(&clk);
	if (ret) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}

	priv->clock_rate = clk_get_rate(&clk);
	if (!priv->clock_rate) {
		clk_disable(&clk);
		return -EINVAL;
	}

	ret = reset_get_by_index(bus, 0, &reset_ctl);
	if (ret) {
		if (ret != -ENOENT) {
			dev_err(bus, "failed to get reset\n");
			clk_disable(&clk);
			return ret;
		}
	} else {
		/* Reset QSPI controller */
		reset_assert(&reset_ctl);
		udelay(2);
		reset_deassert(&reset_ctl);
	}

	priv->cs_used = -1;

	setbits_le32(&priv->regs->cr, STM32_QSPI_CR_SSHIFT);

	/* Set dcr fsize to max address */
	setbits_le32(&priv->regs->dcr,
		     STM32_QSPI_DCR_FSIZE_MASK << STM32_QSPI_DCR_FSIZE_SHIFT);

	return 0;
}

static int stm32_qspi_claim_bus(struct udevice *dev)
{
	struct stm32_qspi_priv *priv = dev_get_priv(dev->parent);
	struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
	int slave_cs = slave_plat->cs;

	if (slave_cs >= STM32_QSPI_MAX_CHIP)
		return -ENODEV;

	if (priv->cs_used != slave_cs) {
		priv->cs_used = slave_cs;

		/* Set chip select */
		clrsetbits_le32(&priv->regs->cr, STM32_QSPI_CR_FSEL,
				priv->cs_used ? STM32_QSPI_CR_FSEL : 0);
	}

	setbits_le32(&priv->regs->cr, STM32_QSPI_CR_EN);

	return 0;
}

static int stm32_qspi_release_bus(struct udevice *dev)
{
	struct stm32_qspi_priv *priv = dev_get_priv(dev->parent);

	clrbits_le32(&priv->regs->cr, STM32_QSPI_CR_EN);

	return 0;
}

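/*
 * The QSPI kernel clock is divided by (prescaler + 1) to produce the SPI
 * clock; CSHT is programmed so that chip select stays high for at least
 * roughly 50 ns between commands at the resulting bus frequency.
 */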
static int stm32_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct stm32_qspi_priv *priv = dev_get_priv(bus);
	u32 qspi_clk = priv->clock_rate;
	u32 prescaler = 255;
	u32 csht;
	int ret;

	if (speed > 0) {
		prescaler = 0;
		if (qspi_clk) {
			prescaler = DIV_ROUND_UP(qspi_clk, speed) - 1;
			if (prescaler > 255)
				prescaler = 255;
		}
	}

	csht = DIV_ROUND_UP((5 * qspi_clk) / (prescaler + 1), 100000000);
	csht = (csht - 1) & STM32_QSPI_DCR_CSHT_MASK;

	ret = _stm32_qspi_wait_for_not_busy(priv);
	if (ret)
		return ret;

	clrsetbits_le32(&priv->regs->cr,
			STM32_QSPI_CR_PRESCALER_MASK <<
			STM32_QSPI_CR_PRESCALER_SHIFT,
			prescaler << STM32_QSPI_CR_PRESCALER_SHIFT);

	clrsetbits_le32(&priv->regs->dcr,
			STM32_QSPI_DCR_CSHT_MASK << STM32_QSPI_DCR_CSHT_SHIFT,
			csht << STM32_QSPI_DCR_CSHT_SHIFT);

	dev_dbg(bus, "regs=%p, speed=%d\n", priv->regs,
		(qspi_clk / (prescaler + 1)));

	return 0;
}

static int stm32_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct stm32_qspi_priv *priv = dev_get_priv(bus);
	int ret;
	const char *str_rx, *str_tx;

	ret = _stm32_qspi_wait_for_not_busy(priv);
	if (ret)
		return ret;

	if ((mode & SPI_CPHA) && (mode & SPI_CPOL))
		setbits_le32(&priv->regs->dcr, STM32_QSPI_DCR_CKMODE);
	else if (!(mode & SPI_CPHA) && !(mode & SPI_CPOL))
		clrbits_le32(&priv->regs->dcr, STM32_QSPI_DCR_CKMODE);
	else
		return -ENODEV;

	if (mode & SPI_CS_HIGH)
		return -ENODEV;

	if (mode & SPI_RX_QUAD)
		str_rx = "quad";
	else if (mode & SPI_RX_DUAL)
		str_rx = "dual";
	else
		str_rx = "single";

	if (mode & SPI_TX_QUAD)
		str_tx = "quad";
	else if (mode & SPI_TX_DUAL)
		str_tx = "dual";
	else
		str_tx = "single";

	dev_dbg(bus, "regs=%p, mode=%d rx: %s, tx: %s\n",
		priv->regs, mode, str_rx, str_tx);

	return 0;
}

static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op = stm32_qspi_exec_op,
};

static const struct dm_spi_ops stm32_qspi_ops = {
	.claim_bus = stm32_qspi_claim_bus,
	.release_bus = stm32_qspi_release_bus,
	.set_speed = stm32_qspi_set_speed,
	.set_mode = stm32_qspi_set_mode,
	.mem_ops = &stm32_qspi_mem_ops,
};

static const struct udevice_id stm32_qspi_ids[] = {
	{ .compatible = "st,stm32f469-qspi" },
	{ }
};

U_BOOT_DRIVER(stm32_qspi) = {
	.name = "stm32_qspi",
	.id = UCLASS_SPI,
	.of_match = stm32_qspi_ids,
	.ops = &stm32_qspi_ops,
	.priv_auto = sizeof(struct stm32_qspi_priv),
	.probe = stm32_qspi_probe,
};