blob: 93ab2b5635f3b14a9091e456f639923148091d62 [file] [log] [blame]
Stefan Roese1c60fe72014-11-07 12:37:49 +01001/*
2 * Copyright (C) 2012 Altera Corporation <www.altera.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * - Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * - Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * - Neither the name of the Altera Corporation nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
Simon Glass0f2af882020-05-10 11:40:05 -060028#include <log.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010029#include <asm/io.h>
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053030#include <dma.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060031#include <linux/bitops.h>
Simon Glassdbd79542020-05-10 11:40:11 -060032#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090033#include <linux/errno.h>
Marek Vasutdae51dd2016-04-27 23:18:55 +020034#include <wait_bit.h>
Vignesh R4ca60192016-07-06 10:20:56 +053035#include <spi.h>
Vignesh Raghavendra27516a32020-01-27 10:36:39 +053036#include <spi-mem.h>
Vignesh Rad4bd8a2018-01-24 10:44:07 +053037#include <malloc.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010038#include "cadence_qspi.h"
39
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -060040__weak void cadence_qspi_apb_enable_linear_mode(bool enable)
41{
42 return;
43}
44
Stefan Roese1c60fe72014-11-07 12:37:49 +010045void cadence_qspi_apb_controller_enable(void *reg_base)
46{
47 unsigned int reg;
48 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000049 reg |= CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010050 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010051}
52
53void cadence_qspi_apb_controller_disable(void *reg_base)
54{
55 unsigned int reg;
56 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000057 reg &= ~CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010058 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010059}
60
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053061void cadence_qspi_apb_dac_mode_enable(void *reg_base)
62{
63 unsigned int reg;
64
65 reg = readl(reg_base + CQSPI_REG_CONFIG);
66 reg |= CQSPI_REG_CONFIG_DIRECT;
67 writel(reg, reg_base + CQSPI_REG_CONFIG);
68}
69
Pratyush Yadave1814ad2021-06-26 00:47:09 +053070static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
71 bool dtr)
72{
73 unsigned int dummy_clk;
74
Marek Vasut545be192021-09-14 05:21:48 +020075 if (!op->dummy.nbytes || !op->dummy.buswidth)
76 return 0;
77
Pratyush Yadave1814ad2021-06-26 00:47:09 +053078 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
79 if (dtr)
80 dummy_clk /= 2;
81
82 return dummy_clk;
83}
84
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060085static u32 cadence_qspi_calc_rdreg(struct cadence_spi_priv *priv)
Pratyush Yadave1814ad2021-06-26 00:47:09 +053086{
87 u32 rdreg = 0;
88
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060089 rdreg |= priv->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
90 rdreg |= priv->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
91 rdreg |= priv->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
Pratyush Yadave1814ad2021-06-26 00:47:09 +053092
93 return rdreg;
94}
95
96static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
97{
98 switch (buswidth) {
99 case 0:
100 case 1:
101 return CQSPI_INST_TYPE_SINGLE;
102
103 case 2:
104 return CQSPI_INST_TYPE_DUAL;
105
106 case 4:
107 return CQSPI_INST_TYPE_QUAD;
108
109 case 8:
110 return CQSPI_INST_TYPE_OCTAL;
111
112 default:
113 return -ENOTSUPP;
114 }
115}
116
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600117static int cadence_qspi_set_protocol(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530118 const struct spi_mem_op *op)
119{
120 int ret;
121
Apurva Nandanb88f55c2023-04-12 16:28:54 +0530122 /*
123 * For an op to be DTR, cmd phase along with every other non-empty
124 * phase should have dtr field set to 1. If an op phase has zero
125 * nbytes, ignore its dtr field; otherwise, check its dtr field.
126 * Also, dummy checks not performed here Since supports_op()
127 * already checks that all or none of the fields are DTR.
128 */
129 priv->dtr = op->cmd.dtr &&
130 (!op->addr.nbytes || op->addr.dtr) &&
131 (!op->data.nbytes || op->data.dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530132
133 ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
134 if (ret < 0)
135 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600136 priv->inst_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530137
138 ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
139 if (ret < 0)
140 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600141 priv->addr_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530142
143 ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
144 if (ret < 0)
145 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600146 priv->data_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530147
148 return 0;
149}
150
Stefan Roese1c60fe72014-11-07 12:37:49 +0100151/* Return 1 if idle, otherwise return 0 (busy). */
152static unsigned int cadence_qspi_wait_idle(void *reg_base)
153{
154 unsigned int start, count = 0;
155 /* timeout in unit of ms */
156 unsigned int timeout = 5000;
157
158 start = get_timer(0);
159 for ( ; get_timer(start) < timeout ; ) {
160 if (CQSPI_REG_IS_IDLE(reg_base))
161 count++;
162 else
163 count = 0;
164 /*
165 * Ensure the QSPI controller is in true idle state after
166 * reading back the same idle status consecutively
167 */
168 if (count >= CQSPI_POLL_IDLE_RETRY)
169 return 1;
170 }
171
172 /* Timeout, still in busy mode. */
Jan Kiszkabc6c7532023-10-30 17:20:29 +0100173 printf("QSPI: QSPI is still busy after poll for %d ms.\n", timeout);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100174 return 0;
175}
176
177void cadence_qspi_apb_readdata_capture(void *reg_base,
178 unsigned int bypass, unsigned int delay)
179{
180 unsigned int reg;
181 cadence_qspi_apb_controller_disable(reg_base);
182
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000183 reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100184
185 if (bypass)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000186 reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100187 else
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000188 reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100189
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000190 reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
191 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100192
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000193 reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
194 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100195
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000196 writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100197
198 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100199}
200
201void cadence_qspi_apb_config_baudrate_div(void *reg_base,
202 unsigned int ref_clk_hz, unsigned int sclk_hz)
203{
204 unsigned int reg;
205 unsigned int div;
206
207 cadence_qspi_apb_controller_disable(reg_base);
208 reg = readl(reg_base + CQSPI_REG_CONFIG);
209 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
210
Phil Edworthy8f24a442016-11-29 12:58:27 +0000211 /*
212 * The baud_div field in the config reg is 4 bits, and the ref clock is
213 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
214 * SPI clock rate is less than or equal to the requested clock rate.
215 */
216 div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100217
Chin Liang See91b2c192016-08-07 22:50:40 +0800218 /* ensure the baud rate doesn't exceed the max value */
219 if (div > CQSPI_REG_CONFIG_BAUD_MASK)
220 div = CQSPI_REG_CONFIG_BAUD_MASK;
221
Phil Edworthy67824ad2016-11-29 12:58:28 +0000222 debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
223 ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
224
Chin Liang See91b2c192016-08-07 22:50:40 +0800225 reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100226 writel(reg, reg_base + CQSPI_REG_CONFIG);
227
228 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100229}
230
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000231void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100232{
233 unsigned int reg;
234
235 cadence_qspi_apb_controller_disable(reg_base);
236 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000237 reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100238
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000239 if (mode & SPI_CPOL)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000240 reg |= CQSPI_REG_CONFIG_CLK_POL;
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000241 if (mode & SPI_CPHA)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000242 reg |= CQSPI_REG_CONFIG_CLK_PHA;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100243
244 writel(reg, reg_base + CQSPI_REG_CONFIG);
245
246 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100247}
248
249void cadence_qspi_apb_chipselect(void *reg_base,
250 unsigned int chip_select, unsigned int decoder_enable)
251{
252 unsigned int reg;
253
254 cadence_qspi_apb_controller_disable(reg_base);
255
256 debug("%s : chipselect %d decode %d\n", __func__, chip_select,
257 decoder_enable);
258
259 reg = readl(reg_base + CQSPI_REG_CONFIG);
260 /* docoder */
261 if (decoder_enable) {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000262 reg |= CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100263 } else {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000264 reg &= ~CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100265 /* Convert CS if without decoder.
266 * CS0 to 4b'1110
267 * CS1 to 4b'1101
268 * CS2 to 4b'1011
269 * CS3 to 4b'0111
270 */
271 chip_select = 0xF & ~(1 << chip_select);
272 }
273
274 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
275 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
276 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
277 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
278 writel(reg, reg_base + CQSPI_REG_CONFIG);
279
280 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100281}
282
283void cadence_qspi_apb_delay(void *reg_base,
284 unsigned int ref_clk, unsigned int sclk_hz,
285 unsigned int tshsl_ns, unsigned int tsd2d_ns,
286 unsigned int tchsh_ns, unsigned int tslch_ns)
287{
288 unsigned int ref_clk_ns;
289 unsigned int sclk_ns;
290 unsigned int tshsl, tchsh, tslch, tsd2d;
291 unsigned int reg;
292
293 cadence_qspi_apb_controller_disable(reg_base);
294
295 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000296 ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100297
298 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000299 sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100300
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000301 /* The controller adds additional delay to that programmed in the reg */
302 if (tshsl_ns >= sclk_ns + ref_clk_ns)
303 tshsl_ns -= sclk_ns + ref_clk_ns;
304 if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
305 tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
306 tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
307 tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
308 tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
309 tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100310
311 reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
312 << CQSPI_REG_DELAY_TSHSL_LSB);
313 reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
314 << CQSPI_REG_DELAY_TCHSH_LSB);
315 reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
316 << CQSPI_REG_DELAY_TSLCH_LSB);
317 reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
318 << CQSPI_REG_DELAY_TSD2D_LSB);
319 writel(reg, reg_base + CQSPI_REG_DELAY);
320
321 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100322}
323
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600324void cadence_qspi_apb_controller_init(struct cadence_spi_priv *priv)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100325{
326 unsigned reg;
327
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600328 cadence_qspi_apb_controller_disable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100329
330 /* Configure the device size and address bytes */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600331 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100332 /* Clear the previous value */
333 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
334 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600335 reg |= (priv->page_size << CQSPI_REG_SIZE_PAGE_LSB);
336 reg |= (priv->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
337 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100338
339 /* Configure the remap address register, no remap */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600340 writel(0, priv->regbase + CQSPI_REG_REMAP);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100341
Vikas Manocha215cea02015-07-02 18:29:43 -0700342 /* Indirect mode configurations */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600343 writel(priv->fifo_depth / 2, priv->regbase + CQSPI_REG_SRAMPARTITION);
Vikas Manocha215cea02015-07-02 18:29:43 -0700344
Stefan Roese1c60fe72014-11-07 12:37:49 +0100345 /* Disable all interrupts */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600346 writel(0, priv->regbase + CQSPI_REG_IRQMASK);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100347
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600348 cadence_qspi_apb_controller_enable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100349}
350
T Karthik Reddy73701e72022-05-12 04:05:32 -0600351int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100352{
353 unsigned int retry = CQSPI_REG_RETRY;
354
355 /* Write the CMDCTRL without start execution. */
356 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
357 /* Start execute */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000358 reg |= CQSPI_REG_CMDCTRL_EXECUTE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100359 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
360
361 while (retry--) {
362 reg = readl(reg_base + CQSPI_REG_CMDCTRL);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000363 if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100364 break;
365 udelay(1);
366 }
367
368 if (!retry) {
369 printf("QSPI: flash command execution timeout\n");
370 return -EIO;
371 }
372
373 /* Polling QSPI idle status. */
374 if (!cadence_qspi_wait_idle(reg_base))
375 return -EIO;
376
Dhruva Gole94fcaf02023-04-12 16:28:56 +0530377 /* Flush the CMDCTRL reg after the execution */
378 writel(0, reg_base + CQSPI_REG_CMDCTRL);
379
Stefan Roese1c60fe72014-11-07 12:37:49 +0100380 return 0;
381}
382
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600383static int cadence_qspi_setup_opcode_ext(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530384 const struct spi_mem_op *op,
385 unsigned int shift)
386{
387 unsigned int reg;
388 u8 ext;
389
390 if (op->cmd.nbytes != 2)
391 return -EINVAL;
392
393 /* Opcode extension is the LSB. */
394 ext = op->cmd.opcode & 0xff;
395
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600396 reg = readl(priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530397 reg &= ~(0xff << shift);
398 reg |= ext << shift;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600399 writel(reg, priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530400
401 return 0;
402}
403
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600404static int cadence_qspi_enable_dtr(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530405 const struct spi_mem_op *op,
406 unsigned int shift,
407 bool enable)
408{
409 unsigned int reg;
410 int ret;
411
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600412 reg = readl(priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530413
414 if (enable) {
415 reg |= CQSPI_REG_CONFIG_DTR_PROTO;
416 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
417
418 /* Set up command opcode extension. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600419 ret = cadence_qspi_setup_opcode_ext(priv, op, shift);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530420 if (ret)
421 return ret;
422 } else {
423 reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
424 reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
425 }
426
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600427 writel(reg, priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530428
429 return 0;
430}
431
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600432int cadence_qspi_apb_command_read_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530433 const struct spi_mem_op *op)
434{
435 int ret;
436 unsigned int reg;
437
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600438 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530439 if (ret)
440 return ret;
441
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600442 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
443 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530444 if (ret)
445 return ret;
446
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600447 reg = cadence_qspi_calc_rdreg(priv);
448 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530449
450 return 0;
451}
452
Stefan Roese1c60fe72014-11-07 12:37:49 +0100453/* For command RDID, RDSR. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600454int cadence_qspi_apb_command_read(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530455 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100456{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600457 void *reg_base = priv->regbase;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100458 unsigned int reg;
459 unsigned int read_len;
460 int status;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530461 unsigned int rxlen = op->data.nbytes;
462 void *rxbuf = op->data.buf.in;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530463 unsigned int dummy_clk;
464 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100465
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600466 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530467 opcode = op->cmd.opcode >> 8;
468 else
469 opcode = op->cmd.opcode;
470
Tejas Bhumkare56bc922024-01-28 12:07:46 +0530471 if (opcode == CMD_4BYTE_OCTAL_READ && !priv->dtr)
472 opcode = CMD_4BYTE_FAST_READ;
473
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530474 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
475
476 /* Set up dummy cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600477 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530478 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
479 return -ENOTSUPP;
480
481 if (dummy_clk)
482 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
483 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100484
485 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
486
487 /* 0 means 1 byte. */
488 reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
489 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
Dhruva Gole24d8de62023-01-03 12:01:11 +0530490
491 /* setup ADDR BIT field */
492 if (op->addr.nbytes) {
493 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
494 /*
495 * address bytes are zero indexed
496 */
497 reg |= (((op->addr.nbytes - 1) &
498 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
499 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
500 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
501 }
502
Stefan Roese1c60fe72014-11-07 12:37:49 +0100503 status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
504 if (status != 0)
505 return status;
506
507 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
508
509 /* Put the read value into rx_buf */
510 read_len = (rxlen > 4) ? 4 : rxlen;
511 memcpy(rxbuf, &reg, read_len);
512 rxbuf += read_len;
513
514 if (rxlen > 4) {
515 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
516
517 read_len = rxlen - read_len;
518 memcpy(rxbuf, &reg, read_len);
519 }
520 return 0;
521}
522
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600523int cadence_qspi_apb_command_write_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530524 const struct spi_mem_op *op)
525{
526 int ret;
527 unsigned int reg;
528
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600529 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530530 if (ret)
531 return ret;
532
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600533 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
534 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530535 if (ret)
536 return ret;
537
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600538 reg = cadence_qspi_calc_rdreg(priv);
539 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530540
541 return 0;
542}
543
Stefan Roese1c60fe72014-11-07 12:37:49 +0100544/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600545int cadence_qspi_apb_command_write(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530546 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100547{
548 unsigned int reg = 0;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100549 unsigned int wr_data;
550 unsigned int wr_len;
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530551 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530552 unsigned int txlen = op->data.nbytes;
553 const void *txbuf = op->data.buf.out;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600554 void *reg_base = priv->regbase;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530555 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100556
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600557 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530558 opcode = op->cmd.opcode >> 8;
559 else
560 opcode = op->cmd.opcode;
561
562 reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530563
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530564 /* setup ADDR BIT field */
565 if (op->addr.nbytes) {
566 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
567 /*
568 * address bytes are zero indexed
569 */
570 reg |= (((op->addr.nbytes - 1) &
571 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
572 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
573 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
574 }
575
576 /* Set up dummy cycles. */
577 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
578 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
579 return -EOPNOTSUPP;
580
581 if (dummy_clk)
582 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
583 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
584
Stefan Roese1c60fe72014-11-07 12:37:49 +0100585 if (txlen) {
586 /* writing data = yes */
587 reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
588 reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
589 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
590
591 wr_len = txlen > 4 ? 4 : txlen;
592 memcpy(&wr_data, txbuf, wr_len);
593 writel(wr_data, reg_base +
594 CQSPI_REG_CMDWRITEDATALOWER);
595
596 if (txlen > 4) {
597 txbuf += wr_len;
598 wr_len = txlen - wr_len;
599 memcpy(&wr_data, txbuf, wr_len);
600 writel(wr_data, reg_base +
601 CQSPI_REG_CMDWRITEDATAUPPER);
602 }
603 }
604
605 /* Execute the command */
606 return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
607}
608
609/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600610int cadence_qspi_apb_read_setup(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530611 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100612{
613 unsigned int reg;
614 unsigned int rd_reg;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100615 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530616 unsigned int dummy_bytes = op->dummy.nbytes;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530617 int ret;
618 u8 opcode;
619
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600620 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530621 if (ret)
622 return ret;
623
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600624 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_READ_LSB,
625 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530626 if (ret)
627 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100628
629 /* Setup the indirect trigger address */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600630 writel(priv->trigger_address,
631 priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100632
Stefan Roese1c60fe72014-11-07 12:37:49 +0100633 /* Configure the opcode */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600634 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530635 opcode = op->cmd.opcode >> 8;
636 else
637 opcode = op->cmd.opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100638
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530639 rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600640 rd_reg |= cadence_qspi_calc_rdreg(priv);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100641
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600642 writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100643
Stefan Roese1c60fe72014-11-07 12:37:49 +0100644 if (dummy_bytes) {
Stefan Roese1c60fe72014-11-07 12:37:49 +0100645 /* Convert to clock cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600646 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530647
648 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
649 return -ENOTSUPP;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100650
651 if (dummy_clk)
652 rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
653 << CQSPI_REG_RD_INSTR_DUMMY_LSB;
654 }
655
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600656 writel(rd_reg, priv->regbase + CQSPI_REG_RD_INSTR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100657
658 /* set device size */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600659 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100660 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530661 reg |= (op->addr.nbytes - 1);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600662 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100663 return 0;
664}
665
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600666static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200667{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600668 u32 reg = readl(priv->regbase + CQSPI_REG_SDRAMLEVEL);
Marek Vasut8c177432016-04-27 23:38:05 +0200669 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
670 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
671}
672
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600673static int cadence_qspi_wait_for_data(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200674{
675 unsigned int timeout = 10000;
676 u32 reg;
677
678 while (timeout--) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600679 reg = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200680 if (reg)
681 return reg;
682 udelay(1);
683 }
684
685 return -ETIMEDOUT;
686}
687
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530688static int
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600689cadence_qspi_apb_indirect_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530690 unsigned int n_rx, u8 *rxbuf)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100691{
Marek Vasut8c177432016-04-27 23:38:05 +0200692 unsigned int remaining = n_rx;
693 unsigned int bytes_to_read = 0;
694 int ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100695
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600696 writel(n_rx, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100697
698 /* Start the indirect read transfer */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000699 writel(CQSPI_REG_INDIRECTRD_START,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600700 priv->regbase + CQSPI_REG_INDIRECTRD);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100701
Marek Vasut8c177432016-04-27 23:38:05 +0200702 while (remaining > 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600703 ret = cadence_qspi_wait_for_data(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200704 if (ret < 0) {
705 printf("Indirect write timed out (%i)\n", ret);
706 goto failrd;
707 }
Stefan Roese1c60fe72014-11-07 12:37:49 +0100708
Marek Vasut8c177432016-04-27 23:38:05 +0200709 bytes_to_read = ret;
710
711 while (bytes_to_read != 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600712 bytes_to_read *= priv->fifo_width;
Marek Vasut8c177432016-04-27 23:38:05 +0200713 bytes_to_read = bytes_to_read > remaining ?
714 remaining : bytes_to_read;
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530715 /*
716 * Handle non-4-byte aligned access to avoid
717 * data abort.
718 */
719 if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600720 readsb(priv->ahbbase, rxbuf, bytes_to_read);
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530721 else
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600722 readsl(priv->ahbbase, rxbuf,
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530723 bytes_to_read >> 2);
724 rxbuf += bytes_to_read;
Marek Vasut8c177432016-04-27 23:38:05 +0200725 remaining -= bytes_to_read;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600726 bytes_to_read = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200727 }
728 }
729
730 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600731 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100732 CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
Marek Vasut8c177432016-04-27 23:38:05 +0200733 if (ret) {
734 printf("Indirect read completion error (%i)\n", ret);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100735 goto failrd;
736 }
737
738 /* Clear indirect completion status */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000739 writel(CQSPI_REG_INDIRECTRD_DONE,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600740 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200741
Marek Vasut84d4f732021-09-14 05:22:31 +0200742 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600743 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Marek Vasut84d4f732021-09-14 05:22:31 +0200744 CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
745 if (ret) {
746 printf("Indirect read clear completion error (%i)\n", ret);
747 goto failrd;
748 }
749
Stefan Roese1c60fe72014-11-07 12:37:49 +0100750 return 0;
751
752failrd:
753 /* Cancel the indirect read */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000754 writel(CQSPI_REG_INDIRECTRD_CANCEL,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600755 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200756 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100757}
758
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600759int cadence_qspi_apb_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530760 const struct spi_mem_op *op)
761{
Vignesh Raghavendra68f82662019-12-05 15:46:06 +0530762 u64 from = op->addr.val;
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530763 void *buf = op->data.buf.in;
764 size_t len = op->data.nbytes;
765
Ashok Reddy Somaf63e6022022-11-29 04:41:34 -0700766 cadence_qspi_apb_enable_linear_mode(true);
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -0600767
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600768 if (priv->use_dac_mode && (from + len < priv->ahbsize)) {
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530769 if (len < 256 ||
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600770 dma_memcpy(buf, priv->ahbbase + from, len) < 0) {
771 memcpy_fromio(buf, priv->ahbbase + from, len);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530772 }
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600773 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530774 return -EIO;
775 return 0;
776 }
777
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600778 return cadence_qspi_apb_indirect_read_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530779}
780
Stefan Roese1c60fe72014-11-07 12:37:49 +0100781/* Opcode + Address (3/4 bytes) */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600782int cadence_qspi_apb_write_setup(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530783 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100784{
785 unsigned int reg;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530786 int ret;
787 u8 opcode;
788
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600789 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530790 if (ret)
791 return ret;
792
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600793 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_WRITE_LSB,
794 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530795 if (ret)
796 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100797
Stefan Roese1c60fe72014-11-07 12:37:49 +0100798 /* Setup the indirect trigger address */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600799 writel(priv->trigger_address,
800 priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100801
Stefan Roese1c60fe72014-11-07 12:37:49 +0100802 /* Configure the opcode */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600803 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530804 opcode = op->cmd.opcode >> 8;
805 else
806 opcode = op->cmd.opcode;
807
808 reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600809 reg |= priv->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
810 reg |= priv->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
811 writel(reg, priv->regbase + CQSPI_REG_WR_INSTR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100812
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600813 reg = cadence_qspi_calc_rdreg(priv);
814 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530815
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600816 writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100817
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600818 if (priv->dtr) {
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530819 /*
820 * Some flashes like the cypress Semper flash expect a 4-byte
821 * dummy address with the Read SR command in DTR mode, but this
822 * controller does not support sending address with the Read SR
823 * command. So, disable write completion polling on the
824 * controller's side. spi-nor will take care of polling the
825 * status register.
826 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600827 reg = readl(priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530828 reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600829 writel(reg, priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530830 }
831
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600832 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100833 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530834 reg |= (op->addr.nbytes - 1);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600835 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100836 return 0;
837}
838
/*
 * Push n_tx bytes from txbuf to the flash via the controller's indirect
 * write engine, in page_size chunks gated by the write SRAM FIFO level.
 * Returns 0 on success or a negative error code; on failure the indirect
 * write is cancelled before returning.
 */
static int
cadence_qspi_apb_indirect_write_execute(struct cadence_spi_priv *priv,
					unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = priv->page_size;
	unsigned int remaining = n_tx;
	const u8 *bb_txbuf = txbuf;
	void *bounce_buf = NULL;
	unsigned int write_bytes;
	int ret;

	/*
	 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
	 * aborts
	 */
	if ((uintptr_t)txbuf % 4) {
		bounce_buf = malloc(n_tx);
		if (!bounce_buf)
			return -ENOMEM;
		memcpy(bounce_buf, txbuf, n_tx);
		bb_txbuf = bounce_buf;
	}

	/* Configure the indirect write transfer bytes */
	writel(n_tx, priv->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       priv->regbase + CQSPI_REG_INDIRECTWR);

	/*
	 * Some delay is required for the above bit to be internally
	 * synchronized by the QSPI module.
	 */
	ndelay(priv->wr_delay);

	while (remaining > 0) {
		/* At most one flash page per iteration */
		write_bytes = remaining > page_size ? page_size : remaining;
		/* Word copy first, then byte copy for any unaligned tail */
		writesl(priv->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(priv->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		/* Wait for the write SRAM FIFO to drain before refilling */
		ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_SDRAMLEVEL,
					CQSPI_REG_SDRAMLEVEL_WR_MASK <<
					CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       priv->regbase + CQSPI_REG_INDIRECTWR);

	/* Wait for the done bit to actually clear after the write above */
	ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
	if (ret) {
		printf("Indirect write clear completion error (%i)\n", ret);
		goto failwr;
	}

	if (bounce_buf)
		free(bounce_buf);
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       priv->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return ret;
}
927
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600928int cadence_qspi_apb_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530929 const struct spi_mem_op *op)
930{
931 u32 to = op->addr.val;
932 const void *buf = op->data.buf.out;
933 size_t len = op->data.nbytes;
934
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530935 /*
936 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
937 * address (all 0s) with the read status register command in DTR mode.
938 * But this controller does not support sending dummy address bytes to
939 * the flash when it is polling the write completion register in DTR
940 * mode. So, we can not use direct mode when in DTR mode for writing
941 * data.
942 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600943 cadence_qspi_apb_enable_linear_mode(true);
944 if (!priv->dtr && priv->use_dac_mode && (to + len < priv->ahbsize)) {
945 memcpy_toio(priv->ahbbase + to, buf, len);
946 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530947 return -EIO;
948 return 0;
949 }
950
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600951 return cadence_qspi_apb_indirect_write_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530952}
953
Stefan Roese1c60fe72014-11-07 12:37:49 +0100954void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
955{
956 unsigned int reg;
957
958 /* enter XiP mode immediately and enable direct mode */
959 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000960 reg |= CQSPI_REG_CONFIG_ENABLE;
961 reg |= CQSPI_REG_CONFIG_DIRECT;
962 reg |= CQSPI_REG_CONFIG_XIP_IMM;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100963 writel(reg, reg_base + CQSPI_REG_CONFIG);
964
965 /* keep the XiP mode */
966 writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
967
968 /* Enable mode bit at devrd */
969 reg = readl(reg_base + CQSPI_REG_RD_INSTR);
970 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
971 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
972}