blob: d033184aa466c55bb794263d8103fabf9bb0b0c7 [file] [log] [blame]
/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - Neither the name of the Altera Corporation nor the
 *   names of its contributors may be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27
28#include <common.h>
Simon Glass0f2af882020-05-10 11:40:05 -060029#include <log.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010030#include <asm/io.h>
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053031#include <dma.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060032#include <linux/bitops.h>
Simon Glassdbd79542020-05-10 11:40:11 -060033#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090034#include <linux/errno.h>
Marek Vasutdae51dd2016-04-27 23:18:55 +020035#include <wait_bit.h>
Vignesh R4ca60192016-07-06 10:20:56 +053036#include <spi.h>
Vignesh Raghavendra27516a32020-01-27 10:36:39 +053037#include <spi-mem.h>
Vignesh Rad4bd8a2018-01-24 10:44:07 +053038#include <malloc.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010039#include "cadence_qspi.h"
40
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -060041__weak void cadence_qspi_apb_enable_linear_mode(bool enable)
42{
43 return;
44}
45
Stefan Roese1c60fe72014-11-07 12:37:49 +010046void cadence_qspi_apb_controller_enable(void *reg_base)
47{
48 unsigned int reg;
49 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000050 reg |= CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010051 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010052}
53
54void cadence_qspi_apb_controller_disable(void *reg_base)
55{
56 unsigned int reg;
57 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000058 reg &= ~CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010059 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010060}
61
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053062void cadence_qspi_apb_dac_mode_enable(void *reg_base)
63{
64 unsigned int reg;
65
66 reg = readl(reg_base + CQSPI_REG_CONFIG);
67 reg |= CQSPI_REG_CONFIG_DIRECT;
68 writel(reg, reg_base + CQSPI_REG_CONFIG);
69}
70
Pratyush Yadave1814ad2021-06-26 00:47:09 +053071static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
72 bool dtr)
73{
74 unsigned int dummy_clk;
75
Marek Vasut545be192021-09-14 05:21:48 +020076 if (!op->dummy.nbytes || !op->dummy.buswidth)
77 return 0;
78
Pratyush Yadave1814ad2021-06-26 00:47:09 +053079 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
80 if (dtr)
81 dummy_clk /= 2;
82
83 return dummy_clk;
84}
85
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060086static u32 cadence_qspi_calc_rdreg(struct cadence_spi_priv *priv)
Pratyush Yadave1814ad2021-06-26 00:47:09 +053087{
88 u32 rdreg = 0;
89
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060090 rdreg |= priv->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
91 rdreg |= priv->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
92 rdreg |= priv->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
Pratyush Yadave1814ad2021-06-26 00:47:09 +053093
94 return rdreg;
95}
96
97static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
98{
99 switch (buswidth) {
100 case 0:
101 case 1:
102 return CQSPI_INST_TYPE_SINGLE;
103
104 case 2:
105 return CQSPI_INST_TYPE_DUAL;
106
107 case 4:
108 return CQSPI_INST_TYPE_QUAD;
109
110 case 8:
111 return CQSPI_INST_TYPE_OCTAL;
112
113 default:
114 return -ENOTSUPP;
115 }
116}
117
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600118static int cadence_qspi_set_protocol(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530119 const struct spi_mem_op *op)
120{
121 int ret;
122
Apurva Nandanb88f55c2023-04-12 16:28:54 +0530123 /*
124 * For an op to be DTR, cmd phase along with every other non-empty
125 * phase should have dtr field set to 1. If an op phase has zero
126 * nbytes, ignore its dtr field; otherwise, check its dtr field.
127 * Also, dummy checks not performed here Since supports_op()
128 * already checks that all or none of the fields are DTR.
129 */
130 priv->dtr = op->cmd.dtr &&
131 (!op->addr.nbytes || op->addr.dtr) &&
132 (!op->data.nbytes || op->data.dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530133
134 ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
135 if (ret < 0)
136 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600137 priv->inst_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530138
139 ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
140 if (ret < 0)
141 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600142 priv->addr_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530143
144 ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
145 if (ret < 0)
146 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600147 priv->data_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530148
149 return 0;
150}
151
Stefan Roese1c60fe72014-11-07 12:37:49 +0100152/* Return 1 if idle, otherwise return 0 (busy). */
153static unsigned int cadence_qspi_wait_idle(void *reg_base)
154{
155 unsigned int start, count = 0;
156 /* timeout in unit of ms */
157 unsigned int timeout = 5000;
158
159 start = get_timer(0);
160 for ( ; get_timer(start) < timeout ; ) {
161 if (CQSPI_REG_IS_IDLE(reg_base))
162 count++;
163 else
164 count = 0;
165 /*
166 * Ensure the QSPI controller is in true idle state after
167 * reading back the same idle status consecutively
168 */
169 if (count >= CQSPI_POLL_IDLE_RETRY)
170 return 1;
171 }
172
173 /* Timeout, still in busy mode. */
Jan Kiszkabc6c7532023-10-30 17:20:29 +0100174 printf("QSPI: QSPI is still busy after poll for %d ms.\n", timeout);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100175 return 0;
176}
177
178void cadence_qspi_apb_readdata_capture(void *reg_base,
179 unsigned int bypass, unsigned int delay)
180{
181 unsigned int reg;
182 cadence_qspi_apb_controller_disable(reg_base);
183
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000184 reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100185
186 if (bypass)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000187 reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100188 else
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000189 reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100190
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000191 reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
192 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100193
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000194 reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
195 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100196
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000197 writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100198
199 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100200}
201
202void cadence_qspi_apb_config_baudrate_div(void *reg_base,
203 unsigned int ref_clk_hz, unsigned int sclk_hz)
204{
205 unsigned int reg;
206 unsigned int div;
207
208 cadence_qspi_apb_controller_disable(reg_base);
209 reg = readl(reg_base + CQSPI_REG_CONFIG);
210 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
211
Phil Edworthy8f24a442016-11-29 12:58:27 +0000212 /*
213 * The baud_div field in the config reg is 4 bits, and the ref clock is
214 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
215 * SPI clock rate is less than or equal to the requested clock rate.
216 */
217 div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100218
Chin Liang See91b2c192016-08-07 22:50:40 +0800219 /* ensure the baud rate doesn't exceed the max value */
220 if (div > CQSPI_REG_CONFIG_BAUD_MASK)
221 div = CQSPI_REG_CONFIG_BAUD_MASK;
222
Phil Edworthy67824ad2016-11-29 12:58:28 +0000223 debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
224 ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
225
Chin Liang See91b2c192016-08-07 22:50:40 +0800226 reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100227 writel(reg, reg_base + CQSPI_REG_CONFIG);
228
229 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100230}
231
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000232void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100233{
234 unsigned int reg;
235
236 cadence_qspi_apb_controller_disable(reg_base);
237 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000238 reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100239
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000240 if (mode & SPI_CPOL)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000241 reg |= CQSPI_REG_CONFIG_CLK_POL;
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000242 if (mode & SPI_CPHA)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000243 reg |= CQSPI_REG_CONFIG_CLK_PHA;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100244
245 writel(reg, reg_base + CQSPI_REG_CONFIG);
246
247 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100248}
249
250void cadence_qspi_apb_chipselect(void *reg_base,
251 unsigned int chip_select, unsigned int decoder_enable)
252{
253 unsigned int reg;
254
255 cadence_qspi_apb_controller_disable(reg_base);
256
257 debug("%s : chipselect %d decode %d\n", __func__, chip_select,
258 decoder_enable);
259
260 reg = readl(reg_base + CQSPI_REG_CONFIG);
261 /* docoder */
262 if (decoder_enable) {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000263 reg |= CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100264 } else {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000265 reg &= ~CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100266 /* Convert CS if without decoder.
267 * CS0 to 4b'1110
268 * CS1 to 4b'1101
269 * CS2 to 4b'1011
270 * CS3 to 4b'0111
271 */
272 chip_select = 0xF & ~(1 << chip_select);
273 }
274
275 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
276 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
277 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
278 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
279 writel(reg, reg_base + CQSPI_REG_CONFIG);
280
281 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100282}
283
284void cadence_qspi_apb_delay(void *reg_base,
285 unsigned int ref_clk, unsigned int sclk_hz,
286 unsigned int tshsl_ns, unsigned int tsd2d_ns,
287 unsigned int tchsh_ns, unsigned int tslch_ns)
288{
289 unsigned int ref_clk_ns;
290 unsigned int sclk_ns;
291 unsigned int tshsl, tchsh, tslch, tsd2d;
292 unsigned int reg;
293
294 cadence_qspi_apb_controller_disable(reg_base);
295
296 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000297 ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100298
299 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000300 sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100301
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000302 /* The controller adds additional delay to that programmed in the reg */
303 if (tshsl_ns >= sclk_ns + ref_clk_ns)
304 tshsl_ns -= sclk_ns + ref_clk_ns;
305 if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
306 tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
307 tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
308 tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
309 tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
310 tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100311
312 reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
313 << CQSPI_REG_DELAY_TSHSL_LSB);
314 reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
315 << CQSPI_REG_DELAY_TCHSH_LSB);
316 reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
317 << CQSPI_REG_DELAY_TSLCH_LSB);
318 reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
319 << CQSPI_REG_DELAY_TSD2D_LSB);
320 writel(reg, reg_base + CQSPI_REG_DELAY);
321
322 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100323}
324
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600325void cadence_qspi_apb_controller_init(struct cadence_spi_priv *priv)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100326{
327 unsigned reg;
328
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600329 cadence_qspi_apb_controller_disable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100330
331 /* Configure the device size and address bytes */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600332 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100333 /* Clear the previous value */
334 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
335 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600336 reg |= (priv->page_size << CQSPI_REG_SIZE_PAGE_LSB);
337 reg |= (priv->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
338 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100339
340 /* Configure the remap address register, no remap */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600341 writel(0, priv->regbase + CQSPI_REG_REMAP);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100342
Vikas Manocha215cea02015-07-02 18:29:43 -0700343 /* Indirect mode configurations */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600344 writel(priv->fifo_depth / 2, priv->regbase + CQSPI_REG_SRAMPARTITION);
Vikas Manocha215cea02015-07-02 18:29:43 -0700345
Stefan Roese1c60fe72014-11-07 12:37:49 +0100346 /* Disable all interrupts */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600347 writel(0, priv->regbase + CQSPI_REG_IRQMASK);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100348
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600349 cadence_qspi_apb_controller_enable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100350}
351
T Karthik Reddy73701e72022-05-12 04:05:32 -0600352int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100353{
354 unsigned int retry = CQSPI_REG_RETRY;
355
356 /* Write the CMDCTRL without start execution. */
357 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
358 /* Start execute */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000359 reg |= CQSPI_REG_CMDCTRL_EXECUTE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100360 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
361
362 while (retry--) {
363 reg = readl(reg_base + CQSPI_REG_CMDCTRL);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000364 if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100365 break;
366 udelay(1);
367 }
368
369 if (!retry) {
370 printf("QSPI: flash command execution timeout\n");
371 return -EIO;
372 }
373
374 /* Polling QSPI idle status. */
375 if (!cadence_qspi_wait_idle(reg_base))
376 return -EIO;
377
Dhruva Gole94fcaf02023-04-12 16:28:56 +0530378 /* Flush the CMDCTRL reg after the execution */
379 writel(0, reg_base + CQSPI_REG_CMDCTRL);
380
Stefan Roese1c60fe72014-11-07 12:37:49 +0100381 return 0;
382}
383
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600384static int cadence_qspi_setup_opcode_ext(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530385 const struct spi_mem_op *op,
386 unsigned int shift)
387{
388 unsigned int reg;
389 u8 ext;
390
391 if (op->cmd.nbytes != 2)
392 return -EINVAL;
393
394 /* Opcode extension is the LSB. */
395 ext = op->cmd.opcode & 0xff;
396
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600397 reg = readl(priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530398 reg &= ~(0xff << shift);
399 reg |= ext << shift;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600400 writel(reg, priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530401
402 return 0;
403}
404
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600405static int cadence_qspi_enable_dtr(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530406 const struct spi_mem_op *op,
407 unsigned int shift,
408 bool enable)
409{
410 unsigned int reg;
411 int ret;
412
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600413 reg = readl(priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530414
415 if (enable) {
416 reg |= CQSPI_REG_CONFIG_DTR_PROTO;
417 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
418
419 /* Set up command opcode extension. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600420 ret = cadence_qspi_setup_opcode_ext(priv, op, shift);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530421 if (ret)
422 return ret;
423 } else {
424 reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
425 reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
426 }
427
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600428 writel(reg, priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530429
430 return 0;
431}
432
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600433int cadence_qspi_apb_command_read_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530434 const struct spi_mem_op *op)
435{
436 int ret;
437 unsigned int reg;
438
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600439 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530440 if (ret)
441 return ret;
442
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600443 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
444 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530445 if (ret)
446 return ret;
447
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600448 reg = cadence_qspi_calc_rdreg(priv);
449 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530450
451 return 0;
452}
453
Stefan Roese1c60fe72014-11-07 12:37:49 +0100454/* For command RDID, RDSR. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600455int cadence_qspi_apb_command_read(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530456 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100457{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600458 void *reg_base = priv->regbase;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100459 unsigned int reg;
460 unsigned int read_len;
461 int status;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530462 unsigned int rxlen = op->data.nbytes;
463 void *rxbuf = op->data.buf.in;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530464 unsigned int dummy_clk;
465 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100466
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600467 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530468 opcode = op->cmd.opcode >> 8;
469 else
470 opcode = op->cmd.opcode;
471
472 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
473
474 /* Set up dummy cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600475 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530476 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
477 return -ENOTSUPP;
478
479 if (dummy_clk)
480 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
481 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100482
483 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
484
485 /* 0 means 1 byte. */
486 reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
487 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
Dhruva Gole24d8de62023-01-03 12:01:11 +0530488
489 /* setup ADDR BIT field */
490 if (op->addr.nbytes) {
491 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
492 /*
493 * address bytes are zero indexed
494 */
495 reg |= (((op->addr.nbytes - 1) &
496 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
497 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
498 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
499 }
500
Stefan Roese1c60fe72014-11-07 12:37:49 +0100501 status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
502 if (status != 0)
503 return status;
504
505 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
506
507 /* Put the read value into rx_buf */
508 read_len = (rxlen > 4) ? 4 : rxlen;
509 memcpy(rxbuf, &reg, read_len);
510 rxbuf += read_len;
511
512 if (rxlen > 4) {
513 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
514
515 read_len = rxlen - read_len;
516 memcpy(rxbuf, &reg, read_len);
517 }
518 return 0;
519}
520
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600521int cadence_qspi_apb_command_write_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530522 const struct spi_mem_op *op)
523{
524 int ret;
525 unsigned int reg;
526
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600527 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530528 if (ret)
529 return ret;
530
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600531 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
532 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530533 if (ret)
534 return ret;
535
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600536 reg = cadence_qspi_calc_rdreg(priv);
537 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530538
539 return 0;
540}
541
Stefan Roese1c60fe72014-11-07 12:37:49 +0100542/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600543int cadence_qspi_apb_command_write(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530544 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100545{
546 unsigned int reg = 0;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100547 unsigned int wr_data;
548 unsigned int wr_len;
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530549 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530550 unsigned int txlen = op->data.nbytes;
551 const void *txbuf = op->data.buf.out;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600552 void *reg_base = priv->regbase;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530553 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100554
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600555 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530556 opcode = op->cmd.opcode >> 8;
557 else
558 opcode = op->cmd.opcode;
559
560 reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530561
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530562 /* setup ADDR BIT field */
563 if (op->addr.nbytes) {
564 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
565 /*
566 * address bytes are zero indexed
567 */
568 reg |= (((op->addr.nbytes - 1) &
569 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
570 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
571 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
572 }
573
574 /* Set up dummy cycles. */
575 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
576 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
577 return -EOPNOTSUPP;
578
579 if (dummy_clk)
580 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
581 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
582
Stefan Roese1c60fe72014-11-07 12:37:49 +0100583 if (txlen) {
584 /* writing data = yes */
585 reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
586 reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
587 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
588
589 wr_len = txlen > 4 ? 4 : txlen;
590 memcpy(&wr_data, txbuf, wr_len);
591 writel(wr_data, reg_base +
592 CQSPI_REG_CMDWRITEDATALOWER);
593
594 if (txlen > 4) {
595 txbuf += wr_len;
596 wr_len = txlen - wr_len;
597 memcpy(&wr_data, txbuf, wr_len);
598 writel(wr_data, reg_base +
599 CQSPI_REG_CMDWRITEDATAUPPER);
600 }
601 }
602
603 /* Execute the command */
604 return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
605}
606
607/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600608int cadence_qspi_apb_read_setup(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530609 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100610{
611 unsigned int reg;
612 unsigned int rd_reg;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100613 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530614 unsigned int dummy_bytes = op->dummy.nbytes;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530615 int ret;
616 u8 opcode;
617
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600618 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530619 if (ret)
620 return ret;
621
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600622 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_READ_LSB,
623 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530624 if (ret)
625 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100626
627 /* Setup the indirect trigger address */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600628 writel(priv->trigger_address,
629 priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100630
Stefan Roese1c60fe72014-11-07 12:37:49 +0100631 /* Configure the opcode */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600632 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530633 opcode = op->cmd.opcode >> 8;
634 else
635 opcode = op->cmd.opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100636
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530637 rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600638 rd_reg |= cadence_qspi_calc_rdreg(priv);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100639
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600640 writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100641
Stefan Roese1c60fe72014-11-07 12:37:49 +0100642 if (dummy_bytes) {
Stefan Roese1c60fe72014-11-07 12:37:49 +0100643 /* Convert to clock cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600644 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530645
646 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
647 return -ENOTSUPP;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100648
649 if (dummy_clk)
650 rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
651 << CQSPI_REG_RD_INSTR_DUMMY_LSB;
652 }
653
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600654 writel(rd_reg, priv->regbase + CQSPI_REG_RD_INSTR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100655
656 /* set device size */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600657 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100658 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530659 reg |= (op->addr.nbytes - 1);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600660 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100661 return 0;
662}
663
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600664static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200665{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600666 u32 reg = readl(priv->regbase + CQSPI_REG_SDRAMLEVEL);
Marek Vasut8c177432016-04-27 23:38:05 +0200667 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
668 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
669}
670
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600671static int cadence_qspi_wait_for_data(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200672{
673 unsigned int timeout = 10000;
674 u32 reg;
675
676 while (timeout--) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600677 reg = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200678 if (reg)
679 return reg;
680 udelay(1);
681 }
682
683 return -ETIMEDOUT;
684}
685
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530686static int
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600687cadence_qspi_apb_indirect_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530688 unsigned int n_rx, u8 *rxbuf)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100689{
Marek Vasut8c177432016-04-27 23:38:05 +0200690 unsigned int remaining = n_rx;
691 unsigned int bytes_to_read = 0;
692 int ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100693
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600694 writel(n_rx, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100695
696 /* Start the indirect read transfer */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000697 writel(CQSPI_REG_INDIRECTRD_START,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600698 priv->regbase + CQSPI_REG_INDIRECTRD);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100699
Marek Vasut8c177432016-04-27 23:38:05 +0200700 while (remaining > 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600701 ret = cadence_qspi_wait_for_data(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200702 if (ret < 0) {
703 printf("Indirect write timed out (%i)\n", ret);
704 goto failrd;
705 }
Stefan Roese1c60fe72014-11-07 12:37:49 +0100706
Marek Vasut8c177432016-04-27 23:38:05 +0200707 bytes_to_read = ret;
708
709 while (bytes_to_read != 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600710 bytes_to_read *= priv->fifo_width;
Marek Vasut8c177432016-04-27 23:38:05 +0200711 bytes_to_read = bytes_to_read > remaining ?
712 remaining : bytes_to_read;
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530713 /*
714 * Handle non-4-byte aligned access to avoid
715 * data abort.
716 */
717 if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600718 readsb(priv->ahbbase, rxbuf, bytes_to_read);
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530719 else
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600720 readsl(priv->ahbbase, rxbuf,
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530721 bytes_to_read >> 2);
722 rxbuf += bytes_to_read;
Marek Vasut8c177432016-04-27 23:38:05 +0200723 remaining -= bytes_to_read;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600724 bytes_to_read = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200725 }
726 }
727
728 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600729 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100730 CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
Marek Vasut8c177432016-04-27 23:38:05 +0200731 if (ret) {
732 printf("Indirect read completion error (%i)\n", ret);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100733 goto failrd;
734 }
735
736 /* Clear indirect completion status */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000737 writel(CQSPI_REG_INDIRECTRD_DONE,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600738 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200739
Marek Vasut84d4f732021-09-14 05:22:31 +0200740 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600741 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Marek Vasut84d4f732021-09-14 05:22:31 +0200742 CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
743 if (ret) {
744 printf("Indirect read clear completion error (%i)\n", ret);
745 goto failrd;
746 }
747
Stefan Roese1c60fe72014-11-07 12:37:49 +0100748 return 0;
749
750failrd:
751 /* Cancel the indirect read */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000752 writel(CQSPI_REG_INDIRECTRD_CANCEL,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600753 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200754 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100755}
756
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600757int cadence_qspi_apb_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530758 const struct spi_mem_op *op)
759{
Vignesh Raghavendra68f82662019-12-05 15:46:06 +0530760 u64 from = op->addr.val;
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530761 void *buf = op->data.buf.in;
762 size_t len = op->data.nbytes;
763
Ashok Reddy Somaf63e6022022-11-29 04:41:34 -0700764 cadence_qspi_apb_enable_linear_mode(true);
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -0600765
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600766 if (priv->use_dac_mode && (from + len < priv->ahbsize)) {
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530767 if (len < 256 ||
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600768 dma_memcpy(buf, priv->ahbbase + from, len) < 0) {
769 memcpy_fromio(buf, priv->ahbbase + from, len);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530770 }
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600771 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530772 return -EIO;
773 return 0;
774 }
775
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600776 return cadence_qspi_apb_indirect_read_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530777}
778
Stefan Roese1c60fe72014-11-07 12:37:49 +0100779/* Opcode + Address (3/4 bytes) */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600780int cadence_qspi_apb_write_setup(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530781 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100782{
783 unsigned int reg;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530784 int ret;
785 u8 opcode;
786
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600787 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530788 if (ret)
789 return ret;
790
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600791 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_WRITE_LSB,
792 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530793 if (ret)
794 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100795
Stefan Roese1c60fe72014-11-07 12:37:49 +0100796 /* Setup the indirect trigger address */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600797 writel(priv->trigger_address,
798 priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100799
Stefan Roese1c60fe72014-11-07 12:37:49 +0100800 /* Configure the opcode */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600801 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530802 opcode = op->cmd.opcode >> 8;
803 else
804 opcode = op->cmd.opcode;
805
806 reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600807 reg |= priv->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
808 reg |= priv->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
809 writel(reg, priv->regbase + CQSPI_REG_WR_INSTR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100810
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600811 reg = cadence_qspi_calc_rdreg(priv);
812 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530813
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600814 writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100815
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600816 if (priv->dtr) {
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530817 /*
818 * Some flashes like the cypress Semper flash expect a 4-byte
819 * dummy address with the Read SR command in DTR mode, but this
820 * controller does not support sending address with the Read SR
821 * command. So, disable write completion polling on the
822 * controller's side. spi-nor will take care of polling the
823 * status register.
824 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600825 reg = readl(priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530826 reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600827 writel(reg, priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530828 }
829
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600830 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100831 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530832 reg |= (op->addr.nbytes - 1);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600833 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100834 return 0;
835}
836
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530837static int
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600838cadence_qspi_apb_indirect_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530839 unsigned int n_tx, const u8 *txbuf)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100840{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600841 unsigned int page_size = priv->page_size;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200842 unsigned int remaining = n_tx;
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530843 const u8 *bb_txbuf = txbuf;
844 void *bounce_buf = NULL;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200845 unsigned int write_bytes;
846 int ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100847
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530848 /*
849 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
850 * aborts
851 */
852 if ((uintptr_t)txbuf % 4) {
853 bounce_buf = malloc(n_tx);
854 if (!bounce_buf)
855 return -ENOMEM;
856 memcpy(bounce_buf, txbuf, n_tx);
857 bb_txbuf = bounce_buf;
858 }
859
Stefan Roese1c60fe72014-11-07 12:37:49 +0100860 /* Configure the indirect read transfer bytes */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600861 writel(n_tx, priv->regbase + CQSPI_REG_INDIRECTWRBYTES);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100862
863 /* Start the indirect write transfer */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000864 writel(CQSPI_REG_INDIRECTWR_START,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600865 priv->regbase + CQSPI_REG_INDIRECTWR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100866
Pratyush Yadav8dcf3e22021-06-26 00:47:08 +0530867 /*
868 * Some delay is required for the above bit to be internally
869 * synchronized by the QSPI module.
870 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600871 ndelay(priv->wr_delay);
Pratyush Yadav8dcf3e22021-06-26 00:47:08 +0530872
Marek Vasutdae51dd2016-04-27 23:18:55 +0200873 while (remaining > 0) {
874 write_bytes = remaining > page_size ? page_size : remaining;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600875 writesl(priv->ahbbase, bb_txbuf, write_bytes >> 2);
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530876 if (write_bytes % 4)
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600877 writesb(priv->ahbbase,
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530878 bb_txbuf + rounddown(write_bytes, 4),
879 write_bytes % 4);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100880
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600881 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_SDRAMLEVEL,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100882 CQSPI_REG_SDRAMLEVEL_WR_MASK <<
883 CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200884 if (ret) {
885 printf("Indirect write timed out (%i)\n", ret);
886 goto failwr;
887 }
Stefan Roese1c60fe72014-11-07 12:37:49 +0100888
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530889 bb_txbuf += write_bytes;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200890 remaining -= write_bytes;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100891 }
892
Marek Vasutdae51dd2016-04-27 23:18:55 +0200893 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600894 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100895 CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200896 if (ret) {
897 printf("Indirect write completion error (%i)\n", ret);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100898 goto failwr;
899 }
900
901 /* Clear indirect completion status */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000902 writel(CQSPI_REG_INDIRECTWR_DONE,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600903 priv->regbase + CQSPI_REG_INDIRECTWR);
Marek Vasut84d4f732021-09-14 05:22:31 +0200904
905 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600906 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Marek Vasut84d4f732021-09-14 05:22:31 +0200907 CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
908 if (ret) {
909 printf("Indirect write clear completion error (%i)\n", ret);
910 goto failwr;
911 }
912
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530913 if (bounce_buf)
914 free(bounce_buf);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100915 return 0;
916
917failwr:
918 /* Cancel the indirect write */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000919 writel(CQSPI_REG_INDIRECTWR_CANCEL,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600920 priv->regbase + CQSPI_REG_INDIRECTWR);
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530921 if (bounce_buf)
922 free(bounce_buf);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200923 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100924}
925
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600926int cadence_qspi_apb_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530927 const struct spi_mem_op *op)
928{
929 u32 to = op->addr.val;
930 const void *buf = op->data.buf.out;
931 size_t len = op->data.nbytes;
932
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530933 /*
934 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
935 * address (all 0s) with the read status register command in DTR mode.
936 * But this controller does not support sending dummy address bytes to
937 * the flash when it is polling the write completion register in DTR
938 * mode. So, we can not use direct mode when in DTR mode for writing
939 * data.
940 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600941 cadence_qspi_apb_enable_linear_mode(true);
942 if (!priv->dtr && priv->use_dac_mode && (to + len < priv->ahbsize)) {
943 memcpy_toio(priv->ahbbase + to, buf, len);
944 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530945 return -EIO;
946 return 0;
947 }
948
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600949 return cadence_qspi_apb_indirect_write_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530950}
951
Stefan Roese1c60fe72014-11-07 12:37:49 +0100952void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
953{
954 unsigned int reg;
955
956 /* enter XiP mode immediately and enable direct mode */
957 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000958 reg |= CQSPI_REG_CONFIG_ENABLE;
959 reg |= CQSPI_REG_CONFIG_DIRECT;
960 reg |= CQSPI_REG_CONFIG_XIP_IMM;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100961 writel(reg, reg_base + CQSPI_REG_CONFIG);
962
963 /* keep the XiP mode */
964 writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
965
966 /* Enable mode bit at devrd */
967 reg = readl(reg_base + CQSPI_REG_RD_INSTR);
968 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
969 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
970}